blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1e016432cc8e24b453ee9c7d8c2156c6f0dc8cbe
|
Python
|
NaviaJin/SortAlgorithm
|
/heap_sort.py
|
UTF-8
| 1,911
| 3.65625
| 4
|
[] |
no_license
|
# '''
# 当前节点为k,
# 父节点为(k-1)/2
# 左子树为2k+1
# 右子树为2k+2
# '''
def adjust_heap(arr, i):
    """Sift arr[i] down so the subtree rooted at i is a min-heap.

    Index math for a node at k: parent (k-1)//2, left child 2k+1,
    right child 2k+2.  Mutates ``arr`` in place.
    """
    l_child = 2 * i + 1
    r_child = 2 * i + 2
    n = len(arr)
    smallest = i  # renamed from `min`, which shadowed the builtin
    if i < n // 2:  # only non-leaf nodes need sifting
        if l_child < n and arr[smallest] > arr[l_child]:
            smallest = l_child
        if r_child < n and arr[smallest] > arr[r_child]:
            smallest = r_child
        if smallest != i:
            arr[i], arr[smallest] = arr[smallest], arr[i]
            # The displaced value may still violate the heap property below.
            adjust_heap(arr, smallest)
def get_min(arr):
    """Heapify ``arr`` into a min-heap, then pop and return its minimum.

    Returns a ``(minimum, arr)`` tuple.  ``arr`` is mutated in place: the
    minimum is removed and the last element is moved into the root slot,
    ready for the next extraction.  For an empty list the minimum is
    ``None`` (the original accidentally returned the builtin ``min``
    function because the local shadowing it was never assigned).
    """
    n = len(arr)
    # Sift every non-leaf node bottom-up to establish the heap property.
    for i in reversed(range(n // 2)):
        adjust_heap(arr, i)
    smallest = None
    if arr:
        smallest = arr[0]
        tail = arr.pop(n - 1)
        if arr:
            # Old last element takes the vacated root slot; the heap is
            # re-established by the full rebuild on the next call.
            arr[0] = tail
    return smallest, arr
def heap_sort(arr, result):
    """Repeatedly extract the minimum of ``arr`` into ``result``.

    ``arr`` is consumed (emptied) and ``result`` ends up in ascending
    order.  Iterative rather than recursive: the original recursed once
    per element and would hit the recursion limit on large inputs, and
    it appended a bogus value when ``arr`` started out empty.
    """
    while arr:
        smallest, arr = get_min(arr)
        result.append(smallest)
if __name__ == '__main__':
    # Demo: repeatedly extract the minimum to sort ascending.
    arr = [123, 312, 12, 3, 122, 313, 2, 566, 435, 23]
    result = []
    heap_sort(arr, result)
    print(result)
# temp = arr.pop(4)
# print(temp)
# print(arr)
#
#
#
#list要处理的数组,i是第几个元素,size是lists的长度
# def adjust_heap(lists, i, size):
# lchild = 2 * i + 1
# rchild = 2 * i + 2
# max = i
# if i < int(size / 2):
# if lchild < size and lists[lchild] > lists[max]:
# max = lchild
# if rchild < size and lists[rchild] > lists[max]:
# max = rchild
# if max != i:
# lists[max], lists[i] = lists[i], lists[max]
# adjust_heap(lists, max, size)
#
#
# def build_heap(lists, size):
# for i in range(0, (int(size / 2)))[::-1]:
# adjust_heap(lists, i, size)
#
#
# def heap_sort(lists):
# size = len(lists)
# build_heap(lists, size)
# for i in range(0, size)[::-1]:
# lists[0], lists[i] = lists[i], lists[0]
# adjust_heap(lists, 0, i)
#
#
| true
|
2b0859c8d61918ed55e7f15f16cdf0634e00937b
|
Python
|
git4lhe/Lezhin_data_challenge
|
/haeunlee/core/transforms.py
|
UTF-8
| 1,996
| 2.796875
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.preprocessing import (
StandardScaler,
OneHotEncoder,
)
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import FunctionTransformer
# Transformer that joins a sequence of strings with spaces (used as an
# optional pipeline step; currently commented out below).
join = FunctionTransformer(' '.join, validate=True)

# Regex for alphanumeric tokens followed by whitespace.
# NOTE(review): not referenced in this file — presumably consumed elsewhere.
TOKENS_ALPHANUMERIC = '[A-Za-z0-9]+(?=\\s+)'

# Numeric columns: impute NaN with 0 (plus a missing-indicator column),
# then standard-scale.
num_steps = [
    (
        "impute_nan_num",
        SimpleImputer(
            missing_values=np.nan, strategy="constant", fill_value=0, add_indicator=True
        ),
    ),
    ("standardscaler", StandardScaler()),
]

# Categorical/string columns: impute NaN with a sentinel category,
# then hash-encode into 2**5 features.
cat_steps = [
    (
        "impute_nan_cat",
        SimpleImputer(
            missing_values=np.nan, strategy='constant', fill_value='ABCD1234', add_indicator=True
        ),
    ),
    # ("join", join),
    ("HashingVectorizer", HashingVectorizer(n_features=2 ** 5, binary = False, lowercase=False)),
]
class PipelineCreator:
    """Builds a ColumnTransformer that imputes/scales numeric columns and
    imputes/hash-encodes string columns."""

    def __init__(self, numeric_cols, str_cols, ignore=None):
        """Remember the step templates and the column lists.

        Imputation maps NaN/unknown values to a sentinel category;
        numeric columns are standard-scaled, string columns are
        hash-encoded (see the module-level ``num_steps``/``cat_steps``).
        """
        self.num_steps = num_steps
        self.cat_steps = cat_steps
        self.numeric_cols = numeric_cols
        self.str_cols = str_cols
        self.final_pipe = []

    def get_pipeline(self):
        """Assemble and return the ColumnTransformer over both branches."""
        print(f"Pipeline numerical({len(self.numeric_cols)}): {self.numeric_cols}")
        print(f"Pipeline string({len(self.str_cols)}): {self.str_cols}")
        numeric_branch = ("numerical", Pipeline(self.num_steps), self.numeric_cols)
        string_branch = ("string column transformation", Pipeline(self.cat_steps), self.str_cols)
        self.final_pipe.append(numeric_branch)
        self.final_pipe.append(string_branch)
        # Columns not listed in either branch are dropped.
        return ColumnTransformer(self.final_pipe, remainder="drop", verbose=True)

    def add_pipeline(self, **step):
        """Placeholder for registering extra steps; currently just logs."""
        print(step)
| true
|
1d41db94d36ba4739835a8c56f587445135e327e
|
Python
|
s570504071/learngit
|
/date/date0927/crawl_liuli.py
|
UTF-8
| 3,944
| 2.796875
| 3
|
[] |
no_license
|
#coding=utf-8
# hacg.at home page: http://www.hacg.at/
# anime listing page: http://www.hacg.at/wp/category/all/anime/
import urllib2
import urllib
import json
import re
import logging
import pdb
from bs4 import BeautifulSoup as bs
class GetLink(object):
    """Collects {title: url} pairs from the anime listing pages.

    NOTE: Python 2 code (urllib2, print statements).
    """
    def __init__(self):
        #self.url='http://www.hacg.at/wp/category/all/anime/'
        #self.num=num
        # Fake a desktop Chrome user agent so the site serves normal pages.
        self.user_agent='Mozilla/5.0 (Window NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
        self.header={'User-Agent':self.user_agent}
        self.l=[]
        self.ani0={}  # accumulates title -> url across all pages fetched
    def gethtml(self,u):
        # NOTE(review): self.ani is reset on every call but only read in
        # loop(); the real accumulation happens in self.ani0.
        self.ani={}
        #y=self.loop(num)
        #url_page=self.url+'page/'+str(self.num)+'/'
        req=urllib2.Request(u,headers=self.header)
        res=urllib2.urlopen(req)
        html=res.read().decode('utf-8')
        soup=bs(html,'html.parser')
        # Each post title links to its own page.
        titles=soup.select('h1.entry-title > a')
        for title in titles:
            self.ani0[title.get_text()]=title.get('href')
        # Print Chinese characters via json:
        #print json.dumps(ani).decode("unicode-escape")
        return self.ani0
    # Walk several listing pages (the first version only read one page).
    def loop(self,num):
        url='http://www.hacg.at/wp/category/all/anime/'
        for i in range(1,num):
            print 'downing--webpage---%s'%i
            url_page=url+'page/'+str(i)+'/'
            self.gethtml(url_page)
            self.ani.update(self.ani0)
        #print len(self.ani)
        return self.ani
class GetL(object):
    """Extracts the magnet-link info hash from an individual post page."""
    def __init__(self):
        self.user_agent='Mozilla/5.0 (Window NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
        self.header={'User-Agent':self.user_agent}
        self.blue_link=''  # last magnet link found
    def getlink(self,link):
        # Disguise the request headers and fetch the post page.
        req=urllib2.Request(link,headers=self.header)
        res=urllib2.urlopen(req)
        html=res.read().decode('utf-8')
        soup=bs(html,'html.parser')
        content=soup.select('div.entry-content')
        try:
            # Regex for the 40-char info hash (or a longer variant).
            pattern=re.compile(r'([a-z0-9A-Z]{40}|[a-z0-9本站不提供下载A-Z]{54})</')
            blue_link1=pattern.findall(str(content))
            # Slice off the trailing characters of the first match.
            blue_link0=blue_link1[0][:-2]
            self.blue_link=r'magnet:?xt=urn:btih:'+str(blue_link0)
        except Exception,e:
            print e
        finally:
            # NOTE(review): returning from finally swallows any re-raise;
            # also returns the PREVIOUS link when no match was found.
            return self.blue_link
        #print self.blue_link
    def findlink(self):
        # Get titles and page links, then resolve each to a magnet link.
        l=GetLink()
        g=l.loop(3)
        print len(g)
        b_link={}
        for k,v in g.iteritems():
            print k
            #pdb.set_trace()
            self.getlink(v)
            b_link[k]=self.blue_link
        #print b_link
        print 'start'
        with open('b_link1.json','w') as f:
            f.write(str(b_link)+r'\n')
        print 'save down'
        return b_link
# Fetch all of the latest titles and page links.
def main():
    print '*'*8
    l=GetLink()
    g=l.loop(3)
    print g
    print '*'*8
# Resolve the "mysterious codes" (magnet info hashes) for every post.
def main0():
    #link='http://www.hacg.at/wp/all/anime/%e3%83%92%e3%83%88%e3%83%85%e3%83%9e%e3%83%a9%e3%82%a4%e3%83%95-%e3%83%af%e3%83%b3%e3%82%bf%e3%82%a4%e3%83%a0%e3%82%ae%e3%83%a3%e3%83%ab-%e5%89%8d%e7%b7%a8/'
    #link='http://www.hacg.at/wp/all/anime/%e8%87%aa%e5%ae%85%e8%ad%a6%e5%82%99%e5%93%a1-3rd%e3%83%9f%e3%83%83%e3%82%b7%e3%83%a7%e3%83%b3-%e3%83%9b%e3%82%b7%e3%82%ac%e3%83%aa%e7%88%86%e4%b9%b3%e4%ba%ba%e5%a6%bb%e3%83%bb%e7%bf%94%e5%ad%90/'
    #link='http://www.hacg.at/wp/all/anime/%e5%83%95%e3%81%a0%e3%81%91%e3%81%ae%e3%83%98%e3%83%b3%e3%82%bf%e3%82%a4%e3%82%ab%e3%83%8e%e3%82%b8%e3%83%a7-%e3%82%82%e3%81%a3%e3%81%a8-the-animation/'
    k=GetL()
    k.findlink()
if __name__=='__main__':
    # Default entry point: resolve magnet links (main() only lists posts).
    #main()
    main0()
| true
|
dd2828173e9ba99516e67147bc939f0500f6c8b9
|
Python
|
okyanusoz/datasets
|
/tensorflow_datasets/core/utils/generic_path.py
|
UTF-8
| 3,599
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pathlib-like generic abstraction."""
import os
import typing
from typing import Callable, Dict, Tuple, Type, Union, TypeVar
from tensorflow_datasets.core.utils import gpath
from tensorflow_datasets.core.utils import type_utils
PathLike = type_utils.PathLike
ReadOnlyPath = type_utils.ReadOnlyPath
ReadWritePath = type_utils.ReadWritePath
PathLikeCls = Union[Type[ReadOnlyPath], Type[ReadWritePath]]
T = TypeVar('T')
_PATHLIKE_CLS: Tuple[PathLikeCls, ...] = (
gpath.PosixGPath,
gpath.WindowsGPath,
)
_URI_PREFIXES_TO_CLS: Dict[str, PathLikeCls] = {
# Even on Windows, `gs://`,... are PosixPath
uri_prefix: gpath.PosixGPath for uri_prefix in gpath.URI_PREFIXES
}
# pylint: disable=g-wrong-blank-lines
@typing.overload
def register_pathlike_cls(path_cls_or_uri_prefix: str) -> Callable[[T], T]:
  ...


@typing.overload
def register_pathlike_cls(path_cls_or_uri_prefix: T) -> T:
  ...


def register_pathlike_cls(path_cls_or_uri_prefix):
  """Register the class to be forwarded as-is in `as_path`.

  ```python
  @utils.register_pathlike_cls('my_path://')
  class MyPath(pathlib.PurePosixPath):
    ...

  my_path = tfds.core.as_path('my_path://some-path')
  ```

  Args:
    path_cls_or_uri_prefix: If a uri prefix is given, then calling
      `tfds.core.as_path('prefix://path')` will call the decorated class.

  Returns:
    The decorator or decorated class
  """
  global _PATHLIKE_CLS
  if isinstance(path_cls_or_uri_prefix, str):
    # Prefix form: return a decorator that maps the prefix to the class,
    # then registers the class itself via the non-prefix branch.
    def register_pathlike_decorator(cls: T) -> T:
      _URI_PREFIXES_TO_CLS[path_cls_or_uri_prefix] = cls
      return register_pathlike_cls(cls)
    return register_pathlike_decorator
  else:
    # Class form: add it to the tuple used by `as_path`'s isinstance check.
    _PATHLIKE_CLS = _PATHLIKE_CLS + (path_cls_or_uri_prefix,)
    return path_cls_or_uri_prefix
# pylint: enable=g-wrong-blank-lines
def as_path(path: PathLike) -> ReadWritePath:
  """Create a generic `pathlib.Path`-like abstraction.

  Depending on the input (e.g. `gs://`, `github://`, `ResourcePath`,...), the
  system (Windows, Linux,...), the function will create the right pathlib-like
  abstraction.

  Args:
    path: Pathlike object.

  Returns:
    path: The `pathlib.Path`-like abstraction.
  """
  is_windows = os.name == 'nt'
  if isinstance(path, str):
    prefix, sep, _ = path.partition('://')
    if sep:  # str is a URI (e.g. `gs://`, `github://`,...)
      # Even on Windows, URI paths are PosixPath-based.
      return _URI_PREFIXES_TO_CLS[prefix + '://'](path)  # pytype: disable=bad-return-type
    str_cls = gpath.WindowsGPath if is_windows else gpath.PosixGPath
    return str_cls(path)
  if isinstance(path, _PATHLIKE_CLS):
    # Forward resource path, gpath,... as-is
    return path  # pytype: disable=bad-return-type
  if isinstance(path, os.PathLike):  # Other `os.fspath` compatible objects
    fspath_cls = gpath.WindowsGPath if is_windows else gpath.PosixGPath
    return fspath_cls(path)
  raise TypeError(f'Invalid path type: {path!r}')
| true
|
f2ecb193d12734cb7869a628a6e6e12521e88ca3
|
Python
|
fwb04/spider
|
/spydersql/population.py
|
UTF-8
| 4,913
| 3.09375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import requests
import json
import time
import sqlite3
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
# Millisecond timestamp helper (used as the API's k1 cache-buster).
def gettime():
    """Return the current Unix time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
# Crawl population statistics from the National Bureau of Statistics
# (data.stats.gov.cn) and persist them into a local SQLite database.
def getpopulation():
    # Custom request headers
    headers = {}
    # Query-string parameters
    keyvalue = {}
    # Target endpoint
    url = 'http://data.stats.gov.cn/easyquery.htm'
    # Pretend to be a desktop Safari browser
    headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14) ' \
                            'AppleWebKit/605.1.15 (KHTML, like Gecko) ' \
                            'Version/12.0 Safari/605.1.15'
    # Query parameters (A0301 = population indicator group)
    keyvalue['m'] = 'QueryData'
    keyvalue['dbcode'] = 'hgnd'
    keyvalue['rowcode'] = 'zb'
    keyvalue['colcode'] = 'sj'
    keyvalue['wds'] = '[]'
    keyvalue['dfwds'] = '[{"wdcode":"zb","valuecode":"A0301"}]'
    keyvalue['k1'] = str(gettime())  # millisecond timestamp cache-buster
    # Open a session (keeps cookies between the two requests)
    s = requests.session()
    # First request primes the session / selects the indicator
    r = s.post(url, params=keyvalue, headers=headers)
    # Switch the dfwds field to request the last 20 years
    keyvalue['dfwds'] = '[{"wdcode":"sj","valuecode":"LAST20"}]'
    # Second request fetches the actual data
    r = s.get(url, params=keyvalue, headers=headers)
    r.encoding = 'utf-8'
    # Series extracted from the response
    year = []
    population = []
    male = []
    female = []
    # Pull the interesting values out of the JSON payload
    data = json.loads(r.text)
    data_one = data['returndata']['datanodes']
    for value in data_one:
        # Year and total population (indicator A030101)
        if ('A030101_sj' in value['code']):
            year.append(value['code'][-4:])
            population.append(int(value['data']['strdata']))
        # Male population (A030102)
        if ('A030102_sj' in value['code']):
            male.append(int(value['data']['strdata']))
        # Female population (A030103)
        if ('A030103_sj' in value['code']):
            female.append(int(value['data']['strdata']))
    # Reverse so the series run oldest -> newest (the API apparently
    # returns newest first -- TODO confirm against a live response)
    year.reverse()
    population.reverse()
    male.reverse()
    female.reverse()
    # Connect to (or create) the SQLite database
    conn = sqlite3.connect("population.db")
    cur = conn.cursor()
    cur.execute('''CREATE TABLE IF NOT EXISTS population
    (year text, popu int, male int, female int)''')
    cur.execute('select * from population ')
    # Insert rows only when the table is still empty (idempotent re-runs)
    data = cur.fetchall()
    if data == []:
        for i in range(len(year)):
            cur.execute("INSERT INTO population VALUES ('%s','%d','%d','%d')" % (year[i], population[i], male[i], female[i]))
    conn.commit()
    cur.close()
    conn.close()
# Bar chart: year vs. total population
def plot1(year, population):
    plt.figure(figsize=(10, 6))
    ax = plt.subplot()  # plotting area
    # Y-axis range (unit: 10k people)
    plt.ylim(125000, 140000)
    # Font that can render the Chinese labels; keep minus signs readable
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    ax.bar(year, population, align='center', color='darkseagreen', edgecolor='white')
    # Annotate each bar with its value
    for a, b in zip(year, population):
        plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=11)
    # Title and axis labels
    plt.xlabel(u'年份')
    plt.xticks(rotation=45)
    plt.ylabel(u'人口数/万人')
    plt.title(u'1999-2018年末总人口条形图')
    plt.show()
# Line chart: year vs. male/female population share
def plot2(year, male, female):
    # Compute the two gender ratios into r1 (male) and r2 (female)
    r1 = []
    r2 = []
    for i in range(len(male)):
        r1.append(male[i] / (male[i] + female[i]))
        r2.append(female[i] / (male[i] + female[i]))
    plt.figure(figsize=(10, 5))
    ax = plt.subplot()  # plotting area
    plt.title("1999-2018年全国男性人口和女性人口占比变化折线图")
    plt.xlabel(u'年份')
    plt.xticks(rotation=45)
    plt.ylabel(u'占比')
    # Draw both series
    line1, = plt.plot(year, r1)
    line2, = plt.plot(year, r2)
    # Annotate only the male series (r1) with its values
    for a, b in zip(year, r1):
        plt.text(a, b + 0.03, '%.2f' % b, ha='center', va='bottom', fontsize=9)
    plt.legend((line1, line2), ('男性人口占比', "女性人口占比"))
    # Grid styling
    plt.grid(color='whitesmoke', ls='--')
    plt.show()
# Read the stored series back from SQLite and draw both charts
def plotdata():
    conn = sqlite3.connect("population.db")
    cur = conn.cursor()
    cur.execute('select * from population ')
    data = cur.fetchall()
    cur.close()
    conn.close()
    # Columns are (year text, popu int, male int, female int)
    year = [i[0] for i in data]
    population = [i[1] for i in data]
    male = [i[2] for i in data]
    female = [i[3] for i in data]
    print(population)
    print(male)
    print(female)
    plot1(year, population)
    plot2(year, male, female)
if __name__ == '__main__':
    # Scrape population data into the local population.db ...
    getpopulation()
    # ... then plot it
    plotdata()
| true
|
bee2474a2912483eaa0632413b45efe2185c7136
|
Python
|
tathagata-raha/CP_python
|
/binarytree/binartree.py
|
UTF-8
| 4,325
| 3.09375
| 3
|
[] |
no_license
|
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # payload
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class DiameterOfABinaryTree:
    """Computes the diameter (longest path, counted in edges) of a tree."""

    def __init__(self):
        # Best left-height + right-height sum seen so far.
        self.diameter = 0

    def height(self, node):
        """Return the height of ``node`` (a leaf counts as 1), updating
        ``self.diameter`` with the longest path that passes through it."""
        if node is None:
            return 0
        left_h = self.height(node.left)
        right_h = self.height(node.right)
        candidate = left_h + right_h  # edges on the path through `node`
        if candidate > self.diameter:
            self.diameter = candidate
        return 1 + max(left_h, right_h)

    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the tree's diameter in number of edges."""
        self.height(root)
        return self.diameter
class BuildFromInorderAndPostorder:
    """Rebuilds a binary tree from its inorder and postorder traversals.

    Assumes the traversal values are unique (they key ``val_idx``).
    """

    def build(self, sin, ein, spost, epost):
        """Recursive worker over inorder[sin:ein] / postorder[spost:epost].

        The last postorder entry is the subtree root; its index in the
        inorder list splits the remainder into left/right subtrees.
        """
        if ein == sin:
            return None
        root = TreeNode(self.postorder[epost - 1])
        if ein - sin == 1:
            return root
        idx = self.val_idx[root.val]  # root's position in the inorder list
        # print(sin, ein, spost, epost, idx)
        root.left = self.build(sin, idx, spost, spost + (idx - sin))
        root.right = self.build(idx + 1, ein, spost + (idx - sin), epost - 1)
        return root

    # Annotations are quoted strings: `List` (typing) is never imported in
    # this file, so unquoted annotations raised NameError at import time.
    def buildTree(self, inorder: "List[int]", postorder: "List[int]") -> "TreeNode":
        """Return the root of the reconstructed tree."""
        self.inorder = inorder
        self.postorder = postorder
        self.val_idx = {v: i for i, v in enumerate(inorder)}
        root = self.build(0, len(postorder), 0, len(postorder))
        return root
class Solution:
    """LeetCode-style entry point; identical in behavior to
    BuildFromInorderAndPostorder above (rebuild a tree from inorder +
    postorder traversals).  Assumes unique traversal values."""

    def build(self, sin, ein, spost, epost):
        """Recursive worker over inorder[sin:ein] / postorder[spost:epost]."""
        if ein == sin:
            return None
        # The last postorder entry is the root of this subtree.
        root = TreeNode(self.postorder[epost - 1])
        if ein - sin == 1:
            return root
        idx = self.val_idx[root.val]  # root's position in the inorder list
        # print(sin, ein, spost, epost, idx)
        root.left = self.build(sin, idx, spost, spost + (idx - sin))
        root.right = self.build(idx + 1, ein, spost + (idx - sin), epost - 1)
        return root

    # Annotations are quoted strings: `List` (typing) is never imported in
    # this file, so unquoted annotations raised NameError at import time.
    def buildTree(self, inorder: "List[int]", postorder: "List[int]") -> "TreeNode":
        """Return the root of the reconstructed tree."""
        self.inorder = inorder
        self.postorder = postorder
        self.val_idx = {v: i for i, v in enumerate(inorder)}
        root = self.build(0, len(postorder), 0, len(postorder))
        return root
class TreeFunctions:
    """Assorted binary-tree traversal helpers (LeetCode-style).

    Annotations are quoted strings because `List` (typing) is never
    imported in this file; unquoted they raised NameError at import time.
    """

    def inorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Left-root-right traversal, recursive."""
        res = []

        def traversal(node):
            if node:
                traversal(node.left)
                res.append(node.val)
                traversal(node.right)

        traversal(root)
        return res

    def preorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Root-left-right traversal, iterative with an explicit stack."""
        st = []
        res = []
        curr = root
        while True:
            # Walk left, visiting each node on the way down.
            while curr is not None:
                res.append(curr.val)
                st.append(curr)
                curr = curr.left
            if len(st) == 0:
                break
            curr = st.pop().right
        return res

    def postorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Left-right-root traversal, iterative.

        Each stack entry is [node, flag]; flag == 1 means the node's right
        subtree has already been dispatched, so the node can be emitted.
        (Removed the unused `st2` local from the original.)
        """
        res = []
        st = []
        curr = root
        while True:
            while curr is not None:
                st.append([curr, 0])
                curr = curr.left
            if len(st) == 0:
                return res
            # Emit every finished node on top of the stack.
            while st[-1][1] != 0:
                res.append(st.pop()[0].val)
                if len(st) == 0:
                    return res
            st[-1][1] = 1
            curr = st[-1][0].right

    def levelOrder(self, root: "TreeNode") -> "List[List[int]]":
        """Breadth-first traversal, grouped by depth."""
        # Local import: this file has no top-level imports, so the original
        # raised NameError on `deque`/`defaultdict` at call time.
        from collections import deque, defaultdict
        if root is None:
            return []
        q = deque()
        res = []  # (value, depth) pairs in BFS order
        q.append((root, 0))
        while len(q) != 0:
            node, depth = q.popleft()
            res.append((node.val, depth))
            if node.left is not None:
                q.append((node.left, depth + 1))
            if node.right is not None:
                q.append((node.right, depth + 1))
        # Bucket values by depth; insertion order keeps depths ascending.
        d = defaultdict(list)
        for val, depth in res:
            d[depth].append(val)
        return [vals for vals in d.values()]

    def calculateDiameter(self, root):
        """Delegate to DiameterOfABinaryTree."""
        tmp = DiameterOfABinaryTree()
        return tmp.diameterOfBinaryTree(root)

    def buildfrominandpost(self, inorder: "List[int]", postorder: "List[int]"):
        """Delegate to BuildFromInorderAndPostorder."""
        tmp = BuildFromInorderAndPostorder()
        return tmp.buildTree(inorder, postorder)
| true
|
f8af8fddcfacbdacab18b574c58d93e6d061a91f
|
Python
|
theSaab/leetcode
|
/good_pairs.py
|
UTF-8
| 178
| 3.203125
| 3
|
[] |
no_license
|
def pairs(nums):
    """Count "good pairs": index pairs (i, j) with i < j and
    nums[i] == nums[j].

    O(n) via value counts — a value occurring c times contributes
    c*(c-1)/2 pairs — replacing the original O(n^2) double scan.
    """
    # Local import: this file has no top-level imports.
    from collections import Counter
    return sum(c * (c - 1) // 2 for c in Counter(nums).values())
| true
|
d7a39b42fd4d4b4d816f0b58ac7de95da272b6c5
|
Python
|
ProfAvery/cpsc449
|
/stats/bin/stats.py
|
UTF-8
| 1,702
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import contextlib
import datetime
import random
import sqlite3
import faker
# Seed a stats database with fake users and game results (Wordle-like).
DATABASE = './var/stats.db'
SCHEMA = './share/stats.sql'

NUM_STATS = 1_000_000
NUM_USERS = 100_000
YEAR = 2022

# Seed both RNGs so repeated runs generate identical data.
random.seed(YEAR)
fake = faker.Faker()
fake.seed(YEAR)

with contextlib.closing(sqlite3.connect(DATABASE)) as db:
    with open(SCHEMA) as f:
        db.executescript(f.read())
    # Insert NUM_USERS fake usernames, redrawing on UNIQUE collisions.
    for _ in range(NUM_USERS):
        while True:
            try:
                profile = fake.simple_profile()
                db.execute('INSERT INTO users(username) VALUES(:username)', profile)
            except sqlite3.IntegrityError:
                continue  # duplicate username -- draw another profile
            break
    db.commit()
    jan_1 = datetime.date(YEAR, 1, 1)
    today = datetime.date.today()
    num_days = (today - jan_1).days
    # Insert NUM_STATS game rows, redrawing on constraint collisions.
    i = 0
    while i < NUM_STATS:
        while True:
            try:
                user_id = random.randint(1, NUM_USERS)
                game_id = random.randint(1, num_days)
                finished = jan_1 + datetime.timedelta(random.randint(0, num_days))
                # N.B. real game scores aren't uniformly distributed...
                guesses = random.randint(1, 6)
                # ... and people mostly play to win
                won = random.choice([False, True, True, True])
                db.execute(
                    """
                    INSERT INTO games(user_id, game_id, finished, guesses, won)
                    VALUES(?, ?, ?, ?, ?)
                    """,
                    [user_id, game_id, finished, guesses, won]
                )
            except sqlite3.IntegrityError:
                continue  # presumably a (user_id, game_id) collision -- redraw
            i += 1
            break
    db.commit()
| true
|
92e2c95afe386cb1346ec5881098d3fbdca69667
|
Python
|
zebravid/python-examples
|
/acc.py
|
UTF-8
| 771
| 3.4375
| 3
|
[] |
no_license
|
class Acco:
    """An account balance persisted in a text file.

    The backing file holds a single integer; every mutation rewrites it.
    """

    def __init__(self, filename):
        self.filepath = filename
        with open(filename, "r") as fh:
            self.balance = int(fh.read())

    def withdrow(self, amount):
        """Subtract ``amount`` and persist.  (Method name spelling is part
        of the public interface and kept as-is.)"""
        self.balance -= amount
        self.commit()

    def deposit(self, amount):
        """Add ``amount`` and persist."""
        self.balance += amount
        self.commit()

    def commit(self):
        """Write the current balance back to the backing file."""
        with open(self.filepath, 'w') as fh:
            fh.write(str(self.balance))
class Checking(Acco):
    """A checking account whose transfers incur a flat fee.

    Demonstrates inheritance, a class variable, and docstrings.
    """

    type = "checking"  # class-level account kind

    def __init__(self, filepath, fee):
        Acco.__init__(self, filepath)
        self.fee = fee

    def transfer(self, amount):
        """Debit ``amount`` plus the flat fee, then persist."""
        self.balance -= amount + self.fee
        self.commit()
chec=Checking("bal.txt",1)
print(chec.balance)
chec.withdrow(100)
chec.transfer(15)
print(chec.balance)
| true
|
c0bb613bb9518444304d70fe8a1ae1b232820e96
|
Python
|
pitambar3210/exceptionhandling-assignment
|
/exception_handling_assignment.py
|
UTF-8
| 771
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[4]:
# assignment-5
# exception handling assignment
# In[6]:
# question-1
# write a python program to implement 5/0 and use try/except to catch exceptions
# In[7]:
try:
a = int(input('enter the number: '))
result = 5/a
print(result)
except Exception as e:
print(e)
# In[8]:
# problem-2
# implement a python program to generate all sentences where subject is in ['americans','indians'] and verb is in ['play','watch'] and the object is in ['baseball','cricket']
# In[12]:
subjects = ['Americans','Indians']
verbs = ['play','watch']
objects = ['Baseball','cricket']
for i in subjects:
for j in verbs:
for k in objects:
print(i+' '+j+' '+k,end = '\n')
# In[ ]:
| true
|
86785476e34f5161a5b43e9b7aebe20119b5dc19
|
Python
|
mmyoungman/advent-of-code
|
/2017/python/08b.py
|
UTF-8
| 685
| 2.890625
| 3
|
[] |
no_license
|
file = open("08input.txt", 'r')
list = []
while True:
line = file.readline()
if line == '':
break
line = line.rstrip('\n')
list.append(line.split())
file.close()
registers = {}
maxReg = 0
for line in list:
if line[0] not in registers:
registers[line[0]] = 0
if line[4] not in registers:
registers[line[4]] = 0
if eval(str(registers[line[4]]) + ' ' + line[5] + ' ' + line[6]):
if line[1] == "inc":
registers[line[0]] += int(line[2])
else:
registers[line[0]] -= int(line[2])
if registers[max(registers, key=registers.get)] > maxReg:
maxReg = registers[max(registers, key=registers.get)]
print(maxReg)
| true
|
759096c663f01bfd41420c51b89985ec50f18127
|
Python
|
BK-notburgerking/Algorithm
|
/Programmers/2021DevMatching_행렬테두리회전하기.py
|
UTF-8
| 1,648
| 2.859375
| 3
|
[] |
no_license
|
def solution(rows, columns, queries):
    """Rotate rectangle borders of a rows x columns grid clockwise.

    The grid is filled row-major with 1..rows*columns.  Each query
    (r1, c1, r2, c2) — 1-indexed, inclusive — rotates the border of
    that rectangle one step clockwise.  Returns the list of the
    smallest value on each rotated border (taken before rotation).
    """
    grid = [[r * columns + c + 1 for c in range(columns)] for r in range(rows)]

    def rotate(r1, c1, r2, c2):
        # Border coordinates, clockwise starting at the top-left corner.
        path = []
        for c in range(c1, c2):
            path.append((r1, c))
        for r in range(r1, r2):
            path.append((r, c2))
        for c in range(c2, c1, -1):
            path.append((r2, c))
        for r in range(r2, r1, -1):
            path.append((r, c1))
        values = [grid[r][c] for r, c in path]
        # Clockwise shift: every cell receives its predecessor's value.
        shifted = [values[-1]] + values[:-1]
        for (r, c), v in zip(path, shifted):
            grid[r][c] = v
        return min(values)

    answers = []
    for r1, c1, r2, c2 in queries:
        answers.append(rotate(r1 - 1, c1 - 1, r2 - 1, c2 - 1))
    return answers
| true
|
d11882cc780bbcad65f1e1a4a2e777d19f438b6c
|
Python
|
BennyJane/Python_Project_Benny
|
/数据处理/second_获取极值.py
|
UTF-8
| 10,451
| 2.921875
| 3
|
[] |
no_license
|
#!/user/bin/env Python
#coding=utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtk
# --- Tunables ---------------------------------------------------------
# Path of the CSV produced by the first script.
FirstResult_filepath="E:/编程接单/2019-4-14/提取数据11.csv"
# Minimum relative change that counts as a swing.
The_Limition=0.001
# Output CSV of extreme points (4 columns, two points per row).
Final_filename="E:/编程接单/2019-4-14/Second_data02.csv"
# Where the chart image is saved.
filepath="E:/编程接单/2019-4-14/Photo.png"
# Chart title.
Picture_title="Photo Title"
xlabel=The_Limition
#-----------------------------------------------------------------------------------------------------------------------
df=pd.read_csv(FirstResult_filepath)
# Trim the timestamp column to 19 characters ("YYYY-MM-DD HH:MM:SS").
end_num = df .shape[0]
for i in range(0, end_num):
    # print(df .iloc[i, 1])
    df .iloc[i, 1] = df .iloc[i, 1][0:19]
# print(df)
# The mid-price series on its own.
Mean_df=df['bid/ask_price']
# print(Mean_df.info)
# Relative-change helper follows.
def compute(num1, num2):
    """Return (num1 - num2) / num2: the relative change from num2 to num1."""
    delta = num1 - num2
    return delta / num2
# Tables for the start points and end points of each swing.
Left_df = pd.DataFrame(columns=['exchange_time', 'bid/ask_price'])
Right_df = pd.DataFrame(columns=['exchange_time', 'bid/ask_price'])
# The parity of k records the trend direction:
#   odd  -> last swing was down, look for a rise next
#   even -> last swing was up, look for a fall next
k = 1
first_break_point = 0
# series type
end_num = Mean_df.shape[0]
# Scan forward from the second data point.
for i in range(1, end_num):
    # Slice; position i itself is excluded.
    Current_Process = Mean_df.iloc[:i]
    # Current_Process=Mean_df[:i]
    # print(Current_Process)
    first_max_price = Current_Process.max()
    first_min_price = Current_Process.min()  # values only, no index
    last_price = Current_Process.iloc[i-1]
    result = compute(first_max_price, first_min_price)
    if result < The_Limition:
        continue
    else:
        first_break_point = i
        # result >= The_Limition: the first extreme pair is found.
        # Decide whether the move was up or down.
        if last_price == first_max_price:
            k = k + 1
            # print(last_price, first_max_price)
            # Rising: minimum first, then maximum (mind the save order).
            min_id = Current_Process.idxmin()
            max_id = Current_Process.idxmax()
            # print(min_id, max_id)
            # Store time+price of each endpoint; the two tables are merged
            # into one at the end of the script.
            Left_df.loc[Left_df.shape[0]] = df.iloc[min_id, 1:]  # time and price only
            Right_df.loc[Right_df.shape[0]] = df.iloc[max_id, 1:]  # time and price only
            # First segment done.
            break
        else:
            # print(last_price, first_min_price)
            # Falling: maximum first, then minimum.
            max_id = Current_Process.idxmax()
            min_id = Current_Process.idxmin()
            print(max_id, min_id)
            Left_df.loc[Left_df.shape[0]] = df.iloc[max_id, 1:]  # time and price only
            Right_df.loc[Right_df.shape[0]] = df.iloc[min_id, 1:]  # time and price only
            # First segment done.
            break
n=first_break_point
j=first_break_point
# Keep scanning for alternating swings until the data runs out.
while True :
    j = j+1
    if j <end_num:
        # Which direction to look for depends on the parity of k.
        if (k % 2)==0:
            # Previous swing was up: look for a fall next.
            # (The slice always holds at least two points.)
            N_Process=Mean_df.iloc[n:j]
            # print(n,j)
            N_max_price = N_Process.max()
            N_min_price = N_Process.min()
            # print(type(N_Process))
            # print(N_Process.index)
            last_price = N_Process.loc[j-1]
            result = compute(N_max_price, N_min_price)
            if result >= The_Limition:
                # The last point must be the minimum of the window.
                if last_price == N_min_price:
                    # print(j)
                    k = k + 1
                    n = j
                    # Maximum first, then the closing minimum.
                    # print(last_price,N_max_price, N_min_price)
                    max_id = N_Process.idxmax()
                    min_id = j-1
                    # print(max_id, min_id)
                    # Store time+price of each endpoint; tables merged later.
                    Left_df.loc[Left_df.shape[0]] = df.iloc[max_id, 1:]  # time and price only
                    Right_df.loc[Right_df.shape[0]] = df.iloc[min_id, 1:]  # time and price only
                    # One more pair found.
                    print('完成了一对极值的查找:%s' % k)
        else:
            # Previous swing was down: look for a rise next.
            N_Process=Mean_df.iloc[n:j]
            N_max_price=N_Process.max()
            N_min_price=N_Process.min()
            last_price=N_Process.loc[j-1]
            result=compute(N_max_price,N_min_price)
            if result >= The_Limition:
                # The last point must be the maximum of the window.
                if last_price ==N_max_price:
                    k=k+1
                    n = j
                    # print(last_price, N_min_price, N_max_price)
                    # Minimum first, then the closing maximum.
                    max_id=j-1
                    min_id=N_Process.idxmin()
                    # print(min_id,max_id)
                    Left_df.loc[Left_df.shape[0]] = df.iloc[min_id, 1:]  # time and price only
                    Right_df.loc[Right_df.shape[0]] = df.iloc[max_id, 1:]  # time and price only
                    # One more pair found.
                    print('完成了一对极值的查找:%s' % k)
        #break
    else:
        break
# ------------------------------ Plotting ------------------------------
# Figure size and resolution.
fig = plt.figure(figsize=(20, 6), dpi=90)
ax1 = fig.add_subplot(1, 1, 1)
#-------------------------------------------------------------预设值
# Use row indices as x values, standing in for the raw timestamps.
idx_pxy = np.arange(df.shape[0])
print(type(idx_pxy))
# Tick formatter: map a fractional index back to its timestamp string.
def x_fmt_func(x, pos=None):
    idx =np.clip(int(x+0.5), 0, df.shape[0]-1)
    return df['exchange_time'].iat[idx]
# Plot helper: thin black line (used for the full price series).
def decorateAx(ax, xs, ys, x_func):
    ax.plot(xs, ys, color="k", linewidth=0.3, linestyle="-")
    # ax.plot(ax.get_xlim(), [0,0], color="blue", linewidth=0.5, linestyle="--")
    if x_func:
        # install the index -> timestamp tick formatter
        ax.xaxis.set_major_formatter(mtk.FuncFormatter(x_func))
    ax.grid(True)
    return
# Plot helper: red line (even-numbered swing segments).
def decorateAx02(ax, xs, ys, x_func):
    ax.plot(xs, ys, color="r", linewidth=1, linestyle="-")
    # ax.plot(ax.get_xlim(), [0,0], color="blue", linewidth=0.5, linestyle="--")
    if x_func:
        # install the index -> timestamp tick formatter
        ax.xaxis.set_major_formatter(mtk.FuncFormatter(x_func))
    ax.grid(True)
    return
# Plot helper: blue line (odd-numbered swing segments).
def decorateAx03(ax, xs, ys, x_func):
    ax.plot(xs, ys, color="b", linewidth=1, linestyle="-")
    # ax.plot(ax.get_xlim(), [0,0], color="blue", linewidth=0.5, linestyle="--")
    if x_func:
        # install the index -> timestamp tick formatter
        ax.xaxis.set_major_formatter(mtk.FuncFormatter(x_func))
    ax.grid(True)
    return
#------------------------------------------------------------end-------------------------------------------------------
# Draw the full price series.
decorateAx(ax1, idx_pxy, df['bid/ask_price'], x_fmt_func)
#······························02····························
# Overlay the detected swing segments.
# Guard against unequal table lengths; use the shorter one.
if Left_df.shape[0]!=Right_df.shape[0]:
    if Left_df.shape[0]>Right_df.shape[0]:
        nums=Right_df.shape[0]
    else:
        nums = Left_df.shape[0]
else:
    nums=Left_df.shape[0]
# Interleave start/end rows into one table (could be merged more cleanly).
Simple_Col = pd.DataFrame(columns=['exchange_time', 'bid/ask_price'])
for i in range(0, nums):
    Simple_Col = Simple_Col.append(Left_df.iloc[i, :], ignore_index=True)
    Simple_Col = Simple_Col.append(Right_df.iloc[i, :], ignore_index=True)
# Simple_Col.to_csv("E:/编程接单/2019-4-14/Simple_Col.csv", index=None)
# Normalise timestamps to 19 characters again.
end_num = Simple_Col .shape[0]
for i in range(0, end_num):
    # print(type(Simple_Col .iloc[i, 0]))
    Simple_Col .iloc[i, 0] = str(Simple_Col .iloc[i, 0])[0:19]
# print(Simple_Col)
Simple_nums = Simple_Col.shape[0]
A_list = []
B_list = []
for j in range(0, Simple_nums):
    # Take two consecutive rows -> the two endpoints of one segment.
    try:
        price_01 = Simple_Col.iloc[j, 1]
        price_02=Simple_Col.iloc[j+1, 1]
        date1 = Simple_Col.iloc[j, 0]
        date2 = Simple_Col.iloc[j + 1, 0]
        list1 = []
        # Look the endpoints back up in df to recover their row indices.
        a = df[(df["exchange_time"] == date1)&(df['bid/ask_price'] == price_01)].index.tolist()
        b = df[(df["exchange_time"] == date2)&(df['bid/ask_price'] == price_02)].index.tolist()
        list1.append(a[0])
        list1.append(b[0])
        # print(list1)
        Indes_two = np.array(list1)
        list2=[]
        list2=[price_01,price_02]
        B_list=np.array(list2)
        # print(B_list,Indes_two,'/n')
    except:
        # NOTE(review): bare except -- it hides the expected IndexError at
        # the last row (j+1 out of range) but also any other failure.
        continue
    # print(A_list)
    # Alternate segment colours: red for even j, blue for odd.
    if (j % 2) == 0:
        decorateAx02(ax1, Indes_two,B_list , x_fmt_func)
    else:
        decorateAx03(ax1, Indes_two,B_list , x_fmt_func)
    # NOTE(review): np.delete returns a new array; these two calls discard
    # their results and have no effect.
    np.delete(B_list,(0,1),0)
    np.delete(Indes_two, (0, 1), 0)
# X axis: auto-rotate the date labels.
plt.gcf().autofmt_xdate()
plt.title(Picture_title)
# plt.ylabel(r"price",fontsize=20)
plt.xlabel(xlabel,fontsize=20)
# Save the image, then display it.
plt.savefig(filepath)
plt.show()
#······························end····························
# Merge the start-point and end-point tables side by side and save.
Newdf=pd.concat([Left_df,Right_df],axis=1)
#print(Newdf)
# BUG FIX: the original used `rename` with a dict whose keys
# ('exchange_time', 'bid/ask_price') each appeared twice -- later keys
# overwrite earlier ones in a dict literal, and after the concat both
# column pairs share the same labels anyway, so the four intended
# distinct names could never be produced.  Assign the header positionally.
Newdf.columns = ['extreme_point', 'Start_price', 'confirm_point', 'End_price']
Newdf.to_csv(Final_filename,index=None)
| true
|
9613ef678b3d0449c4b2df72b31fcc67786c03b9
|
Python
|
carson-1999/Personal-Python-Project
|
/爬虫相关/carson网易云.py
|
UTF-8
| 2,782
| 2.765625
| 3
|
[] |
no_license
|
import requests
import os
import bs4
from fake_useragent import UserAgent
from selenium import webdriver
from time import sleep
# 随机产生请求头
ua = UserAgent(verify_ssl=False, path='fake_useragent.json')
# 当前目录下 # 创建保存音乐的文件夹
path = os.path.join('网易云音乐')
if not os.path.exists(path):
os.mkdir(path)
# 配置浏览器驱动
options = webdriver.ChromeOptions()
# 关闭左上方 Chrome 正受到自动测试软件的控制的提示
# options.add_argument("--headless")
options.add_experimental_option('useAutomationExtension', False)
options.add_experimental_option("excludeSwitches", ['enable-automation'])
name = input('请输入待下载歌名:')
# 初始化browser对象
browser = webdriver.Chrome(options=options)
# Scrape the first search result's song name, id and artist.
def get__name_id_singer(url):
    browser.get(url=url)
    # The result list lives inside the g_iframe frame.
    browser.switch_to.frame('g_iframe')
    sleep(1)  # give the frame time to render
    page_text = browser.execute_script("return document.documentElement.outerHTML")
    soup = bs4.BeautifulSoup(page_text, 'html.parser')
    music_names = soup.select("div[class='td w0'] a b")
    music_name = music_names[0].get("title")  # song title
    music_ids = soup.select("div[class='td w0'] a")
    music_id = music_ids[0].get("href")  # song page link
    music_id = music_id.split('=')[-1]  # slice the id off the query string
    music_singers = soup.select("div[class='td w1'] a")
    music_singer = music_singers[0].string  # artist name
    return music_name, music_id, music_singer
# Download the MP3 bytes and save them under the music folder.
def download_music(url, song_name, singer):
    headers = {
        "accept-encoding": "gzip",
        "user-agent": ua.random
    }
    response = requests.get(url=url, headers=headers)
    music_data = response.content
    music_path_name = '{}_{}演唱.mp3'.format(song_name, singer)
    music_path = path + '/' + music_path_name
    with open(music_path, 'wb') as f:
        f.write(music_data)
    print(music_path_name, '------->已下载成功!')
# Main driver.
def main():
    """Search the song by name, resolve its media URL and download it."""
    url = 'https://music.163.com/#/search/m/?s=' + name + '&type=1'
    # Receive the returned song name, id and artist.
    music_name, music_id, musice_singer = get__name_id_singer(url)
    # "outer" redirect endpoint that serves the raw mp3 for a song id.
    music_url = 'http://music.163.com/song/media/outer/url?id=' + music_id + '.mp3'
    browser.get(url=music_url)
    sleep(0.5)
    page_text = browser.execute_script("return document.documentElement.outerHTML")
    soup = bs4.BeautifulSoup(page_text, 'html.parser')
    music_source = soup.select("video source")
    # Download the track from the resolved <source> URL.
    source_url = music_source[0].get('src')
    download_music(source_url, music_name, musice_singer)
if __name__ == '__main__':
    main()
    browser.quit()
| true
|
0ce70934824ce96706f4ee53b380db9666a8d459
|
Python
|
hkchengrex/so
|
/drawing_cross_51455622/main.py
|
UTF-8
| 1,754
| 3.03125
| 3
|
[] |
no_license
|
import cv2
import matplotlib.pyplot as plt
IMG_SIZE = 224
# Grayscale test image resized to the detector's working resolution.
im = cv2.cvtColor(cv2.imread('lena.jpg'), cv2.COLOR_BGR2GRAY)
im = cv2.resize(im, (IMG_SIZE, IMG_SIZE))
# Your detector results
# Each region is [(x1, y1), (x2, y2)] in the coordinates of the CURRENT
# (zoomed) view, not the original image.
detected_region = [
    [(10, 20) , (80, 100)],
    [(50, 0) , (220, 190)],
    [(100, 143) , (180, 200)],
    [(110, 45) , (180, 150)]
]
# Global states
# scale/shift map coordinates in the zoomed view back to the original image.
x_scale = 1.0
y_scale = 1.0
x_shift = 0
y_shift = 0
# Current crop of the original image (whole image to start).
x1, y1 = 0, 0
x2, y2 = IMG_SIZE-1, IMG_SIZE-1
i = 0
for region in detected_region:
    i += 1
    # Detection
    # Record the mapping from the resized crop back to original coords.
    x_scale = IMG_SIZE / (x2-x1)
    y_scale = IMG_SIZE / (y2-y1)
    x_shift = x1
    y_shift = y1
    cur_im = cv2.resize(im[y1:y2, x1:x2], (IMG_SIZE, IMG_SIZE))
    # Assuming the detector return these results
    cv2.rectangle(cur_im, region[0], region[1], (255))
    plt.imshow(cur_im)
    plt.savefig('%d.png'%i, dpi=200)
    plt.show()
    # Zooming in, using part of your code
    # Grow the region by a small context margin, then map to original coords.
    context_pixels = 16
    x1 = max(region[0][0] - context_pixels, 0) / x_scale + x_shift
    y1 = max(region[0][1] - context_pixels, 0) / y_scale + y_shift
    x2 = min(region[1][0] + context_pixels, IMG_SIZE) / x_scale + x_shift
    y2 = min(region[1][1] + context_pixels, IMG_SIZE) / y_scale + y_shift
    # Slicing needs integer indices.
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # Assuming the detector confirm its choice here
    print('Confirmed detection: ', x1, y1, x2, y2)
# This time no padding
# Map the final (last) region back to original-image coordinates and draw it.
x1 = detected_region[-1][0][0] / x_scale + x_shift
y1 = detected_region[-1][0][1] / y_scale + y_shift
x2 = detected_region[-1][1][0] / x_scale + x_shift
y2 = detected_region[-1][1][1] / y_scale + y_shift
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
cv2.rectangle(im, (x1, y1), (x2, y2), (255, 0, 0))
plt.imshow(im)
plt.savefig('final.png', dpi=300)
plt.show()
| true
|
b32a1c6cb78d97704ef19e57ac7f352df2675aca
|
Python
|
deanmolinaro/EpicToolbox
|
/python/EpicToolbox/mkdirfile.py
|
UTF-8
| 258
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
import os
def mkdirfile(outfile):
    """Ensure the directory needed to hold `outfile` exists.

    A path without an extension is treated as a directory and created
    itself; otherwise the file's parent directory is created.
    """
    root, ext = os.path.splitext(outfile)
    target = root if ext == '' else os.path.split(root)[0]
    os.makedirs(target, exist_ok=True)
| true
|
8c505fa9a95d9490a1d10c92e66c0f2c6a9eac37
|
Python
|
parkwisdom/Python-Study-step3
|
/day01/day01-01.py
|
UTF-8
| 407
| 3.21875
| 3
|
[] |
no_license
|
# Quiz 1: sum of the multiples of 3 between 1 and 100.
# (no classes or functions needed)
# variable declarations
hap = 0  # running total

# main code
if __name__ == '__main__':
    # range(3, 101, 3) visits exactly the multiples of 3, so no
    # divisibility test (or dead `else: pass` branch) is needed.
    for i in range(3, 101, 3):
        hap += i
    print(hap)
| true
|
d28cded78d668322935dd6ddc47c8a278addc115
|
Python
|
lilberick/Competitive-programming
|
/online-judge-solutions/Codeforces/1631A.py
|
UTF-8
| 375
| 3.15625
| 3
|
[] |
no_license
|
#https://codeforces.com/problemset/problem/1631/A
#Lang : Python 3.8
#Time : 46 ms
#Memory : 0 KB
# For each test case: after optimal per-index swaps between a and b,
# the answer is (max of pairwise maxima) * (max of pairwise minima).
for _ in range(int(input())):
    n = int(input())
    a = list(map(int, input().split()))[:n]
    b = list(map(int, input().split()))[:n]
    hi = max(max(x, y) for x, y in zip(a, b))
    lo = max(min(x, y) for x, y in zip(a, b))
    print(hi * lo)
| true
|
d53610caeb4a37f8846daabd7d0cb91eed3f9775
|
Python
|
PiyushChaturvedii/My-Leetcode-Solutions-Python-
|
/Leetcode 5/Maximize Distance to Closest Person.py
|
UTF-8
| 559
| 3.265625
| 3
|
[] |
no_license
|
class Solution:
    def maxDistToClosest(self, seats):
        """Return the max distance to the closest occupied seat.

        :type seats: List[int]  (contains at least one 0 and one 1)
        :rtype: int
        """
        n = len(seats)
        # Leading run of empty seats: sitting at index 0 gives distance
        # equal to the run length.
        first = 0
        while first < n and seats[first] == 0:
            first += 1
        best = max(1, first)
        # Scan each gap between consecutive occupied seats.
        left = first
        while left < n:
            right = left + 1
            while right < n and seats[right] == 0:
                right += 1
            if right < n:
                # Interior gap: best seat is its midpoint.
                best = max(best, (right - left) // 2)
            else:
                # Trailing run of empty seats: sit at the very end.
                best = max(best, n - 1 - left)
            left = right
        return best
| true
|
493def4a2db9dfc2476d6690ca32a793fb3a0db1
|
Python
|
tlechien/PythonCrash
|
/Chapter 6/6.9.py
|
UTF-8
| 598
| 4.53125
| 5
|
[] |
no_license
|
"""
6-9. Favorite Places: Make a dictionary called favorite_places. Think of three
names to use as keys in the dictionary, and store one to three favorite places
for each person. To make this exercise a bit more interesting, ask some friends
to name a few of their favorite places. Loop through the dictionary, and print
each person’s name and their favorite places.
"""
if __name__ == '__main__':
favorite_places = {
"Matthias": "Paris",
"Albert": "Rio",
"Eric": "Rome"
}
print(*map(lambda x: "\n{} likes {}".format(x, favorite_places[x]), favorite_places))
| true
|
bfbfa5820c7500639a08ce08b79d6cf54fabaa3b
|
Python
|
Saranya-sharvi/saranya-training-prgm
|
/exc-pgm/subclass.py
|
UTF-8
| 278
| 3.546875
| 4
|
[] |
no_license
|
"""Define a class named American and its subclass NewYorker"""
#parent class creation
class American(object):
pass
#subclass creation
class NewYorker(American):
pass
anAmerican = American()
aNewYorker = NewYorker()
#print result
print(anAmerican)
print(aNewYorker)
| true
|
8e11e3746e769e5becab109eec2993d0b7954923
|
Python
|
ljinwoo9633/Stock-Bot
|
/merge.py
|
UTF-8
| 545
| 2.78125
| 3
|
[] |
no_license
|
import csv

# Merge two CSV exports into one file, keeping a single header row.
# Bug fix: the original `if(index == 0): pass` checks were dead code
# (index was never incremented), so the second file's header row was
# duplicated in the merged output. Context managers also guarantee the
# files are closed.
with open('./mergedStock.csv', 'w', encoding='euc-kr', newline='') as result_file, \
        open('./mergedStock1.csv', 'r', encoding='euc-kr') as file_one, \
        open('./mergedStock2.csv', 'r', encoding='euc-kr') as file_two:
    writer = csv.writer(result_file)
    # First file: copy everything, including its header.
    for line in csv.reader(file_one):
        writer.writerow(line)
    # Second file: skip the header (row 0) so it is not duplicated.
    for index, line in enumerate(csv.reader(file_two)):
        if index == 0:
            continue
        writer.writerow(line)
| true
|
b0bada48bc0e69f19660dbbfa48503799782f0eb
|
Python
|
abheeshta97/college-project
|
/fib_encrypt_1.py
|
UTF-8
| 3,221
| 3.40625
| 3
|
[] |
no_license
|
from tkinter import Tk, messagebox
import creationANDopening as note
#---INITIALIZATION OF LOWER AND UPPER ASCII LIMIT
ASCII_MIN = 33   # '!' — lowest printable non-space ASCII code
ASCII_MAX = 126  # '~' — highest printable ASCII code
#---FUNCTION TO CONVERT LIST TO STRING---
def convertToString(s):
    """Concatenate an iterable of strings into a single string.

    str.join runs in linear time, unlike the original character-by-
    character `+=` loop, which is quadratic in the worst case.
    """
    return "".join(s)
#---FUNCTION TO REVERSE STRING---
def reverse_string(s):
    """Return a reversed copy of `s`"""
    # Slicing with a negative step is the idiomatic O(n) reverse; the
    # original in-place char-swap loop did the same work manually.
    return s[::-1]
#---FUNCTION TO ENCRYPT MESSAGE FROM FILE---
def encrypt(inputMessage):
    """Read the text file at `inputMessage`, reverse each word, then shift
    every character through printable ASCII (33..126, wrapping) by
    Fibonacci-sized steps, alternating direction per character.

    Returns the encrypted text as a string, or shows a warning dialog and
    returns None if anything fails.
    """
    try:
        file_1 = open(inputMessage, "r")
        fileMessage = file_1.read()
        file_1.close()
        reversedMessage=[]
        fileMessage=fileMessage.split()
        # Reverse every word before shifting.
        for i in fileMessage:
            reversedChar=reverse_string(i)
            reversedMessage.append(reversedChar)
        fileMessage=reversedMessage
        #print('\nThe reversed Message in list format=',fileMessage)
        message=fileMessage
        dataAppend=[] #IS DEFINED TO HOLD THE ENCRYPTED MESSAGE IN LIST TYPE
        for word in message:
            counter=True
            # n1/n2 walk the Fibonacci sequence; n2 is the current shift size.
            n1=0
            n2=1
            for letter in word:
                loopLetter = letter
                if counter==True:
                    # Even-position chars: shift FORWARD through the ASCII range.
                    data_num = ord(loopLetter)
                    for num in range(0, len(word)): #LOOP TO RUN N TIMES FOR EACH CHAR
                        for i in range(0, n2): #LOOP TO INCREMENT THE POSITION
                            if(data_num == ASCII_MAX):
                                data_num = ASCII_MIN #CHECKS UPPER LIMIT
                            else:
                                data_num += 1
                        data = chr(data_num)
                        temp = n1
                        n1 = n2
                        n2 = temp + n1
                    counter = False
                    n1 = 0
                    n2 = 1
                else:
                    # Odd-position chars: shift BACKWARD through the ASCII range.
                    data_num = ord(loopLetter)
                    for num in range(0, len(word)): # LOOP TO RUN
                        for j in range(0, n2): # LOOP TO DECREMENT THE POSITION
                            if (data_num == ASCII_MIN):
                                data_num = ASCII_MAX # CHECKS LOWER LIMIT
                            else:
                                data_num -= 1
                        data = chr(data_num)
                        temp = n1
                        n1 = n2
                        n2 = temp + n1
                    counter = True
                    n1 = 0
                    n2 = 1
                dataAppend += data
            dataAppend += " "
        #print(dataAppend)
        newMessage=convertToString(dataAppend) #ENCRYPTED LIST CONVERTED TO STRING
        return(newMessage)
    except:
        # NOTE(review): bare except swallows every error (missing file,
        # encoding, ...) and always blames the file type.
        messagebox.showwarning("ERROR", ".txt FILE REQUIRED")
def encrypted(encryptedmessage):
    """Encrypt the file at `encryptedmessage` and save the result via the
    project `note` helper (which prompts for an output file name)."""
    encryptedData=encrypt(encryptedmessage)
    messagebox.showinfo("Information", "ENTER IN THE NAME FOR FILE TO ENCRYPT ")
    note.save_as(encryptedData)
| true
|
c95ce89ff463342acdabedf185c79d4a5f46bc46
|
Python
|
acad2/crypto
|
/designs/other/math/printnumbers.py
|
UTF-8
| 1,058
| 3.4375
| 3
|
[] |
no_license
|
# every N numbers has N as a factor
# 1 2 3 4 5 6 7 8 9
# 2 4 6 8
# 3 3 3
# 5 5
# 7
import itertools

from crypto.utilities import prime_generator
def prime_generator():
    """Yield primes indefinitely using an incremental sieve.

    Bug fix: the original body was truncated (`filter.appen`), contained
    no `yield`, and therefore returned None instead of a generator.
    The dict maps each upcoming odd composite to the step (2*p) of one
    prime that divides it.
    """
    yield 2
    sieve = {}
    n = 3
    while True:
        if n not in sieve:
            # n survived every recorded prime: it is prime. Its first
            # interesting multiple is n*n (smaller ones are already marked).
            yield n
            sieve[n * n] = 2 * n
        else:
            # n is composite; re-file its prime's next odd multiple.
            step = sieve.pop(n)
            nxt = n + step
            while nxt in sieve:
                nxt += step
            sieve[nxt] = step
        n += 2
def generate_primes_until(n):
    """Yield primes strictly less than `n`."""
    for prime in prime_generator():
        if prime >= n:
            # Bug fix (PEP 479): `raise StopIteration` inside a generator
            # becomes RuntimeError on Python 3.7+; a plain return ends
            # iteration cleanly.
            return
        yield prime
def print_numbers_up_to(n=30):
    """Print 0..n-1 on one line, then for each prime p < n print the
    multiples of p aligned under their positions in the header row."""
    print(' '.join(map(str, range(n))))
    for p in generate_primes_until(n):
        # Pad so each multiple of p lines up beneath its value above.
        pad = ' ' * len(' '.join(map(str, range(p))))
        print(pad.join(map(str, range(0, n, p))))
if __name__ == "__main__":
import sys
with open("numbers.txt", "w") as _file:
_backup = sys.stdout
sys.stdout = _file
print_numbers_up_to(64)
_file.flush()
sys.stdout = _backup
| true
|
bd27243d1395cf33d2e7538a9e653eefb60765fe
|
Python
|
bobqywei/curiosity-driven-exploration
|
/icm.py
|
UTF-8
| 2,289
| 2.546875
| 3
|
[] |
no_license
|
import torch
import torch.nn as nn
from modules import FeatureEncoderNet
class ICMAgent(nn.Module):
    """Intrinsic Curiosity Module: a shared feature encoder plus forward and
    inverse dynamics models, producing an intrinsic (curiosity) reward from
    the forward model's prediction error."""

    def __init__(self, action_space_size, config, device):
        """Build the feature, forward and inverse networks.

        config keys read here: 'parallel_envs', 'state_frames'.
        """
        super(ICMAgent, self).__init__()
        features_size = 288 # same as ActorCritic
        self.device = device
        self.ac_size = action_space_size
        # feature network
        self.extract_feats = FeatureEncoderNet(
            buf_size=config['parallel_envs'],
            ch_in=config['state_frames'],
            conv_out_size=features_size,
            lstm_hidden_size=features_size,
            use_lstm=False) # original paper used 256
        '''
        Forward Model from paper
        f = tf.nn.relu(linear(f, size, "f1", normalized_columns_initializer(0.01)))
        f = linear(f, phi1.get_shape()[1].value, "flast", normalized_columns_initializer(0.01))
        '''
        # Predicts phi(s_{t+1}) from [one-hot action, phi(s_t)].
        self.forward_model = torch.nn.Sequential(
            nn.Linear(features_size+action_space_size, features_size),
            nn.ReLU(),
            nn.Linear(features_size, features_size))
        '''
        Inverse Model from paper
        g = tf.nn.relu(linear(g, size, "g1", normalized_columns_initializer(0.01)))
        logits = linear(g, ac_space, "glast", normalized_columns_initializer(0.01))
        '''
        # Predicts the action logits from [phi(s_t), phi(s_{t+1})].
        self.inverse_model = torch.nn.Sequential(
            nn.Linear(features_size*2, features_size),
            nn.ReLU(),
            nn.Linear(features_size, action_space_size))

    def forward(self, one_hot_action, curr_state, next_state):
        """Return (reward, predicted next features, action logits, next features).

        Assumes batch-first tensors; one_hot_action is (batch, ac_size) —
        TODO confirm against the caller.
        """
        # output will be next predicted state & action, and intrinsic reward
        # get features from both states (phi st and phi st+1 from paper)
        curr_state_features = self.extract_feats(curr_state)
        next_state_features = self.extract_feats(next_state)
        # forward model next predicted state
        next_pred_state_features = self.forward_model(torch.cat([one_hot_action, curr_state_features], dim=1))
        # inverse model: predict which action connected the two states
        pred_action_logits = self.inverse_model(torch.cat([curr_state_features, next_state_features], dim=1))
        # intrinsic reward = squared prediction error of the forward model
        # (no grad: the reward itself is not backpropagated through)
        with torch.no_grad():
            reward = torch.sum((next_pred_state_features-next_state_features)**2, dim=1)
        return reward, next_pred_state_features, pred_action_logits, next_state_features
| true
|
e8d47ef4cae1e15485df40de60d5257dd94b59a5
|
Python
|
e-south/CS506Spring2021Repository
|
/Police_Budget_Overtime_Project/code/count_event_clus_desc_records.py
|
UTF-8
| 2,623
| 2.9375
| 3
|
[] |
no_license
|
"""
count_event_clus_desc_records.py
Counts/plots number of records in each file in /event_clus_desc
"""
import pandas as pd
import matplotlib.pyplot as plt
def plot_records():
    """Count the rows in every cluster file under ../data/event_clus_desc
    and save/show a horizontal bar chart of the counts."""
    # Cluster descriptions; each is also a file name in event_clus_desc/.
    desc = ['BAA_BOSTON_MARATHON',
            'BFS_EVENT_ACTIVITY',
            'BRIGHTON_DAY_PARADE',
            'CARIBBEAN_CARNIVAL',
            'CHINESE_NEW_YEAR',
            'DYKE_MARCH',
            'EVACUATION_DAY_PARAD',
            'FIRST_NIGHT',
            'GREEK_INDEP_DAY_PARA',
            'HAITIAN_AMER_UNITY_P',
            'HALLOWEEN_COVERAGE',
            'INDEPENDENCE_DAY',
            'MASS_MELNEA',
            'NUISANCE_PATROL',
            'STATE_OF_THE_CITY_AD',
            'TD_GARDEN_EVENTS']
    # Row count per cluster (replaces sixteen copy-pasted read_csv lines).
    rec = [pd.read_csv('../data/event_clus_desc/' + name).shape[0]
           for name in desc]
    # Pair names with counts, sorted ascending by count.
    d_sort = dict(sorted(zip(desc, rec), key=lambda item: item[1]))
    fig, axes = plt.subplots(figsize=(17, 10))
    # Plot results, annotate each bar with its count, save to file.
    plt.barh(list(d_sort.keys()), list(d_sort.values()))
    for index, value in enumerate(d_sort.values()):
        plt.text(value, index, str(value))
    plt.title('Number of Records in Each \'event_clus_desc\' Cluster')
    plt.ylabel('Cluster Name')
    plt.xlabel('# of Records')
    plt.savefig("../img/count_event_clus_desc_records.png", bbox_inches='tight')
    plt.show()


plot_records()
| true
|
ed6ab5823065d56f7dfd2db98a1cd1033fe1a769
|
Python
|
dvandra/fabric8-analytics-nvd-toolkit
|
/src/toolkit/transformers/hooks.py
|
UTF-8
| 2,377
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""This module contains the Hook class to handle pipeline hooks."""
import weakref
class Hook(object):
    """Convenient class for handling hooks.

    :param key: str, unique identifier of the hook
    :param func: function to be called by the hook
        The function can not modify any items fed by its arguments.
    :param default_kwargs: default `func` keyword argument values

    Example:

        def foo(x, verbose=False):
            if verbose:
                print('verbosity on')
            return x

        # init with default kwargs
        foo_hook = Hook('foo', foo, verbose=True)
        # and on the call
        foo_hook(x=None)  # prints 'verbosity on'

    :param reuse: whether to reuse (share) the Hook
    """

    __INSTANCES = weakref.WeakSet()

    def __init__(self, key: str, func, reuse=False, **default_kwargs):
        """Initialize hook."""
        if key in Hook.get_current_keys():
            if not reuse:
                raise ValueError("Hook with key `%s` already exists" % key)
            # TODO: share the existing hook instead of creating a new one

        # attr initialization
        self._key = str(key)
        self._func = func
        self._default_kwargs = default_kwargs

        # track the instance (weakly) so keys can be checked for uniqueness
        Hook.__INSTANCES.add(self)

    @property
    def key(self):
        """Get hook key."""
        return self._key

    @property
    def default_kwargs(self):
        """Get hook default keyword arguments."""
        return self._default_kwargs

    @default_kwargs.setter
    def default_kwargs(self, kwargs):
        """Replace the default keyword arguments."""
        self._default_kwargs = kwargs

    def __call__(self, *args, **kwargs):
        """Call the hooked function with default kwargs filled in.

        Bug fix: `default_kwargs` were stored but never applied, so the
        class docstring's example (verbose=True by default) had no effect.
        Explicit call-time kwargs still override the stored defaults.
        """
        merged = {**self._default_kwargs, **kwargs}
        return self._func(*args, **merged)

    @classmethod
    def get_current_hooks(cls) -> list:
        """Return instances of this class."""
        return list(cls.__INSTANCES)

    @classmethod
    def get_current_keys(cls) -> set:
        """Return keys to the instances of this class."""
        return {hook.key for hook in cls.__INSTANCES}

    @classmethod
    def clear_current_instances(cls):
        """Clean up the references held by the class.

        This function is not usually called by user, mainly used for tests
        where cleanup is needed.
        """
        cls.__INSTANCES.clear()
| true
|
1dd233b59745f489fabad83100e6461141c217a1
|
Python
|
Patergia/pwp-capstones
|
/TomeRater/TomeRater.py
|
UTF-8
| 5,960
| 3.28125
| 3
|
[] |
no_license
|
class User(object):
    """A reader identified by name and email, tracking books read and ratings."""

    def __init__(self, name, email):
        self.name = name
        self.email = email
        self.books = {}  # book -> rating (number in [0, 4]) or None if unrated

    def get_email(self):
        return self.email

    def change_email(self, address):
        """Update the email and announce the change."""
        self.email = address
        print(self.name + "'s email address has been updated")

    def __repr__(self):
        return "User " + self.name + " at " + self.email + " has read " + str(len(self.books)) + " books."

    def __eq__(self, other_user):
        # Users are equal when both name and email match.
        return self.name == other_user.name and self.email == other_user.email

    def read_book(self, book, rating=None):
        """Record that the user read `book`, optionally with a rating."""
        self.books[book] = rating

    def get_average_rating(self):
        """Average of the user's ratings, ignoring unrated books; 0 if none.

        Bug fix: unrated books are stored with rating None, which made the
        old `total += rating` loop raise TypeError.
        """
        ratings = [r for r in self.books.values() if r is not None]
        if not ratings:
            return 0
        return sum(ratings) / len(ratings)
# Sample user instance.
patergia = User("Patrick Wright", "patergia@yahoo.com")

#records isbn's to help ensure no duplicates (shared by TomeRater factories)
all_isbn = []
class Book(object):
    """A book identified by title and ISBN, accumulating ratings in [0, 4]."""

    def __init__(self, title, isbn):
        self.title = title
        self.isbn = isbn
        self.ratings = []  # accepted ratings, each a number in [0, 4]

    def get_title(self):
        return self.title

    def get_isbn(self):
        return self.isbn

    def set_isbn(self, new_isbn):
        """Update the ISBN and announce the change."""
        self.isbn = new_isbn
        print("the ISBN of " + self.title + " has been updated.")

    def add_rating(self, rating):
        """Record `rating` if it is a number in [0, 4].

        Bug fix: `rating` defaults to None when a user adds a book without
        rating it; the old `rating >= 0` comparison raised TypeError on
        None. A missing rating is now silently ignored; out-of-range
        values are still reported.
        """
        if rating is None:
            return
        if 0 <= rating <= 4:
            self.ratings.append(rating)
        else:
            print("Invalid Rating")

    def __eq__(self, other_book):
        # Books are equal when both title and ISBN match.
        return self.title == other_book.title and self.isbn == other_book.isbn

    def get_ratings(self):
        return self.ratings

    def get_average_rating(self):
        """Average rating, or 0 for a book with no ratings yet.

        Bug fix: the old version divided by len(self.ratings) and raised
        ZeroDivisionError for unrated books.
        """
        if not self.ratings:
            return 0
        return sum(self.ratings) / len(self.ratings)

    def __hash__(self):
        return hash((self.title, self.isbn))
# Sample book instance (not registered with any TomeRater).
first_book = Book("How to Train Your Dragon", 112358)
class Fiction(Book):
    """A fiction book, adding an author to the Book base."""

    def __init__(self, title, author, isbn):
        super().__init__(title, isbn)
        self.author = author

    def get_author(self):
        return self.author

    def __repr__(self):
        return "{} by {}".format(self.title, self.author)
class Non_Fiction(Book):
    """A non-fiction book, adding a subject and a difficulty level."""

    def __init__(self, title, subject, level, isbn):
        super().__init__(title, isbn)
        self.subject = subject
        self.level = level

    def get_subject(self):
        return self.subject

    def get_level(self):
        return self.level

    def __repr__(self):
        return "{} , a {} manual on {}".format(self.title, self.level, self.subject)
class TomeRater():
    """Registry of users and books with simple rating statistics."""

    def __init__(self):
        self.users = {}  # email -> User
        self.books = {}  # Book -> number of users who read it

    def _register_isbn(self, new_isbn):
        """Return `new_isbn`, or the next free ISBN if it is already taken.

        Deduplicates against the module-level `all_isbn` registry; this
        logic was previously copy-pasted into all three factory methods.
        """
        all_isbn.sort()
        if new_isbn in all_isbn:
            new_isbn = all_isbn[-1] + 1
        all_isbn.append(new_isbn)
        return new_isbn

    def create_book(self, title, new_isbn):
        return Book(title, self._register_isbn(new_isbn))

    def create_novel(self, title, author, new_isbn):
        return Fiction(title, author, self._register_isbn(new_isbn))

    def create_non_fiction(self, title, subject, level, new_isbn):
        return Non_Fiction(title, subject, level, self._register_isbn(new_isbn))

    def add_book_to_user(self, book, email, rating=None):
        """Record that the user with `email` read `book`, with optional rating."""
        try:
            self.users[email].read_book(book, rating)
            book.add_rating(rating)
            # Count how many users have read this book.
            self.books[book] = self.books.get(book, 0) + 1
        except KeyError:
            print("No user with email " + email + "!")

    def add_user(self, name, email, user_books=None):
        """Register a new user, optionally pre-loading books they have read."""
        if email in self.users:
            print("This user already exists!")
            return
        new_user = User(name, email)
        # Bug fix: register the user BEFORE adding their books; the old
        # code called add_book_to_user first, which always hit the
        # "No user with email" branch and silently dropped the books.
        self.users[email] = new_user
        if user_books is not None:
            for user_book in user_books:
                self.add_book_to_user(user_book, email)

    def print_catalog(self):
        for book in self.books:
            print(book)

    def print_users(self):
        for user in self.users.values():
            print(user)

    def get_most_read_book(self):
        """Return the book read by the most users, or None if no books."""
        if not self.books:
            return None
        return max(self.books, key=self.books.get)

    def highest_rated_book(self):
        """Return the book with the highest average rating (None if empty)."""
        best_rating = 0
        best_book = None
        for book in self.books:
            avg = book.get_average_rating()
            if avg > best_rating:
                best_rating = avg
                best_book = book
        return best_book

    def most_positive_user(self):
        """Return the user with the highest average rating (None if empty)."""
        best_rating = 0
        best_user = None
        for user in self.users.values():
            avg = user.get_average_rating()
            if avg > best_rating:
                best_rating = avg
                best_user = user
        return best_user
| true
|
70589b89b7ae9910315be51ad1a8c31190a8c916
|
Python
|
nekapoor7/Python-and-Django
|
/PythonNEW/Function/UppercaseAndLowercase.py
|
UTF-8
| 383
| 3.84375
| 4
|
[] |
no_license
|
"""Write a Python function that accepts a string and calculate the number of upper case letters and lower case letters.
Go to the editor
Sample String : 'The quick Brow Fox'
Expected Output :
No. of Upper case characters : 3
No. of Lower case Characters : 12"""
import re
# Count ASCII upper- and lower-case letters in one line of input.
t = input()
upper_count = sum(1 for ch in t if 'A' <= ch <= 'Z')
lower_count = sum(1 for ch in t if 'a' <= ch <= 'z')
print(upper_count)
print(lower_count)
| true
|
d0a9081808fccdee53da61feabdeb38cf45379f8
|
Python
|
rikard-helgegren/Big_Data_ST10
|
/plotting boundaries/Main.py
|
UTF-8
| 2,694
| 2.921875
| 3
|
[] |
no_license
|
from read_CSV import read_CSV
from split_data import split_data_list
from misslabel_data import misslabel_data_list
from convert_data import convert_data
import copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
def plot_decision_boundary(clf, X, Y, cmap=plt.cm.RdYlBu):
    """Plot `clf`'s decision regions over the 2-D points X, colored by Y."""
    h = 0.02  # mesh step size
    # Pad the mesh slightly beyond the data range.
    x_min, x_max = X[:,0].min() - 10*h, X[:,0].max() + 10*h
    y_min, y_max = X[:,1].min() - 10*h, X[:,1].max() + 10*h
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Classify every mesh point, then reshape back onto the grid.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.figure(figsize=(5,5))
    plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.25)
    plt.contour(xx, yy, Z, colors='k', linewidths=0.7)
    plt.scatter(X[:,0], X[:,1], c=Y, cmap=cmap, edgecolors='k');
def randome_forest_classifier_crossval(train, nr_trees=20, max_depth=2):
    """5-fold cross-validated mean accuracy of a random forest on `train`.

    Each row of `train` is the feature values followed by the label in
    the last column.
    """
    folds = 5
    # Split each row into features and label.
    features = [row[:-1] for row in train]
    targets = [row[-1] for row in train]
    model = RandomForestClassifier(n_estimators=nr_trees, max_depth=max_depth,
                                   random_state=0)
    fold_scores = cross_val_score(model, features, targets, cv=folds)
    return sum(fold_scores) / len(fold_scores)
def randome_forest_classifier_plot(train, nr_trees=20, max_depth=2):
    """Fit a random forest on two feature columns of `train`, plot its
    decision boundary and return the training accuracy."""
    #make list to np.array (convert_data is a project helper)
    train = convert_data(train)
    # Separate labels from data
    data = train[:, :4]
    labels = train[:, -1]
    #define which parameters (feature columns) to plot
    data_to_plot = data[:, [2, 3]]
    clf = RandomForestClassifier(n_estimators=nr_trees, max_depth=max_depth,
                                 random_state=0)
    clf.fit(data_to_plot, labels)
    # NOTE(review): this is accuracy on the training data itself.
    scores = clf.score(data_to_plot, labels)
    plot_decision_boundary(clf, data_to_plot, labels)
    plt.draw()
    return scores
data = read_CSV('iris.csv')
#remove param descriptions (header row)
data = data[1:]
# NOTE(review): the split is computed but the full `data` set is evaluated
# below; `validation` is never used.
[train, validation]= split_data_list(data, 0.2)
# Need deep coppy in order not to change in list
misslabel_data=misslabel_data_list(copy.deepcopy(train), 0.2)
print("Score cross validation:", randome_forest_classifier_crossval(data))
print("Score cross validation misslabeled:", randome_forest_classifier_crossval(misslabel_data))
print("Score fit:", randome_forest_classifier_plot(data))
print("Score fit misslabeled:", randome_forest_classifier_plot(misslabel_data))
plt.show()
| true
|
e490b3bc823929801543aca2d18ea26cdff6ffd0
|
Python
|
shangguanxiaoguan/Python-
|
/requestsdemo/17_pytest_fixture/test_pytest_fixture.py
|
UTF-8
| 291
| 2.609375
| 3
|
[] |
no_license
|
import pytest
@pytest.fixture
def first_fix():
    """Provide a fresh single-element list for each test."""
    return ["a"]
def test_case01(first_fix):
    # The fixture value is injected by name; mutate and verify it.
    first_fix.append("b")
    assert first_fix == ["a", "b"]
    print(first_fix)
if __name__ == '__main__':
    pytest.main(["-s"])  # run via pytest; note the file name must start with test_
| true
|
9b0d91806c3cff5f9682da3a9f3840b913743cb4
|
Python
|
Andrey-Raspopov/PyXelate
|
/pyxelate.py
|
UTF-8
| 562
| 2.640625
| 3
|
[] |
no_license
|
import PIL.Image as Image
from imageio import imwrite
from Image import PyImage
from Palettes import Palettes
color_palette = Palettes['bw']  # NOTE(review): appears unused; PyImage below reads Palettes['bw'] directly
if __name__ == "__main__":
    # Pixelate test.png at width 400 using the black & white palette.
    img = PyImage('test.png', 400, Palettes['bw'])
    img.load()
    img.pyxelate()
    # Insert '_pixelated' before the file extension.
    filename_parts = img.filename.rsplit('.', 1)
    filename_parts[0] += '_pixelated'
    filename = '.'.join(filename_parts)
    print("Saving as", filename)
    imwrite(filename, img.data)
    # Downscale to one pixel per block and re-save as PNG.
    img1 = Image.open(filename)
    img1.thumbnail((img.num_cols, img.num_rows))
    img1.save(filename, 'PNG')
| true
|
deb4052645e7300fa1d7c26c3aeee44bdee050fa
|
Python
|
BanisharifM/Problems
|
/Quera/3429/solution.py
|
UTF-8
| 98
| 3.09375
| 3
|
[] |
no_license
|
# Classify water's state from its temperature T (Celsius).
T = int(input())
state = "Steam" if T > 100 else ("Ice" if T < 0 else "Water")
print(state)
| true
|
e7d7f8a90a56f05219bc6c1fd2115ed7230618df
|
Python
|
dibovdmitry/laba5
|
/Hard.py
|
UTF-8
| 282
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python 3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
    words = input('Напишите первое предложение 1 ').split()
    sentence = input('Напишите второе предложение 2 ')
    # Keep words from the first sentence that occur (as substrings)
    # in the second, preserving order.
    print(*[word for word in words if word in sentence])
| true
|
7cd99c4fb6cd24103cea76b0a98621e9e2b5f77b
|
Python
|
Silver-L/TFRecord_example
|
/read_record.py
|
UTF-8
| 1,125
| 2.65625
| 3
|
[] |
no_license
|
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def main():
    """Read MNIST images from the TFRecord shards and display one sample."""
    # 60 shard files: ./tfrecord/recordfile_1 .. recordfile_60
    file_name = ['./tfrecord/recordfile_{}'.format(i+1) for i in range(60)]
    dataset = tf.data.TFRecordDataset(file_name)
    # Decode each record into a 28x28 image, parsed across all CPU cores.
    dataset = dataset.map(lambda x: _parse_function(x, image_size=[28, 28]), num_parallel_calls=os.cpu_count())
    dataset = dataset.shuffle(buffer_size = 10000)
    dataset = dataset.repeat()
    dataset = dataset.batch(256)
    iterator = dataset.make_one_shot_iterator()
    X = iterator.get_next()
    with tf.Session() as sess:
        mnist = sess.run(X)
    # Display the second image of the fetched batch.
    plt.figure(figsize=(10, 10))
    plt.imshow(mnist[1], origin="upper", cmap="gray")
    plt.show()
# # load tfrecord function
def _parse_function(record, image_size=[28, 28, 1]):
keys_to_features = {
'img_raw': tf.FixedLenFeature(np.prod(image_size), tf.float32),
}
parsed_features = tf.parse_single_example(record, keys_to_features)
image = parsed_features['img_raw']
image = tf.reshape(image, image_size)
return image
if __name__ == '__main__':
main()
| true
|
9becbfc9c013856e9c139e316f1784a54500ae87
|
Python
|
serkanishchi/zerosleap
|
/zerosleap/gui/composer.py
|
UTF-8
| 7,482
| 2.921875
| 3
|
[] |
no_license
|
"""
Manages video reading, video processing, track
processing and compose raw frames with processed
data. Acts as a provider for video player.
This class is a content producer for video player.
And generates frames with raw and processed data.
"""
from threading import Thread
import time
from queue import Queue
import numpy as np
from zerosleap.comp.processor import VideoProcessor
from zerosleap.gui.metaframe import MetaFrame
from zerosleap.video.raeder import VideoReader
class VideoComposer:
def __init__(self, path, buffer_size=256, chunk_size=32):
""""
Initialize video composer.
Args:
path: Video filename path.
buffer_size:
chunk_size:
"""
self.video_reader = VideoReader(path)
# Asynchronously processing raw video frames with chunks.
# If the process is not complete, not blocks update loop.
# This is a consumer of video_reader object.
# Chunk is necessary for improving processing speed
# especially at GPU.
self.video_processor = VideoProcessor(9999)
# Buffer for raw video frames
self._buffer = []
# Buffer for the processed frames
# Keeps also peaks, tracks and heatmaps (optional)
self._meta_frames = Queue(maxsize=buffer_size)
self._run_flag = True
self._reset_buffer_flag = False
self._frame_index_changed =False
# Request for heatmap
self._heatmaps_flag = False
self._chunk_size = chunk_size
self._frame_index = 0
# intialize the thread with the update function
# Update function is a non blocking control loop
# Except file reading
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
def start(self):
# start a thread to read frames from the file video stream
self.thread.start()
return self
def update(self):
"""Generate extended frames with raw and processed data."""
# Controller loop
while self._run_flag:
# If frame index changed manually and _reset_buffer_flag is set
# empty the _frames queue and _buffer
if self._reset_buffer_flag:
with self._meta_frames.mutex: self._meta_frames.queue.clear()
self._buffer = []
self._reset_buffer_flag = False
# Continue to grab images until the _frames queue is full
if not self._meta_frames.full():
# Prevent unnecessary index changing
frame_index = None
if self._frame_index_changed:
frame_index = self._frame_index
self._frame_index_changed = False
# read the next frame from the file
(grabbed, frame) = self.video_reader.read(frame_index)
# If the reader reaches end of the file and
# the _frames queue is empty wait for another action
if not grabbed and self._buffer == []:
time.sleep(0.1)
continue
else:
self._frame_index += 1
# If frame is grabed from video reader
# Add frames to the buffer for processing with chunk
if grabbed:
self._buffer.append(frame[:, :, :1])
result = None
# If size of the buffer bigger than the chunk size
# or if we reached end of the file and the size of the
# buffer bigger than 0, process the frames
if len(self._buffer) >= self._chunk_size or \
(not grabbed and len(self._buffer) >= 0):
# Try to get processed frames from processing server
result = self.video_processor.recv()
# Keeps raw frames as global for adding to _frames
frames = []
# If the results ready from the processing server
if result is not None:
result_length = len(result["peaks"])
# Take the processed raw frames
frames = self._buffer[:result_length]
# Remove them from the buffer.
self._buffer = self._buffer[result_length:]
# Try to get a chunk from buffer
chunk = None
# If buffer size bigger than the chunk size
# Take the chunk and send it to the video processor
if len(self._buffer) >= self._chunk_size:
self.video_processor.send(np.stack(self._buffer[:self._chunk_size], axis=0),
peaks=True,
heatmaps=self._heatmaps_flag)
# If the buffer size lower than the chunk size and
# we reached end of the file, take the rest
elif len(self._buffer) > 0 and not grabbed:
self.video_processor.send(np.stack(self._buffer[:], axis=0),
peaks=True,
heatmaps=self._heatmaps_flag)
# If there is no frame in the _buffer just continue to
# wait in the loop until somebody changed the _run_flag
# or changed the _frame_index.
else:
time.sleep(0.1)
continue
if result is not None:
# take peaks from the result
peaks = result["peaks"]
heatmaps = None
if "heatmaps" in result:
heatmaps = result["heatmaps"]
# Create Frame object for each result and add to _frames queue
for i in range(len(frames)):
if heatmaps is not None:
frame = MetaFrame(frame=frames[i],
peaks=peaks[i],
heatmap=heatmaps[i])
else:
frame = MetaFrame(frame=frames[i],
peaks=peaks[i])
self._meta_frames.put(frame)
def read(self) -> MetaFrame:
"""Reads next frame in _frames queue"""
return self._meta_frames.get()
# Jump to a new frame index; the worker loop reacts to the raised flags.
def seek(self, frame_index: int):
    """
    Changes active _frame_index
    Args:
        frame_index: target frame index number for seeking
    """
    # Record the new target position.
    self._frame_index = frame_index
    # Raise both flags so the reader thread notices the jump and
    # flushes its internal buffers.
    self._reset_buffer_flag = True
    self._frame_index_changed = True
def toggle_heatmap(self):
    """Flip the heat-map rendering flag on/off."""
    current = self._heatmaps_flag
    self._heatmaps_flag = not current
def stop(self):
    """Stop the reader loop, then shut down the processing backend."""
    # Set _run_flag as False to exit thread loop
    self._run_flag = False
    # Give the worker loop a moment to observe the flag and wind down
    # before the processor connection is closed underneath it.
    time.sleep(1)
    self.video_processor.stop()
| true
|
583ffa23a18e2dd7ac957bcb462bbc0a7e2eba75
|
Python
|
kenesbekov/PPII2021SPRING
|
/tsis6/13.py
|
UTF-8
| 307
| 3
| 3
|
[] |
no_license
|
def pascal_triangle(n):
    """Print the first *n* rows of Pascal's triangle, one list per line."""
    current = [1]
    for _ in range(n):
        print(current)
        # Next row = pairwise sums of the current row padded with a zero
        # on each side, e.g. [1, 1, 0] + [0, 1, 1] -> [1, 2, 1].
        padded_left = current + [0]
        padded_right = [0] + current
        current = [a + b for a, b in zip(padded_left, padded_right)]
# Entry point: read the row count n from stdin and print the first n rows.
pascal_triangle(int(input()))
| true
|
92afd13ad2744f1153e0152ef56461d45979d5a8
|
Python
|
kdm604/TIL
|
/알고리즘문제/말이 되고픈 원숭이.py
|
UTF-8
| 1,384
| 2.984375
| 3
|
[] |
no_license
|
import sys
from collections import deque
# K = 0 ~ 30, W,H = 1 ~ 200: at most a 200 x 200 board
# Moves available to the monkey: four adjacent steps plus
# eight horse (knight) jumps.
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
hdx = [-2, -2, 2, 2, -1, 1, -1, 1]
hdy = [-1, 1, -1, 1, -2, -2, 2, 2]
def bfs(x, y, cnt, move):
    """BFS over (row, col, horse-jumps-left) states.

    Stores the minimum number of moves needed to reach the bottom-right
    cell in the module-level ``ans``; leaves it untouched if unreachable.
    """
    global ans
    queue = deque([(x, y, cnt, move)])
    while queue:
        x, y, cnt, move = queue.popleft()
        if x == H - 1 and y == W - 1:
            ans = move
            break
        # Plain 4-directional steps keep the jump budget unchanged.
        for step in range(4):
            nx, ny = x + dx[step], y + dy[step]
            if 0 <= nx < H and 0 <= ny < W and nxm[nx][ny] != 1 and visited[cnt][nx][ny] == 0:
                visited[cnt][nx][ny] = 1
                queue.append((nx, ny, cnt, move + 1))
        # Knight jumps consume one unit of the jump budget.
        if cnt > 0:
            for jump in range(8):
                nx, ny = x + hdx[jump], y + hdy[jump]
                if 0 <= nx < H and 0 <= ny < W and nxm[nx][ny] != 1 and visited[cnt - 1][nx][ny] == 0:
                    visited[cnt - 1][nx][ny] = 1
                    queue.append((nx, ny, cnt - 1, move + 1))
# K: max knight-style jumps allowed; W x H grid where 1 marks a blocked cell.
K = int(input())
W, H = map(int, input().split())
nxm = [[0 for _ in range(W)]for _ in range(H)]
# visited[j][r][c] == 1 once cell (r, c) was reached with j jumps remaining.
visited = [[[0 for _ in range(W)]for _ in range(H)]for _ in range(K+1)]
# Sentinel meaning "goal not reached".
ans = 987654321
for z in range(H):
    nxm[z] = list(map(int, input().split()))
bfs(0, 0, K, 0)
if ans == 987654321:
    print(-1)
else:
    print(ans)
| true
|
16ba54df176ae8b1b5bda469584f6ea1ad15c4f4
|
Python
|
Junhyun-Nam-Olin/SoftDesSp15
|
/proj3/word_frequency.py
|
UTF-8
| 1,991
| 3.09375
| 3
|
[] |
no_license
|
def process_text(filename):
    """Build a word-frequency histogram for a text file.

    Words are stripped of leading/trailing non-alphanumeric characters
    and lower-cased; tokens that become empty are ignored.

    Args:
        filename: path of the text file to read.

    Returns:
        dict mapping each cleaned word to its number of occurrences.
    """
    d = dict()
    # Use a context manager so the handle is always closed
    # (the original opened the file and never closed it).
    with open(filename, 'r') as fp:
        for line in fp:
            for word in line.split():
                # Trim punctuation from the front, then from the back.
                while not (word == '' or word[0].isalpha() or word[0].isdigit()):
                    word = word[1:]
                while not (word == '' or word[-1].isalpha() or word[-1].isdigit()):
                    word = word[0:-1]
                word = word.lower()
                if word != '':
                    d[word] = d.get(word, 0) + 1
    return d
def inverse_dict(d):
    """Invert *d*: map each value to the list of keys that carried it."""
    inverted = dict()
    for key, value in d.items():
        inverted.setdefault(value, []).append(key)
    return inverted
def subtract_common(freq, freq_word):
    """Return up to the 10 highest frequencies whose representative word
    is not a common English stop word, sorted in descending order.

    Args:
        freq: word frequencies, expected in descending order.
        freq_word: dict mapping a frequency to the list of words having it;
            only the first word per frequency is checked against the stop list.
    """
    # BUG FIX: the original list was missing a comma after 'at', so Python's
    # implicit string concatenation produced 'atthis', silently dropping
    # both 'at' and 'this' from the stop list.
    common_freq = ['the', 'be', 'to', 'of', 'and', 'a', 'in', 'that', 'have', 'i',
        'it', 'for', 'not', 'on', 'with', 'he', 'as', 'you', 'do', 'at',
        'this', 'but', 'his', 'by', 'from', 'they', 'we', 'say', 'her', 'she',
        'or', 'an', 'will', 'my', 'one', 'all', 'would', 'there', 'their', 'what',
        'so', 'up', 'out', 'if', 'about', 'who', 'get', 'which', 'go', 'me',
        'when', 'make', 'can', 'like', 'time', 'no', 'just', 'him', 'know', 'take',
        'people', 'into', 'year', 'your', 'good', 'some', 'could', 'them', 'see', 'other',
        'than', 'now', 'look', 'only', 'come', 'its', 'over', 'think', 'also',
        'back', 'after', 'use', 'two', 'how', 'our', 'work', 'first', 'well', 'way',
        'even', 'new', 'want', 'because', 'any', 'these', 'give', 'day', 'most', 'us',
        'are', 'is', 'have', 'has', 'were', 'was', 'been', 'had']
    top10_freq = []
    for number in freq:
        if freq_word[number][0] not in common_freq:
            top10_freq.append(number)
            if len(top10_freq) == 10:
                break
    top10_freq.sort()
    top10_freq.reverse()
    return top10_freq
# Driver: histogram the book, invert it, and print the ten most frequent
# non-stop-words with their counts.
stat = process_text('alice_in_wonderland.txt')
freq_word = inverse_dict(stat)
# BUG FIX: in Python 3, dict.keys() returns a view with no .sort()/.reverse();
# build a descending-sorted list instead (same result the Python 2 code made).
freq = sorted(freq_word.keys(), reverse=True)
top10_freq = subtract_common(freq, freq_word)
for number in top10_freq:
    print(freq_word[number][0], number)
| true
|
c8100ca8e45a12602154fb9d1811b78f9d7f3457
|
Python
|
lyfree132/Relation_Extraction-1
|
/preprocess_ace/c_relation.py
|
UTF-8
| 5,863
| 2.703125
| 3
|
[] |
no_license
|
#coding:utf-8
import numpy as np
class Relation:
    # Definition a class to turn relation mention to embedding
    def __init__(self):
        # All positions below are [start, end] offsets; out_form is a fixed
        # 50-token x 400-dim matrix (word vec + two position-feature vecs).
        self.mention = "" # content of mention
        self.mention_pos = [0, 0] # bias of mention
        self.arg1 = "" # content of arg1
        self.arg1_pos = [0, 0] # bias of arg1
        self.arg2 = "" # content of arg2
        self.arg2_pos = [0, 0] # bias of arg2
        self.out_form = np.zeros((50, 400), dtype="float32")
        # embedding combined word-embedding and position embedding
        self.type = "" # relation of this mention
        self.PF = {'1': [], '2': []} # position embedding for each word
        self.sub_type = ""
        # NOTE: attribute name 'spilt_form' (sic) is kept — external code
        # may reference it by this spelling.
        self.spilt_form = []
        # embedding combined with word-embedding and position embedding and
        # split into three parts for our CNN architecture
    def show(self):
        """Print a one-line summary of the relation and its mention."""
        print(self.arg1, self.type, self.sub_type, self.arg2)
        print(self.mention)
    def mention_clean(self):
        # lower-casing the sentence and removing punctuation
        # NOTE(review): item assignment implies self.mention is a list of
        # tokens at this point (a str would raise TypeError) — confirm that
        # callers tokenize the mention before calling this.
        for i in range(0, len(self.mention)):
            self.mention[i] = self.mention[i].lower()
            for mark in [',', '.', '?']:
                self.mention[i] = self.mention[i].replace(mark, "")
    def combine(self, vector, v_dim, sen_dim, pf_r1, pf_r2, pf_dim, pf_size):
        '''
        # Combine the word-embedding and position embedding
        # vector ==> word2vec matrix
        # v_dim ==> dimension of word in vector
        # sen_dim ==> max length of sentence
        # pf_r1, pf_r2 ==> random matrix used to represent position embedding
        # pf_size ==> row_vector of pf_1 and pf_2
        '''
        # Each row of out_form is [word vec | PF1 vec | PF2 vec]; PF indices
        # are shifted by pf_size/2 so negative offsets index valid rows.
        for i_t in range(0, len(self.mention)):
            if i_t < sen_dim and self.mention[i_t] in vector:
                self.out_form[i_t, 0:v_dim] = vector[self.mention[i_t]]
                self.out_form[i_t, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i_t] + pf_size/2)]
                self.out_form[i_t, v_dim+pf_dim:len(self.out_form[i_t])] = pf_r2[int(self.PF['2'][i_t] + pf_size/2)]
            elif i_t < sen_dim:
                # Out-of-vocabulary word: zero word vector, keep PF features.
                self.out_form[i_t, 0:v_dim] = 0
                self.out_form[i_t, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i_t] + pf_size/2)]
                self.out_form[i_t, v_dim+pf_dim:len(self.out_form[i_t])] = pf_r2[int(self.PF['2'][i_t] + pf_size/2)]
            elif i_t >= sen_dim:
                # Sentence longer than sen_dim: truncate.
                break
            else:
                # Unreachable given the branches above; kept as in original.
                self.out_form[i_t, 0:len(self.out_form[i_t])] = 0
    def split(self, vector, v_dim, pf_r1, pf_r2, pf_dim, pf_size):
        '''
        # Combine the word-embedding and position embedding
        # vector ==> word2vec matrix
        # v_dim ==> dimension of word in vector
        # sen_dim ==> max length of sentence
        # pf_r1, pf_r2 ==> random matrix used to represent position embedding
        # pf_size ==> row_vector of pf_1 and pf_2
        '''
        # Split the sentence into three windows around the two argument
        # start positions: left context (15 rows, filled right-to-left),
        # between-arguments (15 rows), right context (20 rows).
        pos1 = min(self.arg1_pos[0], self.arg2_pos[0])
        pos2 = max(self.arg1_pos[0], self.arg2_pos[0])
        dim = 15
        temp_mat = np.zeros((dim, v_dim+pf_dim*2), dtype="float32")
        # Left window: words before the first argument, anchored at the
        # bottom of the matrix and walking backwards.
        for i in range(0, pos1):
            if dim-1-i < 0:
                break
            if pos1-i >= 0 and self.mention[pos1-i] in vector:
                temp_mat[dim-1-i, 0:v_dim] = vector[self.mention[pos1-i]]
                temp_mat[dim-1-i, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][pos1-i] + pf_size/2)]
                temp_mat[dim-1-i, v_dim+pf_dim:len(temp_mat[dim-1-i])] = pf_r2[int(self.PF['2'][pos1-i] + pf_size/2)]
            elif pos1-i >= 0:
                temp_mat[dim-1-i, 0:v_dim] = 0
                temp_mat[dim-1-i, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][pos1-i] + pf_size/2)]
                temp_mat[dim-1-i, v_dim+pf_dim:len(temp_mat[dim-1-i])] = pf_r2[int(self.PF['2'][pos1-i] + pf_size/2)]
            else:
                temp_mat[dim-1-i, 0:len(temp_mat[dim-1-i])] = 0
        self.spilt_form.append(temp_mat)
        dim = 15
        temp_mat = np.zeros((dim, v_dim+pf_dim*2), dtype="float32")
        # Middle window: from the first argument through the second.
        for i in range(pos1, pos2 + 1):
            if i-pos1 < dim and self.mention[i] in vector:
                temp_mat[i-pos1, 0:v_dim] = vector[self.mention[i]]
                temp_mat[i-pos1, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i] + pf_size/2)]
                temp_mat[i-pos1, v_dim+pf_dim:len(temp_mat[i-pos1])] = pf_r2[int(self.PF['2'][i] + pf_size/2)]
            elif i-pos1 < dim:
                temp_mat[i-pos1, 0:v_dim] = 0
                temp_mat[i-pos1, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i] + pf_size/2)]
                temp_mat[i-pos1, v_dim+pf_dim:len(temp_mat[i-pos1])] = pf_r2[int(self.PF['2'][i] + pf_size/2)]
            elif i-pos1 >= dim:
                break
            else:
                temp_mat[i-pos1, 0:len(temp_mat[i-pos1])] = 0
        self.spilt_form.append(temp_mat)
        dim = 20
        temp_mat = np.zeros((dim, v_dim+pf_dim*2), dtype="float32")
        # Right window: words after the second argument.
        for i in range(pos2 + 1, len(self.mention)):
            if i-pos2-1 < dim and self.mention[i] in vector:
                temp_mat[i-pos2-1, 0:v_dim] = vector[self.mention[i]]
                temp_mat[i-pos2-1, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i] + pf_size/2)]
                temp_mat[i-pos2-1, v_dim+pf_dim:len(temp_mat[i-pos2-1])] = pf_r2[int(self.PF['2'][i] + pf_size/2)]
            elif i-pos2-1 < dim:
                temp_mat[i-pos2-1, 0:v_dim] = 0
                temp_mat[i-pos2-1, v_dim:v_dim+pf_dim] = pf_r1[int(self.PF['1'][i] + pf_size/2)]
                temp_mat[i-pos2-1, v_dim+pf_dim:len(temp_mat[i-pos2-1])] = pf_r2[int(self.PF['2'][i] + pf_size/2)]
            elif i-pos2-1 >= dim:
                break
            else:
                temp_mat[i-pos2-1, 0:len(temp_mat[i-pos2-1])] = 0
        self.spilt_form.append(temp_mat)
| true
|
80295d537df8a4358cd98c96e6d441f11d24f61f
|
Python
|
nandakoryaaa/pypy
|
/app/models/usertable.py
|
UTF-8
| 823
| 2.921875
| 3
|
[
"CC0-1.0"
] |
permissive
|
from app.models.userdata import UserData
class UserTable:
    """Fixed-size table of UserData records with optional file persistence."""

    COUNT = 10  # number of record slots in the table

    def __init__(self, file=None):
        # Pre-populate every slot with a fresh, empty UserData record.
        self.table = [UserData() for _ in range(self.COUNT)]
        self.file = file
        if file is not None:
            self.read()

    def read(self):
        """Load up to COUNT records from self.file (one record per line)."""
        # 'with' guarantees the handle is closed even if from_string raises
        # (the original closed it only on the happy path).
        with open(self.file, 'r') as fp:
            lines = fp.readlines()
        # zip stops at the shorter sequence: extra lines are ignored and
        # missing lines leave the default records untouched.
        for user_data, line in zip(self.table, lines):
            user_data.from_string(line)

    def write(self):
        """Serialize every record to self.file, one per line."""
        with open(self.file, 'w') as fp:
            for user_data in self.table:
                fp.write(user_data.to_string())
                fp.write("\n")

    def sort(self):
        """Order records in place by score, highest first."""
        self.table.sort(key=lambda x: x.score, reverse=True)
| true
|
5cf4eaf8ce8d5a1bb8f0ce325a88bd11a124e743
|
Python
|
nikhilsopori/Speech-To-Text
|
/Python/Speech Recognition.py
|
UTF-8
| 662
| 2.9375
| 3
|
[] |
no_license
|
#Installing of the Python Package
#**Note that the SpeechRecoginition will only work for Python (2,2.7,3,3.3,3.4,3.5,3.6)
# NOTE: the '!pip' lines below are IPython/Jupyter shell magics — this file
# only runs as a notebook cell, not as a plain Python script.
!pip install SpeechRecognition
!pip install librosa
!pip install soundfile
#Import Speech Recognition Package
import speech_recognition as sr
import librosa
import soundfile as sf
import wave
r = sr.Recognizer()
#Librosa is Used for Converting MP3 file into wav or any audio type
# Resample the MP3 to 16 kHz mono and write it out as a temporary WAV file,
# since the recognizer consumes WAV input.
x,_= librosa.load('BANNERS - Start A Riot.mp3',sr=16000)
sf.write('temp.wav',x,16000)
audio = sr.AudioFile("temp.wav")
print("......... Running ")
# Transcribe 400 seconds of audio starting 10 seconds in, via the
# Google Web Speech API (requires network access).
with audio as text:
    text1 = r.record(text,offset=10,duration=400)
    print(r.recognize_google(text1))
| true
|
d32a770ed3303fc423256a17d5e1041bb6f265da
|
Python
|
ytsmm/mybot
|
/tokenizer.py
|
UTF-8
| 194
| 3.03125
| 3
|
[] |
no_license
|
import nltk
# Split the text into sentences and words
def tokenizer(raw):
    """Lower-case *raw* and split it into word tokens using NLTK."""
    lowered = raw.lower()
    return nltk.word_tokenize(lowered)
| true
|
83e980a39c54fb2fb2d88457c7c21dff67ca43ff
|
Python
|
alanboaventura/trabalho2-ia
|
/caixeiroviajante/funcaoAptidao.py
|
UTF-8
| 1,424
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy
def apt_func(populacao, distanciacidades, n_rotas):
    """Compute the fitness (total round-trip length) of each chromosome.

    Args:
        populacao: (n_rotas, n_cities) matrix; each row is a city ordering.
        distanciacidades: pairwise city-distance matrix.
        n_rotas: number of routes/chromosomes in the population.

    Returns:
        (n_rotas, 2) float array sorted ascending by fitness; column 0 is
        the chromosome's row index in ``populacao``, column 1 its length.
    """
    # Append the first city to the end of every route: the travelling
    # salesman must return to the city he started from.
    tour = numpy.c_[populacao, populacao[:, 0]]
    # Fitness matrix: column 0 = chromosome index, column 1 = tour length.
    # BUG FIX: the original hard-coded a (20, 2) matrix, crashing or
    # mis-sizing for any population other than 20; size by n_rotas.
    # (numpy.float was also removed in NumPy >= 1.24 — use builtin float.)
    v_aptidao = numpy.zeros((n_rotas, 2), dtype=float)
    # Number of legs in one tour equals the number of cities
    # (the original assumed a square population: n_rotas == n_cities).
    n_cidades = populacao.shape[1]
    for i in range(n_rotas):
        v_aptidao[i, 0] = i
        # Sum the distance of every leg of route i, return leg included.
        for j in range(n_cidades):
            v_aptidao[i, 1] += distanciacidades[int(tour[i, j]), int(tour[i, j + 1])]
    # Sort rows ascending by fitness; 'stable' preserves the original order
    # among ties, matching Python's sorted() used by the original code.
    v_aptidao = v_aptidao[v_aptidao[:, 1].argsort(kind='stable')]
    return v_aptidao
| true
|
9cdde04b021695620bbbf8627c50b82a8fe412da
|
Python
|
ro13hit/Competitive
|
/alienpiano.py
|
UTF-8
| 551
| 3.0625
| 3
|
[] |
no_license
|
def main():
    """Count finger lifts for one test case (Code Jam 'Alien Piano').

    Reads the note count and note sequence from stdin; any run of four
    consecutive same-direction steps forces one lift.
    """
    n= int(input())
    a = list(map(int,input().split()))
    # b: current ascending-step streak, c: descending streak, cnt: lifts.
    b,c,cnt = 0,0,0
    for i in range(n):
        # NOTE(review): at i == 0 this compares a[0] with a[-1] (the last
        # note) due to Python's negative indexing — likely intended to start
        # at i = 1; confirm against the problem's expected output.
        if a[i]>a[i-1]:
            b+=1
            c=0
            if b == 4:
                b =0
                cnt+=1
        elif a[i]<a[i-1]:
            c+=1
            b=0
            if c ==4:
                c = 0
                cnt+=1
    return cnt
# Standard Code Jam driver: T test cases, "Case #i: answer" per case.
try:
    for test in range(1,int(input())+1):
        print("Case #{}: {}".format(test,main()))
except:
    # Bare except is used to swallow EOF on ragged input — beware that it
    # also hides genuine bugs inside main().
    pass
| true
|
8818af93a371466c16b73a575ad14dd96e7dee9a
|
Python
|
xiaoheizai/python_for_leetcode
|
/腾讯top50/89 格雷编码.py
|
UTF-8
| 1,324
| 3.640625
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Apr 13、4
@author: xiaoheizai
"""
'''
格雷编码是一个二进制数字系统,在该系统中,两个连续的数值仅有一个位数的差异。
给定一个代表编码总位数的非负整数 n,打印其格雷编码序列。格雷编码序列必须以 0 开头。
示例 1:
输入: 2
输出: [0,1,3,2]
解释:
00 - 0
01 - 1
11 - 3
10 - 2
对于给定的 n,其格雷编码序列并不唯一。
例如,[0,2,3,1] 也是一个有效的格雷编码序列。
00 - 0
10 - 2
11 - 3
01 - 1
示例 2:
输入: 0
输出: [0]
解释: 我们定义格雷编码序列必须以 0 开头。
给定编码总位数为 n 的格雷编码序列,其长度为 2n。当 n = 0 时,长度为 20 = 1。
因此,当 n = 0 时,其格雷编码序列为 [0]。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/gray-code
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution(object):
    def grayCode(self, n):
        """Return the length-2**n binary-reflected Gray code sequence.

        :type n: int
        :rtype: List[int]
        """
        result = [0]
        # Reflect-and-prefix construction: at step k, mirror the current
        # sequence and set bit k on every code in the mirrored half.
        for bit in range(n):
            mirror = 1 << bit
            result += [mirror | code for code in reversed(result)]
        return result
| true
|
855271ed376161d286ef28c15247ff8609648f56
|
Python
|
Qsingle/MedicalImage
|
/datasets.py
|
UTF-8
| 8,327
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#-*- coding:utf8 -*-
#!/usr/bin/env python
'''
@Author:qiuzhongxi
@Filename:datasets.py
@Date:2020/3/7
@Software:PyCharm
Some Dataset Class for this project
'''
from torch.utils.data import Dataset
import torch
import cv2
from PIL import Image
import numpy as np
import os
from matplotlib import pyplot as plt
from albumentations import Compose
from albumentations import HorizontalFlip,VerticalFlip,RandomGamma,RandomBrightnessContrast,PadIfNeeded,ShiftScaleRotate
from albumentations import Normalize
class FolderDataset(Dataset):
    """Classification dataset built from a directory tree: each sub-folder
    of ``data_dir`` is one class, its images are that class's samples."""
    def __init__(self, data_dir, transform=None):
        super(FolderDataset, self).__init__()
        assert os.path.exists(data_dir), "The directory {} not exists".format(data_dir)
        self.paths, self.ids = self.get_paths(data_dir)
        self.transform = transform
    def process(self, img, img_size=224):
        """Resize a PIL image and convert it to a numpy array.

        NOTE(review): np.transpose(img, [2, 1, 0]) also swaps height and
        width, not just moving channels first — [2, 0, 1] may have been
        intended; confirm against the consuming model.
        """
        img = img.resize((img_size, img_size))
        img = np.asarray(img)
        if img.ndim > 2:
            img = np.transpose(img, [2, 1, 0])
        return img
    def __getitem__(self, index):
        # Returns (image, class_id); self.transform takes precedence over
        # the built-in process() path.
        path = self.paths[index]
        label = self.ids[index]
        img = Image.open(path)
        img = img.convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
            return img, label
        img = self.process(img)
        return img, label
    def __len__(self):
        return len(self.paths)
    def show_batch(self, rows=5, cols=None):
        """Preview a random grid of samples with their class names.

        NOTE(review): ``total`` is hard-coded to 5*5 regardless of the
        rows/cols arguments used for the subplot layout; confirm intent.
        """
        if cols is None:
            cols = rows
        total = 5 * 5
        font_dict = {'fontsize': 7,
                     'fontweight': 2,
                     'verticalalignment': 'baseline',
                     'horizontalalignment': "center"}
        plt.figure(dpi=224)
        for i in range(total):
            random = np.random.randint(0, self.__len__())
            plt.subplot(rows, cols, i + 1)
            plt.title(self.classes[self.ids[random]], fontdict=font_dict, pad=1.2)
            img = Image.open(self.paths[random])
            img = img.resize((96, 96))
            img = np.asarray(img)
            if img.ndim <= 2:
                plt.imshow(img, cmap="gray")
            else:
                plt.imshow(img)
            plt.xticks([])
            plt.yticks([])
        plt.show()
    def statistic(self):
        """Save a bar chart of per-class sample counts to out.png."""
        counters = []
        for unique in np.unique(self.ids):
            counters.append(np.sum(unique == self.ids))
        plt.figure(dpi=224)
        plt.bar(range(len(counters)), counters)
        #plt.show()
        plt.savefig("out.png")
    def get_paths(self, data_dir):
        """Walk data_dir collecting image paths and integer class ids;
        also populates self.class_dict (name -> id) and self.classes."""
        paths = []
        ids = []
        class_dict = dict()
        classes = []
        cl = 0
        for home, dirs, _ in os.walk(data_dir):
            for dir in dirs:
                if dir not in class_dict:
                    classes.append(dir)
                    class_dict[dir] = cl
                    cl += 1
                img_dir = os.listdir(os.path.join(home, dir))
                for path in img_dir:
                    if path.endswith("jpg") or path.endswith("png") or path.endswith("jpeg"):
                        paths.append(os.path.join(home, dir, path))
                        id = class_dict[dir]
                        ids.append(id)
        self.class_dict = class_dict
        self.classes = classes
        return paths, ids
class SegPathsDataset(Dataset):
    """Segmentation dataset fed by parallel lists of image and mask paths.

    Supports .npy arrays or image files; optionally applies albumentations
    augmentation, then resizes both image and mask to ``img_size``.
    """
    def __init__(self, image_paths, label_paths, augmentation=True,img_size=256):
        super(SegPathsDataset,self).__init__()
        assert len(image_paths) == len(label_paths), "The length is not equal, len(image_paths)/len(label_paths)={}/" \
                                                     "{}".format(len(image_paths), len(label_paths))
        self.image_paths = image_paths
        self.label_paths = label_paths
        self.augmentation = augmentation
        self.length = len(image_paths)
        self.img_size = img_size
    def __len__(self):
        return self.length
    def __getitem__(self, index):
        # Returns (image_tensor[C,H,W], mask_tensor[H,W]); masks stored as
        # 0/255 are floor-divided by 255.0 to become 0.0/1.0.
        img_path = self.image_paths[index]
        mask_path = self.label_paths[index]
        if img_path.endswith(".npy"):
            img = np.load(img_path)
        else:
            img = cv2.imread(img_path)
        if mask_path.endswith(".npy"):
            mask = np.load(mask_path)
        else:
            # 0 flag: read mask as single-channel grayscale.
            mask = cv2.imread(mask_path, 0)
        if self.augmentation:
            # Augmentations are applied jointly so image and mask stay aligned.
            task = [
                HorizontalFlip(p=0.5),
                VerticalFlip(p=0.5),
                RandomGamma(),
                RandomBrightnessContrast(p=0.5),
                PadIfNeeded(self.img_size,self.img_size),
                ShiftScaleRotate(scale_limit=0.5, p=0.5),
                #Normalize(mean=[0.210, 0.210, 0.210], std=[0.196, 0.196, 0.196], always_apply=True)
            ]
            aug = Compose(task)
            aug_data = aug(image=img, mask=mask)
            img, mask = aug_data["image"], aug_data["mask"]
        img = self._normalize(img)
        img = cv2.resize(img,(self.img_size,self.img_size))
        mask = cv2.resize(mask,(self.img_size,self.img_size))
        mask = mask // 255.0
        if img.ndim < 3:
            # Grayscale image: add an explicit channel axis.
            img = np.expand_dims(img, 0)
        else:
            # HWC -> CHW for PyTorch.
            img = np.transpose(img, axes=[2, 0, 1])
        return torch.from_numpy(img), torch.from_numpy(mask)
    def _normalize(self, img):
        """Min-max scale *img* to roughly [0, 1]."""
        # NOTE(review): this clipped array is immediately overwritten below,
        # so the clip currently has no effect — presumably the min/max
        # scaling was meant to use the clipped values; confirm intent.
        normal_img = np.clip(img, 0, 255)
        maxval = np.max(img)
        minval = np.min(img)
        # 1e-3 floor avoids division by zero on constant images.
        normal_img = (img - minval) / max( maxval- minval, 1e-3)
        return normal_img
class PathsDataset(Dataset):
    """Classification dataset built from bare filenames resolved against
    ``data_dir`` with a common ``suffix``; labels are optional."""
    def __init__(self, paths:list, data_dir, classes_dict=None, ids=None,augumentation=False,img_size=224,transform=None,suffix=".png"):
        super(PathsDataset, self).__init__()
        # NOTE: parameter name 'augumentation' (sic) is part of the public
        # signature and kept as-is.
        self.filename = paths
        self.data_dir = data_dir
        self.ids = ids
        self.transform = transform
        self.length = len(self.filename)
        self.classes_dict = classes_dict
        self.augumentation = augumentation
        self.img_size = img_size
        # Normalize the suffix so both "png" and ".png" are accepted.
        if suffix.startswith("."):
            self.suffix = suffix
        else:
            self.suffix = ".{}".format(suffix)
    def show_batch(self, rows=5, cols=None):
        """Preview a rows x cols grid of random samples with class names."""
        if cols is None:
            cols = rows
        font_dict = {'fontsize': 7,
                     'fontweight': 2,
                     'verticalalignment': 'baseline',
                     'horizontalalignment': "center"}
        plt.figure(dpi=224)
        for i in range(cols*rows):
            random = np.random.randint(0, self.length-1)
            path = os.path.join(self.data_dir, self.filename[random]+self.suffix)
            img = Image.open(path)
            img = np.asarray(img)
            plt.subplot(cols, rows, i+1)
            text = self.classes_dict[self.ids[random]]
            # fontdict passed positionally to plt.title.
            plt.title(text, font_dict, pad=1.2)
            if img.ndim <= 2:
                plt.imshow(img,cmap="gray")
            else:
                plt.imshow(img)
            plt.xticks([])
            plt.yticks([])
        plt.show()
    def __getitem__(self, index):
        # Returns (image, label) when ids were given, else just the image.
        path = os.path.join(self.data_dir, self.filename[index]+self.suffix)
        label = None if self.ids is None else self.ids[index]
        img = Image.open(path)
        img = img.convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        else:
            if self.augumentation:
                img = np.asarray(img)
                task = [
                    HorizontalFlip(p=0.5),
                    VerticalFlip(p=0.5),
                    RandomGamma(),
                    RandomBrightnessContrast(p=0.5),
                    PadIfNeeded(self.img_size,self.img_size),
                    ShiftScaleRotate(scale_limit=0.5, p=0.5)
                ]
                aug = Compose(task)
                aug_data = aug(image=img)
                img = aug_data["image"]
                #img = cv2.resize(img,(self.img_size,self.img_size))
                img = Image.fromarray(img)
                # NOTE(review): we are in the branch where self.transform is
                # None, so this call raises TypeError whenever augumentation
                # is enabled without a transform — confirm intended usage.
                img = self.transform(img)
        if label is not None:
            return img,label
        else:
            return img
    def __len__(self):
        return self.length
if __name__ == "__main__":
dataset = FolderDataset("../../data/256_ObjectCategories/")
dataset.show_batch()
| true
|
525b209c4d2485771d3a2e028db74761d73068f8
|
Python
|
RyanArnasonML/stock-analysis
|
/stock_analysis/candles.py
|
UTF-8
| 3,004
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""This cell defineds the plot_candles function"""
def plot_candles(pricing, title=None, volume_bars=False, color_function=None, technicals=None):
    """ Plots a candlestick chart using quantopian pricing data.
    Author: Daniel Treiman
    Args:
      pricing: A pandas dataframe with columns ['open_price', 'close_price', 'high', 'low', 'volume']
      title: An optional title for the chart
      volume_bars: If True, plots volume bars
      color_function: A function which, given a row index and price series, returns a candle color.
      technicals: A list of additional data series to add to the chart.  Must be the same length as pricing.
    Returns:
      The matplotlib Figure containing the chart.
    """
    def default_color(index, open_price, close_price, low, high):
        # Red candle for a down day, green for an up day.
        # (low/high are accepted for interface compatibility but unused.)
        return 'r' if open_price[index] > close_price[index] else 'g'
    color_function = color_function or default_color
    technicals = technicals or []
    open_price = pricing['open_price']
    close_price = pricing['close_price']
    low = pricing['low']
    high = pricing['high']
    # Candle body spans min/max of open and close per bar.
    oc_min = pd.concat([open_price, close_price], axis=1).min(axis=1)
    oc_max = pd.concat([open_price, close_price], axis=1).max(axis=1)
    if volume_bars:
        # Price panel on top (3x taller), volume panel below, shared x axis.
        fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3,1]},figsize=(7,7))
    else:
        fig, ax1 = plt.subplots(1, 1)
    if title:
        ax1.set_title(title)
    fig.tight_layout()
    x = np.arange(len(pricing))
    candle_colors = [color_function(i, open_price, close_price, low, high) for i in x]
    # Bodies as bars, wicks as vertical lines in the same color.
    candles = ax1.bar(x, oc_max-oc_min, bottom=oc_min, color=candle_colors, linewidth=0)
    lines = ax1.vlines(x , low, high, color=candle_colors, linewidth=1)
    ax1.xaxis.grid(True)
    ax1.yaxis.grid(True)
    ax1.xaxis.set_tick_params(which='major', length=3.0, direction='in', top='off')
    ax1.set_yticklabels([])
    # Assume minute frequency if first two bars are in the same day.
    frequency = 'minute' if (pricing.index[1] - pricing.index[0]).days == 0 else 'day'
    time_format = '%d-%m-%Y'
    if frequency == 'minute':
        time_format = '%H:%M'
    # Set X axis tick labels.
    # (time_format is currently unused because the xticks call is disabled.)
    #plt.xticks(x, [date.strftime(time_format) for date in pricing.index], rotation='vertical')
    for indicator in technicals:
        ax1.plot(x, indicator)
    if volume_bars:
        volume = pricing['volume']
        # Scale volume to thousands/millions for readable axis labels.
        volume_scale = None
        scaled_volume = volume
        if volume.max() > 1000000:
            volume_scale = 'M'
            scaled_volume = volume / 1000000
        elif volume.max() > 1000:
            volume_scale = 'K'
            scaled_volume = volume / 1000
        ax2.bar(x, scaled_volume, color=candle_colors)
        volume_title = 'Volume'
        if volume_scale:
            volume_title = 'Volume (%s)' % volume_scale
        #ax2.set_title(volume_title)
        ax2.xaxis.grid(True)
        ax2.set_yticklabels([])
        ax2.set_xticklabels([])
    return fig
| true
|
acd7ce4fe5cc43a928bce0fd3154ded1c6b4990a
|
Python
|
HukLab/3d-integ-analysis
|
/bin/mle.py
|
UTF-8
| 8,422
| 2.828125
| 3
|
[] |
no_license
|
import logging
import itertools
import numpy as np
from scipy.optimize import minimize
logging.basicConfig(level=logging.DEBUG)
APPROX_ZERO = 0.0001
# Merge two dicts into a new one (entries of b win on key clashes).
# BUG FIX: dict(a.items() + b.items()) was Python-2-only — dict views
# cannot be added in Python 3; materialize to lists first (works on both).
add_dicts = lambda a, b: dict(list(a.items()) + list(b.items()))
# Pair parameter names with their values, in order.
make_dict = lambda key_order, thetas: dict(zip(key_order, thetas))

def theta_to_dict(thetas, theta_key_order, theta):
    """Combine preset parameter values with fitted ones.

    Args:
        thetas: per-key preset values aligned with theta_key_order;
            None marks a parameter that was fitted.
        theta_key_order: ordered parameter names, e.g. ['A', 'B', 'T'].
        theta: fitted values for the None slots, in key order.

    Returns:
        dict mapping every parameter name to its value.
    """
    thetas_lookup = make_dict(theta_key_order, thetas)
    # BUG FIX: .iteritems() does not exist in Python 3; .items() works
    # identically here on both versions.
    thetas_preset = dict((key, val) for key, val in thetas_lookup.items() if val is not None)
    keys_left = [key for key in theta_key_order if thetas_lookup[key] is None]
    return add_dicts(thetas_preset, make_dict(keys_left, theta))
def generic_fit(fcn, theta_key_order, thetas_to_fit, theta_defaults, quick, ts, bins, coh, guesses=None):
    """
    Run fitter *fcn* for the parameters marked to fit and pick the best
    result, falling back to defaults when no fit converges.

    theta_key_order is e.g. ['A', 'B', 'T']
    thetas_to_fit is e.g. {'A': True, 'B': False, 'T': True}
    theta_defaults is e.g. {'A': 1.0, 'B': 0.5, 'T': 100.0}

    Returns (full_theta_dict, fitted_values_list, fit_found_flag).
    """
    fit_found = True
    # None marks a slot to fit; fixed slots carry their default value.
    thetas = [None if thetas_to_fit[t] else theta_defaults[t] for t in theta_key_order]
    ths = fcn(ts, thetas, quick=quick, guesses=guesses)
    if ths:
        th = pick_best_theta(ths)
    else:
        # No optimizer run converged — fall back to the defaults.
        msg = 'No fits found. Using {0}'.format(theta_defaults)
        logging.warning(msg)
        fit_found = False
        th = [theta_defaults[t] for t in theta_key_order if thetas_to_fit[t]]
    msg = '{0}% {2}: {1}'.format(int(coh*100), th, '[current fit]')
    # logging.info(msg)
    # NOTE(review): 'bins' is unused in this function — presumably kept for
    # a uniform call signature; confirm with callers.
    return theta_to_dict(thetas, theta_key_order, th), th, fit_found
def make_guesses(thetas, theta_key_order, guesses_lookup):
    """Cartesian product of initial guesses for every free (None) parameter,
    taken in key order."""
    free_guesses = [
        guesses_lookup[key]
        for key, preset in zip(theta_key_order, thetas)
        if preset is None
    ]
    return list(itertools.product(*free_guesses))  # cartesian product
def make_bounds(thetas, theta_key_order, bounds_lookup):
    """Bounds for each free (None) parameter, in key order."""
    bounds = []
    for key, preset in zip(theta_key_order, thetas):
        if preset is None:
            bounds.append(bounds_lookup[key])
    return bounds
def log_likelihood(arr, fcn, thetas):
    """
    arr is array of [[x0, y0], [x1, y1], ...] where each yi in {0, 1}.
    fcn is applied to each xi together with the parameters in thetas,
    which may be a dict (keyword args) or a list/tuple (positional args).
    Returns sum_i log( fcn(xi)^yi * (1 - fcn(xi))^(1 - yi) ).
    """
    if type(thetas) is dict:
        evaluate = lambda x: fcn(x, **thetas)
    elif type(thetas) is list or type(thetas) is tuple:
        evaluate = lambda x: fcn(x, *thetas)
    total = 0
    for row in arr:
        p = evaluate(row[0])
        # Bernoulli log-likelihood of observing row[1] with probability p.
        total += np.log(p if row[1] else 1 - p)
    return total
def pick_best_theta(thetas):
    """Return the parameter vector ('x') of the minimizer result in
    *thetas* with the smallest objective value ('fun')."""
    close_enough = lambda a, b: abs(a - b) < APPROX_ZERO
    best = min(thetas, key=lambda candidate: candidate['fun'])
    if len(thetas) > 1:
        # Count candidates that reached (essentially) the same minimum;
        # retained for the diagnostic message below.
        tied = [cand for cand in thetas if close_enough(cand['fun'], best['fun'])]
        msg = '{0} out of {1} guesses found minima of {2}'.format(len(tied), len(thetas), best['fun'])
        # logging.info(msg)
    return best['x']
def keep_solution(theta, bnds, ymin):
    """
    theta is return value of scipy.optimize.minimize,
        where theta['x'] is list [t1, ...] of solution values
    bnds is list [(lb1, rb1), ...] of bounds for each ti in theta['x']
    ymin is previously-found minimum solution
    returns True iff theta is a success, lower than ymin, and has solutions
    not near its bounds
    """
    if not theta['success']:
        return False
    if theta['fun'] >= ymin:
        return False
    close_enough = lambda x, b: abs(x-b) < APPROX_ZERO*10
    # NOTE(review): a falsy bound (None or 0) is treated as "unbounded" on
    # that side, so a solution sitting exactly at a 0 bound is still kept —
    # pre-existing quirk, preserved here.
    at_left_bound = lambda x, lb: close_enough(x, lb) if lb else False
    at_right_bound = lambda x, rb: close_enough(x, rb) if rb else False
    # BUG FIX: 'lambda x, (lb, rb): ...' used tuple-parameter unpacking,
    # removed in Python 3 (PEP 3113, SyntaxError); unpack via indexing.
    at_bounds = lambda x, bnd: at_left_bound(x, bnd[0]) or at_right_bound(x, bnd[1])
    return not any([at_bounds(th, bnd) for th, bnd in zip(theta['x'], bnds)])
def mle(data, log_likelihood_fcn, guesses, bounds=None, constraints=None, quick=False, method='TNC', opts=None):
    """
    Minimize log_likelihood_fcn starting from each initial guess, keeping
    the runs that beat the current best without hugging their bounds.

    data is list [(dur, resp)]
        dur is float
        resp is bool
    quick is bool
        chooses the first solution not touching the bounds
    method is str
        bounds only for: L-BFGS-B, TNC, SLSQP
        constraints only for: COBYLA, SLSQP
        NOTE: SLSQP tends to give a lot of run-time errors...

    Returns a list of scipy OptimizeResult objects (possibly empty); falls
    back to bound-touching "poor" solutions when nothing better was found.
    """
    if len(data) == 0 or len(guesses) == 0:
        return []
    if bounds is None:
        bounds = []
    if constraints is None:
        constraints = []
    if opts is None:
        opts = {}
    thetas = []
    poor_thetas = []
    ymin = float('inf')
    ymin_poor = float('inf')
    for guess in guesses:
        theta = minimize(log_likelihood_fcn, guess, method=method, bounds=bounds, constraints=constraints, options=opts)
        if keep_solution(theta, bounds, ymin):
            # A new, strictly-better interior solution.
            ymin = theta['fun']
            thetas.append(theta)
            if quick:
                # logging.info(theta)
                return thetas
            msg = '{0}, {1}'.format(theta['x'], theta['fun'])
            # logging.info(msg)
        elif theta['success'] and theta['fun'] < ymin_poor:
            # Converged but rejected by keep_solution (e.g. at a bound);
            # track as a fallback in case no good solution appears.
            ymin_poor = theta['fun']
            poor_thetas.append(theta)
        elif not theta['success']:
            pass
            # logging.warning('Not successful: ' + str(theta))
    if len(thetas) == 0:
        # logging.warning('Using theta against bounds.')
        return poor_thetas
    return thetas
def log_likelihood_factory(data, fcn, thetas, theta_key_order):
    """
    Build the negative-log-likelihood objective for scipy.optimize,
    closing over the preset parameter values in *thetas* (None = free).

    I am so sorry.
    This whole function makes me sad.
    The bottom portion
    is what I wanted,
    but it is inefficient.
    Thus the shit below.
    """
    # presets[i] is True where a value is fixed; the comparisons below rely
    # on bool == int (True == 1, False == 0) to match the [1,0,...] patterns.
    presets = [x is not None for x in thetas]
    if len(presets) == 3:
        if presets == [1,0,0]:
            return lambda t: -log_likelihood(data, fcn, (thetas[0], t[0], t[1]))
        elif presets == [0,1,0]:
            return lambda t: -log_likelihood(data, fcn, (t[0], thetas[1], t[1]))
        elif presets == [0,0,1]:
            return lambda t: -log_likelihood(data, fcn, (t[0], t[1], thetas[2]))
        elif presets == [1,1,0]:
            return lambda t: -log_likelihood(data, fcn, (thetas[0], thetas[1], t[0]))
        elif presets == [1,0,1]:
            return lambda t: -log_likelihood(data, fcn, (thetas[0], t[0], thetas[2]))
        elif presets == [0,1,1]:
            return lambda t: -log_likelihood(data, fcn, (t[0], thetas[1], thetas[2]))
        elif presets == [0,0,0]:
            return lambda t: -log_likelihood(data, fcn, (t[0], t[1], t[2]))
        else:
            # [1,1,1]: nothing left to fit.
            raise Exception("MLE ERROR: Internal.")
    elif len(presets) == 2:
        if presets == [1,0]:
            return lambda t: -log_likelihood(data, fcn, (thetas[0], t[0]))
        elif presets == [0,1]:
            return lambda t: -log_likelihood(data, fcn, (t[0], thetas[1]))
        elif presets == [0,0]:
            return lambda t: -log_likelihood(data, fcn, (t[0], t[1]))
        else:
            raise Exception("MLE ERROR: Internal.")
        # [1,0] or [1,1] or [0,0]
    elif len(presets) == 1:
        if presets == [1]:
            return lambda t: -log_likelihood(data, fcn, (thetas[0],))
        elif presets == [0]:
            return lambda t: -log_likelihood(data, fcn, (t[0],))
        else:
            raise Exception("MLE ERROR: Internal.")
    else:
        raise Exception("MLE ERROR: Too many parameters in fitting method.")
    """
    This next part is much shorter than the above, but...it's slower.  The lambda function has to make all those dicts on each evaluation!
    """
    # NOTE: everything below is unreachable — every branch above returns or
    # raises. Kept (as in the original) to document the intended, slower
    # dict-based formulation.
    thetas_lookup = make_dict(theta_key_order, thetas)
    thetas_preset = dict((key, val) for key, val in thetas_lookup.iteritems() if val is not None)
    keys_left = [key for key in theta_key_order if thetas_lookup[key] is None]
    return lambda theta: -log_likelihood(data, fcn, add_dicts(thetas_preset, make_dict(keys_left, theta)))
def fit_mle(data, inner_likelihood_fcn, thetas, theta_key_order, guesses_lookup, bounds_lookup, constraints, quick=False, guesses=None, method='SLSQP'):
    """Fit the free (None) entries of *thetas* by maximum likelihood.

    Builds initial guesses and bounds for the free parameters, wraps the
    inner likelihood into a scipy objective, and delegates to mle().
    Returns mle()'s list of OptimizeResult objects.
    """
    if guesses is None:
        guesses = make_guesses(thetas, theta_key_order, guesses_lookup)
    bounds = make_bounds(thetas, theta_key_order, bounds_lookup)
    return mle(data, log_likelihood_factory(data, inner_likelihood_fcn, thetas, theta_key_order), guesses, bounds, constraints, quick, method)
| true
|
9994101c97c30cc222aed6aff7bec89e0e49fde2
|
Python
|
piushvaish/pythonProgrammingExcercises
|
/List/List.py
|
UTF-8
| 1,695
| 3.796875
| 4
|
[] |
no_license
|
# ipAddress = input('please enter an ip address : ')
# print(ipAddress.count('.'))
# parrotList = [' non pinin',' no more',' a stiff',' bereft of live']
#
# parrotList.append('Norwegian blue')
# for state in parrotList:
# print('The parrot is ' + state)
# even = [2,4,6,8]
# odd = [1,3,5,7,9]
#
# numbers = even + odd
# #numbers.sort()
# print("The numbers are {}".format(sorted(numbers)))
# list1 =[]
# list2 = list()
#
# print("List 1: {}".format(list1))
#
# print("List 2: {}".format(list2))
#
# if list1 == list2:
# print("the lists are equal")
#
# print(list("the lists are equal"))
# even = [2,4,6,8]
#
# anotherEven = list(even)
#
# print(anotherEven is even)
# anotherEven.sort(reverse=True)
# print(even)
# even = [2,4,6,8]
# odd = [1,3,5,7]
#
# numbers = [even , odd]
#
# for numberSet in numbers:
# print(numberSet)
#
# for value in numberSet:
# print(value)
# menu = []
# menu.append(['egg','spam','bacon'])
# menu.append(['egg','sausage','bacon'])
# menu.append(['egg','spam'])
# menu.append(['egg','bacon','spam'])
# menu.append(['egg','bacon','sausage','spam'])
# menu.append(['spam','bacon','sausage','spam'])
# menu.append(['spa,','egg','spam','spam','bacon','spam'])
# menu.append(['spam','egg','sausgae','spam'])
#print(menu)
# for meal in menu:
# if not 'spam' in meal:
# print(meal)
#
# for ing in meal:
# print(ing)
list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Manually drive an iterator over the list, printing one value per step
# (one next() call for each element).
list_iterator = iter(list1)
for _ in list1:
    print(next(list_iterator))
# for char in string:
# print(char)
#my_iterator = iter(string)
#print(my_iterator)# shows the object and the place in memory
#print(next(my_iterator))
| true
|
4126e0004f24474d851b694b6acb08d19c2221f1
|
Python
|
santoshghimire/typeform
|
/postapi/typeform.py
|
UTF-8
| 8,056
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env python2
import json
import sys
import urllib
import pprint
from sheets_typeform import Sheets
TYPEFORM_JSON_API = 'https://api.typeform.com/v1/form/njxhSJ?key=cd3c5967bd6331d8fdbe134f81cc9accfdeecfc4'
def tf_load_data(json_file_path=None, answers_json=None):
    '''
    Row structure:
    B. #
    C. Email
    D. Date of birth
    E. Expected age of retirement
    F. Pension Provider
    // groups start
    G. Investment name
    H. Investment value
    I. Value date
    J. Annual fee on pension
    K. Would you like to add additional...?
    // group end x 5
    AF. Do you have additional pensions you wish to include in the projection?
    AG. Pension provider
    // second groups
    AH. Investment name
    AI. Investment value
    AJ. Value date
    AK. Annual fee on pension
    AL. Would you like to add additional...?
    // second groups end x 5
    BG. Pension fund you intend to contribute to over this time period
    BH. Amount you intend to contrivute to over this time period
    BI. Date you intend those contributions to start
    BJ. Annual fee on pension
    BK. Include my basic rate tax relief in the contribution projections
    BL. Please click the tick box to agree to our terms and conditions
    BM. Start Date (UTC)
    BN. Submit Date (UTC)
    BO. Network ID
    '''
    # Map a repeated-group question title to its column offset (0-4) within a
    # 5-column investment sub-row; -1 means "not part of the sub-row".
    def get_group_col_pos(question):
        if 'Investment name' in question:
            return 0
        elif 'Investment value' in question:
            return 1
        elif 'Value date' in question:
            return 2
        elif 'Annual fee' in question:
            return 3
        elif 'Would you like to' in question:
            return 4
        else:
            return -1
    # Same idea for the trailing "intended contributions" question block.
    def get_end_pos(question):
        if 'Pension fund you intend' in question:
            return 0
        elif 'Amount you intend to' in question:
            return 1
        elif 'Date you intend' in question:
            return 2
        elif 'Annual fee on pension' in question:
            return 3
        elif 'Include my basic rate' in question:
            return 4
        else:
            return -1
    # Either a webhook payload dict (`answers_json`) or a path to one on disk
    # may be supplied; the file path takes precedence.
    if json_file_path:
        with open(json_file_path, 'r') as json_file:
            answers_json = json.load(json_file)
    typeform_json = get_typeform_data()
    # Index answers by numeric field id, and map field ids to their titles.
    answers = dict( (int(answer['field']['id']), answer) for answer in answers_json['form_response']['answers'] )
    field_titles = dict( (question['field_id'], question['question']) for question in typeform_json['questions'] )
    labeled_answers = dict()
    # Normalise each answered question into {id, other_id, question, group, value}.
    for question in typeform_json['questions']:
        t = dict()
        t['id'] = question['field_id']
        t['other_id'] = question['id']
        t['question'] = field_titles[question['field_id']]
        t['group'] = 0 if 'group' not in question.keys() else question['group']
        if t['id'] in answers.keys():
            answer = answers[t['id']]
            datatype = answer['type']
            if datatype in ['text', 'number', 'email']:
                t['value'] = answer[datatype]
            elif datatype == 'boolean':
                t['value'] = int(answer[datatype])
            elif datatype == 'choice':
                t['value'] = answer['choice']['label']
            elif datatype == 'date':
                # Keep only the date part of the ISO timestamp.
                t['value'] = answer['text'].split('T')[0]
            else:
                t['value'] = ''
            labeled_answers[t['id']] = t
    pension_providers = sorted([ question for question in labeled_answers.values() if 'Pension' in question['question'] and 'textfield_' in question['other_id'] ], key=lambda x: x['id'])
    # constructing the rows:
    groups = sorted([ sorted([question for question in labeled_answers.values() \
        if question['group'] == g ], key=lambda x: x['id']) \
        for g in set( ans['group'] for ans in labeled_answers.values() \
        if ans['group'] != 0 ) ], key=lambda x: x[0]['group'])
    investments_for_p1 = list()
    investments_for_p2 = list()
    end_ques = list()
    # Split the question groups between the two pension providers (matching on
    # the provider's field id embedded in the "Would you like to ..." title);
    # groups without exactly one such question are the trailing contribution
    # questions.
    for group in groups:
        question = [ question['question'] for question in group if 'Would you like to ' in question['question'] ]
        if len(question) != 1:
            end_ques.append(group)
            continue
        else:
            question = question[0]
        if str(pension_providers[0]['id']) in question:
            investments_for_p1.append(group)
        elif str(pension_providers[1]['id']) in question:
            investments_for_p2.append(group)
    # pprint.PrettyPrinter().pprint([len(group) for group in investments_for_p1])
    # pprint.PrettyPrinter().pprint(investments_for_p2)
    # NOTE(review): the numeric field ids below are hard-coded to this specific
    # form definition — they must change if the form is rebuilt.
    row = []
    row.append( answers_json['form_response']['token'] )
    # print( labeled_answers[20699463]['question'] )
    row.append( labeled_answers[20699463]['value'] ) # email
    # print( labeled_answers[20699464]['question'] )
    row.append( labeled_answers[20699464]['value'] ) # dob
    # print( labeled_answers[20699465]['question'] )
    row.append( labeled_answers[20699465]['value'] ) # exp age of ret
    # print( pension_providers[0]['question'] )
    row.append( pension_providers[0]['value'] ) # pension provider
    for investment_group in investments_for_p1:
        sub_row = [''] * 5
        for question in investment_group:
            pos = get_group_col_pos(question['question'])
            if(pos == -1):
                continue
            sub_row[pos] = question['value']
        row += sub_row
    # print( labeled_answers[20702491]['question'] )
    row.append( labeled_answers[20702491]['value'] ) # next pension provider
    # print( pension_providers[1]['question'] )
    row.append( pension_providers[1]['value'] ) # pension provider
    for investment_group in investments_for_p2:
        sub_row = [''] * 5
        for question in investment_group:
            pos = get_group_col_pos(question['question'])
            if(pos == -1):
                continue
            sub_row[pos] = question['value']
        row += sub_row
    for q in end_ques:
        sub_row = [''] * 5
        for question in q:
            pos = get_end_pos(question['question'])
            if pos == -1:
                continue
            sub_row[pos] = question['value']
        row += sub_row
    row.append( labeled_answers[21247735]['value'] ) # please click the tick box
    row.append( '' ) # start date
    row.append( answers_json['form_response']['submitted_at'].replace('Z','').replace('T',' ') ) # submit date
    row.append( answers_json['event_id'] ) # network id
    # Append the assembled row to the Google Sheet and return its length.
    # sheet = Sheets(spreadsheetId = '1brAVs0c-Vzm5AEVBNaEWe3O4_9JfuIImv0XVIrbFt74', # test
    sheet = Sheets(spreadsheetId = '1M00WGCdkA49JbprZ631t3Dbr1tBIDvTXvLDBjEutI_A', # prod
        client_secret_file = 'client_secret.json',
        application_name = 'FinancialData',
        sheet_name = 'TF data')
    sheet.append_row(row)
    return len(row)
def get_typeform_data(grouped = False):
    """Load the Typeform form-definition JSON.

    Reads the cached local file 'form_questions_data.json' (the live API
    fetch is currently disabled).  When ``grouped`` is True the questions
    are re-keyed into {group_id: [question, ...]}, with group 0 holding
    all ungrouped questions.

    Raises the underlying OSError/ValueError when the file is missing or
    not valid JSON.
    """
    # try:
    #     response = urllib.urlopen(TYPEFORM_JSON_API)
    #     data = json.loads(response.read())
    # except:
    #     print('no internet. trying to load local file')
    try:
        with open('form_questions_data.json') as f:
            data = json.load(f)
    except (IOError, OSError, ValueError):
        # BUG FIX: this previously called the undefined name ``prinf`` and
        # then fell through to a NameError on ``data``; report and re-raise
        # the real failure instead.
        print('no file as well')
        raise
    if not grouped:
        return data
    # Bucket questions by their 'group' key (0 for ungrouped questions).
    d = dict()
    for question in data['questions']:
        group = question.get('group', 0)
        d.setdefault(group, []).append(question)
    return d
if __name__=='__main__':
    # CLI entry point: expects exactly one argument, the path to a Typeform
    # webhook-payload JSON file, which is parsed and appended to the sheet.
    if(len(sys.argv) != 2):
        print('Invalid usage')
        sys.exit(1)
    json_file_path = sys.argv[1]
    tf_load_data(json_file_path=json_file_path)
# pp = pprint.PrettyPrinter()
# pp.pprint(get_typeform_data(True))
| true
|
a340bcdc4c12b705b1a52dfe508ffc84aec10083
|
Python
|
k123321141/ADLxMLDS2017
|
/project/mnist-cluttered/png2npz.py
|
UTF-8
| 1,950
| 2.625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import os, sys
import argparse
import numpy as np
from scipy.ndimage import imread
from os.path import join
def parse():
    # Parse command-line arguments: three image directories (train/valid/test),
    # the output .npz path, and a quiet flag.
    # NOTE(review): -q defaults to True, so the store_true action can never
    # change it — the flag is effectively inert.
    parser = argparse.ArgumentParser(description='utils')
    parser.add_argument('train_dir', help='png image files directory')
    parser.add_argument('valid_dir', help='png image files directory')
    parser.add_argument('test_dir', help='png image files directory')
    parser.add_argument('-o','--output', default='./output.npz', help='path to save npz file.')
    parser.add_argument('-q','--quiet', action='store_true', default=True, help='show the log')
    args = parser.parse_args()
    return args
def read_dir(dir_path):
    # Load every .png in `dir_path` into a single (N, w, h) image array plus an
    # (N, 1) label array.  Labels are parsed from the filename suffix
    # "..._<label>.png".
    # NOTE(review): scipy.ndimage.imread is deprecated/removed in modern SciPy;
    # consider imageio.imread when upgrading.
    file_list = [join(dir_path, f) for f in os.listdir(dir_path) if f.endswith('.png')]
    #get sample info : width, height, channels
    sample = file_list[0]
    img = imread(sample)
    #(60, 60)
    assert len(img.shape) == 2
    w, h = img.shape
    num = len(file_list)
    #data
    data_buf = []
    for f in file_list:
        img = imread(f)
        assert img.shape == (w, h)
        img = img.reshape([1, w, h])
        data_buf.append(img)
    #label
    label_buf = []
    for f in file_list:
        name = os.path.basename(f)
        buf = name.split('_')[-1]
        #3.png
        label = int(buf[:-4])
        label_buf.append( np.array([label]).reshape([1,1]) )
    return np.vstack(data_buf), np.vstack(label_buf)
def main():
    # Convert the three image directories into arrays and bundle them into a
    # single .npz archive keyed x_/y_ train/valid/test.
    # NOTE(review): Python 2 syntax (bare print statement below); the output
    # file should also be opened in binary mode ('wb') for np.savez.
    args = parse()
    x_train, y_train = read_dir(args.train_dir)
    x_valid, y_valid = read_dir(args.valid_dir)
    x_test, y_test = read_dir(args.test_dir)
    print x_train.shape, y_train.shape
    print('start writing output file %s' % args.output)
    with open(args.output,'w') as output:
        np.savez(output, x_train=x_train, y_train=y_train,
            x_valid=x_valid, y_valid=y_valid,
            x_test=x_test, y_test=y_test)
    print('Done')
if __name__ == '__main__':
    main()
| true
|
0aa59a1e1255bd2e2dfce82226cb5fc0e0d79b0e
|
Python
|
karthikbharadwaj/CodeRepo
|
/Practice/leetcode/is_palindrome.py
|
UTF-8
| 355
| 3.34375
| 3
|
[] |
no_license
|
__author__ = 'karthikb'
class Solution:
    # @return a boolean
    def isPalindrome(self, x):
        """Return True when the decimal representation of x reads the same
        forwards and backwards (negative numbers are never palindromes
        because of the leading '-')."""
        digits = str(x)
        return digits == digits[::-1]
# Ad-hoc driver.  NOTE: Python 2 print statement — this script targets Python 2.
s = Solution()
print s.isPalindrome(111111111112111)
| true
|
416c041e0fda16d02bbd39b54dd36091e8ee1bb5
|
Python
|
IntroToCompBioLSU/week12
|
/assignments/ewhart1/try_except.py
|
UTF-8
| 199
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Prompt for a number; complain politely when the input is not an integer.
try:
    # BUG FIX: the original bound the result to the name ``int``, shadowing the
    # builtin; use a regular variable and catch only ValueError instead of a
    # bare except (which would also swallow KeyboardInterrupt/SystemExit).
    number = int(input("Enter a number: "))
except ValueError:
    print("Sorry, the instructions weren't very specific. Please enter an integer next time.")
# DB: Simple, but good example!
| true
|
3fd85b2b22357df4249f43d37d2c7bff02db36d2
|
Python
|
HaohanWang/backProjection
|
/CNN/optimizers.py
|
UTF-8
| 3,914
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import theano
import theano.tensor as T
import numpy as np
__author__ = "Sandeep Subramanian"
__maintainer__ = "Sandeep Subramanian"
__email__ = "sandeep.subramanian@gmail.com"
class Optimizer:
    """
    Optimization methods for backpropagation.

    Each method returns a list of (shared_variable, update_expression) pairs
    suitable for passing to ``theano.function(updates=...)``.
    """
    def __init__(self):
        """
        __TODO__ add gradient clipping
        """
    def sgd(self, cost, params, lr=0.01):
        """
        Stochastic Gradient Descent: param <- param - lr * grad.
        """
        lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
        gradients = T.grad(cost, params)
        updates = []
        for param, gradient in zip(params, gradients):
            updates.append((param, param - lr * gradient))
        return updates
    def adagrad(self, cost, params, lr=0.01, epsilon=1e-6):
        """
        Adaptive Gradient Optimization: per-parameter learning rates scaled by
        the accumulated sum of squared gradients.
        """
        lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
        epsilon = theano.shared(np.float64(epsilon).astype(theano.config.floatX))
        gradients = T.grad(cost, params)
        updates = []
        for param, gradient in zip(params, gradients):
            accumulated_gradient = theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(np.float64), borrow=True)
            accumulated_gradient_new = accumulated_gradient + gradient ** 2
            updates.append((accumulated_gradient, accumulated_gradient_new))
            updates.append((param, param - lr * gradient / T.sqrt(accumulated_gradient_new + epsilon)))
        return updates
    def rmsprop(self, cost, params, lr=0.01, rho=0.9, epsilon=1e-6):
        """
        RMSProp - Root Mean Square: exponential moving average of squared
        gradients with decay ``rho``.
        Reference - http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
        """
        lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
        epsilon = theano.shared(np.float64(epsilon).astype(theano.config.floatX))
        rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
        gradients = T.grad(cost, params)
        updates = []
        for param, gradient in zip(params, gradients):
            accumulated_gradient = theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(np.float64), borrow=True)
            accumulated_gradient_new = accumulated_gradient * rho + gradient ** 2 * (1 - rho)
            updates.append((accumulated_gradient, accumulated_gradient_new))
            updates.append((param, param - lr * gradient / T.sqrt(accumulated_gradient_new + epsilon)))
        return updates
    def adam(self, cost, params, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
        """
        ADAM: first/second moment estimates with bias correction.
        Reference - http://arxiv.org/pdf/1412.6980v8.pdf - Page 2
        """
        lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
        epsilon = theano.shared(np.float64(epsilon).astype(theano.config.floatX))
        beta_1 = theano.shared(np.float64(beta_1).astype(theano.config.floatX))
        beta_2 = theano.shared(np.float64(beta_2).astype(theano.config.floatX))
        t = theano.shared(np.float64(1.0).astype(theano.config.floatX))
        gradients = T.grad(cost, params)
        updates = []
        for param, gradient in zip(params, gradients):
            param_value = param.get_value(borrow=True)
            m_tm_1 = theano.shared(np.zeros_like(param_value).astype(np.float64), borrow=True)
            v_tm_1 = theano.shared(np.zeros_like(param_value).astype(np.float64), borrow=True)
            m_t = beta_1 * m_tm_1 + (1 - beta_1) * gradient
            v_t = beta_2 * v_tm_1 + (1 - beta_2) * gradient ** 2
            # BUG FIX: bias correction must use beta**t (Algorithm 1 in the
            # paper), not the constant beta — otherwise early steps are
            # mis-scaled and the correction never decays over time.
            m_hat = m_t / (1 - beta_1 ** t)
            v_hat = v_t / (1 - beta_2 ** t)
            updated_param = param - (lr * m_hat) / (T.sqrt(v_hat) + epsilon)
            updates.append((m_tm_1, m_t))
            updates.append((v_tm_1, v_t))
            updates.append((param, updated_param))
        # BUG FIX: the timestep must be advanced once per optimisation step,
        # not once per parameter — duplicate update pairs for the same shared
        # variable are rejected by theano.function.
        updates.append((t, t + 1.0))
        return updates
| true
|
09feba41ac70cb3b8c9d7793948d70abceb2ec85
|
Python
|
Candy-YangLi/ylscript
|
/python/firstdemos/demo170704.py
|
UTF-8
| 99
| 3.71875
| 4
|
[] |
no_license
|
# Demonstrate generator expressions: squares of 1..9 printed on one line.
# BUG FIX: the original first built an equivalent *list* comprehension and
# immediately overwrote it with the generator — the dead allocation is removed.
g = (x * x for x in range(1, 10))
for n in g:
    print(n, end=' ')
| true
|
ae19bdf6504573431f7763531d18f6172642f978
|
Python
|
EruditePanda/ingestors
|
/ingestors/support/pdf.py
|
UTF-8
| 1,542
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import os
import glob
import uuid
from normality import collapse_spaces # noqa
from pdflib import Document
from ingestors.support.temp import TempFileSupport
from ingestors.support.shell import ShellSupport
from ingestors.support.ocr import OCRSupport
class PDFSupport(ShellSupport, TempFileSupport, OCRSupport):
    """Provides helpers for PDF file context extraction."""
    def pdf_extract(self, pdf):
        """Extract pages and page text from a PDF file."""
        # Flag the result as a PDF, then process every page into a shared
        # temp directory (used for extracted page images).
        self.result.flag(self.result.FLAG_PDF)
        temp_dir = self.make_empty_directory()
        for page in pdf:
            self.pdf_extract_page(temp_dir, page)
    def pdf_alternative_extract(self, pdf_path):
        # Record the PDF as an alternative representation of the source
        # document, then run the normal page extraction on it.
        self.result.emit_pdf_alternative(pdf_path)
        pdf = Document(pdf_path.encode('utf-8'))
        self.pdf_extract(pdf)
    def pdf_extract_page(self, temp_dir, page):
        """Extract the contents of a single PDF page, using OCR if need be."""
        pagenum = page.page_no
        texts = page.lines
        # Dump the page's embedded images into a unique per-page folder and
        # OCR each PNG, appending recognised text to the page's text lines.
        image_path = os.path.join(temp_dir, str(uuid.uuid4()))
        page.extract_images(path=image_path.encode('utf-8'), prefix=b'img')
        for image_file in glob.glob(os.path.join(image_path, "*.png")):
            with open(image_file, 'rb') as fh:
                text = self.extract_text_from_image(fh.read())
                # text = collapse_spaces(text)
                if text is not None:
                    texts.append(text)
        text = ' \n'.join(texts).strip()
        self.result.emit_page(int(pagenum), text)
| true
|
f75344e3c13c6bc297f8a5dacdd8dc4c612e3000
|
Python
|
roynwang/mbt_test
|
/Action.py
|
UTF-8
| 297
| 2.5625
| 3
|
[] |
no_license
|
class Action(object):
    """Abstract base for model-based-test actions.

    Subclasses are expected to implement check() (returns a status),
    transfer() and execute().
    """
    def __init__(self):
        self.name = 'test'
    def check(self):
        # Subclass responsibility: should return a status.
        raise NotImplementedError()
    def transfer(self, status):
        # Subclass responsibility.
        raise NotImplementedError()
    def execute(self):
        # Subclass responsibility.
        raise NotImplementedError()
    def __str__(self):
        return str(self.name)
| true
|
260757c56c34dbb3446e159e1e426ace5292d695
|
Python
|
danong/leetcode-solutions
|
/solutions/group_anagrams.py
|
UTF-8
| 563
| 3.390625
| 3
|
[] |
no_license
|
from collections import Counter, defaultdict
def counter_to_tuple(counter):
    """Encode a lowercase-letter Counter as a hashable 26-slot count tuple."""
    counts = [0] * 26
    base = ord('a')
    for ch, n in counter.items():
        counts[ord(ch) - base] = n
    return tuple(counts)
class Solution:
    def groupAnagrams(self, strs):
        """
        :type strs: List[str]
        :rtype: List[List[str]]
        Group words that are anagrams of each other, preserving first-seen
        order of both groups and members (buckets keyed by letter counts).
        """
        buckets = defaultdict(list)
        for word in strs:
            buckets[counter_to_tuple(Counter(word))].append(word)
        return list(buckets.values())
| true
|
f4efab80b62284434978c0f2be11f6ca30b8097d
|
Python
|
BogiTheNinjaTester/TestAppChat
|
/wtform_fields.py
|
UTF-8
| 1,973
| 2.75
| 3
|
[] |
no_license
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError
from models import User
from passlib.hash import pbkdf2_sha256
def invalid_credentials(form, field):
    ''' Method checks if credentials are valid. '''
    # WTForms inline validator: `field` is the password field; the username is
    # read off the enclosing form.  The same error message is used for both
    # failure modes so responses do not leak which part was wrong.
    username = form.username.data
    password = field.data
    user_object = User.query.filter_by(username=username).first()
    if user_object is None:
        raise ValidationError('Username or password is incorrect!')
    elif not pbkdf2_sha256.verify(password, user_object.password):
        raise ValidationError('Username or password is incorrect!')
class RegistrationForm(FlaskForm):
    ''' Registration form: username + password + confirmation.
    Field-level validators enforce presence and 4-25 character length;
    validate_username additionally rejects duplicate usernames. '''
    username = StringField('username', validators= [InputRequired(message= 'Username required!'),
        Length(min=4, max=25, message= 'Username must be between 4 and 25 characters')])
    password = PasswordField('password', validators=[InputRequired(message= 'Password required!'),
        Length(min=4, max=25, message= 'Password must be between 4 and 25 characters')] )
    confirm_password = PasswordField('confirm_password', validators= [InputRequired(message='Password required!'),
        EqualTo('password', message='Password must match!')])
    submit_button = SubmitField('Create')
    def validate_username(self, username):
        ''' Method which validates username field. '''
        # WTForms calls validate_<fieldname> automatically during validate().
        user_object = User.query.filter_by(username=username.data).first()
        if user_object:
            raise ValidationError('Username already exist! Select different username.')
class LoginForm(FlaskForm):
    ''' Login form.  The password field chains the invalid_credentials
    validator defined above to check the username/password pair. '''
    username = StringField('username', validators= [InputRequired(message= 'Username required!')])
    password = PasswordField('password', validators=[InputRequired(message= 'Password required!'), invalid_credentials])
    submit_button = SubmitField('Login')
| true
|
a1f4885f6c7a0d69591b57947deac0af456381f6
|
Python
|
kimx3129/Simon_Data-Science
|
/AWSLearners/9장/dynamodb_bulk_upload.py
|
UTF-8
| 1,230
| 2.65625
| 3
|
[] |
no_license
|
### 다이나모디비 실습 Lambda Function 코드 ###
# 코드 - 다이나모디비 다량의 데이터 업로드
import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: bulk-insert four sample customer transactions
    into the aws-learner-customer-transaction-table DynamoDB table using a
    batch writer (which buffers and flushes the puts automatically)."""
    client = boto3.resource('dynamodb')
    table = client.Table('aws-learner-customer-transaction-table')
    # (customer_id, transaction_date, item_category, price)
    sample_rows = [
        ('95IUZ', '2020-10-24', 'Desk', 120000),
        ('72MUE', '2020-10-28', 'Chair', 250000),
        ('28POR', '2020-11-05', 'Shampoo', 50000),
        ('43NCH', '2020-10-12', 'Pulse', 320000),
    ]
    with table.batch_writer() as batch:
        for customer_id, tx_date, category, price in sample_rows:
            batch.put_item(
                Item={
                    'customer_id': customer_id,
                    'transaction_date': tx_date,
                    'item_category': category,
                    'price': price
                }
            )
| true
|
a0f40a97c162d152309cf4ed701492c810db63ef
|
Python
|
evespimrose/for_2Dgameprograming
|
/for_In_Class/09_10.py
|
UTF-8
| 475
| 2.640625
| 3
|
[] |
no_license
|
from pico2d import *
open_canvas()
# Sprite sheet with 8 run frames (100x100 px each) and a grass background.
# NOTE(review): absolute Windows paths — this only loads on the author's machine.
boy = load_image('C:/Users/Jang/Desktop/gitupload/for_In_Class/run_animation.png')
gra = load_image('C:/Users/Jang/Desktop/gitupload/for_In_Class/grass.png')
x = 0
frame = 0
# Animate the boy running left to right across the screen, cycling frames.
while (x<800):
    clear_canvas()
    gra.draw(400,30)
    boy.clip_draw(frame*100,0,100,100,x,90)
    update_canvas()
    frame = (frame + 1) % 8
    x += 5
    delay(0.05)
    get_events()
delay(5)
close_canvas()
| true
|
e80023de6facc15e97871cbe1438f0d05317e8e6
|
Python
|
fovegage/learn-python
|
/Pytho内建函数/进制.py
|
UTF-8
| 289
| 2.859375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/29 17:20
# @Author : fovegage
# @Email : fovegage@gmail.com
# @File : 进制.py
# @Software: PyCharm
# Built-in base conversions: oct = base 8, bin = base 2, hex = base 16.
# oct(): octal
# returns an octal string, e.g. '0o6'
print(oct(6))
# bin(): binary
# returns a binary string, e.g. '0b110'
print(bin(6))
# hex(): hexadecimal, e.g. '0x6'
print(hex(6))
| true
|
cc8896da1a8c4d536f290c9e4c90af488c303c7f
|
Python
|
somnoynadno/shift_summer_2019
|
/ssrf/app/port_scan.py
|
UTF-8
| 220
| 2.859375
| 3
|
[] |
no_license
|
import requests
# Naive SSRF-based port scan: asks the vulnerable endpoint to fetch
# http://127.0.0.1:<port>/ for every TCP port and infers open/closed from the
# HTTP status relayed back.  Blocking and very slow by design (65535 requests).
for x in range(1,65536):
    r = requests.get('http://127.0.0.1/get_url_requests?url=http://127.0.0.1:'+str(x)+'/')
    if r.status_code != 200:
        print("port", x, "closed");
    else:
        print("port", x, "open");
| true
|
0bff07bd9a2945de412a6a6c14e16e5e51a45417
|
Python
|
kushal200/python
|
/Basic/triangle.py
|
UTF-8
| 239
| 3.59375
| 4
|
[] |
no_license
|
# Heron's formula: area = sqrt(s*(s-a)*(s-b)*(s-c)), s = semi-perimeter.
a=int(input("Enter the value of a:"))
b=int(input("Enter the value of b:"))
c=int(input("Enter the value of c:"))
s=(a+b+c)/2
print("s=",s)
# BUG FIX: the original computed (...) - 0.5 (a subtraction) instead of
# raising to the power 0.5 (the square root Heron's formula requires).
Area_Of_Triangle=(s*(s-a)*(s-b)*(s-c))**0.5
print("The area of taingle is :%0.2f" %Area_Of_Triangle)
| true
|
0d142dcd7bad8206fdec1dc5ca63ff4e7ac24b31
|
Python
|
williamhogman/operant
|
/operant/currency.py
|
UTF-8
| 1,987
| 3.078125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
"""Module for currency systems.
Currencies are a closely related to points, but differ in that they
are often exchanged for rewards of some kind. The value of a
currencies stem from what they are traded in, while points often carry
some intrinsic value. A currency loses this intrinsic value because it
is redeemable.
"""
from __future__ import (with_statement, print_function,
division, absolute_import)
from operant.base import Registry
class Currency(object):
    """An instance of this class represents currency.

    All storage operations are delegated to a ``store`` object
    (add_balance / deduct_balance / get_balance / track_event); results are
    delivered asynchronously through the optional ``callback``.
    """
    def __init__(self, currency_id):
        self.currency_id = currency_id
    def _add_currency_to_user(self, store, user, amount, callback):
        # Thin wrapper around the store so subclasses can override crediting.
        store.add_balance(user.operant_id(), self, amount, callback)
    def _deduct_currency_from_user(self, store, user, amount, callback):
        store.deduct_balance(user.operant_id(), self, amount, callback)
    def award(self, store, user, amount=1, callback=None):
        """Awards the passed in amount of this currency"""
        def _cb(n):
            store.track_event("currency.awarded." + self.currency_id,
                              user.operant_id(), dict(amount=amount))
            # BUG FIX: callback defaults to None and the original invoked it
            # unconditionally, raising TypeError whenever it was omitted.
            if callback is not None:
                callback(n)
        self._add_currency_to_user(store, user, amount, _cb)
    def deduct_balance(self, store, user, amount=1, callback=None):
        """Deducts the passed in amount of this currency from the player"""
        def _cb(n):
            store.track_event("currency.deducted." + self.currency_id,
                              user.operant_id(), dict(amount=amount))
            # Same guard as award(): the callback is optional.
            if callback is not None:
                callback(n)
        self._deduct_currency_from_user(store, user, amount, _cb)
    def get_balance(self, store, user, callback=None):
        """Gets the users balance in the passed in currency"""
        store.get_balance(user.operant_id(), self, callback)
# Global registry of currencies, keyed by currency_id.  Plain-string
# registrations are coerced into Currency instances via the str handler.
Currencies = Registry("currency", "currency_id")
Currencies.set_str_handler(Currency)
get = Currencies.get
register = Currencies.register
| true
|
90dcfc13541f244ddd3147dfbd06c67494b2ca10
|
Python
|
gixita/pulsarnews
|
/tests/conftest.py
|
UTF-8
| 1,465
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from app import create_app, db
from app.models import User
from configtest import Config
@pytest.fixture(scope='module')
def new_user():
    # A detached User instance (never committed) for unit-style model tests.
    user = User(username="kris", email="kris@pulsarnews.io")
    return user
@pytest.fixture(scope='module')
def test_client():
    # Build the app with the test config and yield a Flask test client that
    # runs inside an application context for the whole module.
    flask_app = create_app(Config)
    # Create a test client using the Flask application configured for testing
    with flask_app.test_client() as testing_client:
        # Establish an application context
        with flask_app.app_context():
            yield testing_client # this is where the testing happens!
@pytest.fixture(scope='module')
def init_database(test_client):
    # Create the schema, seed two known users, yield for the tests, then drop
    # everything so the module leaves no state behind.
    # Create the database and the database table
    db.create_all()
    # Insert user data
    user1 = User(username="info", email="info@pulsarnews.io")
    user2 = User(username="support", email="support@pulsarnews.io")
    user1.set_password("password")
    user2.set_password("password")
    db.session.add(user1)
    db.session.add(user2)
    # Commit the changes for the users
    db.session.commit()
    yield  # this is where the testing happens!
    db.drop_all()
@pytest.fixture(scope='function')
def login_default_user(test_client):
    # Log the seeded "info" user in for the duration of one test, then log out.
    test_client.post('/auth/login_password',
        data=dict(email='info@pulsarnews.io', password='password'),
        follow_redirects=True)
    yield  # this is where the testing happens!
    test_client.get('/auth/logout', follow_redirects=True)
| true
|
fef6bdb68e79d6bd1508d5b78e63d56b82f85791
|
Python
|
BandiSaikumar/PythonDjangoFullStackDevolpment
|
/Python/PythonIntroduction/lesson2/task6/assignments.py
|
UTF-8
| 95
| 3.125
| 3
|
[] |
no_license
|
# Demonstrate arithmetic re-assignment on a float, printing after each step.
a = 54.0
print("a = " + str(a))
a = a - 4
print("a = " + str(a))
a = a + 10
print("a = " + str(a))
| true
|
927c2f9e84a7193cb4e8e785271d6427bef23266
|
Python
|
xogxog/SWEA
|
/IM대비/1926.간단한369게임.py
|
UTF-8
| 345
| 3.28125
| 3
|
[] |
no_license
|
# 3-6-9 game: for each number 1..N print the number itself, or one '-' (clap)
# per digit that is 3, 6 or 9.
N = int(input())
for i in range(1, N + 1):
    claps = sum(1 for digit in str(i) if digit in '369')
    if claps == 0:
        print('{} '.format(i), end='')
    else:
        print('{} '.format('-' * claps), end='')
| true
|
7c68eec0d945113994ffee5d632c2d59122a3d16
|
Python
|
bitwoman/curso-em-video-python
|
/Curso de Python 3 - Mundo 1 - Fundamentos/#015.py
|
UTF-8
| 514
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
# Python exercise 015: read the km driven and the number of days a hired car
# was used, then charge R$60.00 per day plus R$0.15 per km.
km_traveled = float(input('Enter km travaled by car: '))
days_used = int(input('Enter the days that the car was used: '))
rent_day = 60.00 * days_used
rent_km = 0.15 * km_traveled
rent_total = rent_day + rent_km
print('Total to pay is: R$ %.2f' % rent_total)
| true
|
2f13946cdce3d20746e3ba8658a859583c1f6ba6
|
Python
|
05satyam/Buggy-Pinball-alpha
|
/Gradient Descent and Variants/GDmeasures.py
|
UTF-8
| 1,654
| 3.203125
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from functions import *
from learningRates import *
import random
import time
# Monte-Carlo benchmark of an Adam-style gradient descent on the Booth
# function: repeat from random starting points, time each run that converges
# inside the bounds, and report the average time plus the failure count.
times=[]
a = simpleLR(0.2)
#gama=0.9
gama1=0.8
gama2=0.999
e=1e-6 #standard value to avoid dividing with 0
algo_trials=100000
low=-10
high=10
for exp in range(0, algo_trials):
    start_time=time.time()
    #algorithm
    x = random.uniform(low, high) # initial solutions
    y = random.uniform(low, high)
    z = booth(x, y)
    #print("Initial guess: x =",x," y =",y," z =",z)
    #i=1
    Dx=0 #difference between last two variable values
    Dy=0
    Gx=0 #the matrix of the derirative
    Gy=0
    limits=False
    # Iterate until the objective drops below the tolerance or we leave the box.
    while z>0.000001: #z>-1.913222 or z<-1.913223:
        Dx = gama1*Dx + (1-gama1)*booth_dx(x, y)
        Dy = gama1*Dy + (1-gama1)*booth_dy(x, y)
        Gx = gama2*Gx + (1-gama2)*booth_dx(x, y)**2
        Gy = gama2*Gy + (1-gama2)*booth_dy(x, y)**2
        x = x - a*Dx/(np.sqrt(Gx)+e)
        y = y - a*Dy/(np.sqrt(Gy)+e)
        z = booth(x, y)
        #print("Iteration ",i,": x =",x," y =",y," z =",z)
        #print("Learning rate is ", a)
        if x<low or x>high or y<low or y>high:
            print("Boundaries have been exceeded. Results not valid. x=",x," y=",y)
            limits=True
            break
        #i+=1
    if limits==False:
        total_time=time.time()-start_time
        times.append(total_time)
    print(exp)
time_average=sum(times)/len(times)
print("Algorithm needed an average of ", time_average, " seconds to reach global minimum. Times stuck to local minimum.",algo_trials-len(times))
| true
|
7c77a5d9001a9d32e62dde65c52bd78259e65c29
|
Python
|
Sanchi02/Dojo
|
/LeetCode/Logical/MeetingRooms.py
|
UTF-8
| 1,299
| 3.28125
| 3
|
[] |
no_license
|
# Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required.
# Example 1:
# Input: [[0, 30],[5, 10],[15, 20]]
# Output: 2
# Example 2:
# Input: [[7,10],[2,4]]
# Output: 1
class Solution:
    def minMeetingRooms(self, intervals: "List[List[int]]") -> int:
        """Return the minimum number of conference rooms needed so that no two
        overlapping meetings share a room.

        Uses a min-heap of room end times: meetings are processed in start
        order; a meeting reuses the earliest-ending room when that room is
        already free, otherwise a new room is opened.  O(n log n) time,
        O(n) space (the original scanned a room list per meeting, O(n^2)).

        BUG FIX: the original annotation referenced the bare name ``List``
        without importing it from typing, raising NameError when the class
        body was evaluated; the annotation is now a string so it stays
        purely documentary.
        """
        import heapq
        if not intervals:
            return 0
        end_times = []  # min-heap of end times of rooms currently in use
        for start, end in sorted(intervals):
            if end_times and end_times[0] <= start:
                # The earliest-ending room is free by this start: reuse it.
                heapq.heapreplace(end_times, end)
            else:
                heapq.heappush(end_times, end)
        return len(end_times)
| true
|
4ee2bfac8eb86c474a47637bcc31b7afa2a9810d
|
Python
|
gkupdx/CS350-Project
|
/_pythonCodeForTestFiles/measuringResult.py
|
UTF-8
| 4,209
| 3.25
| 3
|
[] |
no_license
|
# This file measure the time of sorting experimental data using the different sorting algorithms
from generateBinary import arraySetBinaryDigits
from generateRandom import arraySetRandom
from generateReverseSorted import arraySetRevSorted
from generateSorted import arraySetFiftyUnsorted
from generateSorted import arraySetTenUnSorted
from generateSorted import arraySorted
import time
import sys
import copy
from heapSort import heapSort
from mergeSort import mergeSort
from hoarePartition import quickSortHoare
from lomutoPartition import quickSortLomuto
from quickSortOpenSource2 import quickSort
# Benchmark harness: time each sorting algorithm over 10 deep copies of the
# chosen data set.  NOTE: Python 2 only (bare print statements below).
recursion = 1000000
sys.setrecursionlimit(recursion) #this function increase the recursion limit.
array = arraySetBinaryDigits #change the variable to switch to different test
#array = arraySetRevSorted #comment out to use
#array = arraySetRandom #comment out to use
#array = arraySetFiftyUnsorted #comment out to use
#array = arraySetTenUnSorted #comment out to use
#array = arraySorted
array1 = copy.deepcopy(array)
array2 = copy.deepcopy(array)
array3 = copy.deepcopy(array)
array4 = copy.deepcopy(array)
array5 = copy.deepcopy(array)
array6 = copy.deepcopy(array)
array7 = copy.deepcopy(array)
array8 = copy.deepcopy(array)
array9 = copy.deepcopy(array)
array_set = [array,array1,array2,array3,array4,array5,array6,array7,array8,array9]
# Independent deep copies so every algorithm sorts identical unsorted input.
array_set1 = copy.deepcopy(array_set)
array_set2 = copy.deepcopy(array_set)
array_set3 = copy.deepcopy(array_set)
sys.setrecursionlimit(1000000)
number_of_element = len(array_set)
number_of_array = len(array) #set the range of the array
"""
print array[5]
array[5] = quickSort(array[5])
#quickSortLomuto(array[3])
#quickSortHoare(array[3])
#mergeSort(array[11])
#heapSort(array[3])
print ("Sorted array is:")
print (array[5])
#print sortedArray
"""
# Quick sort (Hoare partition) timings — one line of output per array.
for j in range(0,number_of_element):
    for i in range (0, number_of_array):
        #now = datetime.datetime.now() #print the time and date for the record
        #print now
        start = time.time()
        #sorted(array[i])
        #mergeSort(array[i])
        quickSortHoare(array_set[j][i])
        #quickSortLomuto(array_set[j][i])
        #quickSortOpenSource(array[i])
        #heapSort(array[i])
        #array[i] = quickSort(array[i])
        end = time.time()
        print (end-start)
print "END"
print "----------------------------------"
# Merge sort timings.
for j in range(0,number_of_element):
    for i in range (0, number_of_array):
        #now = datetime.datetime.now() #print the time and date for the record
        #print now
        start = time.time()
        #sorted(array[i])
        mergeSort(array_set1[j][i])
        #quickSortHoare(array_set[j][i])
        #quickSortLomuto(array[i])
        #quickSortOpenSource(array[i])
        #heapSort(array[i])
        #array[i] = quickSort(array[i])
        end = time.time()
        print (end-start)
print "END"
print "----------------------------------"
# Heap sort timings.
for j in range(0,number_of_element):
    for i in range (0, number_of_array):
        #now = datetime.datetime.now() #print the time and date for the record
        #print now
        start = time.time()
        #sorted(array[i])
        #mergeSort(array_set1[j][i])
        #quickSortHoare(array_set[j][i])
        #quickSortLomuto(array[i])
        #quickSortOpenSource(array[i])
        heapSort(array_set2[j][i])
        #array[i] = quickSort(array[i])
        end = time.time()
        print (end-start)
print "END"
print "----------------------------------"
# Open-source quick sort timings (this variant returns a new list).
for j in range(0,number_of_element):
    for i in range (0, number_of_array):
        #now = datetime.datetime.now() #print the time and date for the record
        #print now
        start = time.time()
        #sorted(array[i])
        #mergeSort(array_set1[j][i])
        #quickSortHoare(array_set[j][i])
        #quickSortLomuto(array[i])
        #quickSortOpenSource(array[i])
        #heapSort(array_set2[j][i])
        array_set3[j][i] = quickSort(array_set3[j][i])
        end = time.time()
        print len(array_set3[j][i])
        print (end-start)
print "END"
print "----------------------------------"
| true
|
888ea6949978849ac56d11f2d59f0dfeefd727ff
|
Python
|
FrancisFan98/algorithm-practices
|
/POKERS.py
|
UTF-8
| 2,433
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import random, math, collections
def shuffle(deck):
    """Fisher-Yates shuffle in place: walk left to right, exchanging each
    position with a uniformly chosen position at or after it (uniform over
    all permutations)."""
    n = len(deck)
    for i in range(0, n - 1):
        j = random.randrange(i, n)
        deck[i], deck[j] = deck[j], deck[i]
def swap(deck, i, j):
    """Exchange the elements at positions i and j in place."""
    tmp = deck[i]
    deck[i] = deck[j]
    deck[j] = tmp
def test_shuffle(shuffler, deck, n = 100000):
ex = (n*1.)/math.factorial(len(deck))
result = collections.defaultdict(int)
for e in range(0, n):
Input = list(deck)
shuffler(Input)
result["".join(Input)] += 1
ok = all([(0.9*ex) <= result[item] <= (1.1*ex) for item in result])
for item in result:
print "%s%s%4.2f%s" % (item, " : " , result[item]*100./100000, "%")
print ("ok" if ok else "***BAD***")
def allMax(hands, key = None):
    # Return ([key values tying the maximum], the last hand that raised the
    # maximum). NOTE(review): despite the name, 'result' collects key *values*
    # rather than the hands themselves; the winning hand is returned
    # separately as hand_max. Confirm this asymmetry is intended.
    result = []
    current_max = None
    # Default key is the identity function.
    key = key or (lambda x:x)
    hand_max = None
    for hand in hands:
        # NOTE(review): comparing a value with the initial None via '>' only
        # works on Python 2; on Python 3 this raises TypeError. This file is
        # Python 2 (print statements elsewhere), so it runs as written.
        if key(hand) > current_max:
            result = []
            current_max = key(hand)
            hand_max = hand
        if key(hand) == current_max:
            result.append(key(hand))
    return result, hand_max
def hand_ranks(hand):
    """Return the numeric ranks of *hand* (cards like 'As', 'Td'), highest first."""
    order = "--23456789TJQKA"
    ranks = []
    for rank_char, _suit in hand:
        # index() maps '2'->2 ... 'T'->10 ... 'A'->14; the '--' prefix
        # pads the string so character position equals card value.
        ranks.append(order.index(rank_char))
    ranks.sort(reverse=True)
    return ranks
def kind(n, ranks):
    """Return the first rank in *ranks* that occurs exactly *n* times.

    Returns False when no rank occurs exactly n times. (When ranks are
    sorted descending, as produced by hand_ranks, the first match is also
    the highest such rank.)
    """
    # The original bound a local 'value = None' that was never used; removed.
    for rank in ranks:
        if ranks.count(rank) == n:
            return rank
    return False
def two_pair(ranks):
    """Return (high_pair, low_pair) when *ranks* holds two distinct pairs, else False."""
    # Scanning forward finds the first pair; scanning the reversed list
    # finds the last — they differ exactly when there are two pairs.
    high = kind(2, ranks)
    low = kind(2, list(reversed(ranks)))
    if high and high != low:
        return (high, low)
    return False
def straight(ranks):
    """True when *ranks* (sorted descending) form five consecutive values."""
    # Five distinct values whose extremes differ by 4 must be consecutive.
    if len(set(ranks)) != 5:
        return False
    return ranks[0] - ranks[-1] == 4
def flush(hand):
    """True when every card in *hand* shares a single suit."""
    suits = {suit for _rank, suit in hand}
    return len(suits) == 1
def card_rank(hand):
ranks = hand_ranks(hand)
if straight(ranks) and flush(hand):
return (9, ranks[0])
elif kind(4, ranks):
return (8, kind(4, ranks), kind(1, ranks))
elif kind(3, ranks) and kind(2, ranks):
return (7, kind(3, ranks), kind(2, ranks))
elif flush(hand):
return (6, hand)
elif straight(ranks):
return (5, ranks[0])
elif kind(3, ranks):
return (4, kind(3, ranks), kind(1, ranks), kind(1, list(reversed(ranks))))
elif two_pair(ranks):
return (3, two_pair(ranks), kind(1, ranks))
elif kind(2, ranks):
return (2, ranks)
else:
return (1, ranks)
deck = [r + s for r in "23456789TJQKA" for s in "shdc"]
shuffle(deck)
def deal(deck, number, n = 5):
output = []
for i in range(number):
output.append(deck[i*n:n*(i+1)])
return output
hands = deal(deck, 5)
def poker(hands):
return allMax(hands, key = card_rank)
print hands
print poker(hands)
| true
|
24bc5fc0cf6053fb4e69ca145ac3b3f7862aa36f
|
Python
|
trevorandersen/colour
|
/colour/examples/contrast/examples_contrast.py
|
UTF-8
| 3,310
| 2.96875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Showcases contrast sensitivity computations.
"""
from pprint import pprint
import numpy as np
from scipy.optimize import fmin
import colour
from colour.utilities import as_float, message_box
from colour.plotting import colour_style, plot_single_function
message_box('Contrast Sensitivity Computations')
colour_style()
message_box(('Computing the contrast sensitivity for a spatial frequency "u" '
'of 4, an angular size "X_0" of 60 and a retinal illuminance "E" '
'of 65 using "Barten (1999)" method.'))
pprint(colour.contrast_sensitivity_function(u=4, X_0=60, E=65))
pprint(
colour.contrast.contrast_sensitivity_function_Barten1999(
u=4, X_0=60, E=65))
print('\n')
message_box(('Computing the minimum detectable contrast with the assumed '
'conditions for UHDTV applications as given in "ITU-R BT.2246-4"'
'"Figure 31" and using "Barten (1999)" method.'))
settings_BT2246 = {
'k': 3.0,
'T': 0.1,
'X_max': 12,
'N_max': 15,
'n': 0.03,
'p': 1.2274 * 10 ** 6,
'phi_0': 3 * 10 ** -8,
'u_0': 7,
}
def maximise_spatial_frequency(L):
"""
Maximises the spatial frequency :math:`u` for given luminance value.
Parameters
----------
L : numeric or array_like
Luminance value at which to maximize the spatial frequency :math:`u`.
Returns
-------
numeric or ndarray
Maximised spatial frequency :math:`u`.
"""
maximised_spatial_frequency = []
for L_v in L:
X_0 = 60
d = colour.contrast.pupil_diameter_Barten1999(L_v, X_0)
sigma = colour.contrast.sigma_Barten1999(0.5 / 60, 0.08 / 60, d)
E = colour.contrast.retinal_illuminance_Barten1999(L_v, d, True)
maximised_spatial_frequency.append(
fmin(lambda x: (
-colour.contrast.contrast_sensitivity_function_Barten1999(
u=x,
sigma=sigma,
X_0=X_0,
E=E,
**settings_BT2246)
), 0, disp=False)[0])
return as_float(np.array(maximised_spatial_frequency))
L = np.logspace(np.log10(0.01), np.log10(100), 100)
X_0 = Y_0 = 60
d = colour.contrast.barten1999.pupil_diameter_Barten1999(L, X_0, Y_0)
sigma = colour.contrast.barten1999.sigma_Barten1999(0.5 / 60, 0.08 / 60, d)
E = colour.contrast.barten1999.retinal_illuminance_Barten1999(L, d)
u = maximise_spatial_frequency(L)
pprint(1 / colour.contrast_sensitivity_function(
u=u, sigma=sigma, E=E, X_0=X_0, Y_0=Y_0, **settings_BT2246) * 2 *
(1 / 1.27))
pprint(1 / colour.contrast.contrast_sensitivity_function_Barten1999(
u=u, sigma=sigma, E=E, X_0=X_0, Y_0=Y_0, **settings_BT2246) * 2 *
(1 / 1.27))
plot_single_function(
lambda x: (
1 / colour.contrast.contrast_sensitivity_function_Barten1999(
u=u, sigma=sigma, E=E, X_0=X_0, Y_0=Y_0, **settings_BT2246)
* 2 * (1 / 1.27)),
samples=L,
log_x=10,
**{
'title':
'Examples of HVS Minimum Detectable Contrast Characteristics',
'x_label':
'Luminance ($cd/m^2$)',
'y_label':
'Minimum Detectable Contrast',
'axes.grid.which':
'both'
})
| true
|
229e35564b9270f7cfe40a63479d391773a6efb0
|
Python
|
dashang/ga-learner-dsmp-repo
|
/Banking_Inference_from_Datacode.py
|
UTF-8
| 2,473
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
data = pd.read_csv(path)
data_sample = data.sample(n=sample_size,random_state=0)
sample_mean = np.mean(data_sample['installment'])
sample_std = data_sample['installment'].std()
margin_of_error = z_critical*sample_std/math.sqrt(sample_size)
confidence_interval = (sample_mean-margin_of_error,sample_mean+margin_of_error)
true_mean = np.mean(data['installment'])
print(true_mean,confidence_interval)
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
fig,axes = plt.subplots(3,1,figsize=(10,4))
for i in range(len(sample_size)):
m=[]
for j in range(1000):
d_sample = data.sample(n=sample_size[i])
m.append(np.mean(d_sample['installment']))
mean_series = pd.Series(m)
axes[i].hist(mean_series[i],bins=10)
plt.show()
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
data['int.rate'] = data['int.rate'].str.replace('%','').apply(float)/100
x1 = data[data['purpose']=='small_business']['int.rate']
value=data['int.rate'].mean()
z_statistic, p_value = ztest(x1,value=value,alternative='larger')
print(p_value)
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
x1 = data[data['paid.back.loan'] == 'No']['installment']
x2 = data[data['paid.back.loan']=='Yes']['installment']
z_statistic,p_value = ztest(x1,x2)
print(p_value)
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
df = 6) # Df = number of variable categories(in purpose) - 1
#Code starts here
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts().sort_index()
no = data[data['paid.back.loan']=='No']['purpose'].value_counts().sort_index()
observed = pd.concat([yes.transpose(),no.transpose()],axis=1,keys=['Yes','No'])
chi2,p,dof,ex = chi2_contingency(observed)
print(observed)
print(chi2,p,dof,ex)
| true
|
22dbde10dd1b79866048a613616ebad1dcb3f8b2
|
Python
|
davis-lin/Python-Practices
|
/4.7.6.py
|
UTF-8
| 89
| 2.59375
| 3
|
[] |
no_license
|
def echo(anything):
    """Return *anything* unchanged (identity function)."""
    result = anything
    return result
help(echo)
| true
|
119235e1dba8ab57283113573240a645b45b2e32
|
Python
|
LwqDeveloper/ToolShell
|
/selectorUnrefs/FindSelectorUnrefs.py
|
UTF-8
| 11,719
| 2.609375
| 3
|
[] |
no_license
|
# coding:utf-8
import os
import re
import sys
import getopt
reserved_prefixs = ["-[", "+["]
# 获取入参参数
def input_parameter():
opts, args = getopt.getopt(sys.argv[1:], '-a:-p:-w:-b:',
['app_path=', 'project_path=', 'black_list_Str', 'white_list_str'])
black_list_str = ''
white_list_str = ''
white_list = []
black_list = []
# 入参判断
for opt_name, opt_value in opts:
if opt_name in ('-a', '--app_path'):
# .app文件路径
app_path = opt_value
if opt_name in ('-p', '--project_path'):
# 项目文件路径
project_path = opt_value
if opt_name in ('-b', '--black_list_Str'):
# 检测黑名单前缀,不检测谁
black_list_Str = opt_value
if opt_name in ('-w', '--white_list_str'):
# 检测白名单前缀,只检测谁
white_list_str = opt_value
if len(black_list_str) > 0:
black_list = black_list_str.split(",")
if len(white_list_str) > 0:
white_list = white_list_str.split(",")
if len(white_list) > 0 and len(black_list) > 0:
print("\033[0;31;40m白名单【-w】和黑名单【-b】不能同时存在\033[0m")
exit(1)
# 判断文件路径存不存在
if not os.path.exists(project_path):
print("\033[0;31;40m输入的项目文件路径【-p】不存在\033[0m")
exit(1)
app_path = verified_app_path(app_path)
if not app_path:
exit('输入的app路径不存在,停止运行')
return app_path, project_path, black_list, white_list
def verified_app_path(path):
if path.endswith('.app'):
appname = path.split('/')[-1].split('.')[0]
path = os.path.join(path, appname)
if appname.endswith('-iPad'):
path = path.replace(appname, appname[:-5])
if not os.path.isfile(path):
return None
if not os.popen('file -b ' + path).read().startswith('Mach-O'):
return None
return path
# 获取protocol中所有的方法
def header_protocol_selectors(file_path):
# 删除路径前后的空格
file_path = file_path.strip()
if not os.path.isfile(file_path):
return None
protocol_sels = set()
file = open(file_path, 'r')
is_protocol_area = False
# 开始遍历文件内容
for line in file.readlines():
# 删除注释信息
# delete description
line = re.sub('\".*\"', '', line)
# delete annotation
line = re.sub('//.*', '', line)
# 检测是否是 @protocol
# match @protocol
if re.compile('\s*@protocol\s*\w+').findall(line):
is_protocol_area = True
# match @end
if re.compile('\s*@end').findall(line):
is_protocol_area = False
# match sel
if is_protocol_area and re.compile('\s*[-|+]\s*\(').findall(line):
sel_content_match_result = None
# - (CGPoint)convertPoint:(CGPoint)point toCoordinateSpace:(id <UICoordinateSpace>)coordinateSpace
if ':' in line:
# match sel with parameters
# 【"convertPoint:","toCoordinateSpace:"]
sel_content_match_result = re.compile('\w+\s*:').findall(line)
else:
# - (void)invalidate;
# match sel without parameters
# invalidate;
sel_content_match_result = re.compile('\w+\s*;').findall(line)
if sel_content_match_result:
# 方法参数拼接
# convertPoint:toCoordinateSpace:
funcList = ''.join(sel_content_match_result).replace(';', '')
protocol_sels.add(funcList)
file.close()
return protocol_sels
# 获取所有protocol定义的方法
def protocol_selectors(path, project_path):
print('获取所有的protocol中的方法...')
header_files = set()
protocol_sels = set()
# 获取当前引用的系统库中的方法列表
system_base_dir = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
# get system librareis
lines = os.popen('otool -L ' + path).readlines()
for line in lines:
# 去除首尾空格
line = line.strip()
# /System/Library/Frameworks/MediaPlayer.framework/MediaPlayer (compatibility version 1.0.0, current version 1.0.0)
# /System/Library/Frameworks/MediaPlayer.framework/MediaPlayer
# delete description,
line = re.sub('\(.*\)', '', line).strip()
if line.startswith('/System/Library/'):
# [0:-1],获取数组的左起第一个,到倒数最后一个,不包含最后一个,[1,-1)左闭右开
library_dir = system_base_dir + '/'.join(line.split('/')[0:-1])
if os.path.isdir(library_dir):
# 获取当前系统架构中所有的类
# 获取合集
header_files = header_files.union(os.popen('find %s -name \"*.h\"' % library_dir).readlines())
if not os.path.isdir(project_path):
exit('Error: project path error')
# 获取当前路径下面所有的.h文件路径
header_files = header_files.union(os.popen('find %s -name \"*.h\"' % project_path).readlines())
for header_path in header_files:
# 获取所有查找到的文件下面的protocol方法,这些方法,不能用来统计
header_protocol_sels = header_protocol_selectors(header_path)
if header_protocol_sels:
protocol_sels = protocol_sels.union(header_protocol_sels)
return protocol_sels
def imp_selectors(path):
print('获取所有的方法,除了setter and getter方法...')
# return struct: {'setupHeaderShadowView':['-[TTBaseViewController setupHeaderShadowView]']}
# imp 0x100001260 -[AppDelegate setWindow:] ==>> -[AppDelegate setWindow:],setWindow:
re_sel_imp = re.compile('\s*imp\s*0x\w+ ([+|-]\[.+\s(.+)\])')
re_properties_start = re.compile('\s*baseProperties 0x\w{9}')
re_properties_end = re.compile('\w{16} 0x\w{9} _OBJC_CLASS_\$_(.+)')
re_property = re.compile('\s*name\s*0x\w+ (.+)')
imp_sels = {}
is_properties_area = False
# “otool - ov”将输出Objective - C类结构及其定义的方法。
for line in os.popen('/usr/bin/otool -oV %s' % path).xreadlines():
results = re_sel_imp.findall(line)
if results:
# imp 0x100001260 -[AppDelegate setWindow:] ==>> [-[AppDelegate setWindow:],setWindow:]
(class_sel, sel) = results[0]
if sel in imp_sels:
imp_sels[sel].add(class_sel)
else:
imp_sels[sel] = set([class_sel])
else:
# delete setter and getter methods as ivar assignment will not trigger them
# 删除相关的set方法
if re_properties_start.findall(line):
is_properties_area = True
if re_properties_end.findall(line):
is_properties_area = False
if is_properties_area:
property_result = re_property.findall(line)
if property_result:
property_name = property_result[0]
if property_name and property_name in imp_sels:
# properties layout in mach-o is after func imp
imp_sels.pop(property_name)
# 拼接set方法
setter = 'set' + property_name[0].upper() + property_name[1:] + ':'
# 干掉set方法
if setter in imp_sels:
imp_sels.pop(setter)
return imp_sels
def ref_selectors(path):
print('获取所有被调用的方法...')
re_selrefs = re.compile('__TEXT:__objc_methname:(.+)')
ref_sels = set()
lines = os.popen('/usr/bin/otool -v -s __DATA __objc_selrefs %s' % path).readlines()
for line in lines:
results = re_selrefs.findall(line)
if results:
ref_sels.add(results[0])
return ref_sels
def ignore_selectors(sel):
    """True for selectors that must never be reported as unused.

    '.cxx_destruct' is compiler-generated and 'load' is called by the
    Objective-C runtime, so absence from __objc_selrefs is expected.
    """
    return sel in ('.cxx_destruct', 'load')
def filter_selectors(sels):
    """Return the subset of *sels* starting with a reserved method prefix.

    Keeps only full selector names (module-level ``reserved_prefixs``,
    i.e. strings beginning with '-[' or '+[').
    """
    return {sel for sel in sels
            if any(sel.startswith(prefix) for prefix in reserved_prefixs)}
def unref_selectors(path, project_path):
# 获取所有类的protocol的方法集合
protocol_sels = protocol_selectors(path, project_path)
# 获取项目所有的引用方法
ref_sels = ref_selectors(path)
if len(ref_sels) == 0:
exit('获取项目所有的引用方法为空....')
# 获取所有的方法,除了set方法
imp_sels = imp_selectors(path)
print("\n")
if len(imp_sels) == 0:
exit('Error: imp selectors count null')
unref_sels = set()
for sel in imp_sels:
# 所有的方法,忽略白名单
if ignore_selectors(sel):
continue
# 如果当前的方法不在protocol中,也不再引用的方法中,那么认为这个方法没有被用到
# protocol sels will not apppear in selrefs section
if sel not in ref_sels and sel not in protocol_sels:
unref_sels = unref_sels.union(filter_selectors(imp_sels[sel]))
return unref_sels
# 黑白名单过滤
def filtration_list(unref_sels, black_list, white_list):
# 黑名单过滤
temp_unref_sels = list(unref_sels)
if len(black_list) > 0:
# 如果黑名单存在,那么将在黑名单中的前缀都过滤掉
for unref_sel in temp_unref_sels:
for black_prefix in black_list:
class_method = "+[%s" % black_prefix
instance_method = "-[%s" % black_prefix
if (unref_sel.startswith(class_method) or unref_sel.startswith(
instance_method)) and unref_sel in unref_sels:
unref_sels.remove(unref_sel)
break
# 白名单过滤
temp_array = []
if len(white_list) > 0:
# 如果白名单存在,只留下白名单中的部分
for unref_sel in unref_sels:
for white_prefix in white_list:
class_method = "+[%s" % white_prefix
instance_method = "-[%s" % white_prefix
if unref_sel.startswith(class_method) or unref_sel.startswith(instance_method):
temp_array.append(unref_sel)
break
unref_sels = temp_array
return unref_sels
# 整理结果,写入文件
def write_to_file(unref_sels):
file_name = 'selector_unrefs.txt'
f = open(os.path.join(sys.path[0].strip(), file_name), 'w')
unref_sels_num_str = '查找到未被使用的方法: %d个\n' % len(unref_sels)
print(unref_sels_num_str)
f.write(unref_sels_num_str)
num = 1
for unref_sel in unref_sels:
unref_sels_str = '%d : %s' % (num, unref_sel)
print(unref_sels_str)
f.write(unref_sels_str + '\n')
num = num + 1
f.close()
print('\n项目中未使用方法检测完毕,相关结果存储到当前目录 %s 中' % file_name)
print('请在项目中进行二次确认后处理')
if __name__ == '__main__':
# 获取入参
app_path, project_path, black_list, white_list = input_parameter()
# 获取未使用方法
unref_sels = unref_selectors(app_path, project_path)
# 黑白名单过滤
unref_sels = filtration_list(unref_sels, black_list, white_list)
# 打印写入文件
write_to_file(unref_sels)
| true
|
72308ed09c968b3db6f38f5ca92e8814c1d32ce9
|
Python
|
VamsiMohanRamineedi/Algorithms
|
/538. Convert BST to Greater Tree.py
|
UTF-8
| 1,541
| 3.78125
| 4
|
[] |
no_license
|
# Convert BST to Greater Tree: Time: O(n), space: O(log n) average case and O(n) worst case
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Convert a BST so each node holds its value plus all greater node values.

    Performs a reverse in-order walk (right subtree, node, left subtree),
    accumulating the running sum of everything visited so far.
    """

    def __init__(self):
        # Running sum of node values visited so far (greater values first).
        self.total = 0

    def convertBST(self, root):
        """Mutate the tree rooted at *root* in place and return it."""
        if root is None:
            return root
        # Visit larger values first so 'total' is the sum of greater values.
        self.convertBST(root.right)
        self.total += root.val
        root.val = self.total
        self.convertBST(root.left)
        return root
'''
class Solution:
def convertBST(self, root: 'TreeNode') -> 'TreeNode':
if not root:
return root
self.sum = 0 # to maintain sum of the nodes on the right side
def helper(node):
# leaf node, sum will be sum till then plus leaf node value
if not node.left and not node.right:
self.sum += node.val
node.val = self.sum
return self.sum
# always traverse to the right most node in the tree, which is the largest value in BST
if node.right:
node.val += helper(node.right)
self.sum = node.val
if not node.right:
node.val += self.sum
if node.left:
self.sum = node.val
return helper(node.left)
return self.sum
helper(root)
return root
'''
| true
|
7aa8788a240f74e05010a453615346c0eb7b1a7b
|
Python
|
mohitreddy1996/IEEE_Summer_Projects-2015
|
/IEEE-Applied-Python/merge_sort.py
|
UTF-8
| 563
| 3.015625
| 3
|
[] |
no_license
|
import sys
def merge(a, p, q, r):
    """Merge the two adjacent sorted runs a[p..q] and a[q+1..r] in place.

    Standard sentinel-based (CLRS) merge: copy each run into a temporary
    list, append an infinite sentinel to both, then repeatedly write the
    smaller head back into ``a``.

    Args:
        a: list being sorted (mutated in place).
        p: start index of the left run (inclusive).
        q: end index of the left run (inclusive).
        r: end index of the right run (inclusive).
    """
    # Slices replace the original index-copy loops; same contents.
    L = a[p:q + 1]
    R = a[q + 1:r + 1]
    # float('inf') works on Python 2 and 3 alike — sys.maxint no longer
    # exists on Python 3 — and is a safe sentinel for any finite value.
    L.append(float('inf'))
    R.append(float('inf'))
    w = 0
    e = 0
    for i in range(p, r + 1):
        if L[w] < R[e]:
            a[i] = L[w]
            w += 1
        else:
            a[i] = R[e]
            e += 1
def mergesort(a, p, r):
    """Recursively sort a[p..r] (inclusive) in place via merge sort.

    Args:
        a: list to sort (mutated in place).
        p: first index of the range.
        r: last index of the range.
    """
    if p < r:
        # Floor division keeps the midpoint an int on Python 3 as well;
        # the original '/' yields a float there, breaking list indexing.
        q = (p + r) // 2
        mergesort(a, p, q)
        mergesort(a, q + 1, r)
        merge(a, p, q, r)
data1=raw_input("Enter the array ")
data1=data1.split(' ')
len1=len(data1)
for i in range(0,len1):
data1[i]=int(data1[i])
mergesort(data1,0,len1-1)
print data1
| true
|
a5aa56d9b5a2d611c63bf64888d0b28262e406f9
|
Python
|
yiming1012/MyLeetCode
|
/LeetCode/递归/779. 第K个语法符号.py
|
UTF-8
| 1,153
| 3.703125
| 4
|
[] |
no_license
|
"""
779. 第K个语法符号
在第一行我们写上一个 0。接下来的每一行,将前一行中的0替换为01,1替换为10。
给定行数 N 和序数 K,返回第 N 行中第 K个字符。(K从1开始)
例子:
输入: N = 1, K = 1
输出: 0
输入: N = 2, K = 1
输出: 0
输入: N = 2, K = 2
输出: 1
输入: N = 4, K = 5
输出: 1
解释:
第一行: 0
第二行: 01
第三行: 0110
第四行: 01101001
注意:
N 的范围 [1, 30].
K 的范围 [1, 2^(N-1)].
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/k-th-symbol-in-grammar
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from functools import lru_cache
class Solution:
    def kthGrammar(self, N: int, K: int) -> int:
        """Return the K-th symbol (1-indexed) in the N-th row of the grammar.

        Iterative form of the parent-walk recursion: walking from position
        K in row N up to position 1 in row 1, the symbol flips exactly once
        for every step where the position is a right child (even index);
        position 1 of any row is always 0.
        """
        flips = 0
        pos = K
        while pos > 1:
            if pos % 2 == 0:
                # Right child holds the inverse of its parent's symbol.
                flips ^= 1
            # Parent position in the previous row.
            pos = (pos + 1) // 2
        # Start symbol is 0; apply the accumulated number of inversions.
        return flips
if __name__ == '__main__':
N, K = 2, 2
print(Solution().kthGrammar(N, K))
| true
|
adf0c78ac09a808f316f2db05d20b38db31105cd
|
Python
|
Algolytics/dq_client
|
/dq/response.py
|
UTF-8
| 1,191
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
import json
from .error import DQError
class Response:
    """Lightweight HTTP response wrapper: method, status code, raw body."""

    def __init__(self, method, status, content):
        self.method = method
        self.status = status
        self.content = content

    def is_ok(self):
        # Any truthy status below 400 counts as success.
        return self.status and int(self.status) < 400

    def json(self):
        # BUG FIX: the original returned self.result, an attribute never
        # assigned anywhere in this class (guaranteed AttributeError).
        # Return the parsed body instead, same as object().
        return self.object()

    def object(self):
        """Parse the raw body as JSON and return the resulting object."""
        return json.loads(self.content)

    def __repr__(self):
        return "%s %s" % (self.status, self.content)
class from_response():
    """Decorator: unwrap the Response returned by the wrapped call.

    A 404 becomes None, any other error status raises DQError, and a
    successful body is deserialized — optionally into *clazz* instances.
    """

    def __init__(self, clazz=None):
        self.clazz = clazz

    def __call__(self, f):
        def wrapper(*args, **kwargs):
            resp = f(*args, **kwargs)
            # Treat "not found" as an absent value, not an error.
            if resp.status == 404:
                return None
            if not resp.is_ok():
                raise DQError(status=resp.status, message=resp.content)
            payload = resp.object()
            # A JSON array maps element-wise; anything else maps directly.
            if isinstance(payload, list):
                return [self.__to_object(entry) for entry in payload]
            return self.__to_object(payload)
        return wrapper

    def __to_object(self, obj):
        # Without a target class, hand back the raw deserialized object;
        # otherwise the JSON object's keys become constructor kwargs.
        if self.clazz is None:
            return obj
        return self.clazz(**obj)
| true
|
993fbda9136d96d97b2956c35e86932c9f24c2c9
|
Python
|
ezradiniz/blockchain-from-scratch
|
/blockchain.py
|
UTF-8
| 702
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
from block import Block
class Blockchain(object):
    """A minimal chain of Blocks rooted at the genesis block."""

    def __init__(self):
        # Every chain starts from the canonical genesis block.
        self.chain = [Block.genesis()]

    def add_block(self, data):
        """Mine a block holding *data* on the chain tip, append and return it."""
        block = Block.mine_block(self.chain[-1], data)
        self.chain.append(block)
        return block

    def is_valid_chain(self, chain):
        """True if *chain* starts at genesis and every link hashes correctly."""
        # The first block must match the genesis block (string comparison,
        # as in the original implementation).
        if str(chain[0]) != str(Block.genesis()):
            return False
        # Each block must reference its predecessor's hash and re-hash
        # to its own recorded hash.
        for prev, block in zip(chain, chain[1:]):
            if block.last_hash != prev.cur_hash:
                return False
            if block.cur_hash != Block.block_hash(block):
                return False
        return True
| true
|
100c35d9f0d8e1acd373668aba11aca7fdd73af6
|
Python
|
amoisoo/APPJINYOUNG
|
/00_doc/test/soup4.py
|
UTF-8
| 3,045
| 2.6875
| 3
|
[] |
no_license
|
table = """
<table align="center" class="table mb-0 table-bordered table-sm table-width-80">
<thead>
<tr>
<th style="width: 19.9197%;">label</th>
<th style="width: 19.881%;">한국</th>
<th style="width: 19.8809%;">중국</th>
<th style="width: 20.0803%;">일본</th>
<th style="width: 20.0000%;">인도</th>
</tr>
</thead>
<tbody>
<tr>
<td style="width: 19.9197%;">10대</td>
<td style="width: 19.881%;">11</td>
<td style="width: 19.8809%;">21</td>
<td style="width: 20.0803%;">31</td>
<td style="width: 20.0000%;">41</td>
</tr>
<tr>
<td style="width: 19.9197%;">20대</td>
<td style="width: 19.881%;">12</td>
<td style="width: 19.8809%;">22</td>
<td style="width: 20.0803%;">32</td>
<td style="width: 20.0000%;">42</td>
</tr>
<tr>
<td style="width: 19.9197%;">30대</td>
<td style="width: 19.881%;">13</td>
<td style="width: 19.8809%;">23</td>
<td style="width: 20.0803%;">33</td>
<td style="width: 20.0000%;">43</td>
</tr>
<tr>
<td style="width: 19.9197%;">40대</td>
<td style="width: 19.881%;">14</td>
<td style="width: 19.8809%;">24</td>
<td style="width: 20.0803%;">34</td>
<td style="width: 20.0000%;">44</td>
</tr>
<tr>
<td style="width: 19.9197%;">50대</td>
<td style="width: 19.881%;">15</td>
<td style="width: 19.8809%;">25</td>
<td style="width: 20.0803%;">35</td>
<td style="width: 20.0000%;">45</td>
</tr>
</tbody>
</table>
<br>
"""
from bs4 import BeautifulSoup
class TABLE:
def __init__(self, url = "" ):
self.DATA = BeautifulSoup(url, 'html.parser')
self.getHEADER = self.DATA.table.thead.tr
self.getTABLEBODY = self.DATA.table.tbody.children
def getTableHEADER(self):
HEADER = self.getHEADER
result = []
for index, i in enumerate(HEADER):
if (i.name != "th"): continue
result.append( i.string )
return result
def getTableBODY(self):
BODY = self.getTABLEBODY
result = []
for index, i in enumerate( BODY ):
if ( i.name != "tr" ) : continue
try:
column = []
for j in i.children :
if(j.name == "td"):
#print(j.name, j.string)
column.append( j.string )
result.append( column )
except:pass
return result
def get_RAW(self):
result = []
result.append( self.getTableHEADER() )
result.append( self.getTableBODY() )
return result
def CHART_SINGLE(self):
getDATA = self.get_RAW()
result = []
for index, i in enumerate(getDATA[1]):
result.append( [ i[0], i[1] ] )
print(i[0], i[1])
return result
def CHART_PIE(self):
getDATA = self.get_RAW()
for index, i in enumerate(getDATA[0]):
print(i)
print( getDATA[1][index] )
DATA = TABLE(table)
getList = DATA.CHART_SINGLE()
| true
|
f7a7d3f17025e29f3f032972153ebb99a6b8090f
|
Python
|
hyeyeonjung/YONI
|
/word.py
|
UTF-8
| 783
| 3.953125
| 4
|
[] |
no_license
|
# Korean word-chain game: the second 3-character word must begin with the
# last character of the first word.
word1 = input("글자를 입력하세요.")
if (len(word1)==3):
    # NOTE(review): the unconditional 'break' at the bottom means this loop
    # runs exactly once; 'while True' has no effect as written.
    while True:
        word2 = input("글자를 입력하세요.")
        # Correct when word2 has 3 chars and chains off word1's last char.
        if(len(word2)==3) and (word2[0]==word1[2]):
            print("정답입니다.")
        else:
            print("오답입니다.",word2[0],word1[2])
        break
else:
    # The first word itself was not 3 characters long.
    print("오답입니다.")
print("게임이 끝났습니다.")
# while True:
# word2 = input("글자를 입력하세요.")
# if(len(word2)==3) and (word2[0]==word1[2]):
# {
# print("정답입니다.")
# }
# else:
# {
# print("오답입니다.")
# }
# break
| true
|
35905d46f31421232959fe1b79fdd44301d1c22c
|
Python
|
thomas-rohde/Classes-Python
|
/exercises/exe81 - 90/exe085.py
|
UTF-8
| 275
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
# Read a 3x3 integer matrix from the user, then print it row by row.
matriz = [[], [], []]
R = list(range(0, 3))
for c in R:
    for i in R:
        matriz[c].append(int(input(f'Digite um valor para[{c}, {i}]: ')))
print('-' * 30)
for d in R:
    print('(', end=' ')
    # BUG FIX: the original iterated over the undefined name 'r'
    # (NameError at runtime); the column index range is R, as used
    # everywhere else in this script.
    for j in R:
        print(f'[{matriz[d][j]:^5}]', end=' ')
    print(')')
| true
|
061e688a1067fdf6bd4bf4babca62e155c4947f3
|
Python
|
zhuchangzhan/SEAS
|
/deprecated/atmosphere_effects/mixing_ratio_generator.py
|
UTF-8
| 4,966
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This is a logistic module that generates the mixing ratio files
This module does not handle the physics and chemistry that goes into
determining the actual mixing ratio of the atmosphere
Currently have added constant, increasing and decreasing ratios
but how do you handle some more complex variable mixing ratios?
How should they be loaded and interpreted?
"""
import os
import sys
import numpy as np
from SEAS_Utils import to_float
from SEAS_Utils.common_utils.data_saver import check_file_exist, check_path_exist
class mixing_ratio_generator():
def __init__(self,
ratio_input,
filler = True,
filler_molecule = "N2",
pressures = [100000,10000,1000,100,10,1,0.1,0.01,0.001,0.0001,0.00001],
path = "../../input/atmosphere_data/Mixing_Ratio",
name = "Temp.txt",
overwrite = False
):
self.ratio_input = ratio_input
self.filler = filler
self.filler_molecule = filler_molecule
self.pressures = pressures
self.path = path
self.name = name
self.overwrite = overwrite
def generate(self):
# create pressures
self.data = [["Pressure"]]
for P in self.pressures:
self.data.append([str(P)])
Surface_Pressure = self.pressures[0]
# add each molecules
for Molecule in self.ratio_input:
self.data[0].append(Molecule)
Surface_Ratio = to_float(self.ratio_input[Molecule]["Surface_Ratio"])
Type = self.ratio_input[Molecule]["Type"]
Transition = self.ratio_input[Molecule]["Transition"]
Start_Pressure = to_float(self.ratio_input[Molecule]["Start_Pressure"])
End_Pressure = to_float(self.ratio_input[Molecule]["End_Pressure"])
End_Ratio = to_float(self.ratio_input[Molecule]["End_Ratio"])
for j,pres in enumerate(self.pressures):
if Type == "constant":
self.data[j+1].append(str(Surface_Ratio))
elif Type in ["decrease","increase"]:
if pres >= Start_Pressure:
self.data[j+1].append(str(Surface_Ratio))
elif pres <= End_Pressure:
self.data[j+1].append(str(End_Ratio))
else:
current = Surface_Ratio+(End_Ratio-Surface_Ratio)*(1-np.log10(pres/End_Pressure)/np.log10(Start_Pressure/End_Pressure))
self.data[j+1].append(str(current))
# assuming single filler for now
if self.filler:
if self.filler_molecule in self.data[0]:
print "Simulation Terminated"
print "Filler Molecule %s already in simulation molecules"%self.filler_molecule
print "Please remove filler from list or select a new filler"
sys.exit()
self.data[0].append(self.filler_molecule)
for k,ratio in enumerate(self.data[1:]):
total_ratio = sum([float(x) for x in ratio[1:]])
if total_ratio > 100:
print "Total Mixing Ratio exceed maximum, check mixing ratio generation"
print self.data[0]
print ratio
sys.exit()
self.data[k+1].append(str(100-total_ratio))
print "Mixing Ratio File Generated!"
return self.data
def save(self):
check_path_exist(self.path)
save_path = os.path.join(self.path,self.name)
check_file_exist(save_path)
with open(save_path,"w") as file:
for i,info in enumerate(self.data):
file.write(" ".join(info))
if i == len(self.data)-1:
break
file.write("\n")
print "Mixing Ratio file saved to %s"%save_path
| true
|
af655f62c6045848b8621c62f26802f557e63902
|
Python
|
Hamitay/MO443
|
/trab4/ex1.py
|
UTF-8
| 3,559
| 3.234375
| 3
|
[] |
no_license
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Loads the image in grayscale
def load_image():
img_path = "img/bitmap.pbm"
return cv2.imread(img_path, 0)
def display_image(img, img_title):
# Converts to unsigned 8 bit int
#abs_img = cv2.convertScaleAbs(img)
cv2.imwrite(f'{img_title}.jpg', img)
#cv2.imshow(img_title, abs_img)
def end_program():
# Cleans images
cv2.waitKey(0)
cv2.destroyAllWindows()
def createKernel(height, width):
return np.ones((height, width))
def dilate(img, kernel):
return cv2.dilate(img, kernel, iterations=1)
def erode(img, kernel):
return cv2.erode(img, kernel, iterations=1)
def close(img, kernel):
return cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
def connected_component(img):
return cv2.connectedComponents(img)
img = load_image()
display_image(img, 'Imagem Original em jpg')
#Invert the image
img = (255 - img)
display_image(img, 'Imagem Negativada')
# Step 1
kernel = createKernel(1, 100)
step_1 = dilate(img, kernel)
display_image(step_1, 'Passo 1 Dilatação - Elemento Estruturante 100x1')
# Step 2
step_2 = erode(step_1, kernel)
display_image(step_2, 'Passo 2 Erosão - Elemento Estruturante 100x1')
# Step 3
kernel = createKernel(200, 1)
step_3 = dilate(img, kernel)
display_image(step_3, 'Passo 3 Dilatação - Elemento Estruturante 1x200')
# Step 4
step_4 = erode(step_3, kernel)
display_image(step_4, 'Passo 4 Erosão - Elemento Estruturante 1x200')
# Step 5
step_5 = step_2 & step_4
display_image(step_5, 'Passo 5 Operação AND - Passos 2 e 4')
# Step 6
kernel = createKernel(1, 30)
step_6 = close(step_5, kernel)
display_image(step_6, 'Passo 6 Fechamento - Passo 5')
# Step 7
ret, labels = connected_component(step_6)
total_size = img.size
text_rectangles = []
word_rectangles = []
display_image(img, 'Componentes Conexos e suas Labels')
for label in range(1,ret):
mask = np.array(labels, dtype=np.uint8)
# Make our connected component white
mask[mask != label] = 0
mask[labels == label] = 255
#Get the rectangle and find the component in the original image
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
(x,y,w,h) = cv2.boundingRect(contours[0])
rectangle = img[y:y+h,x:x+w]
# Count number of white pixels (black in original image)
num_white = np.count_nonzero(rectangle == 255)
ratio = num_white/(w*h)
# Use empyrical ration to find word lines
has_word = ratio < 0.52 and ratio > 0.1
# If it has words we separate them
if has_word:
text_rectangles.append((x,y,w,h))
# Count number of words
# Dilate and close to aggroup the words
kernel = createKernel(10, 10)
step_1 = dilate(rectangle, kernel)
step_2 = close(step_1, createKernel(2,2))
word_ret, word_labels = connected_component(step_2)
for word_label in range(1, word_ret):
word_mask = np.array(word_labels, dtype=np.uint8)
word_mask[word_mask != word_label] = 0
word_mask[word_labels == word_label] = 255
#Get the rectangle and find the component in the original image
word_contours = cv2.findContours(word_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
(wx,wy,ww,wh) = cv2.boundingRect(word_contours[0])
word_rectangles.append((wx+x, wy+y, ww, wh))
for rectangle in word_rectangles:
x,y,w,h = rectangle
img = cv2.rectangle(img, (x,y), (x+w, y+h), (255, 255, 255), 2)
display_image(img, 'Componentes Conexos de Palavras de Texto')
end_program()
| true
|
72be3a8c34b2150c044de316d99af5f41ef893fb
|
Python
|
AlvarocJesus/Exercicios_Python
|
/AulaTeorica/exerciciosModulos/exercicio2/pitagoras.py
|
UTF-8
| 98
| 2.625
| 3
|
[] |
no_license
|
from math import sqrt
def pitagoras(cateto1, cateto2):
    """Return the hypotenuse of a right triangle whose legs are *cateto1* and *cateto2*."""
    soma_quadrados = cateto1 ** 2 + cateto2 ** 2
    return sqrt(soma_quadrados)
| true
|
e918bde3c6fa82fe08670d907c895751bebef139
|
Python
|
Todorovikj/InstagramScraper
|
/index.py
|
UTF-8
| 5,393
| 2.65625
| 3
|
[] |
no_license
|
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
import os
import requests
import shutil
from xlsxwriter import Workbook
# advice: use delays to avoid getting blocked, try torrequest for changing your IP
# driver.switch_to.window(driver.window_handles[1]) changig active tab in driver chrome
chromePath="D:\\neco skola i rabota\\rabota\\learning dollars\\ChromeDriver\\chromedriver"
class App:
    """Instagram profile scraper.

    Logs in with the configured account, opens the target profile, scrolls
    until all posts are loaded, downloads every image to *path* and writes
    each post's caption to captions.xlsx.

    NOTE(review): credentials and local filesystem paths are hard-coded
    below — they should come from configuration, not source code.
    """

    def __init__(self,username='leposava10.02',password='WebScraper',targetUser='lence1970',path="D:\\neco skola i rabota\\rabota\\learning dollars\\instaScrape"):
        # Scraping-account credentials and the profile to scrape.
        self.username=username
        self.password=password
        self.targetUser=targetUser
        # Local folder where images and the captions workbook are stored.
        self.path=path
        self.driver=webdriver.Chrome("D:\\neco skola i rabota\\rabota\\learning dollars\\ChromeDriver\\chromedriver")
        self.driver.get("https://instagram.com")
        # self.error is set True by any failing step; each later step is
        # skipped once it is set, so the pipeline degrades gracefully.
        self.error=False
        self.logIn()
        if self.error is False:
            self.openTargetProfile()
        if self.error is False:
            self.scrollDown()
        if not os.path.exists(path) and self.error is False:
            os.mkdir(path)
        if self.error is False:
            self.downloadImages()
        if self.error is False:
            self.getCaptions()
        print("Scraper has finished scraping!!!")
        self.driver.close()

    def getCaptions(self):
        """Visit every post and write one (image filename, caption) row per
        post into captions.xlsx."""
        try:
            file=Workbook(self.path+"\\captions.xlsx")
            worksheet=file.add_worksheet()
            sleep(2)
            soup=BeautifulSoup(self.driver.page_source,'lxml')
            # Post thumbnails are matched by Instagram's obfuscated CSS
            # class names; these break whenever Instagram changes markup.
            allImgs=soup.find_all('div',attrs={'class':['v1Nh3','kIKUG',' _bz0w']})
            for index,img in enumerate(allImgs):
                link="https://instagram.com"+img.a['href']
                self.driver.get(link)
                sleep(2) #wait for content to load
                soup=BeautifulSoup(self.driver.page_source,'lxml')
                try:
                    caption=soup.find('div',attrs={'class':'C4VMK'}).span.string
                except Exception:
                    # Post without a caption (find() returned None).
                    caption="No caption available"
                i=index+1 #because profile photo downloaded has index 0
                imgTxt='image'+str(i)+'.jpg'
                worksheet.write(index,0,imgTxt)
                worksheet.write(index,1,caption)
        except Exception:
            print("getCaptions Exception")
            self.error=True
        finally:
            file.close() # you must close the file
# Script entry point: constructing App runs the entire scrape in __init__.
if __name__=='__main__':
    app=App()
| true
|
24b2f1475308cfeec8c77eeda05c8697c3634196
|
Python
|
brodyzt/Testing
|
/Classes.py
|
UTF-8
| 350
| 3.125
| 3
|
[] |
no_license
|
'''__author__ = 'brodyzt'
class Car:
def __init__(self):
self.color = None
def printColor(self):
print(self.color)
myCar = Car()
myCar.color = "Red"
myCar.printColor()'''
class Test:
    """Minimal iterable wrapping a fixed list of ints."""

    def __init__(self):
        # Backing data exposed through iteration.
        self.structure = [1, 2, 3, 4, 5]

    def __iter__(self):
        # BUG FIX: __iter__ must return an *iterator*. The original
        # returned the list itself, so iter(Test()) / `for x in Test()`
        # raised "TypeError: iter() returned non-iterator".
        return iter(self.structure)


mine = Test()
print()
| true
|
a214f79eb13e3c04080a666390e7216d23e0d9a8
|
Python
|
Raision-seudun-koulutuskuntayhtyma/Painonhallinta
|
/sanity2.py
|
UTF-8
| 4,499
| 3.34375
| 3
|
[
"CC0-1.0"
] |
permissive
|
# Tiivistetty versio Sanity.py-modulista eli suurinpiirtein se, mitä tuhosin vahingossa
def liukuluvuksi(syote):
    """Validate user input and convert it to a float.

    Accepts either '.' or ',' as the decimal separator. Error messages are
    kept in Finnish, matching the rest of the application.

    Args:
        syote (string): raw value typed by the user

    Returns:
        list: [error code (0 = ok), error message, value as float]
    """
    virhekoodi = 0
    virhesanoma = 'Syöte OK'
    arvo = 0

    # Trim surrounding whitespace and normalise a decimal comma to a dot.
    syote = syote.strip().replace(',', '.')

    if '.' in syote:
        osat = syote.split('.')
        if len(osat) != 2:
            # More than one separator present.
            virhekoodi = 1
            virhesanoma = 'Syöte sisältää useita erottimia. Vain yksi arvo on sallittu'
        elif not osat[0].isnumeric():
            # Non-digit characters before the decimal separator.
            virhekoodi = 3
            virhesanoma = 'Ennen desimaalierotinta ylimääräisiä merkkejä: vain numerot ja desimaalipiste on sallittu'
        elif not osat[1].isnumeric():
            # Non-digit characters after the decimal separator.
            virhekoodi = 4
            virhesanoma = 'Desimaalierottimen jälkeen ylimääräisiä merkkejä: vain numerot ja desimaalipiste on sallittu'
        else:
            arvo = float(syote)
    elif syote.isnumeric():
        # Plain integer input.
        arvo = float(syote)
    else:
        virhekoodi = 2
        virhesanoma = 'Syötteessä ylimäärisiä merkkejä: vain numerot ja desimaalipiste tai pilkku on sallittu'

    return [virhekoodi, virhesanoma, arvo]
# Funktio, jolla tarkistetaan, että syötetty arvo on haluttujen rajojen sisällä
def rajatarkistus(arvo, alaraja, ylaraja):
    """Check that *arvo* lies inside the inclusive range [alaraja, ylaraja].

    Args:
        arvo (float): value to check
        alaraja (float): smallest allowed value
        ylaraja (float): largest allowed value

    Returns:
        list: [error code (0 = ok), error message]
    """
    virhekoodi, virhesanoma = 0, 'Arvo OK'
    if arvo < alaraja:
        # Below the lower limit.
        virhekoodi, virhesanoma = 1, 'Arvo on alle alarajan (' + str(alaraja) + ')'
    if arvo > ylaraja:
        # Above the upper limit.
        virhekoodi, virhesanoma = 2, 'Arvo on yli ylärajan (' + str(ylaraja) + ')'
    return [virhekoodi, virhesanoma]
# TODO: add a function here that checks the input is text
# Function self-tests (run only when the module is executed directly)
if __name__ == '__main__':
    # 1. Input check, valid input
    syote = '123.5'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # 2. Input uses a decimal comma, otherwise valid
    syote = '123,5'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # 3. Input contains several separators
    syote = '12.3.2'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # 4. Input starts with text
    syote = 'paino 75.4'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # 5. Input ends with text
    syote = '75.4 kg'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # 6. Input is entirely text
    syote = 'sataviisi'
    print('Syöte:', syote, 'Tulokset: ', liukuluvuksi(syote))
    # Range checks
    alaraja = 1
    ylaraja = 3
    # 1. Within the limits
    arvo = 1.8
    print('Arvo:', arvo, 'Tulokset:', rajatarkistus(arvo, alaraja, ylaraja))
    # 2. Below the lower limit
    arvo = 0.8
    print('Arvo:', arvo, 'Tulokset:', rajatarkistus(arvo, alaraja, ylaraja))
    # 3. Above the upper limit
    arvo = 3.8
    print('Arvo:', arvo, 'Tulokset:', rajatarkistus(arvo, alaraja, ylaraja))
| true
|
2ca1528f4f380a64b8ff64b2e603a15b93c82b47
|
Python
|
Tatyana-jl/TrialTests
|
/Futurealms/test.py
|
UTF-8
| 531
| 3.09375
| 3
|
[] |
no_license
|
import numpy

# 10x10x10 array of random integers in the inclusive range [0, 9].
# BUG FIX: numpy.random.random_integers is deprecated and removed from
# modern NumPy; randint(0, 10, ...) draws from the same range.
matrica = numpy.random.randint(0, 10, (10, 10, 10))

# Find the first cell equal to 0 in lexicographic (x, y, z) order.
# BUG FIX: the original nested `while coordinate==0:` loops spun forever
# whenever a z-row contained no zero, and crashed on the prints if the
# whole array had no zero. argwhere returns all zero positions at once.
zeros = numpy.argwhere(matrica == 0)
if len(zeros) > 0:
    coordinate = [int(v) for v in zeros[0]]
    print ("N of column х:",coordinate[0])
    print ("N of column y:",coordinate[1])
    print ("N of column z:",coordinate[2])
    print ("Intersection point",coordinate)
else:
    print ("No zero found")
| true
|
ba5f2fbf438d1a1912f56fc84ab67449674742de
|
Python
|
xiao2mo/script-python
|
/linelength/length.py
|
UTF-8
| 156
| 2.65625
| 3
|
[] |
no_license
|
# Python 2 script: sort the lines of the file named by the first CLI
# argument by length (shortest first) and print them.
import os
import sys
# NOTE(review): `os` appears unused here — confirm before removing.
with open(sys.argv[1]) as fin:
    lines = fin.readlines()
# Stable sort by line length (length includes the trailing newline).
lines.sort(key=lambda x:len(x))
for line in lines:
    print line.rstrip("\n")
| true
|
6689b16caaab4cdee458b139be95eca903cbc7e7
|
Python
|
koushik1330/Emails_Classifications_using_ClassificationMachineLearingAlgorithms
|
/Emails-Classification-UsingSupervisedLeraningTechniques/4. Email Classification using Ada Boost Classifier.py
|
UTF-8
| 1,188
| 3.5625
| 4
|
[] |
no_license
|
"""
Using an Ada Boost Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("C:\\Users\\satyam\\Desktop\\MajorProject Final\\Emails-Classification-UsingSupervisedLeraningTechniques\\")
from email_preprocess import preprocess
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# defining the classifier
clf = AdaBoostClassifier(n_estimators=100, random_state=0)
#predicting the time of train and testing
t0 = time()
clf.fit(features_train, labels_train)
print("\nTraining time:", round(time()-t0, 3), "s\n")
t1 = time()
pred = clf.predict(features_test)
print("Predicting time:", round(time()-t1, 3), "s\n")
#calculating and printing the accuracy of the algorithm
print("Accuracy of Ada Boost Classifier: ", accuracy_score(pred,labels_test))
| true
|
2412217f8dbe3c5a81b25ed7083ae24a78547b89
|
Python
|
canadaduane/sydney-m4
|
/rtaparser.py
|
UTF-8
| 1,820
| 2.828125
| 3
|
[] |
no_license
|
import csv
import datetime

date_format = "%Y-%m-%d %H:%M"
# Cut-off timestamps: at each of these, the current travel time is emitted
# as the naive forecast for every horizon.
timeStamp = ["2010-08-03 10:28","2010-08-06 18:55","2010-08-09 16:19","2010-08-12 17:22","2010-08-16 12:13","2010-08-19 17:43","2010-08-22 10:19","2010-08-26 16:16","2010-08-29 15:04","2010-09-01 09:07","2010-09-04 09:07","2010-09-07 08:37","2010-09-10 15:46","2010-09-13 18:43","2010-09-16 07:40","2010-09-20 08:46","2010-09-24 07:25","2010-09-28 08:01","2010-10-01 13:04","2010-10-05 09:22","2010-10-08 16:43","2010-10-12 18:10","2010-10-15 14:19","2010-10-19 17:16","2010-10-23 10:28","2010-10-26 19:34","2010-10-29 11:34","2010-11-03 17:49","2010-11-07 08:01"]
forecastHorizon = [1,2,3,4,6,8,24,48,72,96]  # horizons in multiples of 15 minutes

# Parse the cut-off points once; set membership is O(1) per row.
cutoff_times = {datetime.datetime.strptime(t, date_format) for t in timeStamp}

# BUG FIX: the original leaked both file handles (the reader's file was
# never closed, the writer was closed without try/finally); `with`
# guarantees both are closed even on error.
with open('RTAData.csv') as rhf, open('lcb_submit2.csv', 'w') as whf:
    rhCSV = csv.reader(rhf)                       # read in the data
    wh = csv.writer(whf, lineterminator='\n')     # entry file
    header = next(rhCSV)  # extract the header first
    wh.writerow([""] + header[1:])
    for data in rhCSV:  # loop through each remaining line
        current_date = datetime.datetime.strptime(data[0], date_format)
        if current_date in cutoff_times:
            # For each forecast horizon write the cut-off travel time as
            # the forecast (the definition of Naive).
            for i in forecastHorizon:
                # Calculate the prediction's datetime.
                nextDate = current_date + datetime.timedelta(minutes=15*i)
                dateStr = datetime.datetime.strftime(nextDate, date_format)
                # Write the timestamp and predictions to the CSV.
                wh.writerow([dateStr] + data[1:])
# Done
| true
|
6bf32a0aca46a61ce6ad18a2cdbfc6d199710eda
|
Python
|
saratiedt/python-exercises
|
/semana 3/ImparPar.py
|
UTF-8
| 242
| 3.375
| 3
|
[] |
no_license
|
# Partition a fixed list of numbers into even (par) and odd (impar) lists.
total = [9, 5, 6, 4, 8, 12, 11, 15, 0, 1, 3, 2]
# IDIOM FIX: comprehensions over the values replace the original
# index-based `for i in range(len(total))` append loop.
par = [n for n in total if n % 2 == 0]    # even numbers, original order
impar = [n for n in total if n % 2 != 0]  # odd numbers, original order

print(f'Total: {total}')
print(f'Par: {par}')
print(f'Impar: {impar}')
| true
|
a64c2f84dfa6de9294ceaf7a7d8a26e5b4ff31f6
|
Python
|
hixio-mh/PUBGMovieDelete
|
/Test Fragments/Menu.py
|
UTF-8
| 260
| 3.03125
| 3
|
[] |
no_license
|
print("Choose and option from the following:")
print("[1] Auto detect game files")
print("[2] Use the location from path.txt")
option = input()
if int(option) == 1:
print("eureka")
input("Press ENTER to terminate this program")
raise SystemExit(0)
| true
|
514ed702121bed7423f64361575a3f5385320cf9
|
Python
|
hffan/yjy015_prj
|
/element_opt/read_sta_mag_1m.py
|
UTF-8
| 5,454
| 2.59375
| 3
|
[] |
no_license
|
#--coding:utf-8--
# date: 2019-08-14
# function: read Real-time Interplanetary Magnetic Field Values sampled once per minute
import os
import sys
import time
import calendar
import datetime
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
matplotlib.rcParams['xtick.direction'] = 'in'
matplotlib.rcParams['ytick.direction'] = 'in'
def read_data(fullpath):
    """Read Real-time Interplanetary Magnetic Field values (1-min cadence).

    The first 20 lines of the file are a human-readable header and are
    echoed to stdout; every later line is parsed as whitespace-separated
    columns: YR MO DA HHMM Day Sec S BR BT BN Btotal Lat Lon.

    Args:
        fullpath: path to the downloaded 'sta_mag_1m.txt' file.

    Returns:
        dict: keyed by 'YYYY-mm-dd HH:MM' timestamp strings; each value
        holds the source flag S, field components BR/BT/BN, total field
        Btotal, Lat/Lon and fixed provenance metadata. Empty dict when the
        file does not exist.
    """
    data = {}
    if not os.path.exists(fullpath):
        return data
    # BUG FIX: the original opened the file twice and never closed either
    # handle; read it once inside a context manager instead.
    with open(fullpath) as fh:
        lines = fh.readlines()
    # Echo the 20 header lines for reference.
    for line in lines[:20]:
        print(line.strip())
    for line in lines[20:]:
        lineList = list(line.strip().split())
        YR = int(lineList[0])
        MO = int(lineList[1])
        DA = int(lineList[2])
        HH = int(lineList[3][0:2])
        MM = int(lineList[3][2:])
        S = int(lineList[6])
        # NOTE(review): the feed flags missing samples with -999.9; they
        # are stored as-is (the NaN substitution was left commented out in
        # the original source) — confirm downstream handling.
        BR = float(lineList[7])
        BT = float(lineList[8])
        BN = float(lineList[9])
        Btotal = float(lineList[10])
        lat = float(lineList[11])
        lon = float(lineList[12])
        strTimeStamp = datetime.datetime(YR, MO, DA, HH, MM).strftime('%Y-%m-%d %H:%M')
        data[strTimeStamp] = {
            'S': S,
            'BR': BR,
            'BT': BT,
            'BN': BN,
            'Btotal': Btotal,
            'Lat': lat,
            'Lon': lon,
            'website': 'SWPC',
            'category_abbr_en': 'SWPC_STEA_mag',
        }
    return data
def _plot_panel(position, times, values, ylabel, ylim, font, title=None):
    """Draw one row of the 4-panel time-series figure with shared styling."""
    ax = plt.subplot(4, 1, position)
    plt.plot(times, values, '-.')
    plt.xlim([times[0], times[-1] + datetime.timedelta(minutes=1)])
    plt.ylim(ylim)
    plt.xlabel('UT', fontdict=font)
    plt.ylabel(ylabel, fontdict=font)
    plt.tick_params(labelsize=10)
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontname('Times New Roman')
    if title:
        plt.title(title, fontdict=font)
    plt.grid()


def plot_data(res):
    """Render a 4-panel figure (BR, BT, BN, Btotal vs UT) and save it.

    Args:
        res: dict as returned by read_data(), keyed by 'YYYY-mm-dd HH:MM'
            timestamp strings.

    Side effects: writes 'sta_mag.png' to the working directory and opens
    an interactive window via plt.show().

    BUG FIXES vs the original:
    - the body read the module-level global ``data`` instead of its ``res``
      parameter;
    - the string dict keys were used directly in ``timedelta`` arithmetic,
      which raised TypeError — they are now parsed back to datetimes;
    - the second panel plotted Btotal twice; it now shows BT;
    - the four copy-pasted subplot sections are factored into _plot_panel.
    """
    if res == {}:
        return
    timeStampArr = []
    BxArr, ByArr, BzArr, BtArr = [], [], [], []
    for key in res.keys():
        timeStampArr.append(datetime.datetime.strptime(key, '%Y-%m-%d %H:%M'))
        BxArr.append(res[key]['BR'])
        ByArr.append(res[key]['BT'])
        BzArr.append(res[key]['BN'])
        BtArr.append(res[key]['Btotal'])
    font = {
        'family': 'Times New Roman',
        'style': 'normal',
        'weight': 'normal',
        'color': 'black',
        'size': 12,
    }
    plt.figure(figsize=(8, 6), dpi=150)
    _plot_panel(1, timeStampArr, BxArr, 'BR', [-10, 10], font,
                title='Real-time Interplanetary Magnetic Field Values sampled once per minute')
    _plot_panel(2, timeStampArr, ByArr, 'BT', [-10, 10], font)
    _plot_panel(3, timeStampArr, BzArr, 'BN', [-10, 10], font)
    _plot_panel(4, timeStampArr, BtArr, 'Btotal', [0, 10], font)
    plt.savefig('sta_mag.png')
    plt.show()
if __name__ == '__main__':
    # 1. Build the full path of the data file in the working directory
    cwd = os.getcwd()
    filename='sta_mag_1m.txt'
    fullpath=cwd+'/'+filename
    # 2. Read the data
    data=read_data(fullpath)
    # 3. Draw the figure
    plot_data(data)
| true
|
9ab297b34eeafe35a4680a37bf1ae52dbf9d35ee
|
Python
|
TARENTOO/DUB
|
/dub/main.py
|
UTF-8
| 1,328
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Módulos
import sys, os
sys.path.append(os.path.abspath(".."))
import pygame
from pygame.locals import *
from dub import images
from dub import objetos
WIDTH = 400
HEIGHT = 128
IMAGES = os.path.abspath(".") + "\imagenes"
# Constantes
# Clases
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Funciones
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
def main():
    """Create the game window and run the main loop at 60 FPS.

    The loop only terminates via sys.exit(0) when the window is closed;
    the trailing ``return 0`` is unreachable.
    """
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption("Dub game")
    background = images.load_image(IMAGES + "\mountains.png")
    valdemar = objetos.Player("Valdemar")
    clock = pygame.time.Clock()
    while True:
        # Cap the frame rate; 'time' is the milliseconds since last tick,
        # passed to the physics/update methods for frame-rate independence.
        time = clock.tick(60)
        keys = pygame.key.get_pressed()
        for eventos in pygame.event.get():
            if eventos.type == QUIT:
                sys.exit(0)
        # Apply gravity and input, then redraw background and player.
        valdemar.gravedad(time)
        valdemar.actualizar(time, keys)
        screen.blit(background, (0, 0))
        screen.blit(valdemar.image, valdemar.rect)
        pygame.display.flip()
    # NOTE(review): unreachable — the loop exits only through sys.exit.
    return 0
if __name__ == '__main__':
pygame.init()
main()
| true
|