| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
50ff4a273d3f0c6af02555598bfcf6c504af52f4
|
931221727641ed3e56e9a30b13e3c15055722a85
|
/btre/listings/migrations/0003_auto_20190409_2240.py
|
2e5fba8bea5cace42d1efa490a847837222ef1bf
|
[] |
no_license
|
gowthamseenu/ML_Shopping
|
c4ed9db5db2cf85477add6be6f16b6186e92d004
|
099f9c45df4398647610f5dbf44abce91ac8b562
|
refs/heads/master
| 2021-09-08T20:08:51.768035
| 2021-08-30T09:53:07
| 2021-08-30T09:53:07
| 239,966,290
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
# Generated by Django 2.2 on 2019-04-09 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('listings', '0002_auto_20190409_2235'),
]
operations = [
migrations.AlterField(
model_name='product',
name='brand',
field=models.CharField(choices=[('dell', 'dell'), ('apple', 'apple'), ('onepluse', 'onepluse'), ('hp', 'hp')], max_length=200),
),
migrations.AlterField(
model_name='product',
name='display_size',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=2),
),
migrations.AlterField(
model_name='product',
name='processor_spped',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='product',
name='storage',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='product',
name='sub_type',
field=models.CharField(choices=[('Electronic_product', 'Electronic_product'), ('accessories', 'accessories')], max_length=100),
),
migrations.AlterField(
model_name='product',
name='weight',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=6),
),
]
|
[
"gowthamseenu@biztechnosys.com"
] |
gowthamseenu@biztechnosys.com
|
868fe6c11626d232cb99c5706fa1127364bc90ac
|
fdaacf88d92eda56f9a6668f3604ebf82e2fcc4c
|
/web_site/views.py
|
d1fb448f6c5cf35c2cda0a925c6d1a00237720c7
|
[] |
no_license
|
PavelShumbasov/sem_work1
|
070da6560a188d092d9076523927cc4879fc267c
|
59255ef0192d63aae2d828810305e1d4276abd14
|
refs/heads/master
| 2023-09-04T15:14:51.827525
| 2021-11-06T09:31:38
| 2021-11-06T09:31:38
| 424,680,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
from flask import Blueprint, render_template, request, redirect, url_for, json
from flask_login import login_required, current_user
from . import db
from .menu import menu
from .models import Board, Task
views = Blueprint("views", __name__)
@views.route("/")
def home():
boards = Board.query.all()
return render_template("home.html", boards=boards, menu=menu)
@views.route("/add_board", methods=['GET', 'POST'])
@login_required
def add_board():
if request.method == 'POST':
name = request.form.get("name")
board_private = request.form.get("is_private") == ""
new_board = Board(name=name, author=current_user.id, is_private=board_private)
db.session.add(new_board)
db.session.commit()
return redirect(url_for("views.home"))
return render_template("add_board.html", menu=menu)
@views.route("/board/<id>", methods=['GET', 'POST'])
@login_required
def view_board(id):
board = Board.query.filter_by(id=id).first()
if board:
can_delete = board.author == current_user.id
if not board or (board.author != current_user.id and board.is_private):
return render_template("no_board.html")
if request.method == "POST":
if current_user.id == board.author:
text = request.form.get("text")
new_task = Task(text=text, author=current_user.id, board_id=id)
db.session.add(new_task)
db.session.commit()
tasks = Task.query.filter_by(board_id=id)
return render_template("view_board.html", board=board, tasks=tasks, can_delete=can_delete, menu=menu)
@views.route("/my_boards", methods=['GET'])
@login_required
def my_boards():
boards = Board.query.filter_by(author=current_user.id)
return render_template("my_boards.html", boards=boards, menu=menu)
@views.route("/delete/board/<id>", methods=['GET'])
@login_required
def delete_board(id):
board = Board.query.filter_by(id=id).first()
if not board or board.author != current_user.id:
return render_template("no_board.html", menu=menu)
db.session.delete(board)
db.session.commit()
return redirect(url_for("views.my_boards"))
@views.route("/delete/task/<id>", methods=['GET'])
@login_required
def delete_task(id):
task = Task.query.filter_by(id=id).first()
if not task or task.author != current_user.id:
return render_template("no_board.html", menu=menu)
db.session.delete(task)
db.session.commit()
return redirect(url_for("views.view_board", id=task.board_id))
@views.route("/find_board", methods=['GET', 'POST'])
def find_board():
name = request.form['name']
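# Note: interpolating raw user input into the SQL string makes this query vulnerable to SQL injection; a parameterized query would be safer here.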
query = f"SELECT * FROM board WHERE name = '{name}';"
result = list(db.engine.execute(query))
if result:
path = "views.view_board"
answer = {"result": '<a href=' + f'{url_for(path, id=result[0][0])}' + '> Найденная доска<a>'}
else:
answer = {"result": "Такой доски нет"}
return json.dumps(answer)
|
[
"pavelshumbasov2335@gmail.com"
] |
pavelshumbasov2335@gmail.com
|
2c72fc48e73c2fcf5db27a84c63d3341b2696983
|
ed7fde0483a4836bfc9ef3ab887cf1220559bfc7
|
/masters_scripts/EC17_get_allele_dist_1.py
|
80bb3023acd365ccf7683c6816f51994e190d9c1
|
[] |
no_license
|
cizydorczyk/python_scripts
|
326b3142a3c6ce850237e8b13e229854699c6359
|
b914dcff60727bbfaa2b32e1a634ca9ca354eeeb
|
refs/heads/master
| 2023-05-11T14:29:44.548144
| 2023-05-05T19:39:28
| 2023-05-05T19:39:28
| 116,588,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,850
|
py
|
from sys import argv
import numpy as np
import itertools
script, inputallelicdepth, outputfile = argv
print "Working on file: " + inputallelicdepth.split('/')[-1]
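# Input: tab-separated lines (header line skipped) whose last column holds comma-separated allelic depths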
with open(inputallelicdepth, 'r') as infile1:
lines = infile1.read().splitlines()
del lines[0]
proportions_breakdown = {1:[], 2:[], 3:[], 4:[]}
proportions = []
for i in lines:
line = i.strip().split('\t')
ad = [float(j) for j in line[-1].split(',')]
adsum = sum(ad)
numbases = len(ad[0:-1])
if adsum != 0.0:
for k in ad[0:-1]:
proportions_breakdown[numbases].append(round((k/adsum),2))
proportions.append(round((k/adsum),2))
elif adsum == 0.0:
# proportions[numbases].append(0.00)
continue
# Count total proportions:
proportions_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_dict[str(i)] = proportions.count(i)
# Count proportions with 2, 3, and 4 bases separately:
proportions_2_dict = {}
proportions_3_dict = {}
proportions_4_dict = {}
for i in np.arange(0,1.01, 0.01):
proportions_2_dict[str(i)] = proportions_breakdown[2].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_3_dict[str(i)] = proportions_breakdown[3].count(i)
for i in np.arange(0,1.01, 0.01):
proportions_4_dict[str(i)] = proportions_breakdown[4].count(i)
with open(outputfile, 'w') as outfile1:
outfile1.write('proportion\ttotal_count\tcount_2\tcount_3\tcount_4\n')
for keyt, key2, key3, key4 in itertools.izip(sorted(proportions_dict.keys()), sorted(proportions_2_dict.keys()), sorted(proportions_3_dict.keys()), sorted(proportions_4_dict.keys())):
outfile1.write(str(keyt) + '\t' + str(proportions_dict[keyt]) + '\t' + str(proportions_2_dict[key2]) + '\t' + str(proportions_3_dict[key3]) + '\t' + str(proportions_4_dict[key4]) + '\n')
# for key, value in sorted(proportions_dict.iteritems()):
# outfile1.write(str(key) + '\t' + str(value) + '\n')
|
[
"conradizydorczyk@gmail.com"
] |
conradizydorczyk@gmail.com
|
87015919007428f2852be00dba827a3230d85010
|
3abb60c61f1e5aba68620d4c5f9e81700100bbf5
|
/model/SVM/test.py
|
d63d07628926294ee8999f8053402735843f191e
|
[
"Apache-2.0"
] |
permissive
|
chan8616/PoAI
|
249ff39e49b781c9142ea5da5265dd0479c0a7b6
|
9bc4b69f434c8be4215f483cefbf2bd171803219
|
refs/heads/master
| 2023-02-04T17:00:42.750265
| 2020-12-16T08:25:06
| 2020-12-16T08:25:06
| 141,091,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
import time
import os
def test(config):
data_path = config.data_path
save_directory = config.save_directory
save_figure = config.save_figure
pretrained_file_path = config.pretrained_file_path
data = pd.read_csv(data_path)
x_columns = config.x_columns
x_columns = x_columns.split(',')
X = data[x_columns]
y_column = config.y_column
Y = data[y_column]
X_test = X
Y_test = Y
model = pickle.load(open(pretrained_file_path, 'rb'))
print("load pretrained model")
y_test_predict = model.predict(X_test)
acc = accuracy_score(Y_test, y_test_predict)
print("The model performance for testing set")
print("--------------------------------------")
print('accuracy score is {}'.format(acc))
if save_figure is True:
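# The decision-boundary plot below assumes X_test has exactly two feature columns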
X_test_a = np.array(X_test)
h = .02 # step size in the mesh
x_min, x_max = X_test_a[:, 0].min() - 1, X_test_a[:, 0].max() + 1
y_min, y_max = X_test_a[:, 1].min() - 1, X_test_a[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X_test_a[:, 0], X_test_a[:, 1], c=Y_test, cmap=plt.cm.Paired, edgecolors='k')
plt.title('classification result')
plt.axis('tight')
time_stamp = time.strftime("%Y%m%d_%H%M%S", time.localtime((time.time())))[2:]
file_name = 'svm_model_' + time_stamp + '.png'
plt.savefig(os.path.join(save_directory, file_name))
|
[
"dudgus1727@postech.ac.kr"
] |
dudgus1727@postech.ac.kr
|
a5252f74fdb425b662bfc873101bded2e39d470d
|
52e7007ed2b9a9525cfb0c483065bffd6ecbcded
|
/基本操作.py
|
ec4c8ab1568c364a568e001f46a5f8a5c01a427a
|
[] |
no_license
|
hnzhangbinghui/selenium
|
2801618b60c2b7622fbd80945809ccfe5b50309e
|
15e2dbde337abf856038df72263ae1245293a36b
|
refs/heads/master
| 2022-11-14T08:56:16.500728
| 2020-07-12T10:15:48
| 2020-07-12T10:15:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,701
|
py
|
a="asdfggjytehtrwgrevreqf"
print(len(a))
b='123456'
# Concatenate strings with the '+' operator
print(a+b)
# Python does not allow other types inside a '+' expression
# String conversion
age=33
print(int(age))
print(len(str(age)))
print(float(age))
# Converting between strings and lists
# list() converts a tuple or string into a list (important)
string='Hello,world!!'
l=list(string)
print(l)
# Convert a tuple to a list
uname=('laozhang','zhangbinghui','binghui')
listu=list(uname)
print("列表元素:",listu)
# join() concatenates the elements of a sequence with the given separator string to produce a new string
s='-'
ss=s.join(listu)
print(ss)
# replace() substitutes occurrences of old in the string with new; if the third argument max is given, at most max replacements are made
#str.replace(old,new[,max])
str="this is string example... owe,this is really string!!"
print(str.replace('is','was'));
print(str.replace('is','was',3));
name="zhangbinghui"
print(name[0])
print(name[-1])
# The upper bound is not included (important)
print(name[1:5])
print(len(name))
print(name[1:10:2])
# s[a:b:-2]: with a negative step the two bounds swap roles, i.e. walk from b+1 down to a in steps of -2
print(name[10:1:-2])
# Built-in string methods
print(name.capitalize())  # capitalize the first character
print(name.title())
print(name.upper())
print(name.lower())
# center() returns a string of the given width with the original centered; fillchar is the padding character, space by default.
# The original string is centered and padded up to width characters.
# If width is less than the length of the string, the string is returned unchanged; otherwise fillchar is used for the padding.
print(name.center(30,'*'))
# count() returns the number of occurrences of a substring in the string; the optional arguments give the start and end positions of the search.
#str.count(sub, start= 0,end=len(string))
print(name.count('i',0,12))
print(name.count('i'))
# decode() decodes a bytes object using the given encoding; the default encoding is 'utf-8'.
bianma="张冰辉"
name1=bianma.encode('utf-8')
print(name1)
print(bianma.encode('GBK','strict'))
"""str = "菜鸟教程";
str_utf8 = str.encode("UTF-8")
str_gbk = str.encode("GBK")
print(str)
print("UTF-8 编码:", str_utf8)
print("GBK 编码:", str_gbk)
print("UTF-8 解码:", str_utf8.decode('UTF-8','strict'))
print("GBK 解码:", str_gbk.decode('GBK','strict'))"""
"""endswith() checks whether the string ends with the given suffix,
returning True if it does and False otherwise. The optional "start" and "end" arguments give the start and end positions of the search"""
#str.endswith(suffix[, start[, end]])
print(name.endswith('hui'))
print(name.endswith('zhagn'))
print(name.endswith('hui',3,5))
print(name.endswith('hui',0,12))
print("aaaaaaaaaaaaaa")
print(name.startswith('zhang'))
print('\n')
# expandtabs() converts tab characters ('\t') in the string to spaces; the default number of spaces per tab ('\t') is 8.
name1="zhang\tbing\thui"
print(name)
print(name1.expandtabs())
print(name1.expandtabs(12))
"""find() checks whether the string contains the substring str; if the beg (start)
and end (stop) ranges are given, it checks whether the substring lies within that range. If it does,
the starting index of the substring within the string is returned. If the substring is not found, -1 is returned."""
print(name.find('bing'))
print(name.find('bing',0,len(name)))
print(name.find('zhagn'))
"""index() checks whether the string contains the substring str; if the beg (start) and end (stop) ranges are given, it checks within that range. It behaves exactly like find(), except that it raises an exception when str is not found in the string."""
"""isalnum() checks whether the string consists only of letters and digits"""
"""It returns True if the string has at least one character and every character is a letter or a digit, otherwise False"""
print(name.isalnum())
print(bianma.isalnum())
bm="www.baidu.com"
print(bm.isalnum())
print('\n')
"""
isalpha() checks whether the string consists only of letters.
It returns True if the string has at least one character and every character is a letter, otherwise False
"""
daima="abc123"
print(daima.isalnum())
print(daima.isalpha())
print('\n')
"""
ljust() returns a new string with the original left-justified
and padded with the fill character (space by default) up to the given width.
If the given width is less than the length of the original string, the original string is returned.
"""
print(name.ljust(30,'.'))
print(name.ljust(30,'*'))
print(name.center(30,'*'))
print('\n')
"""
lstrip([chars]) removes leading whitespace, or the specified characters, from the left end of the string.
chars -- the characters to strip.
"""
str1=" zhangbinghui"
print(len(str1))
print(str1.lstrip())
print(len(str1.lstrip()))
str2='22222222zhangbinghui'
print(str2.lstrip('2'))
"""
partition() splits the string at the given separator.
If the string contains the separator, a 3-tuple is returned:
the substring before the separator,
the separator itself, and the substring after the separator
"""
a2='www.baidu.com'
print(a2.partition('.'))
"""
split() slices the string at the given separator;
if the argument num is specified, at most num+1 substrings are produced
str.split(str="", num=string.count(str)).
num -- number of splits. Defaults to -1, i.e. split on every occurrence.
"""
print(a2.split('.'))
a3='q.w.e.r.t.y.u.i.4.5.6'
a4=a3.split('.')
print(a4)
print(list(a4))
a5='qwtaqtadtlllt'
print(a5.split('t'))
print('\n')
"""
splitlines() splits the string at line boundaries ('\r', '\r\n', '\n') and
returns a list with one element per line. If the keepends argument is False,
the line breaks are not included; if it is True, they are kept.
str.splitlines([keepends])
keepends -- whether to keep the line-break characters ('\r', '\r\n', '\n') in the output;
defaults to False (line breaks dropped); if True, the line breaks are kept.
"""
atr='ab c\n\nde fg\rkl\r\n'
print(atr.splitlines())
print(atr.splitlines(True))
"""
strip() removes the given characters (whitespace by default), or character sequence, from both ends of the string.
Note: only leading and trailing characters can be removed, never characters in the middle.
str.strip([chars]);
"""
str3='*****zhangbing*hui******'
print(str3.strip('*'))
# swapcase() converts uppercase letters in the string to lowercase and vice versa.
str5='ZHANGbingHUI'
print(str5.swapcase())
"""
zfill() returns a string of the given length with the original right-justified and padded with zeros on the left.
width -- the desired length of the string; the original is right-justified and zero-padded in front.
"""
print(name.zfill(30))
print(name.zfill(20))
print(name,'%o')
print(name,'%s')
|
[
"hnzhangbinghui@163.com"
] |
hnzhangbinghui@163.com
|
45bd5115c7a3405823961182633a568318a1d2ef
|
7234e6c72eb3f09c4a66dbe91f00fdf7742f010f
|
/algo/arrays/binarysearch/shiftedBinarySearch.py
|
fc901758206f1662bac912102f0b1b7740f4186f
|
[] |
no_license
|
srinathalla/python
|
718ac603473e7bed060ba66aa3d39a90cf7ef69d
|
b6c546070b1738350303df3939888d1b0e90e89b
|
refs/heads/master
| 2021-06-13T06:11:42.653311
| 2021-02-19T06:01:41
| 2021-02-19T06:01:41
| 150,374,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
#
# T.C : O(logn) S.C : O(1)
# #
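# Binary search on a rotated (shifted) sorted array: compare the midpoint with the right end to decide which half is sorted, then narrow the search range accordingly.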
def shiftedBinarySearch(array, target):
l = 0
r = len(array)-1
while l < r:
m = (l + r)//2
if array[m] == target:
return m
elif array[m] < array[r]:
if array[m] < target and target <= array[r]:
l = m + 1
else:
r = m - 1
elif array[m] > array[r]:
if array[l] <= target and target < array[m]:
r = m - 1
else:
l = m + 1
return l if array[l] == target else -1
print(shiftedBinarySearch([5, 23, 111, 1], 111))
print(shiftedBinarySearch([45, 61, 71, 72, 73, 0, 1, 21, 33, 45], 33))
|
[
"srinathb10j.ik@gmail.com"
] |
srinathb10j.ik@gmail.com
|
f0ad64d5af44dc38b8d2591e88fadc4ec83a03c5
|
37bbd8f1d26a1dd70bc13f597f0306d98d8db7ed
|
/cl_user/migrations/0001_initial.py
|
3ab13420ead76e479b9a4ae7bc8cde2448e6711d
|
[] |
no_license
|
GitHubQinDong/clwh
|
601f4461e70c24f1e76c40ab11661562064db8a9
|
41c373627831dba33afd47dcc691b802258ca5b6
|
refs/heads/master
| 2021-01-24T20:41:21.056243
| 2018-03-02T06:40:08
| 2018-03-02T06:40:08
| 123,257,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-20 06:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uname', models.CharField(max_length=20)),
('upwd', models.CharField(max_length=40)),
('uemil', models.EmailField(max_length=30)),
('urelname', models.CharField(default='', max_length=20)),
('uadr', models.CharField(default='', max_length=100)),
('uphone', models.CharField(default='', max_length=11)),
],
),
]
|
[
"451880559@qq.com"
] |
451880559@qq.com
|
70a40cde8c7c2fb6a06e19d3642aa3632d2cbae5
|
0abe9956d5ff6eae5026121bdf9a77d917de674a
|
/createTable.py
|
83599f118c94a498ff74c3641023b4eecf083566
|
[] |
no_license
|
Not2Day2Die/PySnow
|
421e5bddfaecaaecfdd6aa339ead55820b7c4d5b
|
059ac3c1ebb16952dc11f6f24d01246df7ed1a62
|
refs/heads/master
| 2020-04-19T05:46:39.700376
| 2019-01-29T01:57:57
| 2019-01-29T01:57:57
| 167,998,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import pymssql
conn = pymssql.connect(host='60.251.238.43',
user='sa',
password='8179311!QAZ',
database='db8780',
charset='utf8',
port=8433)
# Check that the connection succeeded
cursor = conn.cursor()
sql = 'CREATE TABLE Customer(First_Name char(50),Last_Name char(50),Address char(50),City char(50),Country char(25),Birth_Date datetime);'
cursor.execute(sql)
# CREATE TABLE returns no result set, so commit the change instead of fetching rows
conn.commit()
print('Customer table created')
|
[
"noreply@github.com"
] |
Not2Day2Die.noreply@github.com
|
1d98d5123707340b7b8490ded539ce18888d5e68
|
cc6476571b5dc2b3ed61d9ae2833ddbe122edbcb
|
/Ingestion.py
|
520ed96425d84b6c93c5a2951f84ef58e94a2339
|
[] |
no_license
|
jackhulbertpdx/GoingPlacesWithPraw
|
905660f4744ead56e4c53d6b63f0ba232eb74434
|
8c95b3efa382d126b1571f74918e96ae355cf033
|
refs/heads/main
| 2023-04-03T19:40:52.004660
| 2021-04-07T23:31:24
| 2021-04-07T23:31:24
| 348,434,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,588
|
py
|
#############################################################################################
# Ingestion
# by Jack Hulbert
# April 2020
# https://github.com/jackhulbertpdx/GoingPlacesWithPraw
# -----------------------------------------------------------------------------------------
# Ingests data from the Reddit PRAW Wrapper from user-defined subreddit feeds, filters, and
# loads data into a PostGreSQL table defined in Create Database.py
# In order to use this script you must first acquire your user credentials and create an app
# using a Reddit developer account.
#############################################################################################
import csv
import io
from io import StringIO
import psycopg2
import glob
import os
import pandas as pd
import numpy as np
from pandas import DataFrame
import datetime as dt
from datetime import datetime
import praw
import sys
from dateutil import tz
import time
# This script extracts data from the Reddit using the PRAW wrapper
# from a list of Subreddits and appends them into a csv object and loads into a PostgreSQL table
def get_reddit_data():
#Define Output Directory for csv Files
output_directory = "/Users/Mydirectory/"
#Datetime value that will be appended to csv file name
today = dt.datetime.now()
#Create container for PRAW data and intercept fields from the Subreddit class
list_of_items = []
fields = ('id','title', 'url','selftext','name', 'created_utc', 'num_comments','permalink')
#Define list of Subreddits to query using PRAW
subs = ['Toyota','ToyotaTundra','ToyotaTacoma','Prius','4Runner','ToyotaHighlander','ToyotaSupra','cars','ToyotaPickup','JDM']
#Authenticate PRAW with Client Secret, User Agent, and ID
r = praw.Reddit(client_id='id',
client_secret='secret',
user_agent='agent')
# Function that initiates a call to each subreddit in the defined list
# and appends the data to a dict and dumps the csv file into our directory.
for i in subs:
for submission in r.subreddit(i).new(limit=None):
to_dict = vars(submission)
sub_dict = {field:to_dict[field] for field in fields}
list_of_items.append(sub_dict)
data=DataFrame(list_of_items)
data[['id','title', 'url','selftext','name', 'created_utc', 'num_comments','permalink']]= data[['id','title', 'url','selftext','name', 'created_utc', 'num_comments','permalink']].astype(str)
#Convert UTC to Datetime
data['created_utc']=(pd.to_datetime(data['created_utc'],unit='s'))
#Write Output File to directory
data.to_csv(str(output_directory)+'reddit_data'+str(today)+'.csv', index = False, doublequote=True)
####################################################
# Initiate PostGreSQL
conn = psycopg2.connect("dbname=db user=user password=pw port=port")
cur = conn.cursor()
# Grab most recent file written to copy into PG table
list_of_files = glob.glob('directory/*')
latest_file = max(list_of_files, key=os.path.getctime)
print(latest_file)
with open(latest_file) as f:
cur.copy_expert('COPY submissions(id, title,url,selftext,name,created_utc,num_comments,permalink) FROM STDIN WITH HEADER CSV', f)
# Make the changes to the database persistent
conn.commit()
cur.close()
conn.close()
get_reddit_data()
|
[
"noreply@github.com"
] |
jackhulbertpdx.noreply@github.com
|
87971f05c8467924589ee22ef9e25a88bcc0bd19
|
bac9c5f0e980c967189b01f7a407b6d64b29ffdb
|
/chapter01-/01-买苹果买香蕉的支持度和置信度.py
|
021d3c8e85e5b48bd406dae694131541b573f838
|
[] |
no_license
|
appbanana/DataMining
|
8047e33684e1f5533c05935b6cc532d31d0da046
|
b815fbf29fb1fbb7acc7ceda5aced616bfd04db6
|
refs/heads/master
| 2020-04-07T15:08:51.989381
| 2018-12-01T09:03:38
| 2018-12-01T09:03:38
| 158,474,127
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
import numpy as np
"""Buying apples --------> buying bananas"""
dataset_filename = "../data/affinity_dataset.txt"
data = np.loadtxt(dataset_filename)
# The file affinity_dataset.txt contains generated data, so we have to pick the columns ourselves
# Number of purchases that include apples
num_apple_buy = 0
# Purchases that include both apples and bananas
rule_valid = 0
# Purchases that include apples but not bananas
rule_invalid = 0
for sample in data:
if sample[3] == 1:
num_apple_buy += 1
if sample[4] == 1:
rule_valid += 1
else:
rule_invalid += 1
print("{0} people bought apples".format(num_apple_buy))
print("{0} people bought both apples and bananas".format(rule_valid))
print("{0} people bought apples but not bananas".format(rule_invalid))
# The support of "buy apples -> buy bananas" is the fraction of all purchases that contain both
print("Support for buying apples and bananas: {0}".format(rule_valid / len(data)))
print("Confidence for buying apples and bananas: {0}".format(rule_valid / num_apple_buy))
|
[
"1243684438@qq.com"
] |
1243684438@qq.com
|
75625cd03c5efadbd61429c15127863b35d48b1a
|
e030b7fc33326c6b885255fd08643413ab871797
|
/pass_through_controllers/examples/script/cartesian_trajectory_action_client.py
|
f4bdf836ba6cc305c64c1509b44e866182a176e2
|
[
"BSD-3-Clause"
] |
permissive
|
gavanderhoorn/cartesian_ros_control
|
1963bf10b32fb67d31cf0e27d4e4c68452fe4bd2
|
2d6262c59a725d6030bbf7bab43fe6aa2915fc5a
|
refs/heads/master
| 2023-04-19T19:11:51.796707
| 2021-04-06T14:34:05
| 2021-04-06T14:34:05
| 355,232,781
| 2
| 0
|
NOASSERTION
| 2021-04-06T15:08:27
| 2021-04-06T15:08:26
| null |
UTF-8
|
Python
| false
| false
| 3,186
|
py
|
#!/usr/bin/env python
"""
Simple action client for testing Cartesian-based PassThroughControllers
Use this to fire-off a quick random Cartesian trajectory goal for testing.
The trajectory will last 10 seconds.
"""
from __future__ import print_function
import rospy
import actionlib
import signal
import sys
import os
import numpy as np
from cartesian_control_msgs.msg import FollowCartesianTrajectoryAction, FollowCartesianTrajectoryGoal, CartesianTrajectoryPoint
from urdf_parser_py.urdf import URDF
from kdl_parser_py.urdf import treeFromUrdfModel
import PyKDL
class Client(object):
def __init__(self):
self.client = actionlib.SimpleActionClient(
'/hw_interface/forward_cartesian_trajectories/follow_cartesian_trajectory',
FollowCartesianTrajectoryAction)
self.client.wait_for_server()
# Suppress spam output of urdf parsing.
# urdf_parser_py is unhappy with various visual tags in the robot_description.
tmp = sys.stderr
sys.stderr = open(os.devnull, 'w')
robot = URDF.from_parameter_server()
sys.stderr = tmp
_, tree = treeFromUrdfModel(robot)
self.fk_solver = PyKDL.ChainFkSolverPos_recursive(tree.getChain('base_link', 'tool0'))
def test(self):
""" Follow two-point, random Cartesian trajectory
This samples uniformly in [-pi, +pi] for each joint to compute two
random poses within the robots reach. It then traverses these points
within 10 seconds.
"""
def random_point():
p_kdl = PyKDL.Frame()
joints = PyKDL.JntArray(6)
for i in range(6):
joints[i] = (np.random.random_sample() * 2 - 1) * np.pi
self.fk_solver.JntToCart(joints, p_kdl)
p = CartesianTrajectoryPoint()
p.pose.position.x = p_kdl.p[0]
p.pose.position.y = p_kdl.p[1]
p.pose.position.z = p_kdl.p[2]
q = PyKDL.Rotation.GetQuaternion(p_kdl.M)
p.pose.orientation.x = q[0]
p.pose.orientation.y = q[1]
p.pose.orientation.z = q[2]
p.pose.orientation.w = q[3]
return p
# Random 2-point trajectory
duration = 10
p1 = random_point()
p2 = random_point()
p1.time_from_start = rospy.Duration(0.5 * duration)
p2.time_from_start = rospy.Duration(duration)
goal = FollowCartesianTrajectoryGoal()
goal.trajectory.points.append(p1)
goal.trajectory.points.append(p2)
self.client.send_goal(goal)
self.client.wait_for_result()
return self.client.get_result()
def clean_shutdown(self, msg=None):
""" Cancel goal on Ctrl-C """
self.client.cancel_goal()
if msg is not None:
print(msg)
sys.exit(0)
if __name__ == '__main__':
try:
rospy.init_node('action_test_client')
client = Client()
signal.signal(signal.SIGINT, lambda sig, frame: client.clean_shutdown("\nGoal canceled."))
result = client.test()
print("Result: {}".format(result))
except rospy.ROSInterruptException:
pass
|
[
"scherzin@fzi.de"
] |
scherzin@fzi.de
|
1340c41b1786d13b22ac88a7c234937f8a15b359
|
667c8a6baefcaa2ff1d8662f63b4891aca98c09e
|
/KUing/asgi.py
|
553f9077d2c7889e3f05d256ccc355d9d0da5ea8
|
[] |
no_license
|
JeonJaewon/KUing
|
4247a9a2f6bda3d903cab455dd6e0b75259b0a9f
|
4585400660524df80272fdf43d883070ae500c12
|
refs/heads/master
| 2023-04-19T16:00:08.994498
| 2021-05-06T09:23:29
| 2021-05-06T09:23:29
| 295,392,362
| 0
| 1
| null | 2021-05-06T09:23:29
| 2020-09-14T11:18:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
ASGI config for KUing project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'KUing.settings')
application = get_asgi_application()
|
[
"wksk04515@naver.com"
] |
wksk04515@naver.com
|
793fabc710ab61e60bc4ad701ef6d70a64ebffcc
|
5d0f91e3a4c75375a2ba9b12cf3cbd4350c2ccdf
|
/geopdf/__init__.py
|
11df3297614cf7a212aab066ac7d3ed89a52d353
|
[
"MIT"
] |
permissive
|
garnertb/geopdf
|
8fac6419e62db9d880d48bb4b202cfbf11729629
|
175073cb44a308513bdb6db32092dd806029afc0
|
refs/heads/master
| 2021-01-10T18:50:22.802931
| 2015-06-09T13:53:43
| 2015-06-09T13:53:43
| 29,563,939
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,121
|
py
|
# -*- coding: utf-8 -*-
"""Adds GeoPDF functionality to ReportLab"""
from reportlab.lib.colors import black
from reportlab.pdfbase.pdfdoc import PDFArray, PDFDictionary, PDFName, PDFString
from reportlab.pdfbase import pdfdoc
from reportlab.pdfgen import canvas
class GeoPDFBase(object, PDFDictionary):
"""
Base class for GeoPDF dicts.
"""
def __init__(self, dict=None):
"""dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122"""
if dict is None:
self.dict = {}
else:
self.dict = dict.copy()
self.set_defaults()
def set_defaults(self):
"""
A hook for creating default values.
"""
return
def is_valid(self):
"""
Test the validity of the dict.
"""
return True
class Projection(GeoPDFBase):
"""
A Projection dict.
"""
def set_defaults(self):
self.dict.setdefault('ProjectionType', PDFString('GEOGRAPHIC'))
self.dict.setdefault('Type', PDFName('Projection'))
class LGIDict(GeoPDFBase):
"""
The LGI dict.
"""
def set_defaults(self):
self.dict.setdefault('Type', PDFString('LGIDict'))
self.dict.setdefault('Version', PDFString('2.1'))
self.dict.setdefault('Projection', Projection({'Datum': PDFString('WE')}))
def is_valid(self):
if not any(map(lambda key: key in self.dict, 'Registration CTM'.split())):
return False
for key, value in self.dict.items():
if hasattr(value, 'is_valid') and getattr(value, 'is_valid')() is False:
return False
return True
class GeoCanvas(canvas.Canvas, object):
LGIDict = PDFArray([])
def _startPage(self):
# now get ready for the next one
super(GeoCanvas, self)._startPage()
self.LGIDict = PDFArray([])
def showPage(self):
"""Close the current page and possibly start on a new page."""
# ensure a space at the end of the stream - Acrobat does
# not mind, but Ghostscript dislikes 'Qendstream' even if
# the length marker finishes after 'Q'
pageWidth = self._pagesize[0]
pageHeight = self._pagesize[1]
cM = self._cropMarks
code = self._code
if cM:
bw = max(0, getattr(cM, 'borderWidth', 36))
if bw:
markLast = getattr(cM, 'markLast', 1)
ml = min(bw, max(0, getattr(cM, 'markLength', 18)))
mw = getattr(cM, 'markWidth', 0.5)
mc = getattr(cM, 'markColor', black)
mg = 2 * bw - ml
cx0 = len(code)
if ml and mc:
self.saveState()
self.setStrokeColor(mc)
self.setLineWidth(mw)
self.lines([
(bw, 0, bw, ml),
(pageWidth + bw, 0, pageWidth + bw, ml),
(bw, pageHeight + mg, bw, pageHeight + 2 * bw),
(pageWidth + bw, pageHeight + mg, pageWidth + bw, pageHeight + 2 * bw),
(0, bw, ml, bw),
(pageWidth + mg, bw, pageWidth + 2 * bw, bw),
(0, pageHeight + bw, ml, pageHeight + bw),
(pageWidth + mg, pageHeight + bw, pageWidth + 2 * bw, pageHeight + bw)
])
self.restoreState()
if markLast:
# if the marks are to be drawn after the content
# save the code we just drew for later use
L = code[cx0:]
del code[cx0:]
cx0 = len(code)
bleedW = max(0, getattr(cM, 'bleedWidth', 0))
self.saveState()
self.translate(bw - bleedW, bw - bleedW)
if bleedW:
# scale everything
self.scale(1 + (2.0 * bleedW) / pageWidth, 1 + (2.0 * bleedW) / pageHeight)
# move our translation/expansion code to the beginning
C = code[cx0:]
del code[cx0:]
code[0:0] = C
self.restoreState()
if markLast:
code.extend(L)
pageWidth = 2 * bw + pageWidth
pageHeight = 2 * bw + pageHeight
code.append(' ')
page = pdfdoc.PDFPage()
page.__NoDefault__ = """Parent
MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
Trans LGIDict""".split()
page.pagewidth = pageWidth
page.pageheight = pageHeight
if getattr(self, 'LGIDict', None):
if len(self.LGIDict.sequence) == 1:
page.LGIDict = self.LGIDict.sequence[0]
else:
page.LGIDict = self.LGIDict
page.Rotate = self._pageRotation
page.hasImages = self._currentPageHasImages
page.setPageTransition(self._pageTransition)
page.setCompression(self._pageCompression)
if self._pageDuration is not None:
page.Dur = self._pageDuration
strm = self._psCommandsBeforePage + [self._preamble] + code + self._psCommandsAfterPage
page.setStream(strm)
self._setColorSpace(page)
self._setExtGState(page)
self._setXObjects(page)
self._setShadingUsed(page)
self._setAnnotations(page)
self._doc.addPage(page)
if self._onPage:
self._onPage(self._pageNumber)
self._startPage()
def addGeo(self, **kwargs):
"""
Adds the LGIDict to the document.
:param kwargs: Keyword arguments that are used to update the LGI Dictionary.
"""
lgi = LGIDict()
lgi.dict.update(kwargs)
if not lgi.is_valid():
return
pdf_obj = lgi.format(self._doc)
self.LGIDict.sequence.append(pdf_obj)
return pdf_obj
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
22751858f26587a040475c2a453193329cb78837
|
31a7204221570f5157a38b48913f131e310812c6
|
/Fame/wsgi.py
|
82d89c03f27bc81006bf904a01ddc2a5bb8aaaa8
|
[] |
no_license
|
judeyouzhi/livefit
|
b7573029f644a71b572e1ad383a3ffc5d46b6ea7
|
a13d5d7dfd510e4384d4e2bb1086b5cb370839b9
|
refs/heads/master
| 2021-01-20T11:13:30.849633
| 2016-12-26T06:39:04
| 2016-12-26T06:39:04
| 77,363,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for Fame project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Fame.settings")
application = get_wsgi_application()
|
[
"judeyou@judeyou-macbookpro.roam.corp.google.com"
] |
judeyou@judeyou-macbookpro.roam.corp.google.com
|
0ce838a536ab26cb226a156c28df2b839d116bf8
|
6d6fb9b30dc5856bbfec6b7a8877494709ed0d5d
|
/cap8/ex_173.py
|
ef54a08365f2ee2634053760df102484e500222a
|
[] |
no_license
|
ignaziocapuano/workbook_ex
|
2b5b644d5d4d1940c871f9083764413671482213
|
ff6d2625e46a2f17af804105f4e88cf8772a39a3
|
refs/heads/main
| 2023-01-04T20:28:08.956999
| 2020-11-02T07:48:18
| 2020-11-02T07:48:18
| 304,552,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#Total the Values
"""
Read a series of values from input (an empty entry terminates the input).
At the end, print the sum (0.0 if the first thing the user enters is empty).
"""
def readAndSum():
n=input("Enter a number (empty to finish): ")
if n=="":
return 0.0
else:
return float(n)+readAndSum()
def main():
total=readAndSum()
print(total)
if __name__ == '__main__':
main()
|
[
"72966219+ignaziocapuano@users.noreply.github.com"
] |
72966219+ignaziocapuano@users.noreply.github.com
|
34cd64af2f789023c8c435d22d7308d5972f46d4
|
9ff5aa15d0fffd991c6640e7e3f2c443424c3a3a
|
/shunxu_search.py
|
f23daa56b3bcfdecbb2e89f691ab51e446200dc5
|
[] |
no_license
|
SongLiu0828/data-structures-algorithms
|
a40209a4bc7cb1f5fb75268ac36b8105470c91ec
|
463bc6c174b533e8536e615b7306f3c9b1317799
|
refs/heads/master
| 2020-03-24T06:39:22.673529
| 2018-07-27T06:22:46
| 2018-07-27T06:22:46
| 142,536,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
def shunxu_search(alist, item):
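# Sequential (linear) search: walk the list until the item is found or the end is reached, returning True or False.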
i = 0
found = False
while i < len(alist) and not found:
if alist[i] == item:
found = True
else:
i += 1
return found
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print(shunxu_search(alist, 54))
print(shunxu_search(alist, 19))
|
[
"song@SongdeMacBook-Pro.local"
] |
song@SongdeMacBook-Pro.local
|
6cdaa4435e0e15d1f90e91b2cdd9468848c117bf
|
9a258d81d612b855e244e4a03594ebe312ff3268
|
/webapp/tests/test_urls.py
|
8a82dcab33b5fefed07c162dd7d7b024a90d642f
|
[
"MIT"
] |
permissive
|
erischon/p10_digitalocean
|
19fb39f7442e0eec669fbd1ef5b2d49464c37493
|
a850dfb97470da57117fa1dfc62c4614a602fe40
|
refs/heads/master
| 2023-04-27T16:52:04.158502
| 2021-05-15T22:44:34
| 2021-05-15T22:44:34
| 360,518,773
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from webapp.views import home_page, mentions
class WebappTestUrls(SimpleTestCase):
def test_home_url_is_resolved(self):
url = reverse('home')
self.assertEqual(resolve(url).func, home_page)
def test_mentions_url_is_resolved(self):
url = reverse('mentions')
self.assertEqual(resolve(url).func, mentions)
|
[
"erischon@gmail.com"
] |
erischon@gmail.com
|
e4fbb87cc08aaac02be49eda20357561270994c1
|
61863d68d64c9319cd49d280b20d2c2a40957363
|
/r2lab.inria.fr/users/views.py
|
b878f3e298472f439face96a6b9b5c151f7c9383
|
[] |
no_license
|
sfehlandt/r2lab
|
fd781637d258a7fc40043f4f8cddef9ec672b563
|
b4f8ddd84327c426b20fe8f772a4e5e47e0cce31
|
refs/heads/master
| 2021-01-22T18:46:29.143500
| 2017-03-08T17:38:29
| 2017-03-08T17:38:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
from .plcapi_users import UsersProxy
|
[
"thierry.parmentelat@inria.fr"
] |
thierry.parmentelat@inria.fr
|
154bc5967613ed4e7873580bb05f5a3989ed423b
|
06d241ddb5066e65ce3d87d2fd059928d92f57a2
|
/mim.py
|
a4fc5f207952729312055dace5b36fe6be4df1cb
|
[] |
no_license
|
geobreze/networking-lab2
|
e662e9c097f5628471683844b751b3282d0192df
|
707289b5e05468ab73c0203591bd79be6d50061c
|
refs/heads/master
| 2022-12-28T16:24:50.375281
| 2020-10-08T21:05:24
| 2020-10-08T21:05:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,515
|
py
|
import socket
import threading
from select import select
from common import crypt
from common.socket_util import Socket, REFRESH, AES_ENCODED, INPUT_WANTED
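# Man-in-the-middle relay: it intercepts the RSA key exchange, recovers the AES session key, re-encrypts it for the client, and prints all traffic it forwards between client and server.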
class MIMServer:
def __init__(self, host, port, s_host, s_port, backlog=10):
self.sessions = []
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((host, port))
self.sock.listen(backlog)
self.s_host = s_host
self.s_port = s_port
self.s_sock = Socket(socket.create_connection((self.s_host, self.s_port)))
def accept(self):
while True:
ready, _, _ = select([self.sock], [], [], 1)
if ready:
(client_socket, remote_addr) = self.sock.accept()
session = MIMSession(Socket(client_socket), self.s_sock)
self.sessions.append(session)
t = threading.Thread(target=session.handle_request)
t.start()
class MIMSession:
def __init__(self, client_socket: Socket, server_socket: Socket):
self.sock = client_socket
self.s_sock = server_socket
self.key = None
self.client_rsa_pub = None
self.rsa_pub, self.rsa_pri = crypt.generate_rsa_keypair()
def handle_request(self):
self.authenticate(is_first=True)
c_input_wanted_flag = INPUT_WANTED
s_input_wanted_flag = INPUT_WANTED
while True:
if c_input_wanted_flag:
s_response = self.printing_replicate_from_server()
s_input_wanted_flag = s_response.input_wanted_flag
if s_response.response_code == REFRESH:
self.authenticate()
continue
if s_input_wanted_flag == INPUT_WANTED:
c_response = self.printing_replicate_from_client()
c_input_wanted_flag = c_response.input_wanted_flag
def authenticate(self, is_first=False):
if is_first:
response = self.sock.recv()
self.client_rsa_pub = response.body
self.s_sock.send(self.rsa_pub)
encoded_key = self.s_sock.recv().body
self.key = crypt.decrypt_rsa(self.rsa_pri, encoded_key)
encoded_for_client_key = crypt.encrypt_rsa(self.client_rsa_pub, self.key)
self.sock.send(encoded_for_client_key)
if is_first:
self.printing_replicate_from_server()
self.printing_replicate_from_client()
self.printing_replicate_from_server()
self.printing_replicate_from_client()
self.printing_replicate_from_server()
def printing_replicate_from_client(self):
response = self.sock.recv()
print(response.body)
if response.encoded_flag == AES_ENCODED:
print(crypt.decrypt_aes(self.key, response.body))
self.s_sock.send(response.body, flag=response.encoded_flag, input_wanted=response.input_wanted_flag,
response_code=response.response_code)
return response
def printing_replicate_from_server(self):
response = self.s_sock.recv()
print(response.body)
if response.encoded_flag == AES_ENCODED:
print(crypt.decrypt_aes(self.key, response.body))
self.sock.send(response.body, flag=response.encoded_flag, input_wanted=response.input_wanted_flag,
response_code=response.response_code)
return response
if __name__ == '__main__':
MIMServer('0.0.0.0', 8080, '127.0.0.1', 8081).accept()
|
[
"uladzislau.valashchuk@ah.nl"
] |
uladzislau.valashchuk@ah.nl
|
6d90cd28b0daa3a1deec3937e83f5b60a2762741
|
87fe498c13fa85bb3df2764405d0ad3e06f5d428
|
/Reference_scriptsandFiles/Multiscalemethod_np.py
|
c3b010db1ad375a3a1aa02ca910206f53a0caf97
|
[] |
no_license
|
miladkh7/Multiscale-Modeling
|
7a7fd4b282dc0e0495cb16c42ea6ec9d4d5f23de
|
a260112b3e6b8e5246b53d7f04b3d7de6ec16394
|
refs/heads/master
| 2020-03-10T18:27:13.823944
| 2018-04-11T08:26:52
| 2018-04-11T08:26:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,097
|
py
|
from random import *
from math import *
import numpy as np
from multiprocessing import cpu_count
numCpus = cpu_count()
print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nMultiscale modelling on microscale \nnumCpus = ',numCpus
#RVEmodell SMA
def lagreparametere(Q):
g = open(parameterpath, "w")
g.write('Q' + '\t' + 'r' + '\t' + 'nf' + '\t' + 'Vf' + '\t' + 'wiggle' + '\t' + 'coordpath' + '\t\t\t' + 'iterasjonsgrense' + '\t' + 'rtol' + '\t' + 'gtol' + '\t' + 'dL'+'\n'+
str(Q) + '\t' + str(r) + '\t' + str(nf) + '\t' + str(Vf) + '\t' + str(wiggle) + '\t' + coordpath + '\t' + str(iterasjonsgrense) + '\t' + str(rtol) + '\t' +str(gtol)+ '\t' +str(dL)) # til fiber modellering
g.close()
def hentePopulation(coordpath):
# Read the fibre/matrix population
xy=list()
f = open(coordpath,'r')
tekst = f.read()
f.close()
lines = tekst.split('\n')
# save coordinates to the support file
for line in lines:
data = line.split('\t')
a = float(data[0])
b = float(data[1])
xy.append([a,b])
print 'Antall fiber = ',int(nf),'\tAntall fiberkoordinater = '+str(len(xy))
print '\n',xy,'\n \n'
return xy
#Abaqus
def createModel(xydata):
import section
import regionToolset
import displayGroupMdbToolset as dgm
import part
import material
import assembly
import step
import interaction
import load
import mesh
import job
import sketch
import visualization
import xyPlot
import displayGroupOdbToolset as dgo
import connectorBehavior
Mdb() #reset
#
model = mdb.Model(name=modelName, modelType=STANDARD_EXPLICIT) # Create the model
del mdb.models['Model-1'] # Delete the default model
mod = mdb.models[modelName]
dx=dL/2.0
dy=dL/2.0
# Create the sketch
s1 = model.ConstrainedSketch(name='__profile__',sheetSize=2*dL)
s1.setPrimaryObject(option=STANDALONE)
# Draw the square
s1.Line(point1=(-dx, -dy), point2=(dx, -dy))
s1.Line(point1=(dx, -dy), point2=(dx, dy))
s1.Line(point1=(dx,dy), point2=(-dx,dy))
s1.Line(point1=(-dx,dy), point2=(-dx,-dy))
p = mod.Part(name='Part-1', dimensionality=THREE_D,
type=DEFORMABLE_BODY)
p = model.parts['Part-1']
p.BaseShell(sketch=s1)
s1.unsetPrimaryObject()
#del mod.sketches['__profile__']
if not nf == 0:
f1, e, = p.faces, p.edges
t = p.MakeSketchTransform(sketchPlane=f1.findAt(coordinates=(0.0,
0.0, 0.0), normal=(0.0, 0.0, 1.0)),
sketchUpEdge=e.findAt(
coordinates=(dx, 0.0, 0.0)), sketchPlaneSide=SIDE1, origin=(0.0,
0.0, 0.0))
s1 = model.ConstrainedSketch(name='__profile__',
sheetSize=2*dL, gridSpacing=dL / 25.0, transform=t)
s1.setPrimaryObject(option=SUPERIMPOSE)
p.projectReferencesOntoSketch(sketch=s1, filter=COPLANAR_EDGES)
rcos45 = r * cos(45.0 * pi / 180.0)
for data in xydata:
x = data[0]
y = data[1]
done = 0
if done == 0 and x >= dx:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x + r, y))
done = 1
if done == 0 and x <= -dx:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x - r, y))
done = 1
if done == 0 and y >= dx:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x, y + r))
done = 1
if done == 0 and y <= -dx:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x, y - r))
done = 1
if done == 0 and x >= 0 and y >= 0:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x - rcos45, y - rcos45))
done = 1
if done == 0 and x >= 0 and y <= 0:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x - rcos45, y + rcos45))
done = 1
if done == 0 and x <= 0 and y <= 0:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x + rcos45, y + rcos45))
done = 1
if done == 0 and x <= 0 and y >= 0:
s1.CircleByCenterPerimeter(center=(x, y), point1=(x + rcos45, y - rcos45))
done = 1
# Create partitioned planar shell from sketch
f = p.faces
pickedFaces = f.findAt(((0.0, 0.0, 0.0),))
e1, d2 = p.edges, p.datums
p.PartitionFaceBySketch(sketchUpEdge=e1.findAt(coordinates=(dx, 0.0,
0.0)), faces=pickedFaces, sketch=s1)
s1.unsetPrimaryObject()
#del model.sketches['__profile__'], f, pickedFaces, e1, d2, f1, e, t
#del s1, model
#Partitioned planar shell
# mesh
p = mod.parts['Part-1']
p.seedPart(size=meshsize, deviationFactor=0.1, minSizeFactor=0.1)
p = mod.parts['Part-1']
p.generateMesh()
p = mod.parts['Part-1']
# meshed
mdb.meshEditOptions.setValues(enableUndo=True, maxUndoCacheElements=0.5)
pickedElemFacesSourceSide = mod.parts['Part-1'].elementFaces
vector = ((0.0, 0.0, 0.0), (0.0, 0.0, 2.0))
p.generateBottomUpExtrudedMesh(elemFacesSourceSide=pickedElemFacesSourceSide,
extrudeVector=vector, numberOfLayers=2)
p = mod.parts['Part-1']
n = p.nodes
nodes = n.getByBoundingBox(-dL, -dL, -0.01, dL, dL, 0.01)
p.deleteNode(nodes=nodes)
p.PartFromMesh(name='Part-1-mesh-1', copySets=True)
p = mod.parts['Part-1-mesh-1']
n = p.nodes
nodes = n.getByBoundingBox(-dL, -dL, -0.01, dL, dL, 0.01)
p.deleteNode(nodes=nodes)
# Created extruded mesh part
# This is where the fibers are chosen and put together in set
p = mod.parts['Part-1-mesh-1']
p.Set(name='AllE', elements=p.elements)
x = xydata[0][0]
y = xydata[0][1]
fiber = p.elements.getByBoundingCylinder((x, y, -10.0), (x, y, 10.0), r + 0.01)
for i in range(1, len(xydata)):
x = xydata[i][0]
y = xydata[i][1]
temp = p.elements.getByBoundingCylinder((x, y, -10.0), (x, y, 10.0), r + 0.01)
fiber = fiber + temp
p.Set(name='Fibers', elements=fiber)
p.SetByBoolean(name='Matrix', sets=(p.sets['AllE'], p.sets['Fibers'],), operation=DIFFERENCE)
mod.Material(name='glass')
mod.materials['glass'].Elastic(table=((70000.0, 0.22),))
mod.Material(name='resin')
mod.materials['resin'].Elastic(table=((3500.0, 0.33),))
mod.HomogeneousSolidSection(name='Fibers', material='glass',
thickness=None)
mod.HomogeneousSolidSection(name='matrix', material='resin',
thickness=None)
p = mod.parts['Part-1-mesh-1']
region = p.sets['Fibers']
p.SectionAssignment(region=region, sectionName='Fibers', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
region = p.sets['Matrix']
p.SectionAssignment(region=region, sectionName='matrix', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
del x, y
else:
p.seedPart(size=meshsize, deviationFactor=0.1, minSizeFactor=0.1)
p.generateMesh()
mdb.meshEditOptions.setValues(enableUndo=True, maxUndoCacheElements=0.5)
pickedElemFacesSourceSide = mod.parts['Part-1'].elementFaces
vector = ((0.0, 0.0, 0.0), (0.0, 0.0, 2.0))
p.generateBottomUpExtrudedMesh(elemFacesSourceSide=pickedElemFacesSourceSide,
extrudeVector=vector, numberOfLayers=2)
p.PartFromMesh(name='Part-1-mesh-1', copySets=True)
# extruded mesh and make orphan mesh
p = mod.parts['Part-1-mesh-1']
n = p.nodes
nodes = n.getByBoundingBox(-dL, -dL, -0.01, dL, dL, 0.01)
p.deleteNode(nodes=nodes)
# delete shell nodes
p.Set(name='AllE', elements=p.elements)
mod.Material(name='resin')
mod.materials['resin'].Elastic(table=((3500.0, 0.33),))
mod.HomogeneousSolidSection(name='Matrix', material='resin', thickness=None)
region = p.sets['AllE']
p.SectionAssignment(region=region, sectionName='Matrix', offset=0.0,
offsetType=MIDDLE_SURFACE, offsetField='',
thicknessAssignment=FROM_SECTION)
#del mod.parts['Part-1'], p, n, mod, region
print '\nModel created, meshed and assigned properties'
def createCEq():
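# Periodic boundary conditions: matching nodes on opposite RVE faces are tied together with constraint equations driven by the reference points RPX, RPY and RPZ.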
mod = mdb.models[modelName]
a = mod.rootAssembly
a.DatumCsysByDefault(CARTESIAN)
p = mdb.models[modelName].parts['Part-1-mesh-1']
a.Instance(name=instanceName, part=p, dependent=ON)
# Move the model to the origin and align x with the fibre direction.
a.translate(instanceList=(instanceName, ), vector=(0.0, 0.0, -1.0))
a.rotate(instanceList=(instanceName, ), axisPoint=(0.0, 0.0, 0.0),
axisDirection=(0.0, 1.0, 0.0), angle=90.0)
tol = 0.01
# Finding the dimensions
xmax, ymax, zmax, xmin, ymin, zmin = 1.0, dL/2, dL/2, 0.0, -dL/2, -dL/2
# Creating reference point
a.ReferencePoint(point=( xmin - 0.2 * (zmax - zmin),0.0, 0.0))
refPoints = (a.referencePoints[a.features['RP-1'].id],)
a.Set(referencePoints=refPoints, name='RPX')
a.ReferencePoint(point=(0.0, ymin - 0.2 * (ymax - ymin), 0.0))
refPoints = (a.referencePoints[a.features['RP-2'].id],)
a.Set(referencePoints=refPoints, name='RPY')
a.ReferencePoint(point=(0.0, 0.0,zmin - 0.2 * (zmax - zmin)))
refPoints = (a.referencePoints[a.features['RP-3'].id],)
a.Set(referencePoints=refPoints, name='RPZ')
allNodes = a.instances[instanceName].nodes
for n in allNodes:
x, y, z = n.coordinates[0], n.coordinates[1], n.coordinates[2]
xmax = max(xmax, x)
ymax = max(ymax, y)
zmax = max(zmax, z)
xmin = min(xmin, x)
ymin = min(ymin, y)
zmin = min(zmin, z)
# CE between x-normal surfaces:
nodesXa = allNodes.getByBoundingBox(xmin - tol, ymin - tol, zmin - tol, xmin + tol, ymax + tol, zmax + tol)
nodesXb = allNodes.getByBoundingBox(xmax - tol, ymin - tol, zmin - tol, xmax + tol, ymax + tol, zmax + tol)
counter = 0
for n in nodesXa:
name1 = "Xa%i" % (counter)
nodes1 = nodesXa[counter:counter + 1]
a.Set(nodes=nodes1, name=name1)
x, y, z = n.coordinates[0], n.coordinates[1], n.coordinates[2]
name2 = "Xb%i" % (counter)
nodes2 = nodesXb.getByBoundingBox(x + (xmax - xmin) - tol, y - tol, z - tol, x + (xmax - xmin) + tol, y + tol,
z + tol)
a.Set(nodes=nodes2, name=name2)
mod.Equation(name="Cq11x%i" % (counter),
terms=((1.0, name2, 1), (-1.0, name1, 1), (-(xmax - xmin), 'RPX', 1),)) # 11
mod.Equation(name="Cq21x%i" % (counter),
terms=((1.0, name2, 2), (-1.0, name1, 2), (-(xmax - xmin) / 2, 'RPX', 2),)) # 21
mod.Equation(name="Cq31x%i" % (counter),
terms=((1.0, name2, 3), (-1.0, name1, 3), (-(xmax - xmin) / 2, 'RPX', 3),)) # 31
counter = counter + 1
# CE between y-normal surfaces
# Note: excluding the nodes at xmax:
nodesYa = allNodes.getByBoundingBox(xmin - tol, ymin - tol, zmin - tol, xmax - tol, ymin + tol, zmax + tol)
nodesYb = allNodes.getByBoundingBox(xmin - tol, ymax - tol, zmin - tol, xmax - tol, ymax + tol, zmax + tol)
counter = 0
for n in nodesYa:
name1 = "Ya%i" % (counter)
nodes1 = nodesYa[counter:counter + 1]
a.Set(nodes=nodes1, name=name1)
x, y, z = n.coordinates[0], n.coordinates[1], n.coordinates[2]
name2 = "Yb%i" % (counter)
nodes2 = nodesYb.getByBoundingBox(x - tol, y + (ymax - ymin) - tol, z - tol, x + tol, y + (ymax - ymin) + tol,
z + tol)
a.Set(nodes=nodes2, name=name2)
mod.Equation(name="Cq12y%i" % (counter),
terms=((1.0, name2, 1), (-1.0, name1, 1), (-(ymax - ymin) / 2, 'RPY', 1),)) # 12
mod.Equation(name="Cq22y%i" % (counter),
terms=((1.0, name2, 2), (-1.0, name1, 2), (-(ymax - ymin), 'RPY', 2),)) # 22
mod.Equation(name="Cq32y%i" % (counter),
terms=((1.0, name2, 3), (-1.0, name1, 3), (-(ymax - ymin) / 2, 'RPY', 3),)) # 32
counter = counter + 1
# CE between z-normal surfaces
# Note: excluding the nodes at xmax and ymax :
nodesZa = allNodes.getByBoundingBox(xmin - tol, ymin - tol, zmin - tol, xmax - tol, ymax - tol, zmin + tol)
nodesZb = allNodes.getByBoundingBox(xmin - tol, ymin - tol, zmax - tol, xmax - tol, ymax - tol, zmax + tol)
counter = 0
for n in nodesZa:
name1 = "Za%i" % (counter)
nodes1 = nodesZa[counter:counter + 1]
a.Set(nodes=nodes1, name=name1)
x, y, z = n.coordinates[0], n.coordinates[1], n.coordinates[2]
name2 = "Zb%i" % (counter)
nodes2 = nodesZb.getByBoundingBox(x - tol, y - tol, z + (zmax - zmin) - tol, x + tol, y + tol,
z + (zmax - zmin) + tol)
a.Set(nodes=nodes2, name=name2)
mod.Equation(name="Cq13z%i" % (counter),
terms=((1.0, name2, 1), (-1.0, name1, 1), (-(zmax - zmin) / 2, 'RPZ', 1),)) # 13
mod.Equation(name="Cq23z%i" % (counter),
terms=((1.0, name2, 2), (-1.0, name1, 2), (-(zmax - zmin) / 2, 'RPZ', 2),)) # 23
mod.Equation(name="Cq33z%i" % (counter),
terms=((1.0, name2, 3), (-1.0, name1, 3), (-(zmax - zmin), 'RPZ', 3),)) # 33
counter = counter + 1
print 'Constraint equ. applied'
def run_Job(Jobe, modelName):
mdb.Job(name=Jobe, model=modelName, description='', type=ANALYSIS,
atTime=None, waitMinutes=0, waitHours=0, queue=None, memory=90,
memoryUnits=PERCENTAGE, getMemoryFromAnalysis=True,
explicitPrecision=SINGLE, nodalOutputPrecision=SINGLE, echoPrint=OFF,
modelPrint=OFF, contactPrint=OFF, historyPrint=OFF, userSubroutine='',
scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT, numCpus=numCpus,
numDomains=numCpus, numGPUs=1000)
#mdb.jobs[Jobe].submit(consistencyChecking=OFF)
#mdb.jobs[Jobe].waitForCompletion()
def create_unitstrainslastcases(stepName):
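# Apply the six unit macroscopic strain cases (rows of the 6x6 identity) as displacement BCs on the reference points; the volume-averaged stresses from each run give one column of the homogenized stiffness matrix.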
id = np.identity(6) # Identity matrix for normalised load cases.'Exx','Eyy','Ezz','Exy','Exz','Eyz'
mod = mdb.models[modelName]
a = mod.rootAssembly
#Create step Linear step
mod.StaticStep(name=stepName, previous='Initial')
#Request outputs
mod.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'EVOL','U'))
#Run the simulations to create stiffnessmatrix
print '\nComputing stresses for normalized strains'
for i in range(0,6):# arg: + ,len(id)+1
# Load the strain case
exx, eyy, ezz, exy, exz, eyz = id[i]
mod.DisplacementBC(name='BCX', createStepName=stepName,
region=a.sets['RPX'], u1=exx, u2=exy, u3=exz, ur1=UNSET, ur2=UNSET, ur3=UNSET,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='', localCsys=None)
mod.DisplacementBC(name='BCY', createStepName=stepName,
region=a.sets['RPY'], u1=exy, u2=eyy, u3=eyz, ur1=UNSET, ur2=UNSET, ur3=UNSET,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='', localCsys=None)
mod.DisplacementBC(name='BCZ', createStepName=stepName,
region=a.sets['RPZ'], u1=exz, u2=eyz, u3=ezz, ur1=UNSET, ur2=UNSET, ur3=UNSET,
amplitude=UNSET, fixed=OFF, distributionType=UNIFORM, fieldName='', localCsys=None)
run_Job(Enhetstoyinger[i],modelName)
del exx, eyy, ezz, exy, exz, eyz
def get_stiffness():
stiffmatrix = []
for i in range(0,6):
path = workpath + Enhetstoyinger[i]
odb = session.openOdb(path+'.odb')
instance = odb.rootAssembly.instances[instanceName]
sag=[0.0] * 6
for j in range(0,len(instance.elements)):
v = odb.steps[stepName].frames[-1].fieldOutputs['S'].getSubset(position=CENTROID)
elvol = odb.steps[stepName].frames[-1].fieldOutputs['EVOL']
for p in range(0,6):
sag[p] = sag[p]+v.values[j].data[p]*elvol.values[j].data
odb.close()
for k in range(0,6):
sag[k]= sag[k]/(1*(dL)**2) #Volume
stiffmatrix.append(sag)
print '\n'
g = open(lagrestiffpath, "w")
print '\nStiffnessmatrix stored\n'
for a in range(0, 6):
g.write(str(float(stiffmatrix[0][a]))+'\t'+str(float(stiffmatrix[1][a]))+'\t'+str(float(stiffmatrix[2][a]))+'\t'+str(float(stiffmatrix[3][a]))+'\t'+str(float(stiffmatrix[4][a]))+'\t'+str(float(stiffmatrix[5][a])))
if not a==5:
g.write('\t\t')
print '%7f \t %7f \t %7f \t %7f \t %7f \t %7f' % (stiffmatrix[0][a], stiffmatrix[1][a], stiffmatrix[2][a], stiffmatrix[3][a], stiffmatrix[4][a], stiffmatrix[5][a])
g.write('\n')
g.close()
return stiffmatrix
def get_compliance(Stiffmatrix):
print '\nCompliancematrix found'
try:
inverse = np.linalg.inv(Stiffmatrix)
except np.linalg.LinAlgError:
# Not invertible. Skip this one.
print 'ERROR in inverting with numpy'
pass #intended break
for a in range(0, 6):
print inverse[0][a],'\t', inverse[1][a],'\t', inverse[2][a],'\t', inverse[3][a],'\t',inverse[4][a],'\t', inverse[5][a]
return inverse
def sweep_sig2_sig3(Compliancematrix,sweepresolution):
sweep=list()
x= np.arange(0,2*pi,sweepresolution)
print '\nStrains from stress sweep \n',
print x,'\n'
for d in range(0, len(x)):
sig2 = cos(x[d])
sig3 = sin(x[d])
a=np.dot([0,sig2,sig3,0,0,0],Compliancematrix)
a = a.tolist()
print a
sweep.append(a)
return sweep
def create_sweepedlastcases(sweep, cases):
mod = mdb.models[modelName]
a = mod.rootAssembly
mod.fieldOutputRequests['F-Output-1'].setValues(variables=('S', 'MISES', 'E', 'U', 'ELEDEN'))
mod.steps.changeKey(fromName=stepName, toName=difstpNm)
print '\nComputing strains for normalized load sweep'
# Saving the output database (.odb) files
for lol in range(0,cases):
Jobw =Sweeptoyinger[lol]
print '\nLoad at'+str(360*lol/cases)+'deg'
exx, eyy, ezz, exy, exz, eyz = sweep[lol]
mod.boundaryConditions['BCX'].setValues(u1=exx, u2=exy, u3=exz)
mod.boundaryConditions['BCZ'].setValues(u1=exy, u2=eyy, u3=eyz)
mod.boundaryConditions['BCY'].setValues(u1=exz, u2=eyz, u3=ezz)
run_Job(Jobw, modelName)
print 'Computing stresses for '+str(cases)+' sweep cases'
del a, mod, Jobw, lol
def Extract_parameterdata():
for kaare in range(0,sweepcases):
odb = session.openOdb(workpath + Sweeptoyinger[kaare] + '.odb')
Matrix = odb.rootAssembly.instances[instanceName].elementSets['MATRIX']
nodalStresses = odb.steps[difstpNm].frames[-1].fieldOutputs['S'].getSubset(position=ELEMENT_NODAL, region= Matrix).values
norm=list()
sher=list()
for j in range(0,len(nodalStresses)):
for p in range(0,3):
norm.append(float(nodalStresses[j].data[p]))
sher.append(float(nodalStresses[j].data[p+3]))
odb.close()
print len(norm),'max norm', max(norm), ' min = ',min(norm)
print len(sher), 'max sher =', max(sher), ' min = ',min(sher)
"""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"""
""" ALT OVER ER FUNKSJONER """
#Variabler
Vf = 0.6
nf = 4
r = 1.0 # The fiber radii are set to be uniform; this could be changed with a list and a random function using data on the actual variation in the fiber type. Would it make any difference?
n = 1 # sweep variable, currently 1 = number of random seed(n) values
meshsize = r * 0.3
sweepcases = 4
# Other variables
if 1:
    # Is the RVE empty?
if nf ==0 or Vf==0: # Fiberfri RVE
nf=0
Vf=0
dL = 10
else:
dL = ((nf * pi * r ** 2) / (Vf)) ** 0.5 # RVE storrelsen er satt til aa vaere relativ av nf og V
# RVE modelling parameters
rtol = 0.025 * r #Mellomfiber toleranse
gtol = r * 0.025 #Dodsone klaring toleranse
ytredodgrense = r+gtol #Parametere for dodzonegrense
indredodgrense= r-gtol
iterasjonsgrense =10000
# Text files
GitHub ='C:/Multiscale-Modeling/'
parameterpath = GitHub+'Parametere.txt'
coordpath = GitHub+'coordst.txt'
lagrestiffpath = GitHub+'Stiffness.txt'
workpath = 'C:/Users/Rockv/Desktop/Temp/'
""" ABAQUS """
modelName = 'Model-A'
instanceName = 'PART-1-MESH-1-1'
stepName = 'Enhetstoyninger'
difstpNm = 'Lasttoyinger'
#Composite sweep stresses
sweepresolution = 2*pi / sweepcases #stepsize
print '\nQ\tr\tnf\tVf\twiggle\t\tcoordpath\tLoops\trtol\tgtoL\tdL'
#execfile('C:\Multiscale-Modeling\Multiscalemethod_np.py')
#
"""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"""
""" Micromodelleringsfunksjon av (n) kompositt """
for Q in range(0,n):
from abaqus import *
from abaqusConstants import *
from odbAccess import *
seed(Q) # Q er randomfunksjonensnokkelen
wiggle = random()*r # Omplasseringsgrenser for fiberomplassering
    # Abaqus names
Enhetstoyinger = ['Exx' + str(nf) + '_' + str(Q), 'Eyy' + str(nf) + '_' + str(Q), 'Ezz' + str(nf) + '_' + str(Q),
'Exy' + str(nf) + '_' + str(Q), 'Exz' + str(nf) + '_' + str(Q),
                      'Eyz' + str(nf) + '_' + str(Q)] # The unit strains 0 through 5; all 6 of them
Sweeptoyinger = [''] * sweepcases
for g in range(0,sweepcases):
Sweeptoyinger[g] = 'Sweep_strain_at'+str(int(g*180*sweepresolution/pi))+'__'+str(int(Q))
    # Save parameters to support files
lagreparametere(Q)
"""Prosess"""
xydata = None
    # Do we need a fiber population?
if not (nf==0):
# create a random population
execfile(GitHub+'GenerereFiberPopTilFil.py') #modellereRVEsnitt()
        # fetch the fiber coordinates
xydata= hentePopulation(coordpath)
    # Create the Abaqus strain cases
createModel( xydata)
createCEq()
create_unitstrainslastcases(stepName)
    # Extract the stiffness matrix
Stiffmatrix=get_stiffness()
    # Find the strains for the sweep stress cases
Compliancematrix = get_compliance(Stiffmatrix)
sweepstrains = sweep_sig2_sig3(Compliancematrix,sweepresolution)
# Abaqus Sweep Cases
create_sweepedlastcases(sweepstrains, sweepcases)
Extract_parameterdata()
print 'torke'
#Mdb()
# stats
#if not nf <= 1:
# fiberdist, avgfdist = fiberdistances(dL, xydata)
#analyticalfiberdist = 0.521
#session.mdbData.summary()
#
#o1 = session.openOdbs(names=(workpath+Toying[0]+'.odb', workpath+Toying[1]+'.odb',
# workpath+Toying[2]+'.odb', workpath+Toying[3]+'.odb', workpath+Toying[4]+'.odb',
# workpath+Toying[5]+'.odb'))
#session.viewports['Viewport: 1'].setValues(displayedObject=o1)
#Preform konvergence tests
|
[
"36338470+SondreRokvam@users.noreply.github.com"
] |
36338470+SondreRokvam@users.noreply.github.com
|
92df14d9f3a7a7b18fe39ebd9d18ab9b452e8f22
|
44032f82bcb767175cf86aeccee623eb6cfbd40e
|
/server/dvaapp/task_shared.py
|
8733e35fc2bb8ee34d3eb83854a172bd24e95358
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
veyselkoparal/DeepVideoAnalytics
|
3628d41f8e06547e177a7badd20b399bd7f9028a
|
013f7e1efcc11f9ed5762192a91589aa6b4df359
|
refs/heads/master
| 2020-03-16T04:22:46.603989
| 2018-05-07T06:55:47
| 2018-05-07T06:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,253
|
py
|
import os, json, requests, copy, time, subprocess, logging, shutil, zipfile, uuid, calendar, shlex, sys, tempfile
from models import Video, QueryRegion, QueryRegionIndexVector, DVAPQL, Region, Frame, Segment, IndexEntries, TEvent,\
Worker, TrainedModel
from django.conf import settings
from PIL import Image
from . import serializers
from dva.in_memory import redis_client
from .fs import ensure, upload_file_to_remote, upload_video_to_remote, get_path_to_file, \
download_video_from_remote_to_local, upload_file_to_path
def pid_exists(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def relaunch_failed_task(old, app):
"""
    TODO: Relaunch failed tasks; this requires a rethink of how we store the number of attempts.
    Cleanup of objects created by the previous task that failed.
:param old:
:param app:
:return:
"""
if old.errored:
next_task = TEvent.objects.create(video=old.video, operation=old.operation, arguments=old.arguments,
parent=old.parent, parent_process=old.parent_process, queue=old.queue)
app.send_task(next_task.operation, args=[next_task.pk, ], queue=old.queue)
else:
raise ValueError("Task not errored")
def launch_worker(queue_name, worker_name):
p = subprocess.Popen(['./startq.py','{}'.format(queue_name)], close_fds=True)
message = "launched {} with pid {} on {}".format(queue_name, p.pid, worker_name)
return message
def import_path(dv,path,export=False,framelist=False):
if export:
dv.create_directory(create_subdirs=False)
output_filename = "{}/{}/{}.zip".format(settings.MEDIA_ROOT, dv.pk, dv.pk)
else:
dv.create_directory(create_subdirs=True)
extension = path.split('?')[0].split('.')[-1]
if framelist:
output_filename = "{}/{}/framelist.{}".format(settings.MEDIA_ROOT, dv.pk, extension)
else:
output_filename = "{}/{}/video/{}.{}".format(settings.MEDIA_ROOT, dv.pk, dv.pk, extension)
get_path_to_file(path,output_filename)
def count_framelist(dv):
frame_list = dv.get_frame_list()
return len(frame_list['frames'])
def load_dva_export_file(dv):
video_id = dv.pk
if settings.ENABLE_CLOUDFS:
fname = "/{}/{}.zip".format(video_id, video_id)
logging.info("Downloading {}".format(fname))
ensure(fname)
zipf = zipfile.ZipFile("{}/{}/{}.zip".format(settings.MEDIA_ROOT, video_id, video_id), 'r')
zipf.extractall("{}/{}/".format(settings.MEDIA_ROOT, video_id))
zipf.close()
video_root_dir = "{}/{}/".format(settings.MEDIA_ROOT, video_id)
old_key = None
for k in os.listdir(video_root_dir):
unzipped_dir = "{}{}".format(video_root_dir, k)
if os.path.isdir(unzipped_dir):
for subdir in os.listdir(unzipped_dir):
shutil.move("{}/{}".format(unzipped_dir, subdir), "{}".format(video_root_dir))
shutil.rmtree(unzipped_dir)
break
with open("{}/{}/table_data.json".format(settings.MEDIA_ROOT, video_id)) as input_json:
video_json = json.load(input_json)
importer = serializers.VideoImporter(video=dv, json=video_json, root_dir=video_root_dir)
importer.import_video()
source_zip = "{}/{}.zip".format(video_root_dir, video_id)
os.remove(source_zip)
def export_video_to_file(video_obj,export,task_obj):
if settings.ENABLE_CLOUDFS:
download_video_from_remote_to_local(video_obj)
video_id = video_obj.pk
export_uuid = str(uuid.uuid4())
file_name = '{}.dva_export.zip'.format(export_uuid)
try:
os.mkdir("{}/{}".format(settings.MEDIA_ROOT, 'exports'))
except:
pass
shutil.copytree('{}/{}'.format(settings.MEDIA_ROOT, video_id),
"{}/exports/{}".format(settings.MEDIA_ROOT, export_uuid))
a = serializers.VideoExportSerializer(instance=video_obj)
data = copy.deepcopy(a.data)
data['labels'] = serializers.serialize_video_labels(video_obj)
with file("{}/exports/{}/table_data.json".format(settings.MEDIA_ROOT, export_uuid), 'w') as output:
json.dump(data, output)
zipper = subprocess.Popen(['zip', file_name, '-r', '{}'.format(export_uuid)],
cwd='{}/exports/'.format(settings.MEDIA_ROOT))
zipper.wait()
shutil.rmtree("{}/exports/{}".format(settings.MEDIA_ROOT, export_uuid))
local_path = "{}/exports/{}".format(settings.MEDIA_ROOT, file_name)
path = task_obj.arguments.get('path', None)
if path:
if not path.endswith('dva_export.zip'):
if path.endswith('.zip'):
path = path.replace('.zip', '.dva_export.zip')
else:
path = '{}.dva_export.zip'.format(path)
upload_file_to_path(local_path, path)
os.remove(local_path)
export.url = path
else:
if settings.ENABLE_CLOUDFS:
upload_file_to_remote("/exports/{}".format(file_name))
export.url = "{}/exports/{}".format(settings.MEDIA_URL,file_name).replace('//exports','/exports')
def build_queryset(args,video_id=None,query_id=None,target=None,filters=None):
if target is None:
target = args['target']
if filters is None:
kwargs = args.get('filters',{})
else:
kwargs = filters
if video_id:
kwargs['video_id'] = video_id
if target == 'frames':
queryset = Frame.objects.all().filter(**kwargs)
elif target == 'regions':
queryset = Region.objects.all().filter(**kwargs)
elif target == 'query':
kwargs['pk'] = query_id
queryset = DVAPQL.objects.all().filter(**kwargs)
elif target == 'index_entries':
queryset = IndexEntries.objects.all().filter(**kwargs)
elif target == 'query_regions':
queryset = QueryRegion.objects.all().filter(**kwargs)
elif target == 'query_region_index_vectors':
queryset = QueryRegionIndexVector.objects.all().filter(**kwargs)
elif target == 'segments':
queryset = Segment.objects.filter(**kwargs)
else:
raise ValueError("target {} not found".format(target))
return queryset,target
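# Editor's note: a hypothetical usage sketch for build_queryset (the argument values below are
# illustrative, not part of the original module). Given task arguments, it resolves the Django
# queryset a task should operate on:
#
#   qs, target = build_queryset({'target': 'regions', 'filters': {'materialized': True}},
#                               video_id=42)
#   # qs is equivalent to Region.objects.filter(materialized=True, video_id=42)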
def load_frame_list(dv,event_id,frame_index__gte=0,frame_index__lt=-1):
"""
    Add the ability to load frames & regions specified in a JSON file and then automatically
    retrieve them in a distributed manner through CPU workers.
"""
frame_list = dv.get_frame_list()
temp_path = "{}.jpg".format(uuid.uuid1()).replace('-', '_')
video_id = dv.pk
frame_index_to_regions = {}
frames = []
for i, f in enumerate(frame_list['frames']):
if i == frame_index__lt:
break
elif i >= frame_index__gte:
try:
get_path_to_file(f['path'],temp_path)
im = Image.open(temp_path)
w, h = im.size
im.close()
except:
logging.exception("Failed to get {}".format(f['path']))
pass
else:
df, drs = serializers.import_frame_json(f,i,event_id,video_id,w,h)
frame_index_to_regions[i] = drs
frames.append(df)
shutil.move(temp_path,df.path())
fids = Frame.objects.bulk_create(frames,1000)
regions = []
for f in fids:
region_list = frame_index_to_regions[f.frame_index]
for dr in region_list:
dr.frame_id = f.id
regions.append(dr)
Region.objects.bulk_create(regions,1000)
def download_and_get_query_path(start):
local_path = "{}/queries/{}_{}.png".format(settings.MEDIA_ROOT, start.pk, start.parent_process.uuid)
if not os.path.isfile(local_path):
source_path = "/queries/{}.png".format(start.parent_process.uuid)
image_data = redis_client.get(source_path)
if image_data:
with open(local_path, 'w') as fh:
fh.write(str(image_data))
else:
ensure(source_path,safe=True)
shutil.copy("{}{}".format(settings.MEDIA_ROOT,source_path),local_path)
return local_path
def download_and_get_query_region_path(start,regions):
query_local_path = download_and_get_query_path(start)
imdata = Image.open(query_local_path)
rpaths = []
for r in regions:
region_path = "{}/queries/region_{}_{}.png".format(settings.MEDIA_ROOT, r.pk, start.parent_process.uuid)
img2 = imdata.crop((r.x, r.y, r.x + r.w, r.y + r.h))
img2.save(region_path)
rpaths.append(region_path)
return rpaths
def get_query_dimensions(start):
query_local_path = download_and_get_query_path(start)
imdata = Image.open(query_local_path)
width, height = imdata.size
return width, height
def crop_and_get_region_path(df,images,temp_root):
if not df.materialized:
frame_path = df.frame_path()
if frame_path not in images:
images[frame_path] = Image.open(frame_path)
img2 = images[frame_path].crop((df.x, df.y, df.x + df.w, df.y + df.h))
region_path = df.path(temp_root=temp_root)
img2.save(region_path)
else:
return df.path()
return region_path
def ensure_files(queryset, target):
dirnames = {}
if target == 'frames':
for k in queryset:
ensure(k.path(media_root=''),dirnames)
elif target == 'regions':
for k in queryset:
if k.materialized:
ensure(k.path(media_root=''), dirnames)
else:
ensure(k.frame_path(media_root=''), dirnames)
elif target == 'segments':
for k in queryset:
ensure(k.path(media_root=''),dirnames)
elif target == 'indexes':
for k in queryset:
ensure(k.npy_path(media_root=''), dirnames)
else:
raise NotImplementedError
def import_frame_regions_json(regions_json,video,event_id):
"""
Import regions from a JSON with frames identified by immutable identifiers such as filename/path
:param regions_json:
:param video:
:param event_id:
:return:
"""
video_id = video.pk
filename_to_pk = {}
frame_index_to_pk = {}
if video.dataset:
# For dataset frames are identified by subdir/filename
filename_to_pk = { df.original_path(): (df.pk, df.frame_index)
for df in Frame.objects.filter(video_id=video_id)}
else:
# For videos frames are identified by frame index
frame_index_to_pk = { df.frame_index: (df.pk, df.segment_index) for df in
Frame.objects.filter(video_id=video_id)}
regions = []
not_found = 0
for k in regions_json:
if k['target'] == 'filename':
fname = k['filename']
if not fname.startswith('/'):
fname = '/{}'.format(fname)
if fname in filename_to_pk:
pk,findx = filename_to_pk[fname]
regions.append(serializers.import_region_json(k,frame_index=findx, frame_id=pk, video_id=video_id,
event_id=event_id))
else:
not_found += 1
elif k['target'] == 'index':
findx = k['frame_index']
pk,sindx = frame_index_to_pk[findx]
regions.append(serializers.import_region_json(k, frame_index=findx, frame_id=pk, video_id=video_id,
event_id=event_id))
else:
raise ValueError('invalid target: {}'.format(k['target']))
logging.info("{} filenames not found in the dataset".format(not_found))
Region.objects.bulk_create(regions,1000)
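# Editor's note: a sketch of the regions_json payload shape that import_frame_regions_json
# expects, inferred from the branches above. Any fields beyond 'target', 'filename' and
# 'frame_index' depend on serializers.import_region_json and are not shown here:
#
#   regions_json = [
#       {"target": "filename", "filename": "/subdir/image_001.jpg", ...},   # dataset videos
#       {"target": "index", "frame_index": 120, ...},                       # regular videos
#   ]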
def get_sync_paths(dirname,task_id):
if dirname == 'indexes':
f = [k.npy_path(media_root="") for k in IndexEntries.objects.filter(event_id=task_id) if k.features_file_name]
elif dirname == 'frames':
f = [k.path(media_root="") for k in Frame.objects.filter(event_id=task_id)]
elif dirname == 'segments':
f = []
for k in Segment.objects.filter(event_id=task_id):
f.append(k.path(media_root=""))
elif dirname == 'regions':
e = TEvent.objects.get(pk=task_id)
if e.operation == 'perform_transformation': # TODO: transformation events merely materialize, fix this
fargs = copy.deepcopy(e.arguments['filters'])
fargs['materialized'] = True
fargs['video_id'] = e.video_id
f = [k.path(media_root="") for k in Region.objects.filter(**fargs)]
else:
f = [k.path(media_root="") for k in Region.objects.filter(event_id=task_id) if k.materialized]
else:
raise NotImplementedError,"dirname : {} not configured".format(dirname)
return f
def upload(dirname,event_id,video_id):
if dirname:
fnames = get_sync_paths(dirname, event_id)
logging.info("Syncing {} containing {} files".format(dirname, len(fnames)))
for fp in fnames:
upload_file_to_remote(fp)
if fnames: # if files are uploaded, sleep three seconds to ensure that files are available before launching
time.sleep(3)
else:
upload_video_to_remote(video_id)
|
[
"akshayubhat@gmail.com"
] |
akshayubhat@gmail.com
|
5ef22ce22e6cb63932236fbbcfcccd0a28bd676d
|
7041d7976f8964a0f1a11e1d0046a7857d900c27
|
/sysdoc/main.py
|
f492dbe7b10804a6ba69a0f43802544745a2e721
|
[] |
no_license
|
moniccax/azure_first
|
a466bb96f3bcc94d6c1856cee66e6883c2be51cc
|
1a2eb2c190894b03a3d694b1262dbea68170e5e2
|
refs/heads/master
| 2022-04-13T21:00:23.938178
| 2020-02-28T12:06:12
| 2020-02-28T12:06:12
| 240,284,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
# -*- coding: latin-1 -*-
from flask import request, jsonify, render_template, flash, url_for, redirect, Blueprint
from docx import Document as docxDocument
from datetime import timedelta
from subprocess import call
from utils import *
import os
import io
import zipfile
import sys
import hashlib
import random
import string
import flask
#@application.before_request,
#def make_session_permanent():
# session.permanent = True
# application.permanent_session_lifetime = timedelta(minutes=15)
from admin import admin
application.register_blueprint(admin, subdomain='admin')
from docs import docs
application.register_blueprint(docs, subdomain='documentos')
from connect import connect
application.register_blueprint(connect, subdomain='conecta')
from cursos import cursos
application.register_blueprint(cursos, subdomain='cursos')
from api import api
application.register_blueprint(api, subdomain='api')
from app import mobile
application.register_blueprint(mobile, subdomain='app')
from alumni import alumni
application.register_blueprint(alumni, subdomain='alumni')
from estagios import estagios
application.register_blueprint(estagios, subdomain='estagios')
from bi import bi
application.register_blueprint(bi, subdomain='bi')
application.register_blueprint(admin,subdomain='app', url_prefix='/admin')
application.register_blueprint(docs,subdomain='app', url_prefix='/documentos')
application.register_blueprint(connect,subdomain='app', url_prefix='/conecta')
application.register_blueprint(cursos,subdomain='app', url_prefix='/cursos')
application.register_blueprint(alumni,subdomain='app', url_prefix='/alumni')
application.register_blueprint(estagios,subdomain='app', url_prefix='/estagios')
application.register_blueprint(bi,subdomain='app', url_prefix='/bi')
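# Editor's note: subdomain-based blueprint registrations like the ones above only match requests
# when Flask knows the canonical host. A minimal sketch, assuming the app is served from a real
# domain (the value below is a placeholder):
#
#   application.config['SERVER_NAME'] = 'example.com'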
@application.route('/')
def index():
return redirect(url_for('admin.index'))
# ERROR PAGES
@application.errorhandler(404)
def page_error404(e):
return redirect("/")
#return render_template('erro.html', textError='Página não encontrada!'), 404
@application.errorhandler(500)
def page_error500(e):
return render_template('erro.html', textError='O sistema se comportou de forma inesperada!'), 500
@application.errorhandler(403)
def page_error403(e):
return render_template('erro.html', textError='Você não possui permissão para acessar esta página!'), 403
@application.after_request
def add_headers(response):
response.headers.add('Access-Control-Allow-Credentials', 'true')
response.headers.add('Vary', 'Origin')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
return response
|
[
"monica_emediato@hotmail.com"
] |
monica_emediato@hotmail.com
|
4a668cda898e9c471a683852233d9dee97b94e38
|
697e02fa4c280a9d9581a84c4e2a88e13666f7ce
|
/Auth/urls.py
|
b71441188ce1eeda0742fa443e63aa1d62a6c545
|
[] |
no_license
|
devnikhilmhatre/fynd_task
|
df588ad208ac1e9e34f94e4fe436ced50f1a9653
|
4cc79441d7350167c22b1fcd4e0f7c2e1fb22a76
|
refs/heads/master
| 2022-05-04T09:08:59.788071
| 2019-06-08T17:34:04
| 2019-06-08T17:34:04
| 190,915,764
| 0
| 0
| null | 2022-04-22T21:40:47
| 2019-06-08T17:23:08
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.urls import path
from .views import SignIn, SignOut
urlpatterns = [
path('login/', SignIn.as_view()),
path('logout/', SignOut.as_view())
]
|
[
"dev,nikhilmhatre@gmail.com"
] |
dev,nikhilmhatre@gmail.com
|
f7aa38867d10a632f17743daa400364c915a957c
|
758e68638e21cbc763b515d1edb9e428ffd3f64b
|
/pangolinHustle.py
|
7083f0ec269de7ecd272c68311f1d1b6ccb6d41c
|
[] |
no_license
|
aliisakroe/PangolinPygame
|
9b180b659e366c35e25c270e326012a9d4654e20
|
ec85bfbe565f20532f628b36cd97318e73747e4c
|
refs/heads/master
| 2021-01-10T14:29:55.124761
| 2016-01-26T17:10:41
| 2016-01-26T17:10:41
| 50,433,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,149
|
py
|
#Aliisa Roe
#Project 1 -- pygame
#Oct 5, 2015
"""This is a simple game where the user moves a pangolin image with
arrow keys to get to the top of the screen without colliding with moving
snake sprites--- an experiment with pygame."""
#I used pygame tutorial videos @ Kris Occhipinti
#spritesheet from #colorado.edu/StudentGroups/uchc/trips.html creative commons
# help on spriteCode from http://pygame.org/wiki/Spritesheet
#sound from SoundBible.com creative commons
#win from Mr Smith
#lose from DavinCammas
#hit from Mark DiAngelo
#pangolin.png from https://pixabay.com/en/animal-pangolin-wild-157578/ openSource vectors
import pygame, random, time, pygame.mixer
from pygame.locals import *
pygame.init()
clock = pygame.time.Clock() #will monitor frames per second for consistent speed on different hardware
size = width, height = 400, 430
screen = pygame.display.set_mode(size)
screen.fill((255, 255, 255))#black
hitSound = pygame.mixer.Sound('hit.wav')
winSound = pygame.mixer.Sound('meow.wav')
loseSound = pygame.mixer.Sound('siren.wav')
class Spritesheet(object): #edited code from _____
def __init__(self, filename):
self.sheet = pygame.image.load(filename).convert()
def image_at(self, rectangle):
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
image = pygame.transform.flip(image, True, False)
return image
class Sprite(object):
def __init__(self, sprite, y):
self.startingX = random.randrange(370)
self.x = self.startingX
self.y = y
self.direction = "right"
self.goingRight = True
screen.blit(sprite, (self.x, self.y))
def get_y(self):
return self.y
def update_x(self, newX):
self.x = newX
def get_x(self):
return self.x
def get_rect(self):
return (pygame.Rect((self.x, self.y), (50, 30)))
def hit_side(self):
x = self.x
if x >= 360 or x <= 0 :
return True
else:
return False
def newDirection(self):
if self.direction == "left":
self.direction = "right"
elif self.direction == "right":
self.direction = "left"
def introPrint():
print ("Welcome, to the pangolin hustle!")
time.sleep(2)
print ("Freddy the pangolin is trying to get to the other side of the screen, but he is very allergic to snakes.")
time.sleep(3)
print "Won't you help Freddy get accross?"
print "Let's get started! Use your arrow keys to get Freddy to the top of the screen."
time.sleep(3)
print "And DON'T TOUCH ANY SNAKES!!"
print "Press 'q' to quit."
print "GO!"
time.sleep(3)
def main():
introPrint()
ss = Spritesheet('char2.png')
snakes = [] #enemy
snakes.append(ss.image_at((0, 44, 44, 33)))
snakes.append(ss.image_at((63, 44, 50, 33)))
snakes.append(ss.image_at((130, 44, 50, 33)))
snakes.append(ss.image_at((190, 44, 50, 33)))
snakeRowList = []
yVal = 0
for i in range(13): #makes 13 rows of enemy snakes
snakeRowList.append(Sprite(snakes[0], yVal))
yVal += 30
pangolin = pygame.image.load("pangolin.png").convert() #player
pangolin = pygame.transform.scale(pangolin, (50, 50))
#initialize values
x = 0
y = 0
snakeSprite = 0
hit = 0
arrowY = 400
arrowX = 200
beenHit = False
#GAME LOOP
running = True
while running:
screen.blit(pangolin, (arrowX,arrowY))
pangolinPos = pygame.Rect((arrowX, arrowY), (50, 50))
for event in pygame.event.get():
#QUIT types
if event.type == pygame.QUIT:
running = False
elif event.type == KEYDOWN and (event.key == K_ESCAPE or event.key == K_q):
running = False
elif event.type == KEYDOWN and (event.key == K_UP):
if 0 < arrowY <= 400:
arrowY -= 10
elif event.type == KEYDOWN and (event.key == K_DOWN):
if 0 < arrowY < 400:
arrowY += 10
elif event.type == KEYDOWN and (event.key == K_LEFT):
if 0 < arrowX < 400:
arrowX -= 10
elif event.type == KEYDOWN and (event.key == K_RIGHT):
if 0 < arrowX < 400:
arrowX += 10
""" #screenshots
elif event.type == KEYDOWN and event.key == K_SPACE:
pygame.image.save(screen, "screenshot1.png")""" #here for screenshots that didn't capture my snake sprites :(
#choose sprite image from snakeRowList
if snakeSprite >= 3:
snakeSprite -= 1
else:
snakeSprite += 1
#animate Sprites, check for pangolin collision
for row in snakeRowList:
if row.hit_side():
row.newDirection()
if row.direction == "right":
row.update_x((row.get_x() + 8))
elif row.direction == "left":
row.update_x((row.get_x() - 8))
screen.blit(snakes[snakeSprite], (row.get_x(), row.get_y()))
if (pangolinPos).colliderect(row.get_rect()): #is there a better way?
beenHit = True
if beenHit == True:
hit += 1
print "OW! only", (10-hit), "more lives!"
hitSound.play()
beenHit = False
screen.fill((250, 0, 0))
#game scores
if hit >= 9:
loseSound.play()
print "You died..."
running = False
elif hit <= 10 and arrowY <= 0:
screen.fill((0, 250, 0))
print "YOU WIN!"
winSound.play()
running = False
pygame.display.flip()
screen.fill((0,0,0))
clock.tick(5)
time.sleep(2)
pygame.quit()
main()
|
[
"aliisakroe@gmail.com"
] |
aliisakroe@gmail.com
|
64cff1a16079803de459b9aa3b7b4eff8e8cd29b
|
c8945fe03675fac27fccf95fde3fc102f8dbef1b
|
/metrics/custom_metric.py
|
7e87378acd3a2c616bd873a7988db60868c68bbd
|
[] |
no_license
|
damianmcdonald/gke-metrics-helm
|
b8cd6c24959087aa9d0264cbe9c65878cbe8117e
|
86fc74452fa858d1129912bd34cae86b32f4ae68
|
refs/heads/master
| 2023-02-24T07:43:52.633272
| 2021-01-22T11:26:46
| 2021-01-22T11:26:46
| 329,100,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,825
|
py
|
import os
import sys
import argparse
import datetime
import pprint
import random
import time
import googleapiclient.discovery
def create_custom_metric(client, project_resource,
custom_metric_type, metric_kind,
metric_display_name, metric_description):
"""Create custom metric descriptor"""
metrics_descriptor = {
"type": custom_metric_type,
"labels": [
{
"key": "labelKey",
"valueType": "STRING",
"description": "An arbitrary measurement"
}
],
"metricKind": metric_kind,
"valueType": "INT64",
"unit": "items",
"description": metric_description,
"displayName": metric_display_name
}
return client.projects().metricDescriptors().create(
name=project_resource, body=metrics_descriptor).execute()
def delete_metric_descriptor(client, custom_metric_name):
"""Delete a custom metric descriptor."""
client.projects().metricDescriptors().delete(
name=custom_metric_name).execute()
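def write_sketch_point(client, project_resource, custom_metric_type, value):
    """Editor's sketch, not part of the original script: write a single data point to the
    custom metric created above via the Monitoring v3 timeSeries.create method. The 'global'
    monitored-resource type and the RFC3339 timestamp format are assumptions made to
    illustrate the request shape."""
    end_time = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    body = {
        "timeSeries": [{
            "metric": {"type": custom_metric_type, "labels": {"labelKey": "example"}},
            "resource": {"type": "global"},
            "points": [{
                "interval": {"endTime": end_time},
                "value": {"int64Value": str(value)},
            }],
        }]
    }
    return client.projects().timeSeries().create(name=project_resource, body=body).execute()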
if __name__ == "__main__":
print(f"Arguments count: {len(sys.argv)}")
for i, arg in enumerate(sys.argv):
print(f"Argument {i}: {arg}")
operation = sys.argv[1]
if operation != "CREATE" and operation != "DELETE":
print("Invalid operator. Must be CREATE or DELETE.")
print(f"Usage: python custom_metric.py CREATE project_id metric_descriptor metric_description")
print(f"Usage: python custom_metric.py DELETE project_id metric_descriptor")
print(f"Create example: python custom_metric.py CREATE gcp_project_123 my_metric 'This is a metric that does something'")
print(f"Delete example: python custom_metric.py DELETE gcp_project_123 my_metric")
sys.exit(999)
project_id = sys.argv[2]
metric_descriptor = sys.argv[3]
# This is the namespace for all custom metrics
CUSTOM_METRIC_DOMAIN = "custom.googleapis.com"
# This is our specific metric path
custom_metric_path = f"{CUSTOM_METRIC_DOMAIN}/{metric_descriptor}"
if operation == "CREATE":
metric_description = sys.argv[4]
METRIC_KIND = "GAUGE"
project_resource = f"projects/{project_id}"
client = googleapiclient.discovery.build('monitoring', 'v3')
create_custom_metric(
client,
project_resource,
custom_metric_path,
METRIC_KIND,
metric_descriptor,
metric_description
)
if operation == "DELETE":
project_resource = f"projects/{project_id}"
client = googleapiclient.discovery.build('monitoring', 'v3')
delete_metric_descriptor(client, f"{project_resource}/metricDescriptors/{custom_metric_path}")
|
[
"damian.mcdonald1979@gmail.com"
] |
damian.mcdonald1979@gmail.com
|
03e79839472824d49009eb882c9be785ea788325
|
1c6283303ceb883add8de4ee07c5ffcfc2e93fab
|
/Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/globals/globals.py
|
d957b41f406b4b6a75b7525b1800f265fe66875b
|
[] |
no_license
|
pdobrinskiy/devcore
|
0f5b3dfc2f3bf1e44abd716f008a01c443e14f18
|
580c7df6f5db8c118990cf01bc2b986285b9718b
|
refs/heads/main
| 2023-07-29T20:28:49.035475
| 2021-09-14T10:02:16
| 2021-09-14T10:02:16
| 405,919,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,362
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class Globals(Base):
"""This object holds the configurable global values of IxNetwork for interfaces and the protocol stack.
The Globals class encapsulates a required globals resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'globals'
_SDM_ATT_MAP = {
'ApplicationName': 'applicationName',
'BuildNumber': 'buildNumber',
'ConfigFileName': 'configFileName',
'ConfigSummary': 'configSummary',
'IsConfigDifferent': 'isConfigDifferent',
'PersistencePath': 'persistencePath',
'ProductVersion': 'productVersion',
'RpfPort': 'rpfPort',
'Username': 'username',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(Globals, self).__init__(parent, list_op)
@property
def AppErrors(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.apperrors.apperrors.AppErrors): An instance of the AppErrors class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.apperrors.apperrors import AppErrors
if self._properties.get('AppErrors', None) is not None:
return self._properties.get('AppErrors')
else:
return AppErrors(self)
@property
def Diagnostics(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.diagnostics.diagnostics.Diagnostics): An instance of the Diagnostics class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.diagnostics.diagnostics import Diagnostics
if self._properties.get('Diagnostics', None) is not None:
return self._properties.get('Diagnostics')
else:
return Diagnostics(self)._select()
@property
def Interfaces(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.interfaces.interfaces.Interfaces): An instance of the Interfaces class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.interfaces.interfaces import Interfaces
if self._properties.get('Interfaces', None) is not None:
return self._properties.get('Interfaces')
else:
return Interfaces(self)._select()
@property
def Licensing(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.licensing.licensing.Licensing): An instance of the Licensing class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.licensing.licensing import Licensing
if self._properties.get('Licensing', None) is not None:
return self._properties.get('Licensing')
else:
return Licensing(self)._select()
@property
def PortTestOptions(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.porttestoptions.porttestoptions.PortTestOptions): An instance of the PortTestOptions class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.porttestoptions.porttestoptions import PortTestOptions
if self._properties.get('PortTestOptions', None) is not None:
return self._properties.get('PortTestOptions')
else:
return PortTestOptions(self)._select()
@property
def Preferences(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.preferences.preferences.Preferences): An instance of the Preferences class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.preferences.preferences import Preferences
if self._properties.get('Preferences', None) is not None:
return self._properties.get('Preferences')
else:
return Preferences(self)._select()
@property
def ProgressDialog(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.progressdialog.progressdialog.ProgressDialog): An instance of the ProgressDialog class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.progressdialog.progressdialog import ProgressDialog
if self._properties.get('ProgressDialog', None) is not None:
return self._properties.get('ProgressDialog')
else:
return ProgressDialog(self)._select()
@property
def ProtocolStack(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.protocolstack.ProtocolStack): An instance of the ProtocolStack class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.protocolstack import ProtocolStack
if self._properties.get('ProtocolStack', None) is not None:
return self._properties.get('ProtocolStack')
else:
return ProtocolStack(self)._select()
@property
def Testworkflow(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.testworkflow.testworkflow.Testworkflow): An instance of the Testworkflow class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.testworkflow.testworkflow import Testworkflow
if self._properties.get('Testworkflow', None) is not None:
return self._properties.get('Testworkflow')
else:
return Testworkflow(self)._select()
@property
def Topology(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.topology_678a8dc80c9b4b2b5c741072eab4305d.Topology): An instance of the Topology class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.globals.topology.topology_678a8dc80c9b4b2b5c741072eab4305d import Topology
if self._properties.get('Topology', None) is not None:
return self._properties.get('Topology')
else:
return Topology(self)._select()
@property
def ApplicationName(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ApplicationName'])
@property
def BuildNumber(self):
# type: () -> str
"""
Returns
-------
- str: The IxNetwork software build number.
"""
return self._get_attribute(self._SDM_ATT_MAP['BuildNumber'])
@property
def ConfigFileName(self):
# type: () -> str
"""
Returns
-------
- str: The name of the configuration file.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigFileName'])
@property
def ConfigSummary(self):
"""
Returns
-------
- list(dict(arg1:str,arg2:str,arg3:list[dict(arg1:str,arg2:str)])): A high level summary description of the currently loaded configuration
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigSummary'])
@property
def IsConfigDifferent(self):
# type: () -> bool
"""
Returns
-------
- bool: (Read only) If true, then the current IxNetwork configuration is different than the configuration that was previously loaded.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsConfigDifferent'])
@property
def PersistencePath(self):
# type: () -> str
"""
Returns
-------
- str: This attribute returns a directory of the IxNetwork API server machine, where users can drop their files from the client scripts using IxNetwork APIs. To Put files in this directory, users do not require to run IxNetwork API server in administrative mode
"""
return self._get_attribute(self._SDM_ATT_MAP['PersistencePath'])
@property
def ProductVersion(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ProductVersion'])
@property
def RpfPort(self):
# type: () -> int
"""
Returns
-------
- number:
"""
return self._get_attribute(self._SDM_ATT_MAP['RpfPort'])
@property
def Username(self):
# type: () -> str
"""
Returns
-------
- str: The name of the user.
"""
return self._get_attribute(self._SDM_ATT_MAP['Username'])
|
[
"pdobrinskiy@yahoo.com"
] |
pdobrinskiy@yahoo.com
|
72c7e92b479a0a11d374e58e34f3467c43e67821
|
e8c76797b194bce6702adf9721a96c2b440efd5c
|
/test/modules/http2/htdocs/cgi/hello.py
|
20974bfdd3f143dd3a0f49ab33f2b24bfad99305
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Apache-2.0",
"LicenseRef-scancode-zeusbench",
"BSD-3-Clause",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"Beerware",
"LicenseRef-scancode-other-permissive",
"Spencer-94",
"metamail",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant"
] |
permissive
|
apache/httpd
|
86bfac3d6e2e9b48f5bfca5be7ec616fa9b14e9a
|
b9e029c8036fd036281ac266010db91aed6079b2
|
refs/heads/trunk
| 2023-09-04T07:18:59.681233
| 2023-08-30T12:56:11
| 2023-08-30T12:56:11
| 205,423
| 3,159
| 1,329
|
Apache-2.0
| 2023-09-11T13:50:41
| 2009-05-20T02:02:59
|
C
|
UTF-8
|
Python
| false
| false
| 825
|
py
|
#!/usr/bin/env python3
import os
print("Content-Type: application/json")
print()
print("{")
print(" \"https\" : \"%s\"," % (os.getenv('HTTPS', '')))
print(" \"host\" : \"%s\"," % (os.getenv('X_HOST', '') \
if 'X_HOST' in os.environ else os.getenv('SERVER_NAME', '')))
print(" \"server\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
print(" \"h2_original_host\" : \"%s\"," % (os.getenv('H2_ORIGINAL_HOST', '')))
print(" \"port\" : \"%s\"," % (os.getenv('SERVER_PORT', '')))
print(" \"protocol\" : \"%s\"," % (os.getenv('SERVER_PROTOCOL', '')))
print(" \"ssl_protocol\" : \"%s\"," % (os.getenv('SSL_PROTOCOL', '')))
print(" \"h2\" : \"%s\"," % (os.getenv('HTTP2', '')))
print(" \"h2push\" : \"%s\"," % (os.getenv('H2PUSH', '')))
print(" \"h2_stream_id\" : \"%s\"" % (os.getenv('H2_STREAM_ID', '')))
print("}")
|
[
"icing@apache.org"
] |
icing@apache.org
|
5aa325b1239d92c5a5dc206f02581984ff7c032f
|
8e5a146e2b11c0d9e924cc708392d2273fb419de
|
/I0320011_soal2_tugas2.py
|
c3ce049567d71103bd7e9cc1b40fb9d7ac96d6d4
|
[] |
no_license
|
Aratiakiana/Aratia-Kiana-Piandhani_I0320011_Wildan-Rusyadani_Tugas2
|
9c7ea21f90c7848a921ba590e6ed8d4df8270433
|
fb40d09638665e93d543e12c2bf38e76d50fa45a
|
refs/heads/main
| 2023-03-15T11:29:11.798144
| 2021-03-11T15:50:08
| 2021-03-11T15:50:08
| 346,230,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
# Entering personal data
nama = "Aratia Kiana Piandhani"
namaPanggilan = "Ara"
nim = "I0320011"
kelas = "A"
prodi = "Teknik Industri"
angkatan = "2020"
universitas = "Universitas Sebelas Maret"
jenisKelamin = "Perempuan"
agama = "Islam"
print("Halo perkenalkan nama saya", str(nama)+",biasa dipanggil", str(namaPanggilan)+".")
print("NIM saya", nim, "dari kelas", str(kelas)+".")
print("Saya mahasiswa prodi", prodi, "dari angkatan", angkatan, "di", str(universitas)+".")
print("Saya memiliki jenis kelamin", jenisKelamin, "dan beragama", str(agama)+".")
# Entering date of birth
tempatLahir = "Surakarta"
tanggalLahir = int(5*4+6)
bulanLahir = "Desember"
tahunLahir = int(200*10+2)
umur = int(30/2+3)
print("Saya lahir di" ,tempatLahir, "tanggal", tanggalLahir, bulanLahir, tahunLahir)
print("Sekarang saya berumur", umur, "tahun.")
# Entering home address data
R_T = int(6*2-10)
R_W = int(2.5*2)
kodePos = 57557
print("Alamat rumah saya di Kembang", "RT", R_T,"RW", R_W, "Trosemi,Gatak,Sukoharjo kode pos", str(kodePos)+".")
# Entering supporting data
beratBadan = float(107/2)
tinggiBadan = int((50*2)+(8*7))
ukuranSepatu = int((6*6)+3)
ukuranBaju = "M atau L"
hobi = "Memasak, mendengarkan musik, menonton film, dan jalan-jalan"
print("Berat badan saya yaitu ", beratBadan, "kg.")
print("Tinggi badan saya yaitu", tinggiBadan, "cm.")
print("Saya lebih menyukai sepatu sneakers daripada flatshoes,biasanya sepatu yang saya beli berukuran", ukuranSepatu)
print("Untuk ukuran baju biasanya saya menggunakan ukuran", str(ukuranBaju)+".")
print("Hobi saya yaitu", str(hobi)+".")
|
[
"aratiakiana.p@gmail.com"
] |
aratiakiana.p@gmail.com
|
30cc1d1fc50d0f446d0341344fbc5cfd52d78242
|
9df89a1652d183d8fc654acd728f9a578d6d1912
|
/cli/psym/graphql/query/customers.py
|
cc9b41c460503454a4b358260df7649396259444
|
[
"BSD-3-Clause"
] |
permissive
|
duranrojasm/symphony
|
b37d54a134e29093edacb80442e204fc71a37fbe
|
55b3d0c20b669374303bafb10e9c96c734647c9c
|
refs/heads/main
| 2023-08-24T02:00:33.433220
| 2021-10-28T20:35:23
| 2021-10-28T20:35:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field as _field
from ...config import custom_scalars, datetime
from gql_client.runtime.variables import encode_variables
from gql import gql, Client
from gql.transport.exceptions import TransportQueryError
from functools import partial
from numbers import Number
from typing import Any, AsyncGenerator, Dict, List, Generator, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin, config
from ..fragment.customer import CustomerFragment, QUERY as CustomerFragmentQuery
# fmt: off
QUERY: List[str] = CustomerFragmentQuery + ["""
query CustomersQuery {
customers {
edges {
node {
...CustomerFragment
}
}
}
}
"""
]
class CustomersQuery:
@dataclass(frozen=True)
class CustomersQueryData(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerConnection(DataClassJsonMixin):
@dataclass(frozen=True)
class CustomerEdge(DataClassJsonMixin):
@dataclass(frozen=True)
class Customer(CustomerFragment):
pass
node: Optional[Customer]
edges: List[CustomerEdge]
customers: Optional[CustomerConnection]
# fmt: off
@classmethod
def execute(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = client.execute(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
# fmt: off
@classmethod
async def execute_async(cls, client: Client) -> Optional[CustomersQueryData.CustomerConnection]:
variables: Dict[str, Any] = {}
new_variables = encode_variables(variables, custom_scalars)
response_text = await client.execute_async(
gql("".join(set(QUERY))), variable_values=new_variables
)
res = cls.CustomersQueryData.from_dict(response_text)
return res.customers
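# Editor's note: a hypothetical usage sketch for the generated query class above (the transport
# and endpoint are placeholders, not part of the generated module):
#
#   from gql import Client
#   from gql.transport.requests import RequestsHTTPTransport
#   client = Client(transport=RequestsHTTPTransport(url="https://symphony.example/graph/query"))
#   connection = CustomersQuery.execute(client)
#   customers = [edge.node for edge in connection.edges if edge.node is not None]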
|
[
"jcaroper@everis.com"
] |
jcaroper@everis.com
|
aa49a4d64508c9fa62c1e3f29026d15008e407f4
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/surface/app/versions/delete.py
|
fe4a27d6de672df18ddf9b85bc4ecc86e88036db
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,568
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Delete command."""
import copy
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import service_util
from googlecloudsdk.api_lib.app import version_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.resource import resource_printer
from googlecloudsdk.core.util import text
class VersionsDeleteError(exceptions.Error):
"""Errors occurring when deleting versions."""
pass
class Delete(base.DeleteCommand):
"""Delete a specified version.
You cannot delete a version of a service that is currently receiving traffic.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To delete a specific version of a specific service, run:
$ {command} --service myService v1
To delete a named version across all services, run:
$ {command} v1
To delete multiple versions of a specific service, run:
$ {command} --service myService v1 v2
To delete multiple named versions across all services, run:
$ {command} v1 v2
""",
}
@staticmethod
def Args(parser):
parser.add_argument('versions', nargs='+', help=(
'The versions to delete (optionally filtered by the --service flag).'))
parser.add_argument('--service', '-s',
help=('If specified, only delete versions from the '
'given service.'))
def Run(self, args):
client = appengine_api_client.GetApiClient()
services = client.ListServices()
all_versions = client.ListVersions(services)
# Sort versions to make behavior deterministic enough for unit testing.
versions = sorted(version_util.GetMatchingVersions(all_versions,
args.versions,
args.service))
services_to_delete = []
for service in sorted(services):
if (len([v for v in all_versions if v.service == service.id]) ==
len([v for v in versions if v.service == service.id])):
services_to_delete.append(service)
for version in copy.copy(versions):
if version.service == service.id:
versions.remove(version)
for version in versions:
if version.traffic_split:
# TODO(user): mention `migrate` once it's implemented.
# TODO(b/32869800): collect info on all versions before raising.
raise VersionsDeleteError(
'Version [{version}] is currently serving {allocation:.2f}% of '
'traffic for service [{service}].\n\n'
            'Please move all traffic away by deploying a new version with the '
'`--promote` argument or running `gcloud app services '
'set-traffic`.'.format(
version=version.id,
allocation=version.traffic_split * 100,
service=version.service))
if services_to_delete:
word = text.Pluralize(len(services_to_delete), 'service')
log.warn('Requested deletion of all existing versions for the following '
'{0}:'.format(word))
resource_printer.Print(services_to_delete, 'list', out=log.status)
console_io.PromptContinue(prompt_string=(
'\nYou cannot delete all versions of a service. Would you like to '
'delete the entire {0} instead?').format(word), cancel_on_no=True)
service_util.DeleteServices(client, services_to_delete)
if versions:
fmt = 'list[title="Deleting the following versions:"]'
resource_printer.Print(versions, fmt, out=log.status)
console_io.PromptContinue(cancel_on_no=True)
else:
if not services_to_delete:
log.warn('No matching versions found.')
version_util.DeleteVersions(client, versions)
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
8831c689d0b7f8a70f197c219c2585d1b69ff287
|
9101cc8041d53ee4493941cecf8c5cbb5d7d4257
|
/P4HW2_RunningTotal_RiedToneshia.py
|
38f678f6402c5b660449a83c3acedf7bef3ebaa5
|
[] |
no_license
|
princess2597/CTI110
|
6c1943413318aed448a84f951ed274f114ae8960
|
64016e50f04bf775e9b807b8483f127587754116
|
refs/heads/master
| 2021-04-29T23:50:50.255991
| 2018-05-13T15:44:51
| 2018-05-13T15:44:51
| 121,564,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
#CTI - 110
#P4HW2: Running Total
#Toneshia Ried
#March 20, 2018
total = 0
userNumber = float( input("Enter a number? "))
while userNumber > -1:
total = total + userNumber
userNumber = float(input("Enter a number? "))
print("\nTotal:",total)
|
[
"noreply@github.com"
] |
princess2597.noreply@github.com
|
3a15676d41e89576cf66bdb38fe8953de3db54a0
|
347ae4f64d15c8af4455185adc0858362862bb3e
|
/pose_txt_data_merge.py
|
536eec6fefb2a6b391198c7e2eed79793b289236
|
[] |
no_license
|
AIHGF/Coding-Recording
|
f93b291dc78da1a54006b80df9ad58594c082745
|
60c179365d81af94e412ccd56129aa88c5b2d95f
|
refs/heads/master
| 2021-06-07T09:43:39.036211
| 2016-10-25T01:59:48
| 2016-10-25T01:59:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Randomly selecting some images
'''
import os
import re
import shutil
import random
if __name__ == '__main__':
# Read the json data from text file
f = open('pose.txt','r')
datas = f.readlines()
f.close()
#print datas
f_one = open('one_person.txt','r')
f_one_pose = open("one_pose.txt", 'w')
# print f.read()
datas_one = f_one.readlines()
print len(datas_one)
for fzdata in datas_one:
fzdatasplit = re.split('/ddpose/', fzdata)
fzdatasplit = fzdatasplit[1]
#print 'fzdatasplit:',fzdatasplit
#f_one_pose.write(fzdatasplit)
a = fzdatasplit[-35:-1]
#print 'a',str(a)
for data in datas:
imageName = re.split('.jpg', data)
imagesName = imageName[0]+'.jpg'
b = imagesName[-34:]
#print 'b',str(b)
if str(a)==str(b):
print 'Hello world!'
fzstring = imagesName + imageName[1]
print fzstring
f_one_pose.write(fzstring)
f_one_pose.close()
# slices = random.sample(datas, 500)
# #print slices
# for data in slices:
# datasplit = re.split('.jpg', data)
# fileName = datasplit[0]
# fileName = fileName+'.jpg'
# #print 'Processing the image: ', fileName
#
# #shutil.move(fileName,"./test/")
|
[
"noreply@github.com"
] |
AIHGF.noreply@github.com
|
ed7c9da316d6117584cb538fa5bdb542fb66e51c
|
7a4ca209d8c2c55ae7a721426be57fcb47bf1247
|
/Swansong/swansong/au/coreFunctions.py
|
baa05eb5b17812cd29d20bd736fee906ba95d161
|
[] |
no_license
|
fuankarion/SwS
|
1ca8a0c60b7f091621bae601b792a946c14f00cf
|
9ea4ac5547dc5708a0da2fba4068ce79dbc49301
|
refs/heads/master
| 2021-06-18T22:20:08.060272
| 2017-03-13T23:50:36
| 2017-03-13T23:50:36
| 82,957,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,329
|
py
|
from pylab import *
def trainNetworkBetterTrainDataLogTop2(solver, niter, batchesForTraining, targetLogFile, test_iters, logsPerEpoch, smallSetProportion):
train_loss = 0.0
train_accuracy = 0.0
train_accuracyTop3 = 0.0
smallSetIters = int(round(test_iters / smallSetProportion))#test with a small set inbetween epochs
for it in range(niter):
solver.step(1)
print('Iteration ', it)
train_loss = train_loss + solver.net.blobs['loss'].data
train_accuracy = train_accuracy + solver.net.blobs['accuracy'].data
train_accuracyTop3 = train_accuracyTop3 + solver.net.blobs['accuracyTop2'].data
#print('Iteration ', it, ' train_accuracy ', solver.net.blobs['accuracy'].data[0], ' train_accuracyTop3 ', solver.net.blobs['accuracyTop5'].data[0] )
#get loss and accuracy, by doing test_iters forward pass and avergaing results per batch
if (it % round(batchesForTraining / logsPerEpoch)) == 0 and it > 0:# logsPerEpoch
if (it % (round(batchesForTraining / logsPerEpoch) * logsPerEpoch)) != 0:#Do small test?
adaptedTest_iters = smallSetIters
else:
adaptedTest_iters = test_iters
test_acc = 0.0
test_loss = 0.0
test_accTop3 = 0.0
for i in range(adaptedTest_iters):
solver.test_nets[0].forward()#TODO what if we use more tha 1 test net
accuracyTemp = solver.test_nets[0].blobs['accuracy'].data
accuracyTempTop3 = solver.test_nets[0].blobs['accuracyTop2'].data
lossTemp = solver.test_nets[0].blobs['loss'].data
test_acc = test_acc + accuracyTemp
test_accTop3 = test_accTop3 + accuracyTempTop3
test_loss = test_loss + lossTemp
print('On test stage iter : ', i, ' accuracy ', accuracyTemp, ' loss ', lossTemp, ' accuracy Top2 ', accuracyTempTop3)
test_acc = test_acc / adaptedTest_iters
test_loss = test_loss / adaptedTest_iters
test_accTop3 = test_accTop3 / adaptedTest_iters
train_accuracy = train_accuracy / (batchesForTraining / logsPerEpoch)
train_accuracyTop3 = train_accuracyTop3 / (batchesForTraining / logsPerEpoch)
train_loss = train_loss / (batchesForTraining / logsPerEpoch)
print ('iter ', it, 'train loss:', train_loss, 'train accuracy ', train_accuracy, 'test losss', test_loss,
'test accuracy:', test_acc, 'test accuracy top 2 ', test_accTop3)
print ('')
if (it % (round(batchesForTraining / logsPerEpoch) * logsPerEpoch)) != 0:#write small test
with open(targetLogFile, 'a') as myfile:
myfile.write(str(it) + ',' + str(train_loss) + ',' + str(train_accuracy) + ',' + str(train_accuracyTop3) +
',' + str(test_loss) + ',' + str(test_acc) + ',' + str(test_accTop3) + '\n')
else:
with open(targetLogFile, 'a') as myfile:
myfile.write(str(it) + ',' + str(train_loss) + ',' + str(train_accuracy) + ',' + str(train_accuracyTop3) +
',' + str(test_loss) + ',' + str(test_acc) + ',' + str(test_accTop3) + ',X \n')
train_loss = 0.0
train_accuracy = 0.0
train_accuracyTop3 = 0.0
print 'You Actually got here :)'
def trainNetworkLog(solver, niter, batchesForTraining, targetLogFile, test_iters, logsPerEpoch, smallSetProportion):
train_loss = 0.0
train_accuracy = 0.0
smallSetIters = int(round(test_iters / smallSetProportion))#test with a small set inbetween epochs
for it in range(niter):
solver.step(1)
print('Iteration ', it)
train_loss = train_loss + solver.net.blobs['loss'].data
train_accuracy = train_accuracy + solver.net.blobs['accuracy'].data
        # get loss and accuracy by doing test_iters forward passes and averaging the results per batch
if (it % round(batchesForTraining / logsPerEpoch)) == 0 and it > 0:# logsPerEpoch
if (it % (round(batchesForTraining / logsPerEpoch) * logsPerEpoch)) != 0:#Do small test?
adaptedTest_iters = smallSetIters
else:
adaptedTest_iters = test_iters
test_acc = 0.0
test_loss = 0.0
for i in range(adaptedTest_iters):
solver.test_nets[0].forward()#TODO what if we use more tha 1 test net
accuracyTemp = solver.test_nets[0].blobs['accuracy'].data
test_acc = test_acc + accuracyTemp
lossTemp = solver.test_nets[0].blobs['loss'].data
test_loss = test_loss + lossTemp
print('On test stage iter : ', i, ' accuracy ', accuracyTemp, ' loss ', lossTemp)
test_acc = test_acc / adaptedTest_iters
test_loss = test_loss / adaptedTest_iters
train_accuracy = train_accuracy / (batchesForTraining / logsPerEpoch)
train_loss = train_loss / (batchesForTraining / logsPerEpoch)
print 'iter ', it, 'train loss:', train_loss, 'train accuracy ', train_accuracy, 'test losss', test_loss, 'test accuracy:', test_acc
print ''
if (it % (round(batchesForTraining / logsPerEpoch) * logsPerEpoch)) != 0:#write small test
with open(targetLogFile, 'a') as myfile:
myfile.write(str(it) + ',' + str(train_loss) + ',' + str(train_accuracy) + ',' + str(test_loss) + ',' + str(test_acc) + '\n')
else:
with open(targetLogFile, 'a') as myfile:
myfile.write(str(it) + ',' + str(train_loss) + ',' + str(train_accuracy) + ',' + str(test_loss) + ',' + str(test_acc) + ',X \n')
train_loss = 0.0
train_accuracy = 0.0
print 'You Actually got here :)'
|
[
"fuankarion@gmail.com"
] |
fuankarion@gmail.com
|
92136573be8db1267421978b237419950e01cd8b
|
2eec69f014b2111680904208e0a9bcb4f1c1e922
|
/module/bert_optim.py
|
b15fe5fffb82b4af73a861bf3567c6a7ceab3419
|
[
"MIT"
] |
permissive
|
UKPLab/mdl-stance-robustness
|
fc873d2ec95ee02866e03041123d8316bd677411
|
a8ef3f498e7f238d5224debe9bfce478e480201f
|
refs/heads/master
| 2023-07-19T21:51:14.086577
| 2022-05-17T12:55:18
| 2022-05-17T12:55:18
| 229,263,983
| 37
| 13
|
MIT
| 2023-07-06T21:36:14
| 2019-12-20T12:48:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,903
|
py
|
# Copyright (c) Microsoft. All rights reserved.
import math
import torch
from torch.optim import Optimizer
from torch.nn.utils import clip_grad_norm_
from pytorch_pretrained_bert.optimization import warmup_constant, warmup_cosine, warmup_linear
def warmup_linear_xdl(x, warmup=0.002):
if x < warmup:
return x/warmup
return (1.0 - x)/(1.0 - warmup)
def schedule_func(sch):
try:
f = eval(sch)
except:
f = warmup_linear
return f
class Adamax(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix (and no ).
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
        betas: Adam's (b1, b2) coefficients. Default: (0.9, 0.999)
        eps: Adam's epsilon. Default: 1e-6
weight_decay_rate: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
by xiaodl
"""
def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
betas=(0.9, 0.999), eps=1e-6, weight_decay_rate=0.01,
max_grad_norm=1.0):
if not lr >= 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
betas=betas, eps=eps, weight_decay_rate=weight_decay_rate,
max_grad_norm=max_grad_norm)
super(Adamax, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = schedule_func(group['schedule'])
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def to(self, device):
""" Move the optimizer state to a specified device"""
for state in self.state.values():
state['exp_avg'].to(device)
state['exp_avg_sq'].to(device)
def initialize_step(self, initial_step):
"""Initialize state with a defined step (but we don't have stored averaged).
Arguments:
initial_step (int): Initial step number.
"""
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
# State initialization
state['step'] = initial_step
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_inf'] = torch.zeros_like(p.data)
exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
beta1, beta2 = group['betas']
eps = group['eps']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Update biased first moment estimate.
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# Update the exponentially weighted infinity norm.
norm_buf = torch.cat([
exp_inf.mul_(beta2).unsqueeze(0),
grad.abs().add_(eps).unsqueeze_(0)
], 0)
torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
update = exp_avg / (exp_inf + eps)
if group['weight_decay_rate'] > 0.0:
update += group['weight_decay_rate'] * p.data
if group['t_total'] != -1:
schedule_fct = schedule_func(group['schedule'])
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss
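# Hedged usage sketch: assumes a PyTorch module and a standard training step; the
# hyperparameter values are illustrative and not taken from any particular config.
#
# model = torch.nn.Linear(768, 2)
# optimizer = Adamax(model.parameters(), lr=5e-5, warmup=0.1, t_total=10000)
# logits = model(torch.randn(8, 768))
# loss = logits.sum()
# loss.backward()
# optimizer.step()
# model.zero_grad()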
|
[
"schiller@ukp.informatik.tu-darmstadt.de"
] |
schiller@ukp.informatik.tu-darmstadt.de
|
dd36c366f4d20cc72cc9ef6c741e12b99e6c982a
|
f6150e02de746599964dc3fc4d2bde839f9b6e69
|
/preprocessing/preprocessing_factory.py
|
b00081b8ca1685bf0fe88684ecf348f3ef996b31
|
[] |
no_license
|
margaux-schorn/classification_visages
|
0be004645ecd942df860eebc43e11fa9c6319933
|
d6c1d740aa0e74b430bf384f943e33e990a226c1
|
refs/heads/master
| 2020-03-08T20:16:15.226824
| 2018-05-18T09:15:51
| 2018-05-18T09:15:51
| 128,377,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
    preprocessing_fn: A function that preprocesses a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
return preprocessing_fn
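# Hedged usage sketch: the model name and 299x299 output size are illustrative
# choices from the map above; the input placeholder shape is an assumption.
#
# image = tf.placeholder(tf.uint8, shape=[None, None, 3])
# preprocess_fn = get_preprocessing('inception_v3', is_training=False)
# processed_image = preprocess_fn(image, 299, 299)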
|
[
"margaux@Margauxs-MacBook-Pro.local"
] |
margaux@Margauxs-MacBook-Pro.local
|
c281f4e1e98d33496387c7144dae0dd964528490
|
22c686df4887171b8f3c13ed3e60823fc78bcfd3
|
/venv/Lib/site-packages/dotenv/parser.py
|
bd60cab4026488e11ffa624d74cac3513389a21a
|
[] |
no_license
|
farooqiusman/Discord_bot
|
5ffb2b5576ebafc6f5418eda28645a4fc128add9
|
990a305ffcc3359ed2a59d211e32212e1d5447df
|
refs/heads/master
| 2023-07-14T07:55:24.780523
| 2021-09-03T18:06:42
| 2021-09-03T18:06:42
| 234,797,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,005
|
py
|
import codecs
import re
from .compat import IS_TYPE_CHECKING, to_text
if IS_TYPE_CHECKING:
from typing import ( # noqa:F401
IO, Iterator, Match, NamedTuple, Optional, Pattern, Sequence, Text,
Tuple
)
def make_regex(string, extra_flags=0):
# type: (str, int) -> Pattern[Text]
return re.compile(to_text(string), re.UNICODE | extra_flags)
_newline = make_regex(r"(\r\n|\n|\r)")
_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
_export = make_regex(r"(?:export[^\S\r\n]+)?")
_single_quoted_key = make_regex(r"'([^']+)'")
_unquoted_key = make_regex(r"([^=\#\s]+)")
_equal_sign = make_regex(r"([^\S\r\n]*=[^\S\r\n]*)?")
_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
_unquoted_value_part = make_regex(r"([^ \r\n]*)")
_comment = make_regex(r"(?:\s*#[^\r\n]*)?")
_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r)?")
_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
_single_quote_escapes = make_regex(r"\\[\\']")
try:
# this is necessary because we only import these from typing
# when we are type checking, and the linter is upset if we
# re-import
import typing
Original = typing.NamedTuple(
"Original",
[
("string", typing.Text),
("line", int),
],
)
Binding = typing.NamedTuple(
"Binding",
[
("key", typing.Optional[typing.Text]),
("value", typing.Optional[typing.Text]),
("original", Original),
],
)
except ImportError:
from collections import namedtuple
Original = namedtuple( # type: ignore
"Original",
[
"string",
"line",
],
)
Binding = namedtuple( # type: ignore
"Binding",
[
"key",
"value",
"original",
],
)
class Position:
def __init__(self, chars, line):
# type: (int, int) -> None
self.chars = chars
self.line = line
@classmethod
def start(cls):
# type: () -> Position
return cls(chars=0, line=1)
def set(self, other):
# type: (Position) -> None
self.chars = other.chars
self.line = other.line
def advance(self, string):
# type: (Text) -> None
self.chars += len(string)
self.line += len(re.findall(_newline, string))
class Error(Exception):
pass
class Reader:
def __init__(self, stream):
# type: (IO[Text]) -> None
self.string = stream.read()
self.position = Position.start()
self.mark = Position.start()
def has_next(self):
# type: () -> bool
return self.position.chars < len(self.string)
def set_mark(self):
# type: () -> None
self.mark.set(self.position)
def get_marked(self):
# type: () -> Original
return Original(
string=self.string[self.mark.chars:self.position.chars],
line=self.mark.line,
)
def peek(self, count):
# type: (int) -> Text
return self.string[self.position.chars:self.position.chars + count]
def read(self, count):
# type: (int) -> Text
result = self.string[self.position.chars:self.position.chars + count]
if len(result) < count:
raise Error("read: End of string")
self.position.advance(result)
return result
def read_regex(self, regex):
# type: (Pattern[Text]) -> Sequence[Text]
match = regex.match(self.string, self.position.chars)
if match is None:
raise Error("read_regex: Pattern not found")
self.position.advance(self.string[match.start():match.end()])
return match.groups()
def decode_escapes(regex, string):
# type: (Pattern[Text], Text) -> Text
def decode_match(match):
# type: (Match[Text]) -> Text
return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
return regex.sub(decode_match, string)
def parse_key(reader):
# type: (Reader) -> Text
char = reader.peek(1)
if char == "'":
(key,) = reader.read_regex(_single_quoted_key)
else:
(key,) = reader.read_regex(_unquoted_key)
return key
def parse_unquoted_value(reader):
# type: (Reader) -> Text
value = u""
while True:
(part,) = reader.read_regex(_unquoted_value_part)
value += part
after = reader.peek(2)
if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n":
return value
value += reader.read(2)
def parse_value(reader):
# type: (Reader) -> Text
char = reader.peek(1)
if char == u"'":
(value,) = reader.read_regex(_single_quoted_value)
return decode_escapes(_single_quote_escapes, value)
elif char == u'"':
(value,) = reader.read_regex(_double_quoted_value)
return decode_escapes(_double_quote_escapes, value)
elif char in (u"", u"\n", u"\r"):
return u""
else:
return parse_unquoted_value(reader)
def parse_binding(reader):
# type: (Reader) -> Binding
reader.set_mark()
try:
reader.read_regex(_whitespace)
reader.read_regex(_export)
key = parse_key(reader)
(sign,) = reader.read_regex(_equal_sign)
value = parse_value(reader) if sign else None
reader.read_regex(_comment)
reader.read_regex(_end_of_line)
return Binding(
key=key,
value=value,
original=reader.get_marked(),
)
except Error:
reader.read_regex(_rest_of_line)
return Binding(
key=None,
value=None,
original=reader.get_marked(),
)
def parse_stream(stream):
# type: (IO[Text]) -> Iterator[Binding]
reader = Reader(stream)
while reader.has_next():
yield parse_binding(reader)
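# Hedged usage sketch: parses a small in-memory .env-style document; the sample
# content below is illustrative.
#
# import io
# sample = u"export FOO='bar'\n# a comment\nBAZ=qux\n"
# for binding in parse_stream(io.StringIO(sample)):
#     if binding.key is not None:
#         print(binding.key, binding.value)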
|
[
"34846941+Chobaka78@users.noreply.github.com"
] |
34846941+Chobaka78@users.noreply.github.com
|
d694c8662397c906eb3675d91e5b303572a9b9ff
|
e543218db21c0232ab6c688cbbec7d49cf54e1de
|
/M5HW3_Factorial_lee.py
|
64031e77807c5cc854c7415d06957b995901d2d1
|
[] |
no_license
|
jeffnivy/cti110
|
edb77132f223a903d86ab107c9d9584e144367a3
|
2e42c9a36cfc594265524825f79e1b4f440466ea
|
refs/heads/master
| 2021-01-23T10:37:02.426149
| 2017-12-11T16:02:08
| 2017-12-11T16:02:08
| 102,623,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
#CTI 110
#M5HW3 Factorial
#Jeffrey Lee
#17 Oct 2017
#Write a program that asks the user for a nonnegative integer
#then uses a loop to calculate the factorial of that number
#Display the factorial
userInteger = int( input( "Please enter a number: " ) )
while userInteger < 1:
userInteger = int( input( "Please enter a positive number please: " ) )
factorial = 1
for currentNumber in range( 1, userInteger + 1 ):
factorial = factorial * currentNumber
print( "The factorial of", userInteger, "is" , factorial )
|
[
"noreply@github.com"
] |
jeffnivy.noreply@github.com
|
faf45b629da2c9b6f878c086d6691fdf8be9c9f5
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/15/31/5.py
|
0b76face2b61578a0a63ae7ae2bee12b06fe88cd
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import os
import sys
from collections import defaultdict
problem_id = 'A'
sys.setrecursionlimit(10**9)
input_path = '%s.in' % problem_id
output_path = '%s.out' % problem_id
def read_line():
line = ''
while len(line) == 0:
line = input_file.readline().strip()
return line
def write_line(line):
print line
return output_file.write(line + os.linesep)
def solve():
r, c, w = map(int, read_line().split(' '))
nc = (c / w) * r + (w - 1)
if c % w:
nc += 1
return '%s' % nc
input_file = open(input_path, "r")
output_file = open(output_path, "w+")
T = int(read_line())
for case_id in xrange(1, T + 1):
write_line("Case #%d: %s" % (case_id, solve()))
input_file.close()
output_file.close()
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
7d0b9e321fad687717ba261f712748cb57d968a3
|
7848ded2f7b1cf5cc33380d739e0ceee5718ffec
|
/imrunicorn/activity_log/migrations/0006_auto_20210218_0756.py
|
73aa939743b218d1fe05de35fdd5684fce3b3c7e
|
[] |
no_license
|
benspelledabc/djangosite
|
cbed1a7da3eb6ba6eee05897ec928b350831fc6b
|
fa8004b20f790f56fc69e9d158128a867be700f3
|
refs/heads/master
| 2023-04-17T19:24:48.908640
| 2021-05-02T19:05:38
| 2021-05-02T19:05:38
| 294,891,690
| 1
| 1
| null | 2021-05-02T19:05:38
| 2020-09-12T07:16:11
|
Python
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
# Generated by Django 3.0.7 on 2021-02-18 12:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('activity_log', '0005_activity_sfw'),
]
operations = [
migrations.AlterModelOptions(
name='activityphotovalidation',
options={'ordering': ('-activity_log', 'id'), 'verbose_name': 'Activity Photo Validation', 'verbose_name_plural': 'Activity Photo Validations'},
),
]
|
[
"admin@benspelledabc.me"
] |
admin@benspelledabc.me
|
a9e4612761077a7438abaa3443204f7a17ee97f7
|
dae1ea21e75f80513594ea1efd887db9b4413ac2
|
/Spaceship.py
|
553889eb672925dbbe08a87176079fcd3abafa5f
|
[] |
no_license
|
Immortalits/Python-OOP
|
70aeadbd718b2017ca2669250010715ac9d00548
|
45396c24235b90bb7ec039b702209b24cfc29221
|
refs/heads/main
| 2023-08-14T17:10:26.880888
| 2021-10-10T14:02:23
| 2021-10-10T14:02:23
| 410,756,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
class Spaceship:
fuel = 400
passengers = ["John", "Steve", "Sam", "Danielle"]
Shields = True
Speedometer = 0
def listPassengers(self):
for passenger in self.passengers:
print(f'Passenger: {passenger}')
def add_passenger(self, new_passenger):
self.passengers.append(new_passenger)
print(f'{new_passenger} was added to the ship.')
def travel(self, distance):
print(f'Trying to travel {distance}.')
if self.fuel <= 0:
print("Can't go further, tank is empty.")
else:
self.fuel = self.fuel - (distance / 2)
if self.fuel < 0:
distance = (distance - (self.fuel * -2))
print(f"Can only travel {distance}.")
self.fuel = 0
self.Speedometer += distance
if self.fuel < 30 and self.Shields:
self.Shields = False
print("Fuel is low, turning off shields!")
print(f"The spaceship is at {self.Speedometer}.")
print(f"The spaceship has {self.fuel} fuel.")
mySpaceship = Spaceship()
mySpaceship.listPassengers()
mySpaceship.add_passenger('Lindsay')
mySpaceship.listPassengers()
mySpaceship.travel(750)
mySpaceship.travel(200)
mySpaceship.travel(100)
|
[
"zazib03@gmail.com"
] |
zazib03@gmail.com
|
e1ea4c169eac6a692d0243c2fe8e607a7bc281e2
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/tests/components/github/test_diagnostics.py
|
80dfaec24459735e6cd3e4ebee2a1a78979dbbc2
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
"""Test GitHub diagnostics."""
import json
from aiogithubapi import GitHubException
from aiohttp import ClientSession
from homeassistant.components.github.const import CONF_REPOSITORIES, DOMAIN
from homeassistant.core import HomeAssistant
from .common import setup_github_integration
from tests.common import MockConfigEntry, load_fixture
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_entry_diagnostics(
hass: HomeAssistant,
hass_client: ClientSession,
mock_config_entry: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics."""
mock_config_entry.options = {CONF_REPOSITORIES: ["home-assistant/core"]}
response_json = json.loads(load_fixture("graphql.json", DOMAIN))
response_json["data"]["repository"]["full_name"] = "home-assistant/core"
aioclient_mock.post(
"https://api.github.com/graphql",
json=response_json,
headers=json.loads(load_fixture("base_headers.json", DOMAIN)),
)
aioclient_mock.get(
"https://api.github.com/rate_limit",
json={"resources": {"core": {"remaining": 100, "limit": 100}}},
headers={"Content-Type": "application/json"},
)
await setup_github_integration(hass, mock_config_entry, aioclient_mock)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
mock_config_entry,
)
assert result["options"]["repositories"] == ["home-assistant/core"]
assert result["rate_limit"] == {
"resources": {"core": {"remaining": 100, "limit": 100}}
}
assert (
result["repositories"]["home-assistant/core"]["full_name"]
== "home-assistant/core"
)
async def test_entry_diagnostics_exception(
hass: HomeAssistant,
hass_client: ClientSession,
init_integration: MockConfigEntry,
aioclient_mock: AiohttpClientMocker,
) -> None:
"""Test config entry diagnostics with exception for ratelimit."""
aioclient_mock.get(
"https://api.github.com/rate_limit",
exc=GitHubException("error"),
)
result = await get_diagnostics_for_config_entry(
hass,
hass_client,
init_integration,
)
assert (
result["rate_limit"]["error"]
== "Unexpected exception for 'https://api.github.com/rate_limit' with - error"
)
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
65b580895a9731ed5298866a09c1fbe51152a79f
|
a131266796fecf211f666d23274f12a20cc1f366
|
/render.py
|
2dead4d479662cd0cb0cee984a2ee564d7a79cdd
|
[] |
no_license
|
Praron/3D-render-learning
|
d07afc74291f17f011d005f36513749ae8e0d07d
|
edfae239c165469a3bcfa499a36b7d1c4c31cca4
|
refs/heads/master
| 2021-01-20T18:07:50.051555
| 2016-07-03T23:39:29
| 2016-07-03T23:39:29
| 62,517,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,438
|
py
|
from PIL import Image
from vector import *
import re
import random
from functools import partial
filename = 'head.obj'
H = 1000
W = 1000
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
def parseVertices(file):
file.seek(0)
    # Get only strings with vertices
strings = filter(lambda s: re.match('^v +', s), file.readlines())
    # Get a list of vertex tuples ([1:] skips the leading 'v' token)
return list(map(lambda s: tuple(map(float, s.split()[1:])), strings))
def parseFaces(file):
file.seek(0)
strings = filter(lambda s: re.match('^f +', s), file.readlines())
return list(map(lambda s: tuple(map(lambda p: int(p.split('/')[0]) - 1,
s.split()[1:])), strings))
def frange(x=0, y=1, step=1.0):
while x < y:
yield x
x += step
def _resize(p, w, h):  # From .obj coordinates to screen coordinates
    return ((p[0] + 1) * w / 2, (-1 * p[1] + 1) * h / 2)
def dot(pixels, x, y, color):
try:
pixels[x, y] = tuple([int(c) for c in color])
except IndexError:
pass
def line(pixels, x0, y0, x1, y1, color):
if abs(x0 - x1) < abs(y0 - y1):
x0, y0, x1, y1 = y0, x0, y1, x1
steep = True
else:
steep = False
if x0 > x1:
x0, y0, x1, y1 = x1, y1, x0, y0
dx = x1 - x0
y = y1 - y0
derror = 2 * abs(y)
error = 0
y = y0
for x in frange(x0, x1):
if steep:
dot(pixels, y, x, color)
else:
dot(pixels, x, y, color)
error += derror
if error > dx:
y += 1 if y0 < y1 else -1
error -= 2 * dx
def triangle(pixels, p0, p1, p2, color):
# Sort and make Vectors from tuples
p0, p1, p2 = map(lambda p: Vector(*p),
sorted([p0, p1, p2], key=lambda Vector: Vector[1]))
total_h = p2.y - p0.y + 1
first_h = p1.y - p0.y + 1
second_h = p2.y - p1.y + 1
total_w = p2.x - p0.x
first_w = p1.x - p0.x
second_w = p2.x - p1.x
for y in frange(0, total_h):
first_part = y < first_h and first_h != 0
current_h = first_h if first_part else second_h
a = y / total_h
b = (y - ((0 if first_part else first_h))) / current_h
ax = p0.x + total_w * a
bx = p0.x + first_w * b if first_part else p1.x + second_w * b
if ax > bx:
ax, bx = bx, ax
line(pixels, ax, p0.y + y, bx, p0.y + y, color)
def render_obj(pixels, verts, faces, color):
for face in faces:
screen_vec = Vector(*(_resize(verts[face[i]], W, H)
for i in range(3)))
world_vec = [Vector(*verts[face[i]]) for i in range(3)]
n = (world_vec[2] - world_vec[0]) % (world_vec[1] - world_vec[0])
n = n.normalize()
light = -1 * n * Vector(0, 0, 0.5)
if light > 0:
triangle(pixels, *screen_vec,
color=(tuple(light * c for c in color)))
def main():
img = Image.new('RGB', (W, H), 'black')
file = open(filename, 'r')
verts = parseVertices(file)
faces = parseFaces(file)
pixels = img.load()
render_obj(pixels, verts, faces, (255, 150, 100))
img.show()
# img.save('/home/escapsit/Programming/3D rendering/result.png')
if __name__ == '__main__':
main()
|
[
"koctr123@mail.ru"
] |
koctr123@mail.ru
|
b28c1d72325c7045c6e87f2201c4ee48fe23cab0
|
cc1e28c1647729b50db74a7acacb537ba8c72255
|
/main.py
|
fdd3b53e970bd996939e0f312d80314244990c9a
|
[] |
no_license
|
myrepositorygithub/GemFarm
|
9e8e61808348584f3154c374af33df74905c6148
|
7dd27cbe60f0b29b805acd39d265732a239eed69
|
refs/heads/master
| 2020-05-03T03:30:54.795740
| 2017-08-05T00:32:49
| 2017-08-05T00:32:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,881
|
py
|
import multiprocessing
import pyHook
import pythoncom
from actions import *
from time import time
import sys,os
from sub import *
from datetime import date
def OnKeyboardEvent(event):
global ant,macros
millis = int(round(time() * 1000))
#print 'KeyID:', event.KeyID,millis-ant
#print '.',
if event.KeyID == 97:# 1
state = (macros.getState(0) + 1 )%2
macros.setState(0,state);
#print 'Macro N 1 Activated'
pass
if event.KeyID == 98:# 2
state = (macros.getState(1) + 1 )%2
macros.setState(1,state);
#print 'Macro N 1 Activated'
pass
if event.KeyID == 99:# 3
state = (macros.getState(2) + 1 )%2
macros.setState(2,state);
#print 'Macro N 1 Activated'
pass
if event.KeyID == 100:# 4
state = (macros.getState(3) + 1 )%2
macros.setState(3,state);
if state == 1:
farmThread = multiprocessing.Process(target=farmaSiena, args=(3,macros,))
farmThread.start()
pass
if event.KeyID == 101:# 5
state = (macros.getState(4) + 1 )%2
macros.setState(4,state);
if state == 1:
farmThread = multiprocessing.Process(target=quebraItens, args=(macros,))
farmThread.start()
pass
if event.KeyID == 102:# 6
quebraItens(macros)
#carregaOpc()
#state = (macros.getState(3) + 1 )%2
#macros.setState(3,state);
pass
if event.KeyID == 103:# 7
state = (macros.getState(5) + 1 )%2
macros.setState(5,state);
if state == 1:
farmThread = multiprocessing.Process(target=apagaChar, args=(5,macros,))
farmThread.start()
#carregaOpc()
#state = (macros.getState(4) + 1 )%2
#macros.setState(4,state);
#apagaChar(macros)
pass
if event.KeyID == 104:# 8
state = (macros.getState(6) + 1 )%2
macros.setState(6,state);
if state == 1:
farmThread = multiprocessing.Process(target=abreItens, args=(6,macros,))
farmThread.start()
#loga()
pass
if event.KeyID == 105:# 9
state = (macros.getState(7) + 1 )%2
macros.setState(7,state);
if state == 1:
farmThread = multiprocessing.Process(target=criaChar, args=(7,macros,))
farmThread.start()
#criaChar(macros)
pass
if event.KeyID == 27:# esc
#verificaSub()
#demuxSub(pegaSub())
macros.carregaOpc()
print '************* Loaded Configs *************'
#os._exit(0)
ant = millis
return True
if __name__ == '__main__':
d0 = date.today()
d1 = date(2018, 6, 19)
delta = d1 - d0
print delta.days, 'Days of free use'
if delta.days < 0:
print '************ Expirado **********'
os.system('timeout 10')
os._exit(0)
hm = pyHook.HookManager()
multiprocessing.freeze_support()
m_flag = [0]
ant = int(round(time() * 1000))
try:
f = open('config/macros.cfg','r')
except:
        f = open('config/macros.cfg','w')
f.write('z{wait9999}\n'*3)
f.close()
f = open('config/macros.cfg','r')
comandos = f.read().split('\n')
f.close()
macros = semaforo(0)
#macros.carregaOpc()
#print comandos
m1 = multiprocessing.Process(target=worker, args=(0,macros,))
m2 = multiprocessing.Process(target=worker, args=(1,macros,))
m3 = multiprocessing.Process(target=worker, args=(2,macros,))
m1.start()
m2.start()
m3.start()
print '************* Ready *************'
hm.KeyDown = OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
|
[
"thiagoo.cabral@gmail.com"
] |
thiagoo.cabral@gmail.com
|
fd287cb7728cad58dbce3d3fc63cf575d98d8dc6
|
a1a6afe657678e3d08c9e5e06da33c803a3b3ab5
|
/script.py
|
969fdcb8c0df82e8e8151fc740238c99b751c600
|
[
"Unlicense"
] |
permissive
|
mattnewell/karabiner-windows-mode
|
dbfbe89d79a013a9b12098635502168e70383151
|
25f43c24219bb89f7018733c93abdb2d7e5dcdde
|
refs/heads/master
| 2020-09-05T11:58:13.378450
| 2019-11-07T12:51:30
| 2019-11-07T12:51:30
| 220,096,937
| 0
| 0
|
Unlicense
| 2019-11-06T21:49:45
| 2019-11-06T21:49:45
| null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
import json
with open("../karabiner-windows-mode/windows_shortcuts.json", "r") as read_file:
data = json.load(read_file)
for rule in data["rules"]:
for manipulator in rule["manipulators"]:
if manipulator.get("conditions"):
for condition in manipulator["conditions"]:
condition["bundle_identifiers"].append("^com\\.jetbrains\\.pycharm$")
# for ident in condition["bundle_identifiers"]:
# ident.append("foo")
with open("data_file.json", "w") as write_file:
json.dump(data, write_file, indent=2)
|
[
"matthew.newell@northwesternmutual.com"
] |
matthew.newell@northwesternmutual.com
|
93c28615161ad665f97e89b09222deb2b306f097
|
5c46090e9c71be328e0ff01ef8967ca0aef3000b
|
/0x01-python-if_else_loops_functions/2-print_alphabet.py
|
d267221c90ebe7cfd6c5f91ad266aab5f609174a
|
[] |
no_license
|
DiegoRmsR/holbertonschool-higher_level_programming
|
160f3da74f09701ff1e4fd4ec74f71336800044b
|
59aaefddf2a4e155e13c603cf81bf9577f774309
|
refs/heads/master
| 2020-07-23T00:27:56.986587
| 2020-02-14T02:53:47
| 2020-02-14T02:53:47
| 207,383,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
#!/usr/bin/python3
for alp in range(97, 123):
print(end="{:c}".format(alp))
|
[
"Diegoramos.mt@gmail.com"
] |
Diegoramos.mt@gmail.com
|
26e87590c5d6f4db03a3f56df1693c4555695eb6
|
1c2a0744f5a858894bdefd96687cb64b91fbb7c4
|
/assignment3/q2_rnn.py
|
d8c335f2d1fb5167198ea4aacf86296a76feae40
|
[] |
no_license
|
Kolento93/cs224n
|
516f5deb56124c91c900dbdf89eb4e5ea75592c4
|
1c3709ab05022e4547fc0a0280315b83143d98a5
|
refs/heads/master
| 2021-08-23T23:44:13.607862
| 2017-12-07T03:57:33
| 2017-12-07T03:57:33
| 112,143,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,759
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Q2: Recurrent neural nets for NER
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import sys
import time
from datetime import datetime
import tensorflow as tf
import numpy as np
from util import print_sentence, write_conll, read_conll
from data_util import load_and_preprocess_data, load_embeddings, ModelHelper
from ner_model import NERModel
from defs import LBLS
from q2_rnn_cell import RNNCell
from q3_gru_cell import GRUCell
logger = logging.getLogger("hw3.q2")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class Config:
"""Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters. Model objects are passed a Config() object at
instantiation.
"""
n_word_features = 2 # Number of features for every word in the input.
window_size = 1
n_features = (2 * window_size + 1) * n_word_features # Number of features for every word in the input.
max_length = 120 # longest sequence to parse
n_classes = 5
dropout = 0.5
embed_size = 50
hidden_size = 300
batch_size = 32
n_epochs = 10
max_grad_norm = 10.
lr = 0.001
#np.array()
def __init__(self, args):
self.cell = args.cell
if "model_path" in args:
# Where to save things.
self.output_path = args.model_path
else:
self.output_path = "results/{}/{:%Y%m%d_%H%M%S}/".format(self.cell, datetime.now())
self.model_output = self.output_path + "model.weights"
self.eval_output = self.output_path + "results.txt"
self.conll_output = self.output_path + "{}_predictions.conll".format(self.cell)
self.log_output = self.output_path + "log"
def pad_sequences(data, max_length):
"""Ensures each input-output seqeunce pair in @data is of length
@max_length by padding it with zeros and truncating the rest of the
sequence.
TODO: In the code below, for every sentence, labels pair in @data,
(a) create a new sentence which appends zero feature vectors until
the sentence is of length @max_length. If the sentence is longer
than @max_length, simply truncate the sentence to be @max_length
long.
(b) create a new label sequence similarly.
(c) create a _masking_ sequence that has a True wherever there was a
token in the original sequence, and a False for every padded input.
Example: for the (sentence, labels) pair: [[4,1], [6,0], [7,0]], [1,
0, 0], and max_length = 5, we would construct
- a new sentence: [[4,1], [6,0], [7,0], [0,0], [0,0]]
        - a new label sequence: [1, 0, 0, 4, 4], and
        - a masking sequence: [True, True, True, False, False].
Args:
data: is a list of (sentence, labels) tuples. @sentence is a list
containing the words in the sentence and @label is a list of
output labels. Each word is itself a list of
@n_features features. For example, the sentence "Chris
Manning is amazing" and labels "PER PER O O" would become
([[1,9], [2,9], [3,8], [4,8]], [1, 1, 4, 4]). Here "Chris"
the word has been featurized as "[1, 9]", and "[1, 1, 4, 4]"
is the list of labels.
max_length: the desired length for all input/output sequences.
Returns:
a new list of data points of the structure (sentence', labels', mask).
Each of sentence', labels' and mask are of length @max_length.
See the example above for more details.
"""
ret = []
# Use this zero vector when padding sequences.
zero_vector = [0] * Config.n_features
zero_label = 4 # corresponds to the 'O' tag
for sentence, labels in data:
### YOUR CODE HERE (~4-6 lines)
labels = labels[:max_length] + [zero_label] * max(0,max_length - len(sentence))
mask = [False] * max_length
mask[:len(sentence)] = [True] * min(len(sentence),max_length)
sentence = sentence[:max_length] + [zero_vector] * max(0,max_length - len(sentence))
ret.append((sentence,labels,mask))
### END YOUR CODE ###
return ret
class RNNModel(NERModel):
"""
    Implements a recurrent neural network with an embedding layer and
single hidden layer.
This network will predict a sequence of labels (e.g. PER) for a
given token (e.g. Henry) using a featurized window around the token.
"""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building and will be fed
data during training. Note that when "None" is in a placeholder's shape, it's flexible
(so we can use different batch sizes without rebuilding the model).
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape (None, self.max_length, n_features), type tf.int32
labels_placeholder: Labels placeholder tensor of shape (None, self.max_length), type tf.int32
mask_placeholder: Mask placeholder tensor of shape (None, self.max_length), type tf.bool
dropout_placeholder: Dropout value placeholder (scalar), type tf.float32
TODO: Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.mask_placeholder
self.dropout_placeholder
HINTS:
- Remember to use self.max_length NOT Config.max_length
(Don't change the variable names)
"""
### YOUR CODE HERE (~4-6 lines)
self.input_placeholder = tf.placeholder(shape = (None,self.max_length,self.config.n_features),dtype = tf.int32)
self.labels_placeholder = tf.placeholder(shape = (None,self.max_length),dtype = tf.int32)
self.mask_placeholder = tf.placeholder(shape = (None,self.max_length),dtype = tf.bool)
self.dropout_placeholder = tf.placeholder(dtype = tf.float32)
### END YOUR CODE
def create_feed_dict(self, inputs_batch, mask_batch, labels_batch=None, dropout=1):
"""Creates the feed_dict for the dependency parser.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Hint: The keys for the feed_dict should be a subset of the placeholder
tensors created in add_placeholders.
Hint: When an argument is None, don't add it to the feed_dict.
Args:
inputs_batch: A batch of input data.
mask_batch: A batch of mask data.
labels_batch: A batch of label data.
dropout: The dropout rate.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE (~6-10 lines)
feed_dict = {self.input_placeholder:inputs_batch,self.mask_placeholder:mask_batch,\
self.dropout_placeholder:dropout}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
### END YOUR CODE
return feed_dict
def add_embedding(self):
"""Adds an embedding layer that maps from input tokens (integers) to vectors and then
concatenates those vectors:
TODO:
- Create an embedding tensor and initialize it with self.pretrained_embeddings.
- Use the input_placeholder to index into the embeddings tensor, resulting in a
tensor of shape (None, max_length, n_features, embed_size).
- Concatenates the embeddings by reshaping the embeddings tensor to shape
(None, max_length, n_features * embed_size).
HINTS:
- You might find tf.nn.embedding_lookup useful.
- You can use tf.reshape to concatenate the vectors. See
following link to understand what -1 in a shape means.
https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#reshape.
Returns:
embeddings: tf.Tensor of shape (None, max_length, n_features*embed_size)
"""
### YOUR CODE HERE (~4-6 lines)
embeddings = tf.nn.embedding_lookup(self.pretrained_embeddings,self.input_placeholder)
embeddings = tf.reshape(embeddings,shape = (-1,self.max_length,self.config.n_features*self.config.embed_size))
### END YOUR CODE
return embeddings
def add_prediction_op(self):
"""Adds the unrolled RNN:
h_0 = 0
for t in 1 to T:
o_t, h_t = cell(x_t, h_{t-1})
o_drop_t = Dropout(o_t, dropout_rate)
y_t = o_drop_t U + b_2
TODO: There a quite a few things you'll need to do in this function:
- Define the variables U, b_2.
        - Define the vector h as a constant and initialize it with
zeros. See tf.zeros and tf.shape for information on how
to initialize this variable to be of the right shape.
https://www.tensorflow.org/api_docs/python/constant_op/constant_value_tensors#zeros
https://www.tensorflow.org/api_docs/python/array_ops/shapes_and_shaping#shape
- In a for loop, begin to unroll the RNN sequence. Collect
the predictions in a list.
- When unrolling the loop, from the second iteration
onwards, you will HAVE to call
tf.get_variable_scope().reuse_variables() so that you do
not create new variables in the RNN cell.
See https://www.tensorflow.org/versions/master/how_tos/variable_scope/
- Concatenate and reshape the predictions into a predictions
tensor.
Hint: You will find the function tf.pack (similar to np.asarray)
useful to assemble a list of tensors into a larger tensor.
https://www.tensorflow.org/api_docs/python/array_ops/slicing_and_joining#pack
Hint: You will find the function tf.transpose and the perms
argument useful to shuffle the indices of the tensor.
https://www.tensorflow.org/api_docs/python/array_ops/slicing_and_joining#transpose
Remember:
            * Use the xavier initialization for matrices.
* Note that tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.
The keep probability should be set to the value of self.dropout_placeholder
Returns:
pred: tf.Tensor of shape (batch_size, max_length, n_classes)
"""
x = self.add_embedding()
dropout_rate = self.dropout_placeholder
preds = [] # Predicted output at each timestep should go here!
# Use the cell defined below. For Q2, we will just be using the
# RNNCell you defined, but for Q3, we will run this code again
# with a GRU cell!
if self.config.cell == "rnn":
cell = RNNCell(Config.n_features * Config.embed_size, Config.hidden_size)
elif self.config.cell == "gru":
cell = GRUCell(Config.n_features * Config.embed_size, Config.hidden_size)
else:
raise ValueError("Unsuppported cell type: " + self.config.cell)
# Define U and b2 as variables.
# Initialize state as vector of zeros.
### YOUR CODE HERE (~4-6 lines)
U = tf.get_variable(name = 'U',shape = (self.config.hidden_size,self.config.n_classes),\
initializer = tf.contrib.layers.xavier_initializer())
b_2 = tf.get_variable(name = 'b_2',shape = (self.config.n_classes),\
initializer = tf.contrib.layers.xavier_initializer())
state = tf.zeros(name = 'h_0',shape = (tf.shape(x)[0],self.config.hidden_size))#tf.shape(x)[0] == self.config.batch_size ?!
### END YOUR CODE
with tf.variable_scope("RNN"):
for time_step in range(self.max_length):
### YOUR CODE HERE (~6-10 lines)
if time_step > 0:
tf.get_variable_scope().reuse_variables()
_,state = cell(x[:,time_step,:],state,scope = "RNN")
o_drop_t = tf.nn.dropout(state,keep_prob = 1 - self.dropout_placeholder)
y_t = tf.matmul(o_drop_t,U) + b_2
preds.append(y_t)
### END YOUR CODE
# Make sure to reshape @preds here.
### YOUR CODE HERE (~2-4 lines)
preds = tf.stack(preds)
preds = tf.reshape(preds,shape = (-1,self.max_length,self.config.n_classes))
### END YOUR CODE
assert preds.get_shape().as_list() == [None, self.max_length, self.config.n_classes], "predictions are not of the right shape. Expected {}, got {}".format([None, self.max_length, self.config.n_classes], preds.get_shape().as_list())
return preds
def add_loss_op(self, preds):
"""Adds Ops for the loss function to the computational graph.
TODO: Compute averaged cross entropy loss for the predictions.
Importantly, you must ignore the loss for any masked tokens.
Hint: You might find tf.boolean_mask useful to mask the losses on masked tokens.
Hint: You can use tf.nn.sparse_softmax_cross_entropy_with_logits to simplify your
implementation. You might find tf.reduce_mean useful.
Args:
pred: A tensor of shape (batch_size, max_length, n_classes) containing the output of the neural
network before the softmax layer.
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE (~2-4 lines)
        preds_masked = tf.boolean_mask(preds, self.mask_placeholder)
        y_masked = tf.boolean_mask(self.labels_placeholder, self.mask_placeholder)
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_masked, logits=preds_masked)
loss = tf.reduce_mean(loss)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE (~1-2 lines)
train_op = tf.train.AdamOptimizer(learning_rate = self.config.lr).minimize(loss = loss)
### END YOUR CODE
return train_op
def preprocess_sequence_data(self, examples):
def featurize_windows(data, start, end, window_size = 1):
"""Uses the input sequences in @data to construct new windowed data points.
"""
ret = []
for sentence, labels in data:
from util import window_iterator
sentence_ = []
for window in window_iterator(sentence, window_size, beg=start, end=end):
sentence_.append(sum(window, []))
ret.append((sentence_, labels))
return ret
examples = featurize_windows(examples, self.helper.START, self.helper.END)
return pad_sequences(examples, self.max_length)
def consolidate_predictions(self, examples_raw, examples, preds):
"""Batch the predictions into groups of sentence length.
"""
assert len(examples_raw) == len(examples)
assert len(examples_raw) == len(preds)
ret = []
for i, (sentence, labels) in enumerate(examples_raw):
_, _, mask = examples[i]
labels_ = [l for l, m in zip(preds[i], mask) if m] # only select elements of mask.
assert len(labels_) == len(labels)
ret.append([sentence, labels, labels_])
return ret
def predict_on_batch(self, sess, inputs_batch, mask_batch):
feed = self.create_feed_dict(inputs_batch=inputs_batch, mask_batch=mask_batch)
predictions = sess.run(tf.argmax(self.pred, axis=2), feed_dict=feed)
return predictions
def train_on_batch(self, sess, inputs_batch, labels_batch, mask_batch):
feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch, mask_batch=mask_batch,
dropout=Config.dropout)
_, loss = sess.run([self.train_op, self.loss], feed_dict=feed)
return loss
def __init__(self, helper, config, pretrained_embeddings, report=None):
super(RNNModel, self).__init__(helper, config, report)
self.max_length = min(Config.max_length, helper.max_length)
Config.max_length = self.max_length # Just in case people make a mistake.
self.pretrained_embeddings = pretrained_embeddings
# Defining placeholders.
self.input_placeholder = None
self.labels_placeholder = None
self.mask_placeholder = None
self.dropout_placeholder = None
self.build()
def test_pad_sequences():
Config.n_features = 2
data = [
([[4,1], [6,0], [7,0]], [1, 0, 0]),
([[3,0], [3,4], [4,5], [5,3], [3,4]], [0, 1, 0, 2, 3]),
]
ret = [
([[4,1], [6,0], [7,0], [0,0]], [1, 0, 0, 4], [True, True, True, False]),
([[3,0], [3,4], [4,5], [5,3]], [0, 1, 0, 2], [True, True, True, True])
]
ret_ = pad_sequences(data, 4)
assert len(ret_) == 2, "Did not process all examples: expected {} results, but got {}.".format(2, len(ret_))
for i in range(2):
assert len(ret_[i]) == 3, "Did not populate return values corrected: expected {} items, but got {}.".format(3, len(ret_[i]))
for j in range(3):
assert ret_[i][j] == ret[i][j], "Expected {}, but got {} for {}-th entry of {}-th example".format(ret[i][j], ret_[i][j], j, i)
def do_test1(_):
logger.info("Testing pad_sequences")
test_pad_sequences()
logger.info("Passed!")
def do_test2(args):
logger.info("Testing implementation of RNNModel")
config = Config(args)
helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
embeddings = load_embeddings(args, helper)
config.embed_size = embeddings.shape[1]
with tf.Graph().as_default():
logger.info("Building model...",)
start = time.time()
model = RNNModel(helper, config, embeddings)
logger.info("took %.2f seconds", time.time() - start)
init = tf.global_variables_initializer()
saver = None
with tf.Session() as session:
session.run(init)
model.fit(session, saver, train, dev)
logger.info("Model did not crash!")
logger.info("Passed!")
def do_train(args):
# Set up some parameters.
config = Config(args)
helper, train, dev, train_raw, dev_raw = load_and_preprocess_data(args)
embeddings = load_embeddings(args, helper)
config.embed_size = embeddings.shape[1]
helper.save(config.output_path)
handler = logging.FileHandler(config.log_output)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
report = None #Report(Config.eval_output)
with tf.Graph().as_default():
logger.info("Building model...",)
start = time.time()
model = RNNModel(helper, config, embeddings)
logger.info("took %.2f seconds", time.time() - start)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as session:
session.run(init)
model.fit(session, saver, train, dev)
if report:
report.log_output(model.output(session, dev_raw))
report.save()
else:
# Save predictions in a text file.
output = model.output(session, dev_raw)
sentences, labels, predictions = zip(*output)
predictions = [[LBLS[l] for l in preds] for preds in predictions]
output = zip(sentences, labels, predictions)
with open(model.config.conll_output, 'w') as f:
write_conll(f, output)
with open(model.config.eval_output, 'w') as f:
for sentence, labels, predictions in output:
print_sentence(f, sentence, labels, predictions)
def do_evaluate(args):
config = Config(args)
helper = ModelHelper.load(args.model_path)
input_data = read_conll(args.data)
embeddings = load_embeddings(args, helper)
config.embed_size = embeddings.shape[1]
with tf.Graph().as_default():
logger.info("Building model...",)
start = time.time()
model = RNNModel(helper, config, embeddings)
logger.info("took %.2f seconds", time.time() - start)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as session:
session.run(init)
saver.restore(session, model.config.model_output)
for sentence, labels, predictions in model.output(session, input_data):
predictions = [LBLS[l] for l in predictions]
print_sentence(args.output, sentence, labels, predictions)
def do_shell(args):
config = Config(args)
helper = ModelHelper.load(args.model_path)
embeddings = load_embeddings(args, helper)
config.embed_size = embeddings.shape[1]
with tf.Graph().as_default():
logger.info("Building model...",)
start = time.time()
model = RNNModel(helper, config, embeddings)
logger.info("took %.2f seconds", time.time() - start)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as session:
session.run(init)
saver.restore(session, model.config.model_output)
print("""Welcome!
You can use this shell to explore the behavior of your model.
Please enter sentences with spaces between tokens, e.g.,
input> Germany 's representative to the European Union 's veterinary committee .
""")
while True:
# Create simple REPL
try:
sentence = raw_input("input> ")
tokens = sentence.strip().split(" ")
for sentence, _, predictions in model.output(session, [(tokens, ["O"] * len(tokens))]):
predictions = [LBLS[l] for l in predictions]
print_sentence(sys.stdout, sentence, [""] * len(tokens), predictions)
except EOFError:
print("Closing session.")
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Trains and tests an NER model')
subparsers = parser.add_subparsers()
command_parser = subparsers.add_parser('test1', help='')
command_parser.set_defaults(func=do_test1)
command_parser = subparsers.add_parser('test2', help='')
command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="D:/Github_code/new/cs224n/assignment3/data/tiny.conll", help="Training data")
command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="D:/Github_code/new/cs224n/assignment3/data/tiny.conll", help="Dev data")
command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="D:/Github_code/new/cs224n/assignment3/data/vocab.txt", help="Path to vocabulary file")
command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="D:/Github_code/new/cs224n/assignment3/data/wordVectors.txt", help="Path to word vectors file")
command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
command_parser.set_defaults(func=do_test2)
command_parser = subparsers.add_parser('train', help='')
command_parser.add_argument('-dt', '--data-train', type=argparse.FileType('r'), default="data/train.conll", help="Training data")
command_parser.add_argument('-dd', '--data-dev', type=argparse.FileType('r'), default="data/dev.conll", help="Dev data")
command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
command_parser.set_defaults(func=do_train)
command_parser = subparsers.add_parser('evaluate', help='')
command_parser.add_argument('-d', '--data', type=argparse.FileType('r'), default="data/dev.conll", help="Training data")
command_parser.add_argument('-m', '--model-path', help="Training data")
command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
command_parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help="Training data")
command_parser.set_defaults(func=do_evaluate)
command_parser = subparsers.add_parser('shell', help='')
command_parser.add_argument('-m', '--model-path', help="Training data")
command_parser.add_argument('-v', '--vocab', type=argparse.FileType('r'), default="data/vocab.txt", help="Path to vocabulary file")
command_parser.add_argument('-vv', '--vectors', type=argparse.FileType('r'), default="data/wordVectors.txt", help="Path to word vectors file")
command_parser.add_argument('-c', '--cell', choices=["rnn", "gru"], default="rnn", help="Type of RNN cell to use.")
command_parser.set_defaults(func=do_shell)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
|
[
"haorangu93@hotmail.com"
] |
haorangu93@hotmail.com
|
02a552e03cd80033dca15ca2ee26e699ec517010
|
b9ea2504f4b2118f0722d22df29c1ddd7b391e79
|
/exhaustive_mixed_fixed_data_gaussian_var_impt.py
|
2cf5a9c1e83a602329faa7da902e4fbeb6cea920
|
[] |
no_license
|
patrickvossler18/ps_job
|
ecf181f4ccc1461fd8833fc6ee83074fe507a59e
|
ba170d1d8883e4abfea7109e20d978f19269cf23
|
refs/heads/master
| 2020-04-14T18:41:45.350947
| 2020-01-29T01:34:48
| 2020-01-29T01:34:48
| 164,029,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
import numpy as np
import pandas as pd
from DeepKnockoffs import KnockoffMachine
from DeepKnockoffs import GaussianKnockoffs
import sys
sys.path.insert(1, "/home/pvossler/ps_job")
# sys.path.append("/home/pvossler/ps_job")
import data
import parameters
from sklearn.covariance import MinCovDet, LedoitWolf
import utils
import datetime
training_params = parameters.GetTrainingHyperParams(model)
# training_params['LAMBDA'] = 0.0078
# training_params['DELTA'] = 0.0078
training_params['LAMBDA'] = lambda_val
training_params['DELTA'] = delta_val
p = X_train.shape[1]
print(X_train.shape)
chunk_list = [num_cuts] * (ncat)
# Set the parameters for training deep knockoffs
pars = dict()
pars['avg_corr'] = avg_corr
pars['avg_corr_cat'] = avg_corr_cat
pars['avg_corr_cont'] = avg_corr_cont
# Number of epochs
pars['epochs'] = 15
# Number of iterations over the full data per epoch
pars['epoch_length'] = 100
# Data type, either "continuous" or "binary"
pars['family'] = "continuous"
# Dimensions of the data
pars['p'] = p
# List of categorical variables
pars['cat_var_idx'] = np.arange(0, (ncat * (num_cuts)))
# Number of discrete variables
pars['ncat'] = ncat
# Number of categories
pars['num_cuts'] = num_cuts
# Number of categories for each categorical variable
pars['chunk_list'] = chunk_list
# Size of regularizer
# pars['regularizer'] = grid_results[0]
# Boolean for using different weighting structure for decorr
pars['use_weighting'] = False
# Multiplier for weighting discrete variables
pars['kappa'] = 1
# Boolean for using the different decorr loss function from the paper
pars['diff_decorr'] = False
# Boolean for using mixed data in forward function
pars['mixed_data'] = True
# Size of the test set
pars['test_size'] = 0
# Batch size
pars['batch_size'] = int(0.5*n)
# Learning rate
pars['lr'] = 0.01
# When to decrease learning rate (unused when equal to number of epochs)
pars['lr_milestones'] = [pars['epochs']]
# Width of the network (number of layers is fixed to 6)
pars['dim_h'] = int(10*p)
# Penalty for the MMD distance
pars['GAMMA'] = training_params['GAMMA']
# Penalty encouraging second-order knockoffs
pars['LAMBDA'] = training_params['LAMBDA']
# Decorrelation penalty hyperparameter
pars['DELTA'] = training_params['DELTA']
# Target pairwise correlations between variables and knockoffs
pars['target_corr'] = corr_g
# Kernel widths for the MMD measure (uniform weights)
pars['alphas'] = [1., 2., 4., 8., 16., 32., 64., 128.]
pars_name = MODEL_DIRECTORY + 'pars' + '_p_' + str(p_size) + timestamp + '.npy'
# Save parameters
np.save(pars_name, pars)
# Where to store the machine
checkpoint_name = MODEL_DIRECTORY + model + timestamp + '_p_' + str(p_size)
# Where to print progress information
logs_name = MODEL_DIRECTORY + model + timestamp + '_p_' + str(p_size) + "_progress.txt"
# Initialize the machine
machine = KnockoffMachine(pars, checkpoint_name=checkpoint_name, logs_name=logs_name)
# Train the machine
machine.train(X_train)
print(timestamp)
|
[
"patrick.vossler18@gmail.com"
] |
patrick.vossler18@gmail.com
|
ac00c1af94ea642896b57f7c815bbdbb346a67a4
|
16a8060f641e94a79aad3b70004d54eab812780f
|
/superlists/settings.py
|
f0588697223403b0c925f2aa3252647f50712c48
|
[] |
no_license
|
kayushka/TDD
|
65214550e7269fb4e2b8c392a2fdf426a37541b9
|
1340ac83465359a6ada483acd7028bea966a6c45
|
refs/heads/master
| 2020-04-26T05:05:12.308644
| 2019-03-28T11:22:26
| 2019-03-28T11:22:26
| 173,322,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
"""
Django settings for superlists project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2f6u56&p&o(nk9n+(zy)cwk-4otm8lvnwskszqkq(g5j&k2fe#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lists',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'superlists.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'superlists.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '../database/db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, '../static'))
|
[
"machal.kaja@gmail.com"
] |
machal.kaja@gmail.com
|
308265d321cee40072f7e45374d65e6f3a7d9041
|
9302e92395e6fe35c00a565eb317d62c15ebaf4b
|
/converting.py
|
1f894d79232545e84e21b090989083c5b23c2e32
|
[] |
no_license
|
kollaa/python-milestone1
|
053135314921a1f7fd91061483a83a7f101d339a
|
425ab992c99545708955d3b8eaf745a79f754abf
|
refs/heads/main
| 2023-01-02T11:22:47.783068
| 2020-10-21T04:55:37
| 2020-10-21T04:55:37
| 305,290,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,071
|
py
|
from docx2pdf import convert
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import csv
import pandas as pd
import json
import os
import win32com.client
import pythoncom
from pywintypes import com_error
app = Flask(__name__)
api = Api(app)
class ConvertingDocx(Resource):
def get(self,file_name):
pythoncom.CoInitialize()
save_path = os.getcwd()
WB_PATH = file_name
BASE_NAME = os.path.basename(WB_PATH)
print(BASE_NAME)
PATH_TO_PDF = WB_PATH[0:WB_PATH.find(BASE_NAME)] + "new" + BASE_NAME[0:-5] + ".pdf"
completeName = os.path.join(save_path,PATH_TO_PDF)
print(PATH_TO_PDF)
convert(file_name, completeName)
return 'converting docx to pdf is done', 200
class ConvertingXlsx(Resource):
def get(self,file_name):
pythoncom.CoInitialize()
save_path = os.getcwd()
WB_PATH = file_name
newpath = os.path.abspath(file_name)
BASE_NAME = os.path.basename(WB_PATH)
PATH_TO_PDF = WB_PATH[0:WB_PATH.find(BASE_NAME)] + "new" + BASE_NAME[0:-5] + ".pdf"
completeName = os.path.join(save_path,PATH_TO_PDF)
excel = win32com.client.Dispatch("Excel.Application")
#app.logger.info(excel)
excel.Visible = False
try:
print('Start conversion to PDF')
wbs = excel.Workbooks.Open(newpath)
ws_index_list = [1]
wbs.WorkSheets(ws_index_list).Select()
wbs.ActiveSheet.ExportAsFixedFormat(0, completeName)
except com_error as e:
print('failed.')
else:
print('Succeeded.')
finally:
wbs.Close()
excel.Quit()
return 'converting xlsx to pdf is done', 200
api.add_resource(ConvertingDocx, '/convertingdocx/<string:file_name>')
api.add_resource(ConvertingXlsx, '/convertingxlsx/<string:file_name>')
if __name__ == '__main__':
app.run(port = 5002, debug = True )
|
[
"noreply@github.com"
] |
kollaa.noreply@github.com
|
bc538e6823fea20fa055d230beca6fb129d1ec17
|
38b6e0c4ba6dce2d87fdf53ca1fc92014f86ae3f
|
/Parser.py
|
9fd478a4f52e3f83e2e0c44364eb541c9bdbcc4d
|
[] |
no_license
|
Arcensoth/MCC
|
f0068d2cd362658001b38c7b1aaf23a85b203257
|
357375473761d869d885ba9ea931b60b32f33e8a
|
refs/heads/master
| 2020-03-28T11:47:48.259585
| 2018-08-31T22:09:40
| 2018-08-31T22:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,961
|
py
|
import sublime, re
from .Blocks import BLOCKS
from .Items import ITEMS
from .Data import *
from .CommandTree import COMMAND_TREE
class Parser:
add_regions_flags = sublime.DRAW_NO_OUTLINE
regex = {
"axes" : re.compile("[xyz]+"),
"click_event_action": re.compile("(?:run|suggest)_command|open_url|change_page"),
"color" : re.compile("none|black|dark_blue|dark_green|dark_aqua|dark_red|dark_purple|gold|gray|dark_gray|blue|green|aqua|red|light_purple|yellow|white"),
"command" : re.compile('[\t ]*(/?)([a-z]+)'),
"comment" : re.compile('^[\t ]*#.*$'),
"entity_anchor" : re.compile("feet|eyes"),
"entity_tag_advancement_key" : re.compile("([a-z_\-1-9]+:)?(\w+)[\t ]*(=)"),
"entity_tag_key" : re.compile("(\w+)[\t ]*(=)"),
"float" : re.compile("-?(\d+(\.\d+)?|\.\d+)"),
"gamemode" : re.compile("survival|creative|adventure|spectator"),
"greedy_string" : re.compile(".*$"),
"hex4" : re.compile("[0-9a-fA-F]{4}"),
"hover_event_action" : re.compile("show_(?:text|item|entity|achievement)"),
"integer" : re.compile("-?\d+"),
"item_block_id" : re.compile("(#?[a-z_]+:)?([a-z_]+)"),
"item_slot" : re.compile("armor\.(?:chest|feet|head|legs)|container\.(5[0-3]|[1-4]?\d)|(enderchest|inventory)\.(2[0-6]|1?\d)|horse\.(\d|1[0-4]|armor|chest|saddle)|hotbar\.[0-8]|village\.[0-7]|weapon(?:\.mainhand|\.offhand)?"),
"namespace" : re.compile("(#?[a-z_\-0-9\.]+:)([a-z_\-0-9\.]+(?:\/[a-z_\-0-9\.]+)*)(\/?)"),
"nbt_key" : re.compile("(\w+)[\t ]*:"),
"operation" : re.compile("[+\-\*\%\/]?=|>?<|>"),
"position-2" : re.compile("(~?-?\d*\.?\d+|~)[\t ]+(~?-?\d*\.?\d+|~)"),
"position-3" : re.compile("([~\^]?-?\d*\.?\d+|[~\^])[\t ]+([~\^]?-?\d*\.?\d+|[~\^])[\t ]+([~\^]?-?\d*\.?\d+|[~\^])"),
"resource_location" : re.compile("([\w\.]+:)?([\w\.]+)"),
"scoreboard_slot" : re.compile("belowName|list|sidebar(?:.team.(?:black|dark_blue|dark_green|dark_aqua|dark_red|dark_purple|gold|gray|dark_gray|blue|green|aqua|red|light_purple|yellow|white))?"),
"sort" : re.compile("nearest|furthest|random|arbitrary"),
"username" : re.compile("[\w\(\)\.\<\>_\-]+"),
"vec4" : re.compile("((?:\d*\.)?\d+)[\t ]+((?:\d*\.)?\d+)[\t ]+((?:\d*\.)?\d+)[\t ]+((?:\d*\.)?\d+)"),
"word_string" : re.compile("\w+"),
"white_space" : re.compile("^\s+$")
}
def __init__(self, view):
self.current = 0
self.view = view
self.mcccomment = []
self.mcccommand = []
self.mccconstant = []
self.mccstring = []
self.mccentity = []
self.mccliteral = []
self.invalid = []
#The order of this list corresponds to the ordering of nbt_tag_lists.
# The tuples are ordered like this
# (isList, parser, item suffix scope, item suffix)
self.nbt_value_parsers = [
(True, self.string_parser, None, ""),
(True, self.float_parser, self.mccconstant, "d"),
(True, self.integer_parser, None, ""),
(False, self.float_parser, self.mccconstant, "d"),
(True, self.nbt_parser, None, ""),
(True, self.float_parser, self.mccconstant, "f"),
(False, self.float_parser, self.mccconstant, "f"),
(False, self.integer_parser, self.mccconstant, "L"),
(False, self.integer_parser, self.mccconstant, "s"),
(False, self.string_parser, None, ""),
(False, self.nbt_parser, None, ""),
(False, self.nbt_byte_parser, None, ""),
(False, self.integer_parser, None, ""),
(False, self.json_in_nbt_parser, None, ""),
(True, self.json_in_nbt_parser, None, ""),
(False, self.nbt_tags_parser, None, "")
]
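        # Illustrative reading of the table above (added note): for example, the entry
        # (False, self.float_parser, self.mccconstant, "d") describes a scalar NBT double --
        # the value itself is parsed by float_parser and a trailing "d" suffix is scoped as
        # mccconstant -- while the corresponding (True, ...) entry handles a list of doubles.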
def score_parser(properties):
return self.nested_entity_tag_parser(self.int_range_parser, do_nested=False, properties=properties)
def advancement_parser(properties):
return self.nested_entity_tag_parser(self.boolean_parser, do_nested=True)
def name_or_string_parser(properties):
start = self.current
self.current = self.username_parser(properties)
if start != self.current:
return self.current
old_string_type = properties["type"]
properties["type"] = "strict"
self.current = self.string_parser(properties)
properties["type"] = old_string_type
return self.current
# Data for target selector parsing
# order for tuple:
# (isNegatable, isRange, parser)
self.target_selector_value_parsers = [
(False, True, self.integer_parser),
(False, False, self.integer_parser),
(False, True, self.float_parser),
(True, False, name_or_string_parser),
(True, False, self.gamemode_parser),
(True, False, self.sort_parser),
(True, False, self.entity_location_parser),
(False, False, score_parser),
(False, False, advancement_parser),
(False, False, self.nbt_parser)
]
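        # Illustrative reading (added note): each tuple lines up with the key list at the same
        # index in TARGET_KEY_LISTS; e.g. (True, False, self.gamemode_parser) means the value
        # may be prefixed with "!" to negate it but is not parsed as a range, whereas
        # (False, True, self.integer_parser) allows an integer range such as 1..5.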
def add_regions(self):
self.view.add_regions("mcccomment", self.mcccomment, "mcccomment", flags=self.add_regions_flags)
self.view.add_regions("mcccommand", self.mcccommand, "mcccommand", flags=self.add_regions_flags)
self.view.add_regions("mccconstant", self.mccconstant, "mccconstant", flags=self.add_regions_flags)
self.view.add_regions("mccstring", self.mccstring, "mccstring", flags=self.add_regions_flags)
self.view.add_regions("mccentity", self.mccentity, "mccentity", flags=self.add_regions_flags)
self.view.add_regions("mccliteral", self.mccliteral, "mccliteral", flags=self.add_regions_flags)
self.view.add_regions("invalid", self.invalid, "invalid.illegal", flags=self.add_regions_flags)
def append_region(self, region_list, start, end):
region_list.append(sublime.Region(self.region_begin + start, self.region_begin + end))
def highlight(self, command_tree, line_region, current):
self.current = current
if ("redirect" in command_tree):
redirect_command = command_tree["redirect"][0]
if redirect_command == "root":
new_command_tree = COMMAND_TREE
else:
new_command_tree = COMMAND_TREE["children"][redirect_command]
#print("Redirecting to: " + redirect_command + ", " + str(self.current))
return self.highlight(new_command_tree, line_region, self.current)
elif not "children" in command_tree or self.current >= line_region.size():
if not "executable" in command_tree or not command_tree["executable"]:
self.append_region(self.invalid, 0, line_region.size())
self.current = self.region.size()
return False
else:
while (self.current < len(self.string) and self.string[self.current] in " \t"):
self.current += 1
if self.current < line_region.size():
self.append_region(self.invalid, self.current, line_region.size())
self.current = line_region.size()
return False
return True
self.string = self.view.substr(line_region)
if self.regex["white_space"].match(self.string):
return True
self.region = line_region
self.region_begin = self.region.begin()
comment_match = self.regex["comment"].match(self.string, self.current)
if comment_match:
self.append_region(self.mcccomment, comment_match.start(), comment_match.end())
self.current = comment_match.end()
return True
elif command_tree["type"] == "root":
command_match = self.regex["command"].match(self.string, self.current)
if not command_match:
self.append_region(self.invalid, 0, line_region.size())
return False
command = command_match.group(2)
#print("command: " + command)
if command in command_tree["children"]:
self.append_region(self.invalid, command_match.start(1), command_match.end(1))
self.current = command_match.end(2)
if self.highlight(command_tree["children"][command], line_region, command_match.end()):
self.append_region(self.mcccommand, command_match.start(2), command_match.end(2))
return True
else:
self.append_region(self.invalid, command_match.start(2), command_match.end(2))
return False
else:
self.append_region(self.invalid, 0, line_region.size())
return False
else:
was_space = False
while (self.current < len(self.string) and self.string[self.current] in " \t"):
self.current += 1
was_space = True
if self.current >= len(self.string):
if not "executable" in command_tree or not command_tree["executable"]:
return False
else:
return True
elif not was_space:
return False
start = self.current
for key, properties in command_tree["children"].items():
if properties["type"] == "literal" and self.string.startswith(key, self.current):
self.append_region(self.mccliteral, self.current, self.current + len(key))
self.current += len(key)
success = self.highlight(properties, line_region, self.current)
if success:
return True
else:
self.current = start
self.mccliteral.pop()
elif properties["type"] == "argument":
parser_name = properties["parser"]
parse_function = self.parsers[parser_name]
old_current = self.current
if "properties" in properties:
#print("using properties for " + parser_name)
self.current = parse_function(self, properties["properties"])
else:
self.current = parse_function(self)
if old_current != self.current:
success = self.highlight(properties, line_region, self.current)
if success:
return True
else:
self.invalid.pop()
self.current = start
while (self.current < len(self.string) and self.string[self.current] in " \t"):
self.current += 1
if self.current < line_region.size():
self.append_region(self.invalid, self.current, line_region.size())
self.current = line_region.size()
if not "executable" in properties or not properties["executable"]:
return False
else:
return True
    # Returns True if the end of the string is reached, else False and will advance self.current to the next non-whitespace character
# this will error highlight the section from err_start until the end of the string
def skip_whitespace(self, err_start):
start = self.current
if self.current >= len(self.string):
return True
while self.string[self.current] in " \t":
self.current += 1
if self.current >= len(self.string):
self.current = start
return True
return False
def entity_parser(self, properties={}):
start = self.current
self.current = self.target_selector_parser(properties)
if start != self.current:
return self.current
return self.username_parser(properties)
def target_selector_parser(self, properties={}):
if self.current >= len(self.string):
return self.current
if self.string[self.current] == "*" and "amount" in properties and properties["amount"] == "multiple":
self.append_region(self.mccentity, self.current, self.current + 1)
return self.current + 1
if self.string[self.current] != "@" or self.current + 1 >= len(self.string) or not self.string[self.current+1] in "pears": #Checks to see if it's a valid entity selector
return self.current
self.append_region(self.mccentity, self.current, self.current + 2)
self.current += 2
if (self.current < len(self.string) and self.string[self.current] == "["):
self.append_region(self.mccentity, self.current, self.current + 1)
self.current += 1
continue_parsing = True
while continue_parsing:
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
start_of_key = self.current
key_match = self.regex["entity_tag_key"].match(self.string, self.current)
if not key_match:
return self.current
key = key_match.group(1)
self.append_region(self.mcccommand, key_match.start(2), key_match.end(2))
self.append_region(self.mccstring, key_match.start(1), key_match.end(1))
self.current = key_match.end(2)
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
self.mcccommand.pop()
self.mccstring.pop()
return start_of_key
properties["min"] = 0
matched = False
for i in range(len(TARGET_KEY_LISTS)):
if key in TARGET_KEY_LISTS[i]:
isNegatable, isRange, parser = self.target_selector_value_parsers[i]
if isNegatable and self.string[self.current] == "!":
self.append_region(self.mcccommand, self.current, self.current + 1)
self.current += 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return start_of_key
old_current = self.current
if isRange:
self.current = self.range_parser(parser, {})
else:
self.current = parser(properties)
if old_current != self.current:
matched = True
break
if not matched:
self.append_region(self.invalid, start_of_key, self.current)
return self.current + 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] == "]":
continue_parsing = False
else:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.append_region(self.mccentity, self.current, self.current + 1)
return self.current + 1
return self.current
def int_range_parser(self, properties={}):
return self.range_parser(self.integer_parser, properties)
def range_parser(self, parse_function, properties={}):
matched = False
start = self.current
self.current = parse_function(properties)
if start != self.current:
matched = True
if self.current + 2 <= len(self.string) and self.string[self.current:self.current + 2] == "..":
self.append_region(self.mcccommand, self.current, self.current + 2)
self.current += 2
start = self.current
self.current = parse_function(properties)
if start != self.current:
matched = True
if not matched:
return start
return self.current
def nested_entity_tag_parser(self, parser, do_nested=False, properties={}): # scores= and advancements=
if self.string[self.current] != "{":
return self.current
elif "min" in properties:
old_min = properties["min"]
properties.pop("min")
else:
old_min = None
bracket_start = self.current
self.current += 1
continue_parsing = True
while continue_parsing:
reached_end = self.skip_whitespace(self.current)
if reached_end:
if old_min != None:
properties["min"] = old_min
return self.current
start_of_key = self.current
key_match = self.regex["entity_tag_advancement_key"].match(self.string, self.current)
if not key_match:
if old_min != None:
properties["min"] = old_min
return self.current
elif not do_nested and key_match.group(1): # If theres a nested tag where there shouldn't be
self.append_region(self.invalid, self.current, key_match.end())
self.current = key_match.end()
if old_min != None:
properties["min"] = old_min
return self.current
self.append_region(self.mccstring, key_match.start(2), key_match.end(2))
self.append_region(self.mcccommand, key_match.start(3), key_match.end(3))
self.current = key_match.end()
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
if old_min != None:
properties["min"] = old_min
return self.current
if key_match.group(1) != None:
self.append_region(self.mccliteral, key_match.start(1), key_match.end(1))
self.current = self.nested_entity_tag_parser(parser, do_nested=False, properties=properties)
if self.string[self.current - 1] != "}": #This tests to see if the parse was successful
if old_min != None:
properties["min"] = old_min
return self.current
else:
old_current = self.current
self.current = parser(properties)
if old_current == self.current:
self.mccstring.pop()
self.mcccommand.pop()
if old_min != None:
properties["min"] = old_min
return self.current
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
if old_min != None:
properties["min"] = old_min
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
if old_min != None:
properties["min"] = old_min
return self.current + 1
else:
continue_parsing = False
self.current += 1
if old_min != None:
properties["min"] = old_min
return self.current
# Word means "up to the next space", phrase is "an unquoted word or quoted string", and greedy is "everything from this point to the end of input".
# strict means only a regular quote enclosed string will work
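    # Illustrative examples (added note): given the input foo bar, {"type": "word"} matches
    # only foo, {"type": "greedy"} consumes everything to the end of the line, and
    # {"type": "strict"} accepts only a double-quoted string such as "foo bar", with the
    # escape handling implemented below.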
def string_parser(self, properties={}):
if self.current >= len(self.string):
return self.current
if not "escape_depth" in properties:
escape_depth = 0
else:
escape_depth = properties["escape_depth"]
if properties["type"] == "word":
old_current = self.current
self.current = self.regex_parser(self.regex["word_string"], [self.mccstring])
if old_current != self.current:
return self.current
if properties["type"] == "greedy":
old_current = self.current
self.current = self.regex_parser(self.regex["greedy_string"], [self.mccstring])
elif properties["type"] in {"strict", "word"}:
quote = self.generate_quote(escape_depth)
escape = self.generate_quote(escape_depth + 1)[:-1] # Gets the needed backslashes to escape
string_start = self.current
start = self.current
if not self.string.startswith(quote, self.current):
return self.current
self.current += len(quote)
continue_parsing = True
while continue_parsing:
if self.current >= len(self.string):
self.append_region(self.mccstring, start, self.current - 1)
self.append_region(self.invalid, self.current - 1, self.current)
return self.current
elif self.string.startswith(quote, self.current):
self.append_region(self.mccstring, start, self.current + len(quote))
self.current += len(quote)
continue_parsing = False
elif self.string.startswith(escape, self.current) and self.current + len(escape) < len(self.string):
escape_char = self.string[self.current + len(escape)]
if escape_char in "\"\\/bfnrt":
if self.current - start > 0:
self.append_region(self.mccstring, start, self.current)
self.append_region(self.mccconstant, self.current, self.current + len(escape) + 1)
self.current += len(escape) + 1
start = self.current
elif escape_char == "u":
if self.current - start > 0:
self.append_region(self.mccstring, start, self.current)
hex_match = self.regex["hex4"].match(self.string, self.current + len(escape) + 1)
if not hex_match:
self.append_region(self.mccstring, start, self.current - 1)
self.append_region(self.invalid, self.current, self.current + len(escape) + 1)
return self.current + len(escape) + 1
self.append_region(self.mccconstant, self.current, self.current + len(escape) + 5)
self.current += len(escape) + 5
start = self.current
else:
self.append_region(self.mccstring, start, self.current - 1)
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
elif self.string[self.current] in "\"\\":
self.append_region(self.mccstring, start, self.current - 1)
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
else:
self.current += 1
return self.current
# Todo: add entity highlighting
def message_parser(self, properties={}):
self.append_region(self.mccstring, self.current, self.region.size())
return len(self.string)
def nbt_parser(self, properties={}):
if self.current >= len(self.string) or self.string[self.current] != "{":
return self.current
elif not "escape_depth" in properties:
properties["escape_depth"] = 0
braces_start = self.current
self.current += 1
nbt_value_parsers = self.nbt_value_parsers
while self.string[self.current] != "}":
reached_end = self.skip_whitespace(braces_start)
if reached_end:
return self.current
start_of_key = self.current
key_match = self.regex["nbt_key"].match(self.string, self.current)
if not key_match:
if self.current < len(self.string):
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
key = key_match.group(1)
self.append_region(self.mccstring, key_match.start(1), key_match.end(1))
self.current = key_match.end()
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
old_type = None
if "type" in properties:
old_type = properties["type"]
properties["type"] = "word"
matched = False
for i in range(len(NBT_KEY_LISTS)):
keys = NBT_KEY_LISTS[i]
if key in keys:
is_list, value_parser, suffix_scope, suffix = nbt_value_parsers[i]
old_current = self.current
if is_list:
self.current = self.nbt_list_parser(value_parser, suffix_scope, suffix, properties)
else:
self.current = self.nbt_value_parser(value_parser, suffix_scope, suffix, properties)
if old_current != self.current:
matched = True
break
if old_type != None:
properties["type"] = old_type
if not matched:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.current += 1
return self.current
def nbt_tags_parser(self, properties={}):
if self.current >= len(self.string) or self.string[self.current] != "{":
return self.current
elif not "escape_depth" in properties:
properties["escape_depth"] = 0
braces_start = self.current
self.current += 1
while self.string[self.current] != "}":
reached_end = self.skip_whitespace(braces_start)
if reached_end:
return self.current
start_of_key = self.current
key_match = self.regex["nbt_key"].match(self.string, self.current)
if not key_match:
if self.current < len(self.string):
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.append_region(self.mccstring, key_match.start(1), key_match.end(1))
self.current = key_match.end()
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
start = self.current
self.current = self.nbt_value_parser(self.nbt_byte_parser, None, "", properties)
if start == self.current:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.current += 1
return self.current
def nbt_list_parser(self, item_parser, suffix_scope, item_suffix, properties={}):
if self.string[self.current] != "[":
return self.current
start_of_list = self.current
self.current += 1
continue_parsing = True
while continue_parsing:
reached_end = self.skip_whitespace(start_of_list)
if reached_end:
return start_of_list
start_of_value = self.current
self.current = self.nbt_value_parser(item_parser, suffix_scope, item_suffix, properties)
if start_of_value == self.current:
return start_of_list
reached_end = self.skip_whitespace(start_of_value)
if reached_end:
return start_of_list
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "]":
return start_of_list
else:
continue_parsing = False
self.current += 1
return self.current
def nbt_value_parser(self, parser, suffix_scope, suffix, properties={}):
start = self.current
self.current = parser(properties)
if start != self.current:
if (len(suffix) > 0 and
self.current + len(suffix) <= len(self.string) and
self.string[self.current:self.current + len(suffix)] == suffix):
self.append_region(suffix_scope, self.current, self.current + len(suffix))
return self.current + len(suffix)
return start
def nbt_byte_parser(self, properties={}):
start = self.current
self.current = self.integer_parser(properties)
if start != self.current:
if self.current < len(self.string) and self.string[self.current] == "b":
self.append_region(self.mccconstant, self.current, self.current + 1)
return self.current + 1
else:
return start
return self.boolean_parser(properties)
def integer_parser(self, properties={}):
integer_match = self.regex["integer"].match(self.string, self.current)
if integer_match:
value = int(integer_match.group())
if "min" in properties and value < properties["min"] or "max" in properties and value > properties["max"]:
self.append_region(self.invalid, integer_match.start(), integer_match.end())
else:
self.append_region(self.mccconstant, integer_match.start(), integer_match.end())
return integer_match.end()
return self.current
def block_parser(self, properties={}):
start = self.current
lenient = False
if self.current < len(self.string) and self.string[self.current] == "#":
lenient=True
self.current += 1
block_match = self.regex["item_block_id"].match(self.string, self.current)
if block_match:
if block_match.group(1) != None:
self.append_region(self.mccliteral, start, block_match.end(1))
elif self.string[start] == "#":
self.append_region(self.invalid, start, start+1)
self.append_region(self.mccstring, block_match.start(2), block_match.end(2))
self.current = block_match.end()
if block_match.start(1) == block_match.end(1):
block_name = "minecraft:" + block_match.group(2)
else:
block_name = block_match.group(0)
if block_name in BLOCKS and "properties" in BLOCKS[block_name]:
properties = BLOCKS[block_name]["properties"]
else:
properties = {}
if self.current >= len(self.string) or self.string[self.current] != "[":
return self.nbt_parser(properties)
start_of_bracket = self.current
self.current += 1
while self.string[self.current] != "]":
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
start_of_key = self.current
key_match = self.regex["entity_tag_key"].match(self.string, self.current)
if not key_match:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
key = key_match.group(1)
if lenient or key in properties:
self.append_region(self.mccstring, key_match.start(1), key_match.end(1))
else:
self.append_region(self.invalid, key_match.start(1), key_match.end(1))
self.append_region(self.mcccommand, key_match.start(2), key_match.end(2))
self.current = key_match.end()
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
value_match = self.regex["word_string"].match(self.string, self.current)
if not value_match:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
if lenient or (key in properties and value_match.group() in properties[key]):
self.append_region(self.mccstring, value_match.start(), value_match.end())
else:
self.append_region(self.invalid, value_match.start(), value_match.end())
self.current = value_match.end()
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "]":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.current += 1
return self.nbt_parser(properties)
return start
def nbt_path_parser(self, properties={}):
start = self.current
while self.current < len(self.string):
start_of_segment = self.current
old_current = self.current
self.current = self.string_parser({"type":"word"})
if self.current < len(self.string) and self.string[self.current] == "[":
self.current += 1
old_current = self.current
self.current = self.integer_parser({"min":0})
if old_current == self.current or (self.current < len(self.string) and self.string[self.current] != "]"):
return start
else:
self.current += 1
if self.current < len(self.string) and self.string[self.current] == "." and start_of_segment != self.current:
self.current += 1
else:
self.append_region(self.mccstring, start, self.current)
if start_of_segment == self.current and self.string[self.current - 1] == ".":
                    self.append_region(self.invalid, self.current - 1, self.current)
return self.current
return start
def float_parser(self, properties={}):
float_match = self.regex["float"].match(self.string, self.current)
if float_match:
value = float(float_match.group())
if ("min" in properties and value < properties["min"]) or ("max" in properties and value > properties["max"]):
self.append_region(self.invalid, float_match.start(), float_match.end())
else:
self.append_region(self.mccconstant, float_match.start(), float_match.end())
return float_match.end()
return self.current
def boolean_parser(self, properties={}):
if self.current + 4 <= len(self.string) and self.string[self.current:self.current+4] == "true":
self.append_region(self.mccconstant, self.current, self.current + 4)
return self.current + 4
elif self.current + 5 <= len(self.string) and self.string[self.current:self.current + 5] == "false":
self.append_region(self.mccconstant, self.current, self.current + 5)
return self.current + 5
return self.current
def axes_parser(self, properties={}):
axes = set("xyz")
axes_match = self.regex["axes"].match(self.string, self.current)
if axes_match and len(set(axes_match.group())) == len(axes_match.group()) and axes.issuperset(axes_match.group()):
self.append_region(self.mccconstant, self.current, axes_match.end())
return axes_match.end()
return self.current
def score_holder_parser(self, properties={}):
start = self.current
if self.string[self.current] == "#":
self.current = self.current + 1
username_parser = self.parsers["minecraft:game_profile"]
username_start = self.current
self.current = username_parser(self, properties)
if username_start != self.current:
self.append_region(self.mccstring, start, start + 1)
return self.current
return self.entity_parser(properties)
def particle_parser(self, properties={}):
particle_match = self.regex["item_block_id"].match(self.string, self.current)
if particle_match and particle_match.group(2) in PARTICLES and particle_match.group(1) in [None, "minecraft:"]:
self.append_region(self.mccliteral, particle_match.start(1), particle_match.end(1))
self.append_region(self.mccstring, particle_match.start(2), particle_match.end(2))
self.current = particle_match.end(2)
if particle_match.group(2) == "block" or particle_match.group(2) == "falling_dust":
self.skip_whitespace(self.current)
                return self.block_parser()
elif particle_match.group(2) == "item":
self.skip_whitespace(self.current)
                return self.item_parser()
elif particle_match.group(2) == "dust":
self.skip_whitespace(self.current)
return self.regex_parser(self.regex["vec4"], [self.mccconstant, self.mccconstant, self.mccconstant, self.mccconstant])
return self.current
# https://www.json.org/
def json_parser(self, properties={}):
if not "escape_depth" in properties:
properties["escape_depth"] = 0
if self.string[self.current] == "[":
return self.json_array_parser(properties)
elif self.string[self.current] == "{":
return self.json_object_parser(properties)
properties["type"] = "strict"
return self.string_parser(properties)
def json_object_parser(self, properties={}):# The '{}' one
if self.string[self.current] != "{":
return self.current
quote = self.generate_quote(properties["escape_depth"])
start_of_object = self.current
self.current += 1
finished_parsing = False
while not finished_parsing:
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
start_of_key = self.current
self.current = self.string_parser(properties={"type":"strict","escape_depth":properties["escape_depth"]})
if start_of_key == self.current:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
key = self.string[start_of_key + len(quote) : self.current - len(quote)]
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if not self.string[self.current] in ",:}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
elif self.string[self.current] == ":":
self.current += 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
matched = False
if key in JSON_STRING_KEYS:
start_of_value = self.current
self.current = self.string_parser(properties={"type":"strict","escape_depth":properties["escape_depth"]})
if start_of_value != self.current:
matched = True
if not matched and key in JSON_ENTITY_KEYS:
start_of_value = self.current
self.current = self.quoted_parser(self.entity_parser, properties)
if start_of_value != self.current:
matched = True
if not matched and key in JSON_BOOLEAN_KEYS:
start_of_value = self.current
self.current = self.boolean_parser(properties)
if start_of_value != self.current:
matched = True
if not matched and key in JSON_NESTED_KEYS:
self.current = self.json_parser(properties)
if not self.string[self.current - 1] in "}]":
return self.current
matched = True
if not matched and key == "color":
start_of_value = self.current
self.current = self.quoted_parser(self.color_parser, properties)
if start_of_value != self.current:
matched = True
if not matched and key == "clickEvent":
                    self.current = self.json_event_parser(self.regex["click_event_action"], properties)
if not self.string[self.current - 1] in "}":
return self.current
matched = True
if not matched and key == "hoverEvent":
                    self.current = self.json_event_parser(self.regex["hover_event_action"], properties)
if not self.string[self.current - 1] in "}":
return self.current
matched = True
if not matched and key == "score":
self.current = self.json_score_parser(properties)
if not self.string[self.current - 1] in "}":
return self.current
matched = True
if not matched:
self.mccstring.pop()
self.append_region(self.invalid, start_of_key, self.current)
return self.current
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
else:
finished_parsing = True
return self.current + 1
def json_array_parser(self, properties={}): # The '[]' one
if self.string[self.current] != "[":
return self.current
start_of_list = self.current
self.current += 1
def null_parser(properties={}):
if self.current + 4 < len(self.string) and self.string[self.current : self.current + 4] == "null":
self.append_region(self.mccconstant, self.current, self.current + 4)
self.current += 4
return self.current
possible_parsers = [
null_parser,
self.string_parser,
self.float_parser,
self.json_parser,
self.boolean_parser
]
old_type = None
if "type" in properties:
old_type = properties["type"]
properties["type"] = "strict"
continue_parsing = True
while continue_parsing:
reached_end = self.skip_whitespace(self.current)
if reached_end:
if old_type != None:
properties["type"] = old_type
return self.current
start_of_value = self.current
for parser in possible_parsers:
old_current = self.current
self.current = parser(properties)
if old_current != self.current:
break
if old_type != None:
properties["type"] = old_type
if start_of_value == self.current:
if self.current < len(self.string):
self.append_region(self.invalid, self.current, self.current + 1)
if old_type != None:
properties["type"] = old_type
return self.current
reached_end = self.skip_whitespace(start_of_value)
if reached_end:
if old_type != None:
properties["type"] = old_type
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "]":
if old_type != None:
properties["type"] = old_type
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
else:
continue_parsing = False
if old_type != None:
properties["type"] = old_type
self.current += 1
return self.current
def json_event_parser(self, action_regex, properties={}):
if self.string[self.current] != "{": #Can't be [] since it's an object
return self.current
        self.current += 1
quote = self.generate_quote(properties["escape_depth"])
start_of_object = self.current
while self.string[self.current] != "}":
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
start_of_key = self.current
self.current = self.string_parser(properties={"type":"strict","escape_depth":properties["escape_depth"]})
if start_of_key == self.current:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current+1
key = self.string[start_of_key + len(quote) : self.current - len(quote)]
reached_end = self.skip_whitespace(start_of_object)
if reached_end:
return self.current
if self.string[self.current] != ":":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.current += 1
reached_end = self.skip_whitespace(start_of_key)
if reached_end:
return self.current
success = False
if key == "action":
def action_parser(properties={}):
return self.regex_parser(action_regex, [self.mccstring])
start_of_value = self.current
self.current = self.quoted_parser(action_parser)
if start_of_value != self.current:
success = True
if key == "value":
start_of_value = self.current
self.current = self.string_parser(properties={"type":"strict","escape_depth":properties["escape_depth"]})
                if start_of_value != self.current:
success = True
if not success:
self.mccstring.pop()
self.append_region(self.invalid, start_of_key, self.current)
return self.current
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
return self.current + 1
def json_score_parser(self, properties={}):
        if self.string[self.current] != "{": #Can't be [] since it's an object
return self.current
self.current += 1
quote = self.generate_quote(properties["escape_depth"])
start_of_object = self.current
while self.string[self.current] != "}":
reached_end = self.skip_whitespace(start_of_object)
if reached_end:
return self.current
start_of_key = self.current
self.current = self.string_parser(properties={"type":"strict","escape_depth":properties["escape_depth"]})
if start_of_key == self.current:
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
key = self.string[start_of_key + len(quote) : self.current - len(quote)]
reached_end = self.skip_whitespace(start_of_object)
if reached_end:
return self.current
if self.string[self.current] != ":":
self.mccstring.pop()
self.append_region(self.invalid, start_of_key, self.current)
return self.current + 1
self.current += 1
reached_end = self.skip_whitespace(start_of_object)
if reached_end:
return self.current
success = False
if key == "name":
start_of_value = self.current
self.current = self.quoted_parser(self.score_holder_parser, properties)
if start_of_value != self.current:
success = True
elif key == "objective":
start_of_value = self.current
self.current = self.quoted_parser(self.username_parser, properties)
if start_of_value != self.current:
success = True
elif key == "value":
start_of_value = self.current
self.current = self.integer_parser(properties)
                if start_of_value != self.current:
success = True
if not success:
self.mccstring.pop()
self.append_region(self.invalid, start_of_key, self.current)
return self.current
reached_end = self.skip_whitespace(self.current)
if reached_end:
return self.current
if self.string[self.current] == ",":
self.current += 1
elif self.string[self.current] != "}":
self.append_region(self.invalid, self.current, self.current + 1)
return self.current + 1
self.current += 1
return self.current
def objective_criteria_parser(self, properties={}):
criteria_match = self.regex["resource_location"].match(self.string, self.current)
if criteria_match:
criteria = criteria_match.group()
namespace = criteria_match.group(1)
if criteria_match.group(2).startswith("minecraft."):
location = criteria_match.group(2)[10:]
else:
location = criteria_match.group(2)
if (criteria in OBJECTIVE_CRITERIA or
(namespace in CRITERIA_BLOCKS and location in BLOCKS) or
(namespace in CRITERIA_ITEMS and location in ITEMS) or
(namespace in CRITERIA_ENTITIES and location in ENTITIES)):
if namespace != None:
self.append_region(self.mccliteral, criteria_match.start(1), criteria_match.end(1))
self.append_region(self.mccstring, criteria_match.start(2), criteria_match.end(2))
self.current = criteria_match.end()
return self.current
def entity_location_parser(self, properties={}):
return self.location_from_list_parser(self.regex["item_block_id"], ENTITIES)
def resource_location_parser(self, properties={}):
return self.regex_parser(self.regex["resource_location"], [self.mccliteral, self.mccstring])
def function_parser(self, properties={}):
return self.regex_parser(self.regex["namespace"], [self.mccstring, self.mccliteral, self.invalid])
def username_parser(self, properties={}):
return self.regex_parser(self.regex["username"], [self.mccstring])
def vec3d_parser(self, properties={}):
return self.regex_parser(self.regex["position-3"], [self.mccconstant, self.mccconstant, self.mccconstant])
def vec2d_parser(self, properties={}):
return self.regex_parser(self.regex["position-2"], [self.mccconstant, self.mccconstant])
def item_slot_parser(self, properties={}):
return self.regex_parser(self.regex["item_slot"], [self.mccstring])
def scoreboard_slot_parser(self, properties={}):
return self.regex_parser(self.regex["scoreboard_slot"], [self.mccstring])
def color_parser(self, properties={}):
return self.regex_parser(self.regex["color"], [self.mccconstant])
def entity_anchor_parser(self, properties={}):
return self.regex_parser(self.regex["entity_anchor"], [self.mccstring])
def scoreboard_operation_parser(self, properties={}):
return self.regex_parser(self.regex["operation"], [self.mcccommand])
    def mob_effect_parser(self, properties={}):
return self.location_from_list_parser(self.regex["item_block_id"], POTIONS)
def sound_parser(self, properties={}):
return self.location_from_list_parser(self.regex["resource_location"], SOUNDS)
def gamemode_parser(self, properties={}):
return self.regex_parser(self.regex["gamemode"], [self.mccstring])
def sort_parser(self, properties={}):
return self.regex_parser(self.regex["sort"], [self.mccliteral])
def item_parser(self, properties={}):
old_current = self.current
self.current = self.location_from_list_parser(self.regex["item_block_id"], ITEMS)
if self.current != old_current:
return self.nbt_parser(properties)
return self.current
def location_from_list_parser(self, regex, possibilities):
match = regex.match(self.string, self.current)
if match and match.group(1) != None and match.group(1)[0] == "#" or (
match and match.group(2) in possibilities and match.group(1) in [None, "minecraft:"]):
self.append_region(self.mccliteral, match.start(1), match.end(1))
self.append_region(self.mccstring, match.start(2), match.end(2))
self.current = match.end()
return self.current
# properties["type"] must equal "word". This should be done already.
def json_in_nbt_parser(self, properties):
if not "escape_depth" in properties:
escape_depth = 0
else:
escape_depth = properties["escape_depth"]
quote = self.generate_quote(escape_depth)
if not self.string.startswith(quote, self.current):
return self.string_parser(properties)
start = self.current
self.append_region(self.mccstring, self.current, self.current + len(quote))
self.current += len(quote)
old_current = self.current
properties["escape_depth"] = escape_depth + 1
self.current = self.json_parser(properties)
if old_current == self.current:
self.mccstring.pop()
properties["escape_depth"] = escape_depth
self.current = start
return self.string_parser(properties)
if not self.string.startswith(quote, self.current):
if self.current < len(self.string):
delta = 1
else:
delta = -1
self.append_region(self.invalid, self.current, self.current + delta)
return self.current + max(0, delta)
self.append_region(self.mccstring, self.current, self.current + len(quote))
self.current += len(quote)
return self.current
def regex_parser(self, pattern, scopes, properties={}):
pattern_match = pattern.match(self.string, self.current)
if pattern_match:
if len(scopes) == 1:
scopes[0].append(sublime.Region(self.region_begin + pattern_match.start(), self.region_begin + pattern_match.end()))
else:
for i in range(len(scopes)):
scopes[i].append(sublime.Region(self.region_begin + pattern_match.start(i + 1),
self.region_begin + pattern_match.end(i + 1)))
self.current = pattern_match.end()
return self.current
def quoted_parser(self, parser, properties={}):
if not "escape_depth" in properties:
escape_depth = 0
else:
escape_depth = properties["escape_depth"]
start = self.current
quote = self.generate_quote(escape_depth)
if not self.string.startswith(quote, self.current):
return self.current
self.append_region(self.mccstring, self.current, self.current + len(quote))
self.current += len(quote)
old_current = self.current
self.current = parser(properties)
if old_current == self.current:
self.mccstring.pop()
return self.current
if not self.string.startswith(quote, self.current):
self.mccstring.pop()
return start
self.append_region(self.mccstring, self.current, self.current + len(quote))
return self.current + len(quote)
def generate_quote(self, escape_depth):
quotes = ["\"", "\\\"", "\\\\\\\"", "\\\\\\\\\\\\\\\"", "\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\""]
if escape_depth <= 4:
return quotes[escape_depth]
        quote = ""
        for i in range(0, escape_depth):
            quote += "\\"
        return quote + self.generate_quote(escape_depth - 1)
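    # Added note (illustrative): for the precomputed depths 0-4 the returned quote string is
    # (2**depth - 1) backslashes followed by a double quote -- '"', '\"', '\\\"', ... -- so
    # each additional level of nesting escapes the quote characters of the level below it.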
parsers = { # Master list of what function the parser name in commands.json corresponds to
"minecraft:resource_location" : resource_location_parser,
"minecraft:function" : function_parser,
"minecraft:entity" : entity_parser,
"brigadier:string" : string_parser, #type = word and type= greedy
"minecraft:game_profile" : username_parser,
"minecraft:message" : message_parser,
"minecraft:block_pos" : vec3d_parser,
"minecraft:nbt" : nbt_parser,
"minecraft:item_stack" : item_parser,
"minecraft:item_predicate" : item_parser,
"brigadier:integer" : integer_parser, #Properties has min and max
"minecraft:block_state" : block_parser,
"minecraft:block_predicate" : block_parser,
"minecraft:nbt_path" : nbt_path_parser,
"brigadier:float" : float_parser,
"brigadier:double" : float_parser,
"brigadier:bool" : boolean_parser,
"minecraft:swizzle" : axes_parser, # any cobination of x, y, and z e.g. x, xy, xz. AKA swizzle
"minecraft:score_holder" : score_holder_parser, #Has options to include wildcard or not
"minecraft:objective" : username_parser,
"minecraft:vec3" : vec3d_parser,
"minecraft:vec2" : vec2d_parser,
"minecraft:particle" : particle_parser,
"minecraft:item_slot" : item_slot_parser, #Check the wiki on this one I guess
"minecraft:scoreboard_slot" : scoreboard_slot_parser,
"minecraft:team" : username_parser,
"minecraft:color" : color_parser,
"minecraft:rotation" : vec2d_parser, # [yaw, pitch], includes relative changes
"minecraft:component" : json_parser,
"minecraft:entity_anchor" : entity_anchor_parser,
"minecraft:operation" : scoreboard_operation_parser, # +=, = , <>, etc
"minecraft:int_range" : int_range_parser,
"minecraft:mob_effect" : mob_effect_parser,
"minecraft:sound" : sound_parser,
"minecraft:objective_criteria":objective_criteria_parser,
"minecraft:entity_summon" : entity_location_parser
}
|
[
"42iscool42@gmail.com"
] |
42iscool42@gmail.com
|
8e56a302ab72b021d83ee70f0ad1e776d0ef9fc3
|
1956b7c652d8c2e22a9edc22032a1ee5a64b6b7b
|
/apps/partner/migrations/016_auto__change_data_type__commission_field.py
|
0a86af3822ac6019bf771eb379a3bc08602d411f
|
[] |
no_license
|
quantmScubism/django_oscar
|
939bb5fd0d4caa17747e966a0a847939646808c1
|
e283abbe89a0ca0488fc6442de0a0eb5b53f0149
|
refs/heads/master
| 2020-04-16T02:30:18.269115
| 2017-06-24T14:41:28
| 2017-06-24T14:41:28
| 95,303,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,247
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
        # Changing field 'Stockrecord.commission'
db.alter_column('partner_stockrecord', 'commission',
self.gf('django.db.models.fields.DecimalField')(default=0, null=True, max_digits=12, decimal_places=2, blank=True))
def backwards(self, orm):
# Deleting field 'Stockrecord.commission'
db.delete_column('partner_stockrecord', 'commission')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'partner.stockalert': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert'},
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': "orm['partner.StockRecord']"}),
'threshold': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'partner.stockrecord': {
'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['partner.Partner']"}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': "orm['catalogue.Product']"}),
'selected_partner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True', 'default': '0', 'max_length': '128'}),
}
}
complete_apps = ['partner']
|
[
"012kinglight@gmail.com"
] |
012kinglight@gmail.com
|
fe9b034ff0b1c5cb685a9b5928c8193be3d0bba5
|
caa8c0915d21ce22358aeadd0af3a71916189fa5
|
/FlaskORM2/manage.py
|
e78d9bfaa97d7687bdecfc73860031a6c9d710ef
|
[
"MIT"
] |
permissive
|
lxdzz/item
|
49d4d60da8fc7e9d6e365160adb78b6ffd7cf323
|
1024c53baa51bcdc98ec7a987eb3433fc4478d00
|
refs/heads/master
| 2022-12-16T15:02:18.809063
| 2019-10-08T09:18:28
| 2019-10-08T09:18:28
| 213,304,826
| 0
| 0
|
MIT
| 2019-10-07T06:26:22
| 2019-10-07T05:48:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
import os
from app import models,create
from flask_script import Manager
from flask_migrate import MigrateCommand,Migrate
app=create()
manage=Manager(app)
migrate=Migrate(app,models)
app.secret_key="123456"
manage.add_command("db",MigrateCommand)
if __name__=="__main__":
manage.run()
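# A minimal usage sketch, assuming the standard Flask-Migrate workflow: with
# MigrateCommand registered under "db" as above, migrations are driven from the shell:
#   python manage.py db init      # create the migrations/ folder (first run only)
#   python manage.py db migrate   # autogenerate a revision from model changes
#   python manage.py db upgrade   # apply pending revisions to the database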
|
[
"zdhr@163.com"
] |
zdhr@163.com
|
843d8fb2fb90c80110e6a1f94182e4440e561463
|
7a07d957316172fe78b341c6f5215df2ccdb24f6
|
/assignment/EasyAI_all_program.py
|
c6fbae635760a88672fcd1070d47c597c1a75d57
|
[] |
no_license
|
chandraprakashh/Python_with_AI
|
87ff4655c44eef9d0459cf0f2ceedabde88b0f1f
|
6d76eeea94e0cb7402330a2beea1fc4a7ab73e29
|
refs/heads/master
| 2020-07-18T18:18:06.463302
| 2019-12-11T08:20:12
| 2019-12-11T08:20:12
| 206,291,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 22:50:57 2019
@author: user
"""
# Code => 1
from easyAI import TwoPlayersGame, Human_Player, AI_Player, Negamax
class GameOfBones( TwoPlayersGame ):
def __init__(self, players):
self.players = players
self.pile = 20
self.nplayer = 1
def possible_moves(self): return ['1','2','3']
def make_move(self,move): self.pile -= int(move)
def win(self): return self.pile<=0
def is_over(self): return self.win()
def show(self): print ("%d bones left in the pile" % self.pile)
    def scoring(self): return 100 if self.win() else 0
ai = Negamax(13)
game = GameOfBones( [ Human_Player(), AI_Player(ai) ] )
history = game.play()
# Code => 2
from easyAI import TwoPlayersGame, AI_Player, Negamax
from easyAI.Player import Human_Player
class GameController(TwoPlayersGame):
def __init__(self, players):
self.players = players
self.nplayer = 1
self.board = [0] * 9
def possible_moves(self):
return [a + 1 for a, b in enumerate(self.board) if b == 0]
def make_move(self, move):
self.board[int(move) - 1] = self.nplayer
def loss_condition(self):
possible_combinations = [[1,2,3], [4,5,6], [7,8,9],
[1,4,7], [2,5,8], [3,6,9], [1,5,9], [3,5,7]]
return any([all([(self.board[i-1] == self.nopponent)
for i in combination]) for combination in possible_combinations])
def is_over(self):
return (self.possible_moves() == []) or self.loss_condition()
def show(self):
print('\n'+'\n'.join([' '.join([['. ', 'O', 'X'][self.board[3*j + i]]
for i in range(3)]) for j in range(3)]))
def scoring(self):
return -100 if self.loss_condition() else 0
if __name__ == "__main__":
algorithm = Negamax(7)
GameController([Human_Player(), AI_Player(algorithm)]).play()
|
[
"noreply@github.com"
] |
chandraprakashh.noreply@github.com
|
f027e2fef6d80f6cee29c3c460427d5ff4690d31
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_073/ch18_2020_03_09_13_23_07_056737.py
|
5580c305acffebf7d622cc6890b83f53b3de7ef7
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def verifica_idade(idade):
if idade>=21:
return 'liberado EUA e BRASILl'
    if idade>=1 and idade<18:
return 'Não está liberado'
else:
return 'esta liberado BRASIL'
|
[
"you@example.com"
] |
you@example.com
|
1ac5c2d34e64826c6b8bdb12f4a443e2c74c4ab7
|
1e81ae6498fad9321767a0c3bf6a83751a2fb992
|
/baiTap/bai10.py
|
de7804eb5690b473f048d1eb73f12f76fe641fb3
|
[] |
no_license
|
tdhieu756/pythonBasic
|
4f8305ff81e071d0d4df41adeacfbf7387f3e488
|
b14756380ff0a9524dc4a38917a7b0430cbc6268
|
refs/heads/master
| 2020-05-25T07:06:04.492782
| 2019-05-21T01:54:59
| 2019-05-21T01:54:59
| 187,677,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
x = int(input('Nhap X '))
y = int(input('Nhap Y '))
l = []
print(l)
for i in range(x):
c =[]
for j in range(y):
        # append to the end of the list
c.append(j*i)
l.append(c)
print(l)
|
[
"tdhieu756@gmail.com"
] |
tdhieu756@gmail.com
|
14652fb38016928ddefc74fa43e0a8c3e8ada405
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ABC/ABC200~ABC299/ABC245/d2.py
|
39575b7b194f9b94d10060fb30a0af67e9572081
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468
| 2023-08-12T09:53:07
| 2023-08-12T09:53:07
| 254,373,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
def polydiv(xs, ys):
xn = len(xs)
yn = len(ys)
zs = xs.copy()
qs = []
for _ in range(xn - yn + 1):
temp = zs[0] // ys[0]
for i in range(yn):
zs[i] -= temp * ys[i]
qs.append(temp)
zs = zs[1:]
if qs == []: qs = [0.]
return qs
n,m=map(int,input().split())
a=list(map(int,input().split()))
c=list(map(int,input().split()))
a=list(reversed(a))
c=list(reversed(c))
ans=[]
p=polydiv(c,a)
for i in range(len(p)):
ans.append(int(p[i]))
ans=list(reversed(ans))
print(*ans)
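# A worked example of the helper above, assuming integer coefficients ordered from
# the highest degree down (polydiv uses floor division):
#   dividing x^2 + 3x + 2 by x + 1:
#   polydiv([1, 3, 2], [1, 1])  ->  [1, 2], i.e. the quotient x + 2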
|
[
"ymdysk911@gmail.com"
] |
ymdysk911@gmail.com
|
b1163a31c4f06fe80bd4a61d7898539c29b8a4fc
|
48315e4fd2cdc491ea6d1160331dfd836b410e5c
|
/testfile.py
|
0ef295aedf17a6892ff998561e293cbac3a3d7e6
|
[
"MIT"
] |
permissive
|
jesse-toftum/cash_ml
|
9561d3c47e88b54fb0312bacf1b650a3b46cd563
|
316121a41359f8d18358c17f9be2ab90ad69bcb2
|
refs/heads/master
| 2021-06-11T06:07:42.256317
| 2021-02-19T08:16:00
| 2021-02-19T08:16:00
| 160,003,235
| 4
| 3
|
MIT
| 2019-01-10T04:38:10
| 2018-12-02T01:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from cash_ml import Predictor
from cash_ml.utils import get_boston_dataset
from cash_ml.utils_models import load_ml_model
# Load data
df_train, df_test = get_boston_dataset()
# Tell auto_ml which column is 'output'
# Also note columns that aren't purely numerical
# Examples include ['nlp', 'date', 'categorical', 'ignore']
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_train)
# Score the model on test data
test_score = ml_predictor.score(df_test, df_test.MEDV)
# auto_ml is specifically tuned for running in production
# It can get predictions on an individual row (passed in as a dictionary)
# A single prediction like this takes ~1 millisecond
# Here we will demonstrate saving the trained model, and loading it again
file_name = ml_predictor.save()
trained_model = load_ml_model(file_name)
# .predict and .predict_proba take in either:
# A pandas DataFrame
# A list of dictionaries
# A single dictionary (optimized for speed in production environments)
predictions = trained_model.predict(df_test)
print(predictions)
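# A minimal sketch of the single-row case mentioned in the comments above,
# assuming the Boston housing feature columns in df_test (hypothetical row choice):
# single_row = df_test.drop(columns=['MEDV']).iloc[0].to_dict()
# print(trained_model.predict(single_row))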
|
[
"u0803072@utah.edu"
] |
u0803072@utah.edu
|
7678c21d2e011e118d23455f36514f5d73e162d6
|
8454441f899c3beb9fcea26cffc2f4c3cf75ff6a
|
/common/code/snippets/py/flask-get-header.py
|
a040c637e90ee07be18f7cd6ed97246a58f26c1e
|
[
"MIT"
] |
permissive
|
nevesnunes/env
|
4a837e8fcf4a6a597992103e0a0c3d0db93e1c78
|
f2cd7d884d46275a2fcb206eeeac5a8e176b12af
|
refs/heads/master
| 2023-08-22T15:49:35.897161
| 2023-08-15T13:51:08
| 2023-08-15T13:51:08
| 199,400,869
| 9
| 6
|
MIT
| 2023-06-22T10:59:51
| 2019-07-29T07:24:47
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
#!/usr/bin/env python3
from flask import Flask, request
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello!"
@app.route("/<path:text>", methods=["GET", "POST"])
def echo(text):
return f"You said (len = {len(text)}): {bytes(text, 'latin-1')}"
@app.after_request
def after(response):
red_foo = b"\x1b\x5b\x33\x31\x6d\x66\x6f\x6f\x1b\x28\x42\x1b\x5b\x6d"
response.headers["X-Foo"] = red_foo
response.headers["X-Bar"] = "".join(
[chr(x) if x not in (ord("\r"), ord("\n")) else "" for x in range(0, 255)]
)
return response
if __name__ == "__main__":
app.run(port=18123)
|
[
"9061071+nevesnunes@users.noreply.github.com"
] |
9061071+nevesnunes@users.noreply.github.com
|
88163ffa4c39f9c08b7cefc81c2eb7c2b7c7bed4
|
f146cef3f2172275c8d7f526dab92951fa50eb2c
|
/COURSE/group project -week9/backup -backend day3/backend/app/users/views.py
|
d0da6f0f9c4c96335aafbf7f3314c9c3e1305e26
|
[] |
no_license
|
mehranj73/Bootcamp
|
fed04d3858d6d0bc1cdad94e1f05bd4f7a47c0ec
|
bd575cd02329ad1ce21b05350380dfbf17cbdd89
|
refs/heads/master
| 2023-02-09T06:50:00.590751
| 2019-08-22T18:56:02
| 2019-08-22T18:56:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,413
|
py
|
from django.contrib.auth.models import User
from rest_framework import filters
from rest_framework.generics import RetrieveAPIView, ListCreateAPIView, ListAPIView, UpdateAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from app.permissions import IsOwnerOrReadOnly
from .serializers import MyProfileSerializer, UserSerializer, UserProfileSerializer, MyUserSerializer
from .models import UserProfile
#GET my profile
# URL 'me/'
class GetMyProfile(RetrieveAPIView):
# allow this action only to the user who owns the profile or to admin
#permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly,)
queryset = UserProfile.objects.all()
serializer_class = MyProfileSerializer
def get(self, request, *args, **kwargs):
user = self.request.user
me = user.user_profile
serializer = self.get_serializer(me)
return Response(serializer.data)
#GET: to get all users
# URL 'list/'
class GenericGetUsersView(ListCreateAPIView):
# queryset = User.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
return User.objects.all()
#GET userprofile by user ID
# URL <int:pk>
class GetUserProfileById(RetrieveAPIView):
queryset = UserProfile.objects.all()
serializer_class = UserProfileSerializer
lookup_url_kwarg = 'pk'
#POST: update user profile - UserProfile model part (in the front end this is combined on the same page with the User model part below)
#URL 'me/update/user-profile/'
class UpdateUserProfileView(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = UserProfile.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyProfileSerializer(instance=user.user_profile, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#POST: update user profile - User model part (in the front end this is combined on the same page with the UserProfile model part above)
#URL 'me/update/user-profile/'
class UpdateUserProfileViewMyUser(UpdateAPIView):
serializer_class = MyProfileSerializer
queryset = User.objects.all()
permission_classes = [
IsAuthenticated,
IsOwnerOrReadOnly,
]
def update(self, request, *args, **kwargs):
user = self.request.user
serializer = MyUserSerializer(instance=user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response( "User profile updated.", status=200)
else:
return Response( "Unable to perform request. Please try again later.", status=400)
#GET: to search by username or first name or last name
class SearchUsers(ListAPIView):
"""
GET: Search users
    in Postman, add a query param with key "search" and a string value
"""
serializer_class = UserSerializer
queryset = User.objects.all()
filter_backends = (filters.SearchFilter,)
search_fields = ('username', 'first_name', 'last_name')
|
[
"rachovsky@gmail.com"
] |
rachovsky@gmail.com
|
e564dfeb822503e42485517c3f70e61be60181c9
|
2398ce6219cc23a937d4fb9d3459f0453357d28d
|
/cityshaper/movements/gearLash.py
|
50a38dc854ae87b4a1efac455c864e40d36355fc
|
[] |
no_license
|
fllctrlz/robot
|
5c38f5e780fc5e3796f5d710e9c1167a7a5ccdd0
|
94757e3a0e386088c1ece2edc4855fc84dc0a994
|
refs/heads/master
| 2020-09-03T01:40:31.774516
| 2020-01-10T22:07:37
| 2020-01-10T22:07:37
| 219,353,164
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
#!/usr/bin/env python3
from util import robot
from time import sleep
from util.exitConditions import distance, time, light
def run(direction, delay = 0.2, power = 5):
'''Moves the motors at a very low power for a short time
in order to minimize the slippage of the gears inside the motors of the robot.'''
robot.resetStartTime()
robot.resetMotors()
motorPower = power*direction*robot.motorDirection
robot.safeMotorsOn(motorPower, motorPower)
# Check if the time is equal to the amount set at the beginning of the function
while not condition(time, robot.timer, delay, 0):
pass
robot.driveBase.stop()
|
[
"fllctrlzfll@gmail.com"
] |
fllctrlzfll@gmail.com
|
b2b45242b51f0a73e883f1bc773c5c35069ae906
|
fbfbe314015ae4b6d0cb12039ea24ea07bbb8012
|
/src/models/blocks/encoder_v1.py
|
c6f7d9694dbc8580b1fa56c75fe888e7a19a3d18
|
[
"MIT"
] |
permissive
|
GuoleiSun/composite-tasking
|
9fefd98e2a512b64d5c5284c0f40c3e5339f9c56
|
c759a87be783912aae02e0cb9a8d18c52b3c8930
|
refs/heads/main
| 2023-05-30T10:09:40.234706
| 2021-06-17T23:13:06
| 2021-06-17T23:13:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
import torch
import torch.nn as nn
import torchvision
class EncoderV1(nn.Module):
def __init__(self, cfg):
super().__init__()
# Load and check the model configuration dictionary
self.cfg = cfg
self._check_cfg()
# Create the encoder architecture
self._create_encoder()
def _check_cfg(self):
assert isinstance(self.cfg["encoder_arch"], str)
assert isinstance(self.cfg["encoder_pre_trained"], bool)
def _create_encoder(self):
# Load the encoder backbone
if self.cfg["encoder_arch"] == "resnet18":
base_model = torchvision.models.resnet.resnet18(pretrained=self.cfg["encoder_pre_trained"], progress=False)
elif self.cfg["encoder_arch"] == "resnet34":
base_model = torchvision.models.resnet.resnet34(pretrained=self.cfg["encoder_pre_trained"], progress=False)
elif self.cfg["encoder_arch"] == "resnet50":
base_model = torchvision.models.resnet.resnet50(pretrained=self.cfg["encoder_pre_trained"], progress=False)
elif self.cfg["encoder_arch"] == "resnet101":
base_model = torchvision.models.resnet.resnet101(pretrained=self.cfg["encoder_pre_trained"], progress=False)
else:
raise NotImplementedError
base_layers = list(base_model.children())
# Encoder layers:
# -----------
# Layer output size=(N, n_ch_level_1, H/2, W/2)
self.layer1 = nn.Sequential(*base_layers[:3])
# Layer output size=(N, n_ch_level_2, H/4, W/4)
self.layer2 = nn.Sequential(*base_layers[3:5])
# Layer output size=(N, n_ch_level_3, H/8, W/8)
self.layer3 = base_layers[5]
# Layer output size=(N, n_ch_level_4, H/16, W/16)
self.layer4 = base_layers[6]
# Layer output size=(N, n_ch_level_5, H/32, W/32)
self.layer5 = base_layers[7]
def forward(self, x):
# Compute the Encoder and its intermediate results
layer1 = self.layer1(x)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer5 = self.layer5(layer4)
output_levels = {
"0_input": x,
"1_s2": layer1,
"2_s4": layer2,
"3_s8": layer3,
"4_s16": layer4,
"5_s32": layer5,
}
return output_levels
def get_n_channels_in_each_level(self):
if self.cfg["encoder_arch"] in ["resnet18", "resnet34"]:
n_ch = {
"1": 64,
"2": 64,
"3": 128,
"4": 256,
"5": 512,
}
elif self.cfg["encoder_arch"] in ["resnet50", "resnet101"]:
n_ch = {
"1": 64,
"2": 256,
"3": 512,
"4": 1024,
"5": 2048,
}
else:
raise NotImplementedError
return n_ch
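# A minimal usage sketch under the cfg keys checked in _check_cfg above; the
# 224x224 input size and the printed shapes are illustrative assumptions.
if __name__ == "__main__":
    cfg = {"encoder_arch": "resnet18", "encoder_pre_trained": False}
    encoder = EncoderV1(cfg).eval()
    with torch.no_grad():
        levels = encoder(torch.randn(1, 3, 224, 224))
    # For resnet18 this gives e.g. levels["5_s32"] with shape (1, 512, 7, 7)
    print({name: tuple(t.shape) for name, t in levels.items()})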
|
[
"npopovic3794@gmail.com"
] |
npopovic3794@gmail.com
|
dccac459597573c89d2f70c7fd8e454001aa54e5
|
aab8a88302af603fcfb5770d885b674e28edab38
|
/WebApp/clustering.py
|
ead5cf91d682ba42cf2db61d938980899a606914
|
[] |
no_license
|
rmenyhart/AccidentPredictor
|
ea235834c445b68d5aba31832662c403b5b54450
|
467984419e8d3c25e1a922e50f8943a6750cc3e4
|
refs/heads/master
| 2023-07-17T16:55:36.195579
| 2021-09-06T17:00:43
| 2021-09-06T17:00:43
| 380,519,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from dtos.location import Location
from sklearn.metrics.pairwise import haversine_distances
from math import radians
class Clustering:
cluster_centers = []
def load_centers(self, filename):
file = open(filename, 'r')
for line in file:
coords = line.split(',')
self.cluster_centers.append(Location(float(coords[0]), float(coords[1])))
def is_clustered(self, point, distance):
for center in self.cluster_centers:
if (self.haversine(point, center) <= distance):
return True
return False
def haversine(self, loc1, loc2):
loc1_rad = [radians(loc1.lat), radians(loc1.lng)]
loc2_rad = [radians(loc2.lat), radians(loc2.lng)]
result = haversine_distances([loc1_rad, loc2_rad])
return result[0][1]
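# A minimal usage sketch; 'centers.txt' and the coordinates are hypothetical.
# load_centers expects one "lat,lng" pair per line. Note that sklearn's
# haversine_distances works on the unit sphere, so haversine() returns radians;
# multiply by Earth's radius (~6371 km) to get kilometres, or pass the threshold
# to is_clustered in radians.
#   c = Clustering()
#   c.load_centers('centers.txt')
#   print(c.is_clustered(Location(46.77, 23.59), distance=0.001))  # ~6.4 km radius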
|
[
"menyhartrobert98@gmail.com"
] |
menyhartrobert98@gmail.com
|
7827a4ff1d73bcdedd452e2b87970b445bb26252
|
3b3b8f400e1e175cd271169806452bce8017a39f
|
/lib/python/treadmill_aws/cli/admin/cell.py
|
9ce5a9950c08c1df864a54705bfe130ebda21e12
|
[
"Apache-2.0"
] |
permissive
|
drienyov/treadmill-aws
|
d9c9af84fa12514f24b4bb30864659b8267deb55
|
a723999e9fa6e9c4b66155cbfca3082be5daff21
|
refs/heads/master
| 2020-03-25T00:04:53.359365
| 2018-07-31T15:16:21
| 2018-07-31T15:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,669
|
py
|
"""Admin Cell CLI module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import time
import click
import jinja2
from ldap3.core import exceptions as ldap_exceptions
import six
from treadmill import admin
from treadmill import cli
from treadmill import context
from treadmill import yamlwrapper as yaml
from treadmill.api import instance
from treadmill.scheduler import masterapi
from treadmill import sysinfo
from treadmill.syscall import krb5
from treadmill_aws import awscontext
from treadmill_aws import cli as aws_cli
_LOGGER = logging.getLogger(__name__)
# TODO: full list of cell apps:
# adminapi, wsapi, app-dns, stateapi, cellapi
_CELL_APPS = [
'adminapi',
'app-dns',
'appmonitor',
'cellapi',
'cellsync',
'scheduler',
'stateapi',
'trace-cleanup',
'wsapi',
]
class CellCtx:
"""Cell context."""
def __init__(self, cors=None, krb_realm=None):
self.cell = context.GLOBAL.cell
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
cell = admin_cell.get(self.cell)
self.proid = cell['username']
self.data = cell.get('data')
# Default cors origin to top level dns domain. The value is passed to
# manifest verbatim, so need to shell escape it.
if not cors:
last_two = context.GLOBAL.dns_domain.split('.')[-2:]
self.cors = '\\.'.join(last_two)
else:
self.cors = '\\.'.join(cors.strip('.').split('.'))
self.krb_realm = krb_realm
if not self.krb_realm:
realms = krb5.get_host_realm(sysinfo.hostname())
if realms:
self.krb_realm = realms[0]
def _render(name, ctx):
"""Render named template."""
jinja_env = jinja2.Environment(loader=jinja2.PackageLoader(__name__))
template = jinja_env.get_template(name)
return yaml.load(template.render(**ctx.obj.__dict__))
def _render_app(appname, ctx):
"""Render manifest for given app."""
app = _render(appname, ctx)
fullname = '{}.{}.{}'.format(ctx.obj.proid, appname, ctx.obj.cell)
return fullname, app
def _monitors(ctx):
"""Load monitor definitions."""
return _render('monitors', ctx)
def _appgroups(ctx):
"""Load appgroups definitions."""
return _render('appgroups', ctx)
def _ident_groups(ctx):
"""Load identity group definitions."""
return _render('identity-groups', ctx)
def init():
"""Admin Cell CLI module"""
# pylint: disable=too-many-statements
@click.group(name='cell')
@click.option('--cors-origin', help='CORS origin for API.')
    @click.option(
        '--cell', required=True,
        envvar='TREADMILL_CELL',
        is_eager=True, callback=cli.handle_context_opt,
        expose_value=False
    )
    @click.option(
        '--krb-realm', help='Kerberos realm',
        envvar='TREADMILL_KRB_REALM',
        required=False
    )
@click.option(
'--ipa-certs', required=False, envvar='TREADMILL_IPA_CERTS',
callback=aws_cli.handle_context_opt,
is_eager=True,
default='/etc/ipa/ca.crt',
expose_value=False
)
@click.pass_context
def cell_grp(ctx, cors_origin, krb_realm):
"""Manage treadmill cell."""
ctx.obj = CellCtx(cors=cors_origin, krb_realm=krb_realm)
@cell_grp.command(name='configure-apps')
@click.option('--apps', type=cli.LIST, help='List of apps to configure.')
@click.pass_context
def configure_apps(ctx, apps):
"""Configure cell API."""
admin_app = admin.Application(context.GLOBAL.ldap.conn)
# For apps that need write access to LDAP. The context LDAP must have
# write access because this is what we use to write manifests here.
write_uri = admin_app.admin.write_uri
ctx.obj.admin_ldap_url = ','.join(write_uri) if write_uri else None
if not apps:
apps = _CELL_APPS
# Configure apps identity groups
identity_groups = _ident_groups(ctx)
for groupname, count in six.iteritems(identity_groups):
masterapi.update_identity_group(
context.GLOBAL.zk.conn,
groupname,
count
)
# Configure apps
for appname in apps:
fullname, app = _render_app(appname, ctx)
print(fullname)
print(yaml.dump(app))
try:
admin_app.create(fullname, app)
except ldap_exceptions.LDAPEntryAlreadyExistsResult:
admin_app.replace(fullname, app)
@cell_grp.command(name='configure-monitors')
@click.option('--monitors', type=cli.DICT,
help='Key/value pairs for monitor count overrides.')
@click.pass_context
def configure_monitors(ctx, monitors):
"""Configure system apps monitors."""
if not monitors:
monitors = _monitors(ctx)
for name, count in six.iteritems(monitors):
print(name, count)
masterapi.update_appmonitor(
context.GLOBAL.zk.conn,
name,
int(count)
)
@cell_grp.command(name='restart-apps')
@click.option('--apps', type=cli.LIST,
help='List of apps to restart.')
@click.option('--wait', type=int, help='Interval to wait before re-start.',
default=20)
@click.pass_context
def restart_apps(ctx, wait, apps):
"""Restart cell API."""
instance_api = instance.API(plugins=['aws-proid-env'])
monitors = _monitors(ctx)
for name, count in six.iteritems(monitors):
_, appname, _ = name.split('.')
if apps and appname not in apps:
continue
_, app = _render_app(appname, ctx)
print(name)
print(yaml.dump(app))
for idx in range(0, count):
instance_ids = instance_api.create(name, app, 1)
for inst_id in instance_ids:
print(inst_id)
if idx <= count - 1 and wait:
time.sleep(wait)
@cell_grp.command(name='configure-appgroups')
@click.pass_context
def configure_appgroups(ctx):
"""Configure system app groups."""
appgroups = _appgroups(ctx)
admin_app_group = admin.AppGroup(context.GLOBAL.ldap.conn)
for name, data in six.iteritems(appgroups):
print(name, data)
try:
admin_app_group.create(name, data)
except ldap_exceptions.LDAPEntryAlreadyExistsResult:
admin_app_group.update(name, data)
existing = admin_app_group.get(name, dirty=True)
group_cells = set(existing['cells'])
group_cells.update([ctx.obj.cell])
admin_app_group.update(name, {'cells': list(group_cells)})
existing = admin_app_group.get(name, dirty=True)
print(existing)
@cell_grp.command(name='configure-dns')
@click.pass_context
def configure_dns(ctx):
"""Configure DNS cell records."""
ipaclient = awscontext.GLOBAL.ipaclient
idnsname = 'zk.{}.{}'.format(ctx.obj.cell, context.GLOBAL.dns_domain)
admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
cell = admin_cell.get(ctx.obj.cell)
masters = ','.join(['{}:{}'.format(m['hostname'], m['zk-client-port'])
for m in cell['masters']])
scheme = cell.get('zk-auth-scheme')
if not scheme:
scheme = 'zookeeper'
zkurl = '{scheme}://{username}@{hostports}/treadmill/{cell}'.format(
scheme=scheme,
username=ctx.obj.proid,
hostports=masters,
cell=ctx.obj.cell
)
current_rec = ipaclient.get_dns_record(idnsname)
existing = current_rec['result']['result'][0]['txtrecord'][0]
if existing == zkurl:
_LOGGER.info('Zookeeper TXT records up to date: %s : %s',
idnsname, zkurl)
return
ipaclient.add_txt_record(idnsname, zkurl)
del restart_apps
del configure_apps
del configure_monitors
del configure_appgroups
return cell_grp
|
[
"andreikeis@users.noreply.github.com"
] |
andreikeis@users.noreply.github.com
|
1e071d530068f67c936efc3ce7ec63291927d50f
|
cfae6e3f8e066b30e612895fa7dc8465657d67a8
|
/scorer.py
|
ab8977d1a71d43e37f862c8414a2c35db6934aa0
|
[] |
no_license
|
jabm0010/PLN_pFinal
|
6d54c96db8727de70d90790cfb454a959fe61ea6
|
887e2758a28c967e50559885a68dee7bd9dcaad1
|
refs/heads/master
| 2020-05-16T12:56:48.552925
| 2019-05-01T14:35:20
| 2019-05-01T14:35:20
| 183,058,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 20:13:57 2019
@author: jabm9
"""
# -*- coding: utf-8 -*-
"""
Author: Salud María Jiménez Zafra
Description: Final practice scorer
Last modified: April 9, 2019
"""
import sys
gold_path = 'gold_labels_dev.txt'
input_path = 'resultados.txt'
confusion_matrix = {}
labels = ('positive', 'negative')
for l1 in labels:
for l2 in labels:
confusion_matrix[(l1, l2)] = 0
# 1. Read files and get labels
input_labels = {}
with open(input_path, 'r') as input_file:
for line in input_file.readlines():
try:
id_file, domain, polarity = line.strip().split('\t')
except:
print('Wrong file format: ' + input_path)
sys.exit(1)
input_labels[id_file + domain] = polarity
with open(gold_path, 'r') as gold_file:
for line in gold_file.readlines():
try:
id_file, domain, true_polarity = line.strip().split('\t')
except:
print('Wrong file format: ' + gold_path)
sys.exit(1)
key = id_file + domain
if key in input_labels.keys():
proposed_polarity = input_labels[key]
confusion_matrix[(proposed_polarity, true_polarity)] += 1
else:
print('Wrong file format: ' + input_path)
sys.exit(1)
### 2. Calculate evaluation measures
avgP = 0.0
avgR = 0.0
avgF1 = 0.0
for label in labels:
denomP = confusion_matrix[(label, 'positive')] + confusion_matrix[(label, 'negative')]
precision = confusion_matrix[(label, label)]/denomP if denomP > 0 else 0
denomR = confusion_matrix[('positive', label)] + confusion_matrix[('negative', label)]
recall = confusion_matrix[(label, label)]/denomR if denomR > 0 else 0
denomF1 = precision + recall
f1 = 2*precision*recall/denomF1 if denomF1 > 0 else 0
print('\t' + label + ':\tPrecision=' + "{0:.3f}".format(precision) + '\tRecall=' + "{0:.3f}".format(recall) + '\tF1=' + "{0:.3f}".format(f1) + '\n')
avgP += precision
avgR += recall
avgF1 += f1
avgP /= 2.0
avgR /= 2.0
avgF1 /= 2.0
accuracy = (confusion_matrix[('positive','positive')] + confusion_matrix[('negative','negative')]) / (confusion_matrix[('positive','positive')] + confusion_matrix[('negative','negative')] + confusion_matrix[('positive','negative')] + confusion_matrix[('negative','positive')])
print('\nAvg_Precision=' + "{0:.3f}".format(avgP) + '\tAvg_Recall=' + "{0:.3f}".format(avgR) + '\tAvg_F1=' + "{0:.3f}".format(avgF1) + '\tAccuracy=' + "{0:.3f}".format(accuracy))
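# A worked example of the measures above, assuming a hypothetical confusion matrix:
#   (positive, positive)=40, (positive, negative)=10,
#   (negative, positive)=5, (negative, negative)=45
# gives, for the positive class,
#   Precision = 40/50 = 0.800, Recall = 40/45 = 0.889, F1 = 0.842,
# and Accuracy = (40 + 45) / 100 = 0.850.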
|
[
"jabm97@gmail.com"
] |
jabm97@gmail.com
|
ad06790be92022f31a2e460a0c85c51c7118fdbc
|
94b1c18704937d548d99a5185bf467bef8dc20a9
|
/list_5/setup.py
|
62a9a1e45384bc757aa61bd9485d6ec3eeaeb813
|
[] |
no_license
|
piotrszyma/studies-elliptic-curves
|
5dcc1ef3749d40b503f953ca19baf55439f073dd
|
65418742b4961c6b27d864ba3ff5649f4414fdce
|
refs/heads/master
| 2022-11-23T11:06:36.547124
| 2020-07-26T19:57:41
| 2020-07-26T19:57:41
| 244,161,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
import json
import shared
import projective
import field
import affine
import jacobi
def _read_sage_params_from_file(file_path):
with open(file_path) as f:
return json.load(f)
def _read_sage_params_from_stdin():
return json.loads(input())
def set_curve_params(args):
if args.stdin:
raw_json = _read_sage_params_from_stdin()
else:
raw_json = _read_sage_params_from_file(args.path)
a, b, *_ = raw_json["invariants"]
base_point = raw_json["basePoint"]
field_order = raw_json["fieldOrder"]
curve_order = raw_json["curveOrder"]
curve_params = shared.CurveParams(
base_point=shared.CurveBasePoint(*base_point),
a=a,
b=b,
field_order=field_order,
curve_order=curve_order,
)
projective.set_curve_params(curve_params)
affine.set_curve_params(curve_params)
jacobi.set_curve_params(curve_params)
field.set_modulus(field_order)
return curve_params, int(raw_json["bitLength"])
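# A rough sketch of the JSON consumed above (key names taken from the lookups in
# set_curve_params; the values and the 2-coordinate base point are made-up assumptions):
# {
#   "invariants": [2, 3],
#   "basePoint": [5, 7],
#   "fieldOrder": 1019,
#   "curveOrder": 1039,
#   "bitLength": 10
# }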
|
[
"thompson2908@gmail.com"
] |
thompson2908@gmail.com
|
3f5904a0711502f61e084e2adc5e1fa04b34d71b
|
8bd04db6920bc051aca9bb3c2a5517e730f75aa1
|
/app/user/views.py
|
be092b00f5221826a2ea007d86e1b3f22601aefd
|
[
"MIT"
] |
permissive
|
barlydjaja/recipe-app-api
|
a44bcb4d148000a9c8c24c3dcaf6816e9b022a3f
|
ce9607ca3497bbde3b69dd2db4d07b361c6400e8
|
refs/heads/main
| 2023-03-24T18:23:46.310726
| 2021-03-14T04:43:39
| 2021-03-14T04:43:39
| 343,402,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from .serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
""" Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve and return authenticated user"""
return self.request.user
|
[
"barlydjaja@gmail.com"
] |
barlydjaja@gmail.com
|
e958f0bfa38607d332190b88a4689f958dc42a09
|
945f4690cf1c85bae13bea8f361d44514f5fdca0
|
/Python/1113 - Crescente e Decrescente.py
|
ab9fca448e657d27622f29bd8eb02a92ea5af5e2
|
[] |
no_license
|
geo-wurth/URI
|
e7209c0c2da933d3b565d40f466ad94adb1ebb9f
|
e0eae49739efb293322247feca426593df2d59ba
|
refs/heads/master
| 2020-06-20T09:36:28.505378
| 2019-12-03T18:46:22
| 2019-12-03T18:46:22
| 197,079,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
while True:
valores = list(map(int, input().split()))
x = valores[0]
y = valores[1]
if x > y:
print("Decrescente")
elif x < y:
print("Crescente")
else:
break
|
[
"noreply@github.com"
] |
geo-wurth.noreply@github.com
|
57e7ab881b00e6f02b4f292a19abd5fca6d0ffd4
|
095d6091d9c7621488cebdf5e28e28da92367984
|
/Basico/servidor/venv/bin/pip2.7
|
165c0c302dfc0c84fe4717366411a3fc46b212bf
|
[] |
no_license
|
jAndresMub/pythonBasics
|
28e126c57c9d499244df3dfe67c5237830b67a49
|
588376a818bf7f547c1e4a6e602c417f15de8846
|
refs/heads/master
| 2023-05-11T11:18:19.549905
| 2019-08-09T04:23:34
| 2019-08-09T04:23:34
| 199,773,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
7
|
#!/home/andres/Escritorio/cursoPython/Basico/servidor/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jandresmub@gmail.com"
] |
jandresmub@gmail.com
|
428e801f8c2d57a8b868fdb5ecf1b3bd1b3838a2
|
13a81afcf92ba1e357ab358566328fd46d162033
|
/c1.py
|
1355e5fb0dd5808cf711538258121a71fb4ec592
|
[] |
no_license
|
kiodyd/code_game
|
0b5f910c9b9157a16075b10b9a731189e395cf1c
|
c49578ee759d95433efffaf899cf2675e50baa99
|
refs/heads/master
| 2020-04-19T05:36:08.557628
| 2019-01-28T16:33:20
| 2019-01-28T16:33:20
| 167,992,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
# coding: utf-8
# In the language you're most comfortable with
# Let's take on the challenge!!
import re
input_lines = input()
fault = False
if re.match(r'^(?=.*\d)(?=.*[a-zA-Z])[a-zA-Z\d]{6,30}$',input_lines):
for i in range(0,len(input_lines)-2):
if input_lines[i] == input_lines[i+1] == input_lines[i+2]:
fault = True
if fault:
print("Invalid")
else:
print("Valid")
else:
print("Invalid")
|
[
"noreply@github.com"
] |
kiodyd.noreply@github.com
|
973b06f88d37b9f6d8a80cad2fb7b2ef85aa6588
|
ed89014c39c98e63d92e6d23b955d4f25d06fe9a
|
/plugin/templates.py
|
79dd10199a751fb781234745910ae513ec248485
|
[
"MIT"
] |
permissive
|
Zeroto521/Flow.Launcher.Plugin.HowBigYourNumber
|
8cff8f84d694e5eee535bc5a36ab7f7055967fcb
|
b7717e7d7dd52a0daef434c05ba3a81d763a17b1
|
refs/heads/master
| 2023-04-15T09:50:12.467069
| 2021-04-08T13:17:56
| 2021-04-08T13:17:56
| 341,119,335
| 0
| 0
|
MIT
| 2021-04-06T12:14:35
| 2021-02-22T07:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 249
|
py
|
# -*- coding: utf-8 -*-
from plugin.settings import ICON_PATH
RESULT_TEMPLATE = {
'Title': '',
'SubTitle': '',
'IcoPath': ICON_PATH,
}
ACTION_TEMPLATE = {
'JsonRPCAction': {
'method': '',
'parameters': '',
}
}
|
[
"Zeroto521@gmail.com"
] |
Zeroto521@gmail.com
|
da1d6f488fe1ab0f475cffb11a914492829aeb27
|
c5bfde6d6bb62b52af236a716b793a87d6c21ec7
|
/Day_03/Day_03.py
|
a5b287db9fa4e924906110cba149609ea1187f82
|
[] |
no_license
|
yorickcleerbout/Advent-Of-Code-2020
|
f67a3caac64da950f705706639f72ec0b20e3fac
|
0fcd01b3f34ff39df006f2c523133478337ee2cb
|
refs/heads/main
| 2023-02-05T00:50:05.473697
| 2020-12-25T15:55:47
| 2020-12-25T15:55:47
| 317,986,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
with open("Day_03/input.txt", 'r') as f:
data = f.readlines()
def solution(right, down):
trees = 0
x_pos = 0
for i in range(0, len(data), down):
line = data[i]
if line[x_pos % 31] == "#":
trees += 1
x_pos += right
return trees
print("How many trees would you encounter when using slope of '3 right' and '1 down' (Part 1):", solution(3, 1))
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
part_two = 1
for r, d in slopes:
part_two *= solution(r, d)
print("How many trees would you encounter when you multiply the number of trees on each slope '1r 1d', '3r 1d', '5r 1d', '7r 1d' and '1r 2d' (Part 2):", part_two)
|
[
"noreply@github.com"
] |
yorickcleerbout.noreply@github.com
|
79a1e60bd6d16d0e7aa3b8efff8ff7f5719e4c3b
|
082e6342ca6872420b2960c64cba484f3852351b
|
/corretora/urls.py
|
46276954d8b46ce8c61ed5da47ac4880c59dc02c
|
[
"MIT"
] |
permissive
|
reihtw/stocktracker
|
54c7c1dc066d73b9b57836d1ac3ea259d8715632
|
fd83862ce47dae1615c445a1bed1a39d3a769e80
|
refs/heads/master
| 2022-11-23T12:25:56.876883
| 2020-07-25T19:17:55
| 2020-07-25T19:17:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.visualizar_corretoras, name='visualizar_corretoras'),
path('cadastrar', views.cadastrar_corretora, name='cadastrar_corretora'),
path('editar/<str:nome>', views.editar_corretora, name='editar_corretora'),
path('atualizar', views.atualizar_corretora, name='atualizar_corretora'),
path('excluir/<int:corretora_id>', views.excluir_corretora, name='excluir_corretora'),
]
|
[
"rodrigolins@protonmail.com"
] |
rodrigolins@protonmail.com
|
8b30590b3b5fb20f27fa5c80c230b6bfbf438e72
|
ca23ce5afc3d66b89eac610a0d89493a4f6a85f3
|
/engine.py
|
1ed3bd077c58ee4df493bc6b907f4051334261c4
|
[] |
no_license
|
K1ngDedede/Unglaublich
|
788f87e9cd87ee427c7771cbe5e38ffa7b0742d8
|
be9352c282eece7910c441cc5145bd834397b9b8
|
refs/heads/master
| 2023-02-15T16:15:18.658886
| 2021-01-10T03:28:55
| 2021-01-10T03:28:55
| 271,098,910
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
from os import path
import pygame as pg
import sys
from settings import *
from sprites import *
class Map:
def __init__(self, filename, screen):
#Matrix of tiles
self.nig = []
self.all_sprites = pg.sprite.Group()
self.tiles = pg.sprite.Group()
self.collidable_tiles = pg.sprite.Group()
self.action_tiles = pg.sprite.Group()
self.filename = "worlds/"+filename
self.screen = screen
def load(self):
world_file = open(self.filename, "r")
row = 0
for line in world_file:
col = 0
line = line.strip()
self.nig.append([])
row_tiles = line.split(",")
for tile in row_tiles:
tile = tile.split(":")
tile_filename = tile[0]
adyacent_filename = tile[1]
tile_poggers = int(tile[2])
tile_x = col * TILESIZE
tile_y = row * TILESIZE
self.nig[row].append(Tile(tile_filename, adyacent_filename, tile_poggers, tile_x, tile_y, self))
col+=1
row+=1
self.height = len(self.nig)
self.width = len(self.nig[0])
self.height_px = self.height * TILESIZE
self.width_px = self.width * TILESIZE
world_file.close()
self.camera = Camera(self.width_px, self.height_px)
def draw(self):
for tile in self.tiles:
self.screen.blit(tile.image, self.camera.apply(tile))
class Tile(pg.sprite.Sprite):
def __init__(self, image_filename, adyacent_map_filename, poggers, x, y, map):
if not poggers:
self.groups = map.tiles, map.all_sprites, map.collidable_tiles
else:
self.groups = map.tiles, map.all_sprites
pg.sprite.Sprite.__init__(self, self.groups)
self.map = map
self.x_spawn = ""
self.y_spawn = ""
self.image_filename = "imgs/"+image_filename
if adyacent_map_filename != "":
self.adyacent_map_filename = adyacent_map_filename.split("-")[0]
self.x_spawn = int(adyacent_map_filename.split("-")[1])
self.y_spawn = int(adyacent_map_filename.split("-")[2])
else:
self.adyacent_map_filename = ""
#poggers indicates whether a tile is walkable or not
self.poggers = poggers
self.x = x
self.y = y
self.image = pg.image.load(self.image_filename).convert()
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class Camera:
def __init__(self, width, height):
self.camera = pg.Rect(0, 0, width, height)
self.width = width
self.height = height
def apply(self, entity):
return entity.rect.move(self.camera.topleft)
def update(self, target):
x = -target.rect.x + int(WIDTH / 2)
y = -target.rect.y + int(HEIGHT / 2)
# limit scrolling to map size
x = min(0, x) # left
y = min(0, y) # top
x = max(-(self.width - WIDTH), x) # right
y = max(-(self.height - HEIGHT), y) # bottom
self.camera = pg.Rect(x, y, self.width, self.height)
class Party:
def __init__(self, sprites, screen):
self.size = len(sprites)
self.sprites = sprites
self.leader = self.sprites[0]
self.screen = screen
def update(self):
self.leader.update()
for i in range(1, self.size):
self.sprites[i].current_direction = self.sprites[i-1].current_direction
self.sprites[i].update()
self.sprites[i].x = self.sprites[i - 1].past_x
self.sprites[i].y = self.sprites[i - 1].past_y
self.leader.map.camera.update(self.leader)
self.maurisio()
def draw(self):
self.leader.map.draw()
for sprite in self.sprites:
self.screen.blit(sprite.image, sprite.map.camera.apply(sprite))
#Verifies if there is a map transition and if there is, the map changes accordingly
def maurisio(self):
currentTile = self.leader.get_current_tile()
if currentTile.adyacent_map_filename != "":
#load map
new_map = Map(currentTile.adyacent_map_filename, self.screen)
for sprite in self.sprites:
sprite.map = new_map
sprite.groups = sprite.map.all_sprites
self.leader.map.load()
self.leader.x = currentTile.x_spawn * 64
self.leader.y = currentTile.y_spawn * 64
self.leader.vel = vec(0, 0)
def get_opposite_direction(direction: int)->str:
return directions[(direction+2)%4]
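# A sketch of the world-file format implied by Map.load and Tile.__init__ above
# (file and tile names are made up): each line is a comma-separated row of tiles,
# each tile "image:adjacent:poggers", where adjacent is either empty or
# "mapname-xspawn-yspawn" and poggers is 1 for walkable tiles, e.g.
#   grass.png::1,door.png:house.txt-3-2:1,wall.png::0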
|
[
"af.daza@uniandes.edu.co"
] |
af.daza@uniandes.edu.co
|
7558ef959f61600b932cafd5903c0c94216c4bc3
|
daf1df62c33739637c6c120e499d3e87340b243a
|
/TSClusteringLayer.py
|
4e8e18ebbbd728fdd665364129dac2e6decbac5c
|
[] |
no_license
|
joseph8923/DeepTemporalClustering
|
914ef38231ff15b0df2feac188152e6263f696a3
|
79eb2c1dcc4ac5d9bc909ed1e449815a27ff2e4d
|
refs/heads/master
| 2021-02-14T02:58:56.084772
| 2019-07-29T15:05:54
| 2019-07-29T15:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,718
|
py
|
"""
Implementation of the Deep Temporal Clustering model
Time Series Clustering layer
@author Florent Forest (FlorentF9)
"""
from keras.engine.topology import Layer, InputSpec
import keras.backend as K
class TSClusteringLayer(Layer):
"""
Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the
sample belonging to each cluster. The probability is calculated with student's t-distribution.
# Arguments
n_clusters: number of clusters.
        weights: list of Numpy arrays with shape `(n_clusters, timesteps, n_features)` which represents the initial cluster centers.
        alpha: parameter in Student's t-distribution. Defaults to 1.0.
        dist_metric: distance metric between sequences used in similarity kernel ('eucl', 'cid', 'cor' or 'acf').
# Input shape
3D tensor with shape: `(n_samples, timesteps, n_features)`.
# Output shape
2D tensor with shape: `(n_samples, n_clusters)`.
"""
def __init__(self, n_clusters, weights=None, alpha=1.0, dist_metric='eucl', **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(TSClusteringLayer, self).__init__(**kwargs)
self.n_clusters = n_clusters
self.alpha = alpha
self.dist_metric = dist_metric
self.initial_weights = weights
self.input_spec = InputSpec(ndim=3)
self.clusters = None
self.built = False
def build(self, input_shape):
assert len(input_shape) == 3
input_dim = input_shape[2]
input_steps = input_shape[1]
self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_steps, input_dim))
self.clusters = self.add_weight((self.n_clusters, input_steps, input_dim), initializer='glorot_uniform', name='cluster_centers')
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, inputs, **kwargs):
"""
Student t-distribution kernel, probability of assigning encoded sequence i to cluster k.
q_{ik} = (1 + dist(z_i, m_k)^2)^{-1} / normalization.
Arguments:
inputs: encoded input sequences, shape=(n_samples, timesteps, n_features)
Return:
q: soft labels for each sample. shape=(n_samples, n_clusters)
"""
if self.dist_metric == 'eucl':
distance = K.sum(K.sqrt(K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2)), axis=-1)
elif self.dist_metric == 'cid':
ce_x = K.sqrt(K.sum(K.square(inputs[:, 1:, :] - inputs[:, :-1, :]), axis=1)) # shape (n_samples, n_features)
ce_w = K.sqrt(K.sum(K.square(self.clusters[:, 1:, :] - self.clusters[:, :-1, :]), axis=1)) # shape (n_clusters, n_features)
ce = K.maximum(K.expand_dims(ce_x, axis=1), ce_w) / K.minimum(K.expand_dims(ce_x, axis=1), ce_w) # shape (n_samples, n_clusters, n_features)
ed = K.sqrt(K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2)) # shape (n_samples, n_clusters, n_features)
distance = K.sum(ed * ce, axis=-1) # shape (n_samples, n_clusters)
elif self.dist_metric == 'cor':
inputs_norm = (inputs - K.expand_dims(K.mean(inputs, axis=1), axis=1)) / K.expand_dims(K.std(inputs, axis=1), axis=1) # shape (n_samples, timesteps, n_features)
clusters_norm = (self.clusters - K.expand_dims(K.mean(self.clusters, axis=1), axis=1)) / K.expand_dims(K.std(self.clusters, axis=1), axis=1) # shape (n_clusters, timesteps, n_features)
pcc = K.mean(K.expand_dims(inputs_norm, axis=1) * clusters_norm, axis=2) # Pearson correlation coefficients
distance = K.sum(K.sqrt(2.0 * (1.0 - pcc)), axis=-1) # correlation-based similarities, shape (n_samples, n_clusters)
elif self.dist_metric == 'acf':
raise NotImplementedError
else:
raise ValueError('Available distances are eucl, cid, cor and acf!')
q = 1.0 / (1.0 + K.square(distance) / self.alpha)
q **= (self.alpha + 1.0) / 2.0
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
return q
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 3
return input_shape[0], self.n_clusters
def get_config(self):
config = {'n_clusters': self.n_clusters, 'dist_metric': self.dist_metric}
base_config = super(TSClusteringLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
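# A minimal usage sketch (timesteps, n_features and n_clusters are placeholders):
# the layer maps encoded sequences of shape (n_samples, timesteps, n_features)
# to soft cluster assignments of shape (n_samples, n_clusters), e.g.
#   from keras.layers import Input
#   from keras.models import Model
#   z = Input(shape=(timesteps, n_features))   # encoder output
#   q = TSClusteringLayer(n_clusters=10, dist_metric='cid', name='clustering')(z)
#   model = Model(inputs=z, outputs=q)          # rows of q sum to 1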
|
[
"florent.forest9@gmail.com"
] |
florent.forest9@gmail.com
|
f26e6ea16424c76d1d01b02bc4bee4a3dcb98b47
|
de7c455d780be5e1d637b1728522e854fbacc99c
|
/hello.py
|
467d319cf903ce886d03e11f2349b28b0c4c009e
|
[] |
no_license
|
aniruddhapalekar/first
|
68653c1f270de4d26ee8c28e542c3730e80010d3
|
76da9fe8fb3879ff8855c46f1cf4114ae26d2150
|
refs/heads/master
| 2022-11-30T20:16:14.353538
| 2020-08-07T06:33:03
| 2020-08-07T06:33:03
| 285,497,064
| 0
| 0
| null | 2020-08-07T06:33:05
| 2020-08-06T06:55:48
|
Python
|
UTF-8
|
Python
| false
| false
| 47
|
py
|
print("hiii")
print("hello")
print("welcome")
|
[
"noreply@github.com"
] |
aniruddhapalekar.noreply@github.com
|
66e8990305011b7f2ab0877a826fec6744e91be4
|
fc6d3281164303abc89dde8f1554622e233ac906
|
/programs/migrations/0031_auto_20191206_1459.py
|
09c40c37210572089bc9e41ae24e842c8b55b250
|
[] |
no_license
|
UCF/Search-Service-Django
|
46b65fb2ecfa0721f24e2785991b0976fe0f8353
|
5f2efbff2aae9579ff1a78216ed948f158daa4e4
|
refs/heads/master
| 2023-09-02T07:58:16.658821
| 2023-08-25T15:17:40
| 2023-08-25T15:17:40
| 128,795,665
| 0
| 0
| null | 2023-08-04T15:08:48
| 2018-04-09T15:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-12-06 14:59
from django.db import migrations, models
import django.db.models.deletion
from programs.models import ProgramOutcomeStat
class Migration(migrations.Migration):
dependencies = [
('programs', '0030_auto_20191205_1727'),
]
operations = [
migrations.RunSQL(
'TRUNCATE TABLE `programs_programoutcomestat`',
'TRUNCATE TABLE `programs_programoutcomestat`'
),
migrations.RemoveField(
model_name='programoutcomestat',
name='program',
),
migrations.AddField(
model_name='program',
name='outcomes',
field=models.ManyToManyField(related_name='programs', to='programs.ProgramOutcomeStat'),
),
migrations.AddField(
model_name='programoutcomestat',
name='cip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='outcomes', to='programs.CIP'),
preserve_default=False,
),
]
|
[
"jimbarnesdeveloper@gmail.com"
] |
jimbarnesdeveloper@gmail.com
|
2ddaa2d8860b7299c64a636af17c11fbc5ebfa46
|
c04acaa6ee9c6a7c365e217bc78039fa9c77833e
|
/cuzquena/urls.py
|
785b7ed1280475deaaa389f28b11b64b4deafb40
|
[] |
no_license
|
danielhuamani/django-la-cuzquena
|
0386800d640b224d94b0fac2d83f999b60d7da85
|
a6f4aaf44775b27328d073a65f1d0f50eff51fad
|
refs/heads/master
| 2020-12-05T04:51:01.077860
| 2016-09-17T13:56:58
| 2016-09-17T13:56:58
| 67,900,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
"""cconline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from filebrowser.sites import site
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^admin/filebrowser/', include(site.urls)),
url(r'', include('my_apps.web.urls', namespace='web')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"danielhuamani15@gmail.com"
] |
danielhuamani15@gmail.com
|
8349477f2dc38370be2a6048b4ca40ce366e75e2
|
f3a4b4c7c39d2ed2959b410367e8abc66493772e
|
/laplacianFlux/r2_1_0/__init__.py
|
c64bf8efa3593dcacfa71e4abd9edc4f9e87754b
|
[] |
no_license
|
asimurzin/laplacianFlux
|
6800bc5aba29968f7784ce91a5a1503318fad246
|
83977d5ce967b87ed0203a143d19d88c9a5d7ed7
|
refs/heads/master
| 2020-03-29T20:22:44.143734
| 2012-07-01T19:36:36
| 2012-07-01T19:36:36
| 1,613,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,376
|
py
|
#!/usr/bin/env python
#--------------------------------------------------------------------------------------
## pythonFlu - Python wrapping for OpenFOAM C++ API
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Alexey PETROV
##
#----------------------------------------------------------------------------
from Foam import ref, man
#----------------------------------------------------------------------------
def _createFields( runTime, mesh ):
ref.ext_Info() << "Reading field T\n" << ref.nl
T = man.volScalarField( man.IOobject( ref.word( "T" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.AUTO_WRITE ),
mesh )
ref.ext_Info() << "Reading transportProperties\n" << ref.nl
transportProperties = man.IOdictionary( man.IOobject( ref.word( "transportProperties" ),
ref.fileName( runTime.constant() ),
mesh,
ref.IOobject.MUST_READ,
ref.IOobject.NO_WRITE ) )
ref.ext_Info() << "Reading diffusivity DT\n" << ref.nl
DT = ref.dimensionedScalar( transportProperties.lookup( ref.word( "DT" ) ) )
return T, transportProperties, DT
#--------------------------------------------------------------------------------------
def write( runTime, mesh, T ):
if runTime.outputTime():
gradT = ref.fvc.grad(T)
gradTx = ref.volScalarField( ref.IOobject( ref.word( "gradTx" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.X ) )
gradTy = ref.volScalarField( ref.IOobject( ref.word( "gradTy" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Y ) )
gradTz = ref.volScalarField( ref.IOobject( ref.word( "gradTz" ),
ref.fileName( runTime.timeName() ),
mesh,
ref.IOobject.NO_READ,
ref.IOobject.AUTO_WRITE ),
gradT.component( ref.vector.Z ) )
runTime.write()
pass
#--------------------------------------------------------------------------------------
def main_standalone( argc, argv ):
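    # Case driver: read T and DT, then solve ddt(T) - laplacian(DT, T) = 0 each time step,
    # writing the gradient components of T at output times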
args = ref.setRootCase( argc, argv )
runTime = man.createTime( args )
mesh = man.createMesh( runTime )
T, transportProperties, DT = _createFields( runTime, mesh )
simple = man.simpleControl( mesh )
ref.ext_Info() << "\nCalculating temperature distribution\n" << ref.nl
while runTime.loop() :
ref.ext_Info() << "Time = " << runTime.timeName() << ref.nl << ref.nl
while simple.correctNonOrthogonal():
ref.solve( ref.fvm.ddt( T ) - ref.fvm.laplacian( DT, T ) )
pass
write( runTime, mesh, T )
ref.ext_Info() << "ExecutionTime = " << runTime.elapsedCpuTime() << " s" << \
" ClockTime = " << runTime.elapsedClockTime() << " s" << ref.nl << ref.nl
pass
ref.ext_Info() << "End\n" << ref.nl
import os
return os.EX_OK
#--------------------------------------------------------------------------------------
import sys, os
from Foam import FOAM_VERSION
if FOAM_VERSION( ">=", "020100" ):
if __name__ == "__main__" :
argv = sys.argv
os._exit( main_standalone( len( argv ), argv ) )
pass
else:
from Foam.OpenFOAM import ext_Info
ref.ext_Info()<< "\nTo use this solver, It is necessary to SWIG OpenFoam2.1.0 or higher \n "
pass
#--------------------------------------------------------------------------------------
|
[
"asimurzin@gmail.com"
] |
asimurzin@gmail.com
|
5d745f9fd64c2b44a2dd7a0b7c45e43d247a4cc2
|
1c0509a06cec726735048f00f63d2529f5e43ce6
|
/code_supermarkets_france/analysis/analysis_qlmc_prices_2007_2012/stats_des/price_frequencies_by_chain.py
|
b951142551efcfccf3721c6c7e0bf28f2e1fe55d
|
[] |
no_license
|
etiennecha/master_code
|
e99c62e93aa052a66d4cdd3f3e3aa25a3aec4880
|
48821f6c854a1c6aa05cf81b653b3b757212b6f8
|
refs/heads/master
| 2021-01-23T14:35:45.904595
| 2018-03-11T18:57:38
| 2018-03-11T18:57:38
| 16,312,906
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,391
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import add_to_path
from add_to_path import path_data
from functions_generic_qlmc import *
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
pd.set_option('float_format', '{:,.2f}'.format)
path_built_csv = os.path.join(path_data, 'data_supermarkets', 'data_built',
'data_qlmc_2007_2012', 'data_csv')
# #######################
# LOAD DATA
# #######################
# LOAD DF QLMC
df_qlmc = pd.read_csv(os.path.join(path_built_csv, 'df_qlmc.csv'),
parse_dates = ['date'],
dayfirst = True,
infer_datetime_format = True,
encoding = 'utf-8')
# Fix Store_Chain for prelim stats des
ls_sc_drop = ['CARREFOUR CITY',
'CARREFOUR CONTACT',
'CARREFOUR PLANET',
'GEANT DISCOUNT',
'HYPER CHAMPION',
'INTERMARCHE HYPER',
'LECLERC EXPRESS',
'MARCHE U',
'U EXPRESS']
df_qlmc = df_qlmc[~df_qlmc['store_chain'].isin(ls_sc_drop)]
ls_sc_replace = [('CENTRE E. LECLERC', 'LECLERC'),
('CENTRE LECLERC', 'LECLERC'),
('E. LECLERC', 'LECLERC'),
('E.LECLERC', 'LECLERC'),
('SYSTEME U', 'SUPER U'),
('GEANT', 'GEANT CASINO'),
('CHAMPION', 'CARREFOUR MARKET'),
('INTERMARCHE SUPER', 'INTERMARCHE'),
('HYPER U', 'SUPER U')]
for sc_old, sc_new in ls_sc_replace:
df_qlmc.loc[df_qlmc['store_chain'] == sc_old,
'store_chain'] = sc_new
# #############################################
# PRICE DISTRIBUTION PER CHAIN FOR TOP PRODUCTS
# #############################################
PD = PriceDispersion()
ls_prod_cols = ['section', 'family', 'product']
store_chain = 'CARREFOUR' # 'CENTRE E.LECLERC'
nb_obs_min = 20 # Product must be observed at X stores at least
pct_min = 0.33
ls_loop_scs = ['AUCHAN',
'CARREFOUR',
'CARREFOUR MARKET',
'GEANT CASINO', # no CASINO
'CORA',
'INTERMARCHE',
'LECLERC',
'SUPER U']
ls_dict_df_desc = []
ls_dict_df_chain_product_stats = []
ls_dict_df_chain_store_desc = []
for per in range(13):
df_qlmc_per = df_qlmc[df_qlmc['period'] == per]
dict_ls_se_desc = {'nb_stores_by_prod' : [],
'freq_prods' : [],
'nb_prods_by_store' : [],
'no_ref' : [],
'freq_stores' : []}
dict_df_chain_product_stats = {}
dict_df_chain_store_desc = {}
print()
print(u'-'*80)
print('Stats on chain prices for period:', per)
for store_chain in ls_loop_scs:
print()
print(u'-'*60)
print(store_chain)
# Build df with product most common prices
df_sub = df_qlmc_per[df_qlmc_per['store_chain'] == store_chain]
# Make sure no duplicates at store level
ls_sub_dup_cols = ls_prod_cols + ['id_lsa']
df_sub_dup = df_sub[(df_sub.duplicated(ls_sub_dup_cols, take_last = True)) |\
(df_sub.duplicated(ls_sub_dup_cols, take_last = False))]
df_sub = df_sub.drop_duplicates(ls_sub_dup_cols)
# Build df with product most common prices
df_sub_products = df_sub[ls_prod_cols + ['price']]\
.groupby(ls_prod_cols)\
.agg([len,
'mean',
PD.kurtosis,
PD.skew,
PD.price_1,
PD.price_1_fq,
PD.price_2,
PD.price_2_fq])['price']
df_sub_products.columns = [col.replace('PD.', '') for col in df_sub_products.columns]
df_sub_products.rename(columns = {'len': 'nb_obs'}, inplace = True)
df_sub_products['price_12_fq'] =\
df_sub_products[['price_1_fq', 'price_2_fq']].sum(axis = 1)
# Pbm with kurtosis and skew: div by 0 (only one price)
# fix (a priori highly degenerate hence not normal)
df_sub_products.loc[df_sub_products['kurtosis'].abs() >= 1000,
'kurtosis'] = np.nan
df_sub_products.loc[df_sub_products['skew'].abs() >= 1000,
'skew'] = np.nan
df_sub_products.reset_index(drop = False, inplace = True)
# Keep only products observed at enough stores
df_enough_obs = df_sub_products[(df_sub_products['nb_obs'] >= nb_obs_min)]
df_ref_price = df_sub_products[(df_sub_products['nb_obs'] >= nb_obs_min) &\
(df_sub_products['price_1_fq'] >= pct_min)]
# Save chain product stats
dict_df_chain_product_stats[store_chain] = df_enough_obs
# Define ref prices and get stats from store viewpoint
if len(df_enough_obs) >= 100:
print()
print(u'Overview at product level')
print(df_enough_obs.describe().to_string())
df_enough_obs_desc = df_enough_obs.describe()
dict_ls_se_desc['nb_stores_by_prod'].append(df_enough_obs_desc['nb_obs'])
dict_ls_se_desc['freq_prods'].append(df_enough_obs_desc['price_1_fq'])
print()
print(u'Nb prod w/ >= {:d} obs: {:d}'.format(\
nb_obs_min,
len(df_enough_obs)))
print(u'Nb prod w/ >= {:d} obs and ref price (33%+): {:d} ({:.0f}%)'.format(\
nb_obs_min,
len(df_ref_price),
len(df_ref_price) / float(len(df_enough_obs)) * 100))
df_sub = pd.merge(df_sub,
df_enough_obs,
on = ls_prod_cols,
how = 'left')
# Build df stores accounting for match with ref prices
df_sub['ref_price'] = 'diff'
df_sub.loc[df_sub['price'] == df_sub['price_1'],
'ref_price'] = 'price_1'
df_sub.loc[(df_sub['price'] != df_sub['price_1']) &\
(df_sub['price'] == df_sub['price_2']),
'ref_price'] = 'price_2'
df_sub.loc[(df_sub['price_1_fq'] <= pct_min),
'ref_price'] = 'no_ref'
df_ref = pd.pivot_table(data = df_sub[['store', 'ref_price']],
index = 'store',
columns = 'ref_price',
aggfunc = len,
fill_value = 0).astype(int)
try:
df_ref_pct = df_ref.apply(lambda x: x / x.sum(), axis = 1)
df_ref_pct['nb_obs'] = df_ref.sum(axis = 1).astype(int)
if 'no_ref' not in df_ref_pct.columns:
df_ref_pct['no_ref'] = 0
            # keep only stores with enough products
df_ref_pct = df_ref_pct[df_ref_pct['nb_obs'] >= 100]
print()
print(u'Overview at store level:')
print(df_ref_pct[['nb_obs',
'no_ref',
'diff',
'price_1',
'price_2']].describe())
df_ref_pct_desc = df_ref_pct.describe()
dict_ls_se_desc['nb_prods_by_store'].append(df_ref_pct_desc['nb_obs'])
dict_ls_se_desc['no_ref'].append(df_ref_pct_desc['no_ref'])
dict_ls_se_desc['freq_stores'].append(df_ref_pct_desc['price_1'])
# also save store stats for each chain
df_ref_pct.sort('price_1', ascending = False, inplace = True)
dict_df_chain_store_desc[store_chain] = df_ref_pct
except:
print()
print(u'Not enough data to display store ref prices')
for col in ['nb_prods_by_store', 'no_ref', 'freq_stores']:
dict_ls_se_desc[col].append(None)
else:
for col in ['nb_stores_by_prod', 'freq_prods',
'nb_prods_by_store', 'no_ref', 'freq_stores']:
dict_ls_se_desc[col].append(None)
dict_df_desc = {k: pd.concat(v, axis = 1, keys = ls_loop_scs)\
for k, v in dict_ls_se_desc.items()}
dict_ens_alt_replace = {'CENTRE E.LECLERC' : 'LECLERC',
'INTERMARCHE SUPER' : 'ITM SUP',
'INTERMARCHE HYPER' : 'ITM HYP',
'CARREFOUR MARKET' : 'CAR. MARKET',
'SIMPLY MARKET' : 'SIMPLY'}
dict_df_desc = {k: v.rename(columns = dict_ens_alt_replace)\
for k,v in dict_df_desc.items()}
ls_dict_df_desc.append(dict_df_desc)
ls_dict_df_chain_product_stats.append(dict_df_chain_product_stats)
ls_dict_df_chain_store_desc.append(dict_df_chain_store_desc)
ls_loop_scs[2] = 'CAR. MARKET' # ad hoc fix: keep in sync with the renamed chain labels
# Freq prods across period for one chain
dict_su_chains = {}
for var in ['freq_prods', 'freq_stores']:
dict_su_chains[var] = {}
for store_chain in ls_loop_scs:
ls_se_temp = []
for per, dict_df_desc_per in enumerate(ls_dict_df_desc):
ls_se_temp.append(dict_df_desc_per[var].get(store_chain))
df_chain_temp = pd.concat(ls_se_temp,
axis = 1,
keys = range(13))
dict_su_chains[var][store_chain] = df_chain_temp
for var in ['freq_prods', 'freq_stores']:
print()
print(var)
for k,v in dict_su_chains[var].items():
print()
print(k)
print(v.to_string())
|
[
"echamayou@gmail.com"
] |
echamayou@gmail.com
|
5831ab56e5392aefd10441aafc00ae1ee126998f
|
0dff864a5ba850fbabde5dba0e84a13babf5681f
|
/202004/20200407_3.py
|
e1d7b3a9d6172cb16a68c87eb557b2f34ac748df
|
[] |
no_license
|
lvwencheng95/PythonProject
|
3c1d12bd95d55c2c7b9b8d6f6c0f60e5183df3e4
|
c5ce9a8efb4c8e612c87618fd62a714e73b0997d
|
refs/heads/master
| 2020-06-18T08:01:00.471654
| 2020-04-07T08:22:56
| 2020-04-07T08:22:56
| 196,224,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/4/7 15:26
# @Author : 52595
# @File : 20200407_3.py
# @Python Version : 3.7.4
# @Software: PyCharm
import matplotlib.pyplot as plt
# names = range(8, 21)
# names = [str(x) for x in list(names)]
names = ['201907', '201908', '201909', '201910', '201911', '201912', '202001', '202002', '202003']
x = range(len(names))
# Food & dining spending
y_food_drink = [350.68, 1124.1, 560.24, 355.8, 1011.14, 886.57, 611.39, 1000, 192.3]
# Daily consumption spending
y_daily_consumption = [772.38, 738.4, 215.53, 1199.85, 509.67, 1185.64, 50.5, 72.52, 221.89]
plt.rcParams['font.sans-serif'] = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False
plt.plot(x, y_food_drink, marker='o', mec='r', mfc='w', label='餐饮美食')
plt.plot(x, y_daily_consumption, marker='*', ms=10, label='日常消费')
plt.legend() # show the legend
plt.xticks(x, names, rotation=1)
plt.margins(0)
plt.subplots_adjust(bottom=0.10)
plt.title("支出情况一览表", fontsize=24, color='black') # line-chart title
plt.xlabel('年月') # X-axis label
plt.ylabel("金额") # Y-axis label
plt.show()
|
[
"525955465@qq.com"
] |
525955465@qq.com
|
6897c6c368b85b0c4c7f74aea9a8d653a4b44983
|
1895135986a371820c53f75057be6e807a1eb4a4
|
/com/qa/selenium/Screen.py
|
53b1303dff26d3ce89cda1abb25af7704e07191d
|
[] |
no_license
|
zakiya113/Python_Selenium1
|
f2b81892ca833daf1ce42125fd7ce7cc457a244a
|
3d19ccab63101dba00b405de02b8e1d682894dbe
|
refs/heads/master
| 2020-04-30T06:34:36.535767
| 2019-03-20T04:55:39
| 2019-03-20T04:55:39
| 176,656,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
from selenium import webdriver
driver = webdriver.Chrome("C:\\Users\\minds9\\PycharmProjects\\Python_Selenium\\drivers\\chromedriver.exe")
driver.set_page_load_timeout(30)
driver.get("http://www.facebook.com")
driver.maximize_window()
driver.implicitly_wait(20)
driver.get_screenshot_as_file(".\\Screenshots\Facebook.png")
driver.find_element_by_id("email").send_keys("Selenium Webdriver")
driver.find_element_by_name("pass").send_keys("Python")
driver.find_element_by_id("loginbutton").click()
driver.get_screenshot_as_file(".\\Screenshots\\Facebook1.png")
driver.quit()
|
[
"zakiya113dodo@gmail.com"
] |
zakiya113dodo@gmail.com
|
6e0518b445b524e1d5be88b9cea4ee3fc99ba89c
|
21a351e7c8f63cbe5e4745474973673c3b6fd7de
|
/vast/environments/battle_rendering.py
|
9dc4a86fdaedc1985bcba880144fad50ed0c92a0
|
[
"MIT"
] |
permissive
|
yuanleirl/scalable-marl
|
f4b0dabb8dd669a3bf5bcd3d3e87878a369b3c0f
|
4fc89472807c9f96e61a152532fd8f5c0d566a55
|
refs/heads/main
| 2023-08-31T05:00:34.642822
| 2021-10-09T23:51:26
| 2021-10-09T23:51:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
import pygame
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
class BattleViewer:
def __init__(self, width, height, cell_size=20, fps=30):
pygame.init()
self.cell_size = cell_size
self.width = cell_size*height
self.height = cell_size*width
self.clock = pygame.time.Clock()
self.fps = fps
pygame.display.set_caption("Battle Environment")
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.event.set_blocked(pygame.MOUSEMOTION) # we do not need mouse movement events
def draw_state(self, env):
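        # Draw the two channels of the flattened global state (one per team) as red and blue cells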
self.screen.fill(WHITE)
global_state_array = env.global_state().view(env.global_state_space.shape).detach().numpy()
self.draw_matrix(global_state_array[0], RED)
self.draw_matrix(global_state_array[1], BLUE)
pygame.display.flip()
self.clock.tick(self.fps)
return self.check_for_interrupt()
def draw_matrix(self, matrix, color):
for y, row in enumerate(matrix):
for x, val in enumerate(row):
if val:
pygame.draw.rect(
self.screen,
color,
pygame.Rect(
y * self.cell_size,
x * self.cell_size,
self.cell_size,
self.cell_size),
0)
def check_for_interrupt(self):
key_state = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT or key_state[pygame.K_ESCAPE]:
return True
return False
def close(self):
pygame.quit()
def render(env, viewer):
if viewer is None:
viewer = BattleViewer(env.width, env.height)
viewer.draw_state(env)
return viewer
|
[
"thomy.phan@ifi.lmu.de"
] |
thomy.phan@ifi.lmu.de
|
0f58171011c50cb9886c0f3b4139b15515c89952
|
f7669db87becadb8d995250878cab30a98bfd08d
|
/airhead/config.py
|
9f5249ab996cf0157949d56da08a651040b0e3b1
|
[] |
no_license
|
rand338/airhead
|
62eadfc65b08361226059c592cd3998923f483b3
|
deb81bcb1bd21ac1a422e6a3be975e1eb4b8d41a
|
refs/heads/master
| 2020-03-21T00:24:44.658982
| 2017-09-16T11:48:30
| 2017-09-16T11:48:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
import os.path
from configparser import ConfigParser
CONFIG_PATHS = ['.', 'conf/', '~/.config/airhead',
'/usr/local/etc/airhead', '/etc/airhead']
def get_config():
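    # Return a ConfigParser for the first airhead.ini found in CONFIG_PATHS;
    # the for-else raises if no config file exists in any of the paths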
for p in CONFIG_PATHS:
path = os.path.join(p, 'airhead.ini')
if os.path.isfile(path):
c = ConfigParser()
c.read(path)
return c
else:
raise Exception("Config file 'airhead.ini' not found in any of {}."
.format(', '.join(CONFIG_PATHS)))
idle_media = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'media', 'idle.ogg')
|
[
"tancredi.orlando@gmail.com"
] |
tancredi.orlando@gmail.com
|
b35f33b1c5fa59c7037eb5ee5244aac3c84b8d05
|
51972de0831c0a5ff6c91c8db0e2432709faa187
|
/trial.py
|
41eb157d0f5c515ea98d9c875b6fca952309f45b
|
[] |
no_license
|
zdharmawan/anomaly_detection
|
9372e13c3f4a3e2922dd3037c68fc059b7cc9d6f
|
eadec8c851f526a9a7cab09bac03898b6dfe2b8b
|
refs/heads/master
| 2016-08-04T10:08:30.512526
| 2015-04-29T14:55:14
| 2015-04-29T14:55:14
| 34,800,118
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
__author__ = '310176470'
import lsanomaly
import numpy as np
X_train = np.array([[1.1],[1.3],[1.2],[1.05]])
X_test = np.array([[1.15],[3.6],[1.25]])
anomalymodel = lsanomaly.LSAnomaly()
anomalymodel.fit(X_train)
anomalymodel.predict(X_test)
# anomalymodel.predict_proba(X_test)
|
[
"zulfikar.dharmawan@philips.com"
] |
zulfikar.dharmawan@philips.com
|
c704dc8239367ec34f87e6ed7a1b9b4cab9f82cf
|
99c32a9df1e0e8c980b5235395eb957aaa898270
|
/insert-sort.py
|
abaa1e2393793c2acd834e64bfffec992aa297b7
|
[] |
no_license
|
syswipe/IntroToAlgorithms
|
3df7b5ba032df461ac01a76956b9c723f7f6dec7
|
f1465d33d93d82812093218bb28b6d2b5e2df5bc
|
refs/heads/master
| 2021-01-10T03:15:20.548139
| 2016-03-05T12:06:31
| 2016-03-05T12:06:31
| 53,199,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import random
import time
A = [random.randint(0,1000) for r in range(5000)]
start = time.time()
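# Insertion sort: extend the sorted prefix A[:j] by shifting larger elements right and inserting A[j]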
for j in range(1,len(A)):
key = A[j]
i = j-1
while i >= 0 and A[i] > key:
A[i+1] = A[i]
i = i-1
A[i+1] = key
print(time.time()-start)
|
[
"Andriy.Tovstik@sibis.com.ua"
] |
Andriy.Tovstik@sibis.com.ua
|
2adedf278e328a544301becf3a184d5bb2ccbfa4
|
8d0651b62a23e110d2e6f27706e965bfe6526629
|
/email_service/apps/api/urls.py
|
a88c97b3812bb84b94d273341043a8fa0e5ed1dc
|
[] |
no_license
|
pavitrabhalla/GoMail
|
4ae5c850c824910b7fbe99f61e8a2683d90f5fa3
|
847d1d1859d94c727ecc45e65e71f459a11facec
|
refs/heads/master
| 2020-05-16T21:50:13.911849
| 2014-12-03T20:25:25
| 2014-12-03T20:25:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
from django.conf.urls import patterns, url, include
from tastypie.api import Api
from api import resources
v1_api = Api(api_name='v1')
v1_api.register(resources.EmailResource())
urlpatterns = patterns(
'',
url(r'', include(v1_api.urls)),
)
|
[
"pavitrabhalla@gmail.com"
] |
pavitrabhalla@gmail.com
|
2f2795d2f4ef0b35986e39c56b616b06e48b7a83
|
ca26f30fd3943571667fe867303257023ae792e5
|
/backend/config/settings/prod.py
|
18b38b4c270d57c76a384b7ffd4039885cc2fb5f
|
[
"MIT"
] |
permissive
|
yegorLitvinov/costcontrol
|
d6ad2cadf662980feacbb50a9e532b8c73a9198a
|
f3c9316ea0c9e7f2509087dabdbe7f1cf5fe2d80
|
refs/heads/master
| 2021-06-25T07:05:41.866976
| 2018-11-11T16:36:14
| 2018-11-11T16:36:14
| 107,854,768
| 0
| 0
|
MIT
| 2020-02-11T21:47:00
| 2017-10-22T10:01:53
|
Python
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
from .base import * # noqa
DEBUG = False
ALLOWED_HOSTS = [".tvgun.ga"]
REST_FRAMEWORK["DEFAULT_RENDERER_CLASSES"] = ( # noqa
"rest_framework.renderers.JSONRenderer",
)
|
[
"yegor.litvinov@yandex.ru"
] |
yegor.litvinov@yandex.ru
|
4c3c4653d161070cd11d9e357edc491a3cccf5d3
|
6149d8235e8017d7ed80ed4c6efad2d6dc872fa6
|
/core/teacher.py
|
0a352da3aa9c8ad897ddbeeb1b40fdfa87c0d93e
|
[] |
no_license
|
MrChenxb/CourseSystems
|
5d76a2e1439d2a16fdc63565445261df3395fa03
|
3f94032ea32b1aa1aa3d1781fdc437c51026088c
|
refs/heads/master
| 2022-12-26T14:24:36.179881
| 2020-09-26T14:55:34
| 2020-09-26T14:55:34
| 200,806,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,283
|
py
|
# coding:utf-8
"""
教师视图
"""
from lib import common
from interface import common_interface
from interface import teacher_interface
teacher_info = {
'user': None
}
# 1. Teacher login
def login():
while True:
username = input('请输入用户名==>').strip()
password = input('请输入密码==>').strip()
flag,msg = common_interface.login_interface(username,password,user_type='teacher')
if flag:
print(msg)
teacher_info['user'] = username
break
else:
print(msg)
# 2. Teacher views the courses they teach
@common.auth('teacher')
def check_course():
flag,course_list = teacher_interface.check_course_interface(teacher_info['user'])
if flag:
print(course_list)
else:
print(course_list)
# 3. Teacher picks a course to teach
@common.auth('teacher')
def choice_course():
while True:
flag,school_list = common_interface.get_all_school_interface()
if not flag:
print(school_list)
break
for index,school_name in enumerate(school_list):
print(f'学校编号为:[{index}], 学校名称为:[{school_name}]')
choice = input('请输入选择的学校编号==>').strip()
if not choice.isdigit():
print('输入有误!')
continue
choice = int(choice)
if choice not in range(len(school_list)):
print('输入有误!')
continue
school_name = school_list[choice]
flag2,course_list = common_interface.get_course_in_school_interface(school_name)
if not flag2:
print(course_list)
break
for index2,course_name in enumerate(course_list):
print(f'课程编号为:[{index2}], 课程名称为:[{course_name}]')
choice2 = input('请输入选择的课程编号==>').strip()
if not choice2.isdigit():
print('输入有误!')
continue
choice2 = int(choice2)
if choice2 not in range(len(course_list)):
print('输入课程编号有误!')
continue
course_name = course_list[choice2]
flag3,msg = teacher_interface.add_course_interface(course_name,teacher_info['user'])
if flag3:
print(msg)
break
else:
print(msg)
# 4. Teacher views the students in a course
@common.auth('teacher')
def check_stu_from_course():
while True:
flag,course_list = teacher_interface.check_course_interface(teacher_info['user'])
if not flag:
print(course_list)
break
for index,course_name in enumerate(course_list):
print(f'课程编号为:[{index}], 课程名称为:[{course_name}]')
choice = input('请输入选择的课程编号==>').strip()
if not choice.isdigit():
print('输入有误!')
continue
choice = int(choice)
if choice not in range(len(course_list)):
print('输入课程编号有误!')
continue
course_name = course_list[choice]
flag, student_list = teacher_interface.get_student_interface(course_name, teacher_info['user'])
if flag:
print(student_list)
break
else:
print(student_list)
break
# 5. Teacher updates a student's score
@common.auth('teacher')
def change_score_from_student():
"""
# 1.先获取老师下所有的课程并选择
# 2.获取课程下所有的学生,并选择修改的学生
# 3.调用修改学生分数接口
"""
while True:
flag,course_list = teacher_interface.check_course_interface(teacher_info['user'])
if not flag:
print(course_list)
break
for index,course_name in enumerate(course_list):
print(f'课程编号为:[{index}], 课程名称为:[{course_name}]')
choice = input('请输入选择的课程编号==>').strip()
if not choice.isdigit():
print('输入有误!')
continue
choice = int(choice)
if choice not in range(len(course_list)):
print('输入课程编号有误!')
continue
course_name = course_list[choice]
flag2, student_list = teacher_interface.get_student_interface(course_name,teacher_info['user'])
if not flag2:
print(student_list)
break
for index,stu_name in enumerate(student_list):
print(f'学生编号为:[{index}], 学校姓名为:[{stu_name}]')
choice_stu = input('请输入学生编号==>').strip()
if not choice_stu.isdigit():
print('输入有误!')
continue
choice_stu = int(choice_stu)
if choice_stu not in range(len(student_list)):
print('输入学生编号有误!')
continue
stu_name = student_list[choice_stu]
score = input('请输入修改的成绩==>').strip()
if not score.isdigit():
print('输入成绩有误!')
continue
score = int(score)
flag3,msg = teacher_interface.change_score_interface(
course_name, stu_name, score, teacher_info['user']
)
if flag3:
print(msg)
break
pass
teacher_func = {
'0': ['退出', None],
'1': ['登录', login],
'2': ['查看教授课程', check_course],
'3': ['选择教授课程', choice_course],
'4': ['查看课程下学生', check_stu_from_course],
'5': ['修改学生分数', change_score_from_student],
}
def teacher_view():
while True:
print(' 欢迎来到教师视图 '.center(30,'='))
for index,func in teacher_func.items():
print('[%s] %s' %(index,func[0]))
print(' end '.center(30,'='))
choice = input('请输入功能编号==>').strip()
if choice not in teacher_func:
print('请输入正确的功能编号')
continue
if choice == '0':
break
teacher_func[choice][1]()
|
[
"noreply@github.com"
] |
MrChenxb.noreply@github.com
|
65cfea757b79f26ab21b2421104ae09aafaad1b2
|
cc552092965db97a3e8167983e835394c5605290
|
/models/user.py
|
d6b15e6b08776aae4713f6f2a6cf898f246cb9c6
|
[] |
no_license
|
dinkdinkdink/bbs_server
|
709ec2609a1df6abd39b45bf627cc88557be0895
|
0e4454a27296e9d4bfbe658c4c0560dfa3b65ab7
|
refs/heads/master
| 2020-07-21T08:54:31.965376
| 2020-03-06T02:17:57
| 2020-03-06T02:17:57
| 206,784,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,537
|
py
|
import hashlib
from sqlalchemy import Column, String, Text
import config
import secret
from models.base_model import SQLMixin, db
class User(SQLMixin, db.Model):
__tablename__ = 'User'
"""
User 是一个保存用户数据的 model
现在只有两个属性 username 和 password
"""
username = Column(String(50), nullable=False)
password = Column(String(100), nullable=False)
image = Column(String(100), nullable=False, default='/images/3.jpg')
email = Column(String(50), nullable=False, default=config.test_mail)
@staticmethod
def salted_password(password, salt='$!@><?>HUI&DWQa`'):
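        # SHA-256 of the password concatenated with a fixed salt, returned as a hex digest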
salted = hashlib.sha256((password + salt).encode('ascii')).hexdigest()
return salted
@classmethod
def register(cls, form):
name = form.get('username', '')
print('register', form)
if len(name) > 2 and User.one(username=name) is None:
# Wrong approach: commit should only happen once
# u = User.new(form)
# u.password = u.salted_password(pwd)
# User.session.add(u)
# User.session.commit()
form['password'] = User.salted_password(form['password'])
u = User.new(form)
return u
else:
return None
@classmethod
def validate_login(cls, form):
query = dict(
username=form['username'],
password=User.salted_password(form['password']),
)
print('validate_login', form, query)
return User.one(**query)
|
[
"361046367@qq.com"
] |
361046367@qq.com
|
05039b0c5332b0128f70966e5b307b310bc1377b
|
c36b9b30dc31b0008580f18dd6ec9fa76d92d8eb
|
/Data_Science_Python/Applied_Text_Mining_in_Python/Assignment4.py
|
945f8a082cb6422f303bcf37b4214f43e82d013d
|
[
"MIT"
] |
permissive
|
SebastVR/test
|
027f81320c309eb6e30c5d7098d95cacf9d755b9
|
b86bd5457347a200d0920213c6a2eccbc3915696
|
refs/heads/master
| 2022-11-08T23:18:57.770282
| 2020-06-29T22:31:17
| 2020-06-29T22:31:17
| 264,340,257
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,635
|
py
|
'''Assignment 4 - Document Similarity & Topic Modelling
Part 1 - Document Similarity
For the first part of this assignment, you will complete the functions doc_to_synsets and similarity_score
which will be used by document_path_similarity to find the path similarity between two documents.
The following functions are provided:
convert_tag: converts the tag given by nltk.pos_tag to a tag used by wordnet.synsets. You will need to use
this function in doc_to_synsets.
document_path_similarity: computes the symmetrical path similarity between two documents by finding the
synsets in each document using doc_to_synsets, then computing similarities using similarity_score.
You will need to finish writing the following functions:
doc_to_synsets: returns a list of synsets in document. This function should first tokenize and part of
speech tag the document using nltk.word_tokenize and nltk.pos_tag. Then it should find each tokens
corresponding synset using wn.synsets(token, wordnet_tag). The first synset match should be used.
If there is no match, that token is skipped.
similarity_score: returns the normalized similarity score of a list of synsets (s1) onto a second list of
synsets (s2). For each synset in s1, find the synset in s2 with the largest similarity value. Sum all of
the largest similarity values together and normalize this value by dividing it by the number of largest
similarity values found. Be careful with data types, which should be floats. Missing values should be
ignored.
Once doc_to_synsets and similarity_score have been completed, submit to the autograder which will run
test_document_path_similarity to test that these functions are running correctly.
Do not modify the functions convert_tag, document_path_similarity, and test_document_path_similarity.'''
import numpy as np
import nltk
from nltk.corpus import wordnet as wn
import pandas as pd
def convert_tag(tag):
"""Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets"""
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
def doc_to_synsets(doc):
"""
Returns a list of synsets in document.
Tokenizes and tags the words in the document doc.
Then finds the first synset for each word/tag combination.
If a synset is not found for that combination it is skipped.
Args:
doc: string to be converted
Returns:
list of synsets
Example:
doc_to_synsets('Fish are nvqjp friends.')
Out: [Synset('fish.n.01'), Synset('be.v.01'), Synset('friend.n.01')]
"""
token = nltk.word_tokenize(doc)
word_tag = nltk.pos_tag(token)
synsets = []
for word, tag in word_tag:
tag = convert_tag(tag)
synset = wn.synsets(word, pos=tag)
if len(synset) != 0:
synsets.append(synset[0])
else:
continue
return synsets
def similarity_score(s1, s2):
"""
Calculate the normalized similarity score of s1 onto s2
For each synset in s1, finds the synset in s2 with the largest similarity value.
Sum of all of the largest similarity values and normalize this value by dividing it by the
number of largest similarity values found.
Args:
s1, s2: list of synsets from doc_to_synsets
Returns:
normalized similarity score of s1 onto s2
Example:
synsets1 = doc_to_synsets('I like cats')
synsets2 = doc_to_synsets('I like dogs')
similarity_score(synsets1, synsets2)
Out: 0.73333333333333339
"""
largest_similarity_values = []
for syn1 in s1:
similarity_values =[]
for syn2 in s2:
sim_val = wn.path_similarity(syn1, syn2)
if sim_val is not None:
similarity_values.append(sim_val)
if len(similarity_values) != 0:
largest_similarity_values.append(max(similarity_values))
return sum(largest_similarity_values) / len(largest_similarity_values)
def document_path_similarity(doc1, doc2):
"""Finds the symmetrical similarity between doc1 and doc2"""
synsets1 = doc_to_synsets(doc1)
synsets2 = doc_to_synsets(doc2)
return (similarity_score(synsets1, synsets2) + similarity_score(synsets2, synsets1)) / 2
#-----------------------------------------------------------------------
'''test_document_path_similarity
Use this function to check if doc_to_synsets and similarity_score are correct.
This function should return the similarity score as a float.'''
#---------- ANSWER CODE ----------
def test_document_path_similarity():
doc1 = 'This is a function to test document_path_similarity.'
doc2 = 'Use this function to see if your code in doc_to_synsets \
and similarity_score is correct!'
return document_path_similarity(doc1, doc2)
test_document_path_similarity()
#---------- ANSWER ----------
0.554265873015873
#-----------------------------------------------------------------------
'''paraphrases is a DataFrame which contains the following columns: Quality, D1, and D2.
Quality is an indicator variable which indicates if the two documents D1 and D2 are paraphrases of one
another (1 for paraphrase, 0 for not paraphrase).'''
# Use this dataframe for questions most_similar_docs and label_accuracy
paraphrases = pd.read_csv('paraphrases.csv')
paraphrases.head()
'''
Quality D1 D2
0 1 Ms Stewart, the chief executive, was not expec... Ms Stewart, 61, its chief executive officer an...
1 1 After more than two years' detention under the... After more than two years in detention by the ...
2 1 "It still remains to be seen whether the reven... "It remains to be seen whether the revenue rec...
3 0 And it's going to be a wild ride," said Allan ... Now the rest is just mechanical," said Allan H...
4 1 The cards are issued by Mexico's consulates to... The card is issued by Mexico's consulates to i...
'''
'''most_similar_docs
Using document_path_similarity, find the pair of documents in paraphrases which has the maximum similarity
score.
This function should return a tuple (D1, D2, similarity_score)'''
#---------- ANSWER CODE ----------
def most_similar_docs():
# true_paraphrases = paraphrases.loc[paraphrases['Quality'] == 1]
temp = paraphrases.copy()
temp['similarity'] = temp.apply(lambda row: document_path_similarity(row['D1'], row['D2']), axis=1)
result = temp.loc[temp['similarity'] == temp['similarity'].max()].squeeze().values
return result[1], result[2], result[3]
most_similar_docs()
#---------- ANSWER ----------
('"Indeed, Iran should be put on notice that efforts to try to remake Iraq in their image will be aggressively put down," he said.',
'"Iran should be on notice that attempts to remake Iraq in Iran\'s image will be aggressively put down," he said.\n',
0.9753086419753086)
#-----------------------------------------------------------------------
'''label_accuracy
Provide labels for the twenty pairs of documents by computing the similarity for each pair using
document_path_similarity. Let the classifier rule be that if the score is greater than 0.75, label is
paraphrase (1), else label is not paraphrase (0). Report accuracy of the classifier using scikit-learn's
accuracy_score.
This function should return a float.'''
#---------- ANSWER CODE ----------
def label_accuracy():
from sklearn.metrics import accuracy_score
def get_label(row):
if row['similarity'] > 0.75:
row['label'] = 1
else:
row['label'] = 0
return row
temp = paraphrases.copy()
temp['similarity'] = temp.apply(lambda row: document_path_similarity(row['D1'], row['D2']), axis=1)
temp = temp.apply(get_label, axis=1)
score = accuracy_score(temp['Quality'], temp['label'])
return score
label_accuracy()
#---------- ANSWER ----------
0.8
#-----------------------------------------------------------------------
'''Part 2 - Topic Modelling
For the second part of this assignment, you will use Gensim's LDA (Latent Dirichlet Allocation) model to
model topics in newsgroup_data. You will first need to finish the code in the cell below by using
gensim.models.ldamodel.LdaModel constructor to estimate LDA model parameters on the corpus, and save to the
variable ldamodel. Extract 10 topics using corpus and id_map, and with passes=25 and random_state=34.'''
import pickle
import gensim
from sklearn.feature_extraction.text import CountVectorizer
#from gensim import corpora, models, similatities
# Load the list of documents
with open('newsgroups', 'rb') as f:
newsgroup_data = pickle.load(f)
# Use CountVectorizor to find three letter tokens, remove stop_words,
# remove tokens that don't appear in at least 20 documents,
# remove tokens that appear in more than 20% of the documents
vect = CountVectorizer(min_df=20, max_df=0.2, stop_words='english',
token_pattern='(?u)\\b\\w\\w\\w+\\b')
# Fit and transform
X = vect.fit_transform(newsgroup_data)
# Convert sparse matrix to gensim corpus.
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
# Mapping from word IDs to words (To be used in LdaModel's id2word parameter)
id_map = dict((v, k) for k, v in vect.vocabulary_.items())
#-----------------------------------------------------------------------
# Use the gensim.models.ldamodel.LdaModel constructor to estimate
# LDA model parameters on the corpus, and save to the variable `ldamodel`
# Your code here:
ldamodel = gensim.models.ldamodel.LdaModel(corpus=corpus, num_topics=10, id2word=id_map, passes=25,
random_state=34)
'''lda_topics
Using ldamodel, find a list of the 10 topics and the most significant 10 words in each topic. This should be structured as a list of 10 tuples where each tuple takes on the form:
(9, '0.068*"space" + 0.036*"nasa" + 0.021*"science" + 0.020*"edu" + 0.019*"data" + 0.017*"shuttle" + 0.015*"launch" + 0.015*"available" + 0.014*"center" + 0.014*"sci"')
for example.
This function should return a list of tuples.'''
#---------- ANSWER CODE ----------
def lda_topics():
return ldamodel.print_topics()
lda_topics()
#---------- ANSWER ----------
'''
[(0,
'0.056*"edu" + 0.043*"com" + 0.033*"thanks" + 0.022*"mail" + 0.021*"know" + 0.020*"does" + 0.014*"info" + 0.012*"monitor" + 0.010*"looking" + 0.010*"don"'),
(1,
'0.024*"ground" + 0.018*"current" + 0.018*"just" + 0.013*"want" + 0.013*"use" + 0.011*"using" + 0.011*"used" + 0.010*"power" + 0.010*"speed" + 0.010*"output"'),
(2,
'0.061*"drive" + 0.042*"disk" + 0.033*"scsi" + 0.030*"drives" + 0.028*"hard" + 0.028*"controller" + 0.027*"card" + 0.020*"rom" + 0.018*"floppy" + 0.017*"bus"'),
(3,
'0.023*"time" + 0.015*"atheism" + 0.014*"list" + 0.013*"left" + 0.012*"alt" + 0.012*"faq" + 0.012*"probably" + 0.011*"know" + 0.011*"send" + 0.010*"months"'),
(4,
'0.025*"car" + 0.016*"just" + 0.014*"don" + 0.014*"bike" + 0.012*"good" + 0.011*"new" + 0.011*"think" + 0.010*"year" + 0.010*"cars" + 0.010*"time"'),
(5,
'0.030*"game" + 0.027*"team" + 0.023*"year" + 0.017*"games" + 0.016*"play" + 0.012*"season" + 0.012*"players" + 0.012*"win" + 0.011*"hockey" + 0.011*"good"'),
(6,
'0.017*"information" + 0.014*"help" + 0.014*"medical" + 0.012*"new" + 0.012*"use" + 0.012*"000" + 0.012*"research" + 0.011*"university" + 0.010*"number" + 0.010*"program"'),
(7,
'0.022*"don" + 0.021*"people" + 0.018*"think" + 0.017*"just" + 0.012*"say" + 0.011*"know" + 0.011*"does" + 0.011*"good" + 0.010*"god" + 0.009*"way"'),
(8,
'0.034*"use" + 0.023*"apple" + 0.020*"power" + 0.016*"time" + 0.015*"data" + 0.015*"software" + 0.012*"pin" + 0.012*"memory" + 0.012*"simms" + 0.011*"port"'),
(9,
'0.068*"space" + 0.036*"nasa" + 0.021*"science" + 0.020*"edu" + 0.019*"data" + 0.017*"shuttle" + 0.015*"launch" + 0.015*"available" + 0.014*"center" + 0.014*"sci"')]'''
#-----------------------------------------------------------------------
'''topic_distribution
For the new document new_doc, find the topic distribution. Remember to use vect.transform on the the new
doc, and Sparse2Corpus to convert the sparse matrix to gensim corpus.
This function should return a list of tuples, where each tuple is (#topic, probability)'''
new_doc = ["\n\nIt's my understanding that the freezing will start to occur because \
of the\ngrowing distance of Pluto and Charon from the Sun, due to it's\nelliptical orbit. \
It is not due to shadowing effects. \n\n\nPluto can shadow Charon, and vice-versa.\n\nGeorge \
Krumins\n-- "]
#---------- ANSWER CODE ----------
def topic_distribution():
new_doc_vectorized = vect.transform(new_doc)
doc2corpus = gensim.matutils.Sparse2Corpus(new_doc_vectorized, documents_columns=False)
return list(ldamodel.get_document_topics(doc2corpus))[0]
topic_distribution()
#---------- ANSWER ----------
'''
[(0, 0.020003108),
(1, 0.020003324),
(2, 0.020001281),
(3, 0.49674758),
(4, 0.020004038),
(5, 0.020004129),
(6, 0.020002972),
(7, 0.020002645),
(8, 0.020003129),
(9, 0.34322783)]'''
#-----------------------------------------------------------------------
'''topic_names
From the list of the following given topics, assign topic names to the topics you found. If none of these
names best matches the topics you found, create a new 1-3 word "title" for the topic.
Topics: Health, Science, Automobiles, Politics, Government, Travel, Computers & IT, Sports, Business,
Society & Lifestyle, Religion, Education.
This function should return a list of 10 strings.'''
#---------- ANSWER CODE ----------
def topic_names():
topic_names = ['Health', 'Automobiles', 'Government', 'Travel', 'Computers & IT', 'Sports', 'Business', 'Society & Lifestyle', 'Region', 'Education']
topics = lda_topics()
results = []
for _, dis in topics:
print(dis)
similarity = []
for topic in topic_names:
similarity.append(document_path_similarity(dis, topic))
best_topic = sorted(zip(similarity, topic_names))[-1][1]
results.append(best_topic)
return ['Education', 'Business', 'Automobiles', 'Religion', 'Travel', 'Sports', 'Health', 'Society & Lifestyle', 'Computers & IT', 'Science']
topic_names()
#---------- ANSWER ----------
['Education',
'Business',
'Automobiles',
'Religion',
'Travel',
'Sports',
'Health',
'Society & Lifestyle',
'Computers & IT',
'Science']
#-----------------------------------------------------------------------
|
[
"sebastianvr92@hotmail.com"
] |
sebastianvr92@hotmail.com
|
6c6be5bb613ab1ba748008cf64ecb99a72b2ea86
|
814fd0bea5bc063a4e34ebdd0a5597c9ff67532b
|
/build/android/pylib/utils/mock_calls_test.py
|
1b474afd1ea1707910b1716170ec0f65c1c87e17
|
[
"BSD-3-Clause"
] |
permissive
|
rzr/chromium-crosswalk
|
1b22208ff556d69c009ad292bc17dca3fe15c493
|
d391344809adf7b4f39764ac0e15c378169b805f
|
refs/heads/master
| 2021-01-21T09:11:07.316526
| 2015-02-16T11:52:21
| 2015-02-16T11:52:21
| 38,887,985
| 0
| 0
|
NOASSERTION
| 2019-08-07T21:59:20
| 2015-07-10T15:35:50
|
C++
|
UTF-8
|
Python
| false
| false
| 5,078
|
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of mock_calls.py.
"""
import logging
import os
import sys
import unittest
from pylib import constants
from pylib.utils import mock_calls
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class _DummyAdb(object):
def __str__(self):
return '0123456789abcdef'
def Push(self, host_path, device_path):
logging.debug('(device %s) pushing %r to %r', self, host_path, device_path)
def IsOnline(self):
logging.debug('(device %s) checking device online', self)
return True
def Shell(self, cmd):
logging.debug('(device %s) running command %r', self, cmd)
return "nice output\n"
def Reboot(self):
logging.debug('(device %s) rebooted!', self)
class TestCaseWithAssertCallsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _DummyAdb()
def ShellError(self):
def action(cmd):
raise ValueError('(device %s) command %r is not nice' % (self.adb, cmd))
return action
def get_answer(self):
logging.debug("called 'get_answer' of %r object", self)
return 42
def echo(self, thing):
logging.debug("called 'echo' of %r object", self)
return thing
def testCallTarget_succeds(self):
self.assertEquals(self.adb.Shell,
self.call_target(self.call.adb.Shell))
def testCallTarget_failsExternal(self):
with self.assertRaises(ValueError):
self.call_target(mock.call.sys.getcwd)
def testCallTarget_failsUnknownAttribute(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.Run)
def testCallTarget_failsIntermediateCalls(self):
with self.assertRaises(AttributeError):
self.call_target(self.call.adb.RunShell('cmd').append)
def testPatchCall_method(self):
self.assertEquals(42, self.get_answer())
with self.patch_call(self.call.get_answer, return_value=123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testPatchCall_attribute_method(self):
with self.patch_call(self.call.adb.Shell, return_value='hello'):
self.assertEquals('hello', self.adb.Shell('echo hello'))
def testPatchCall_global(self):
with self.patch_call(mock.call.os.getcwd, return_value='/some/path'):
self.assertEquals('/some/path', os.getcwd())
def testPatchCall_withSideEffect(self):
with self.patch_call(self.call.adb.Shell, side_effect=ValueError):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_succeeds_simple(self):
self.assertEquals(42, self.get_answer())
with self.assertCall(self.call.get_answer(), 123):
self.assertEquals(123, self.get_answer())
self.assertEquals(42, self.get_answer())
def testAssertCalls_succeeds_multiple(self):
with self.assertCalls(
(mock.call.os.getcwd(), '/some/path'),
(self.call.echo('hello'), 'hello'),
(self.call.get_answer(), 11),
self.call.adb.Push('this_file', 'that_file'),
(self.call.get_answer(), 12)):
self.assertEquals(os.getcwd(), '/some/path')
self.assertEquals('hello', self.echo('hello'))
self.assertEquals(11, self.get_answer())
self.adb.Push('this_file', 'that_file')
self.assertEquals(12, self.get_answer())
def testAsserCalls_succeeds_withAction(self):
with self.assertCall(
self.call.adb.Shell('echo hello'), self.ShellError()):
with self.assertRaises(ValueError):
self.adb.Shell('echo hello')
def testAssertCalls_fails_tooManyCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.IsOnline()
def testAssertCalls_fails_tooFewCalls(self):
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
pass
def testAssertCalls_succeeds_extraCalls(self):
# we are not watching Reboot, so the assertion succeeds
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_fails_extraCalls(self):
self.watchCalls([self.call.adb.Reboot])
# this time we are also watching Reboot, so the assertion fails
with self.assertRaises(AssertionError):
with self.assertCalls(self.call.adb.IsOnline()):
self.adb.IsOnline()
self.adb.Reboot()
def testAssertCalls_succeeds_NoCalls(self):
self.watchMethodCalls(self.call.adb) # we are watching all adb methods
with self.assertCalls():
pass
def testAssertCalls_fails_NoCalls(self):
self.watchMethodCalls(self.call.adb)
with self.assertRaises(AssertionError):
with self.assertCalls():
self.adb.IsOnline()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
178ebfab22130821e12bb8c9157a0436f54acf48
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/109/usersdata/172/63370/submittedfiles/av2_p3_civil.py
|
4c68528d682769ee8dc9310c3e74e069e24ca4aa
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
def somal(l,i):
soma=0
for j in range(0,l.shape[1],1):
soma=soma+l[i,j]
return (soma)
def somac(l,j):
soma=0
for i in range(0,l.shape[0],1):
soma=soma+l[i,j]
return (soma)
n=int(input('Tamanho: '))
g=int(input('Pl: '))
h=int(input('Pc: '))
l=np.zeros((n,n))
for i in range(0,l.shape[0],1):
for j in range(0,l.shape[1],1):
l[i,j]= int(input(' peso: '))
fim=somal(l,g)+somac(l,h)-(2*(l[g,h]))
print(fim)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ed4429de5fa5b3ff588985d9a1e290aecd09db58
|
94e5a9e157d3520374d95c43fe6fec97f1fc3c9b
|
/Codeforces/645-2/D.py
|
1cdb17b8cded5242da9e41bde63c9668ab30c4c2
|
[
"MIT"
] |
permissive
|
dipta007/Competitive-Programming
|
0127c550ad523884a84eb3ea333d08de8b4ba528
|
998d47f08984703c5b415b98365ddbc84ad289c4
|
refs/heads/master
| 2021-01-21T14:06:40.082553
| 2020-07-06T17:40:46
| 2020-07-06T17:40:46
| 54,851,014
| 8
| 4
| null | 2020-05-02T13:14:41
| 2016-03-27T22:30:02
|
C++
|
UTF-8
|
Python
| false
| false
| 3,459
|
py
|
""" Python 3 compatibility tools. """
from __future__ import division, print_function
import itertools
import sys
import os
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
input = raw_input
range = xrange
filter = itertools.ifilter
map = itertools.imap
zip = itertools.izip
def is_it_local():
script_dir = str(os.getcwd()).split('/')
username = "dipta007"
return username in script_dir
def READ(fileName):
if is_it_local():
sys.stdin = open(f'./{fileName}', 'r')
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
if not is_it_local():
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion
def input1(type=int):
return type(input())
def input2(type=int):
[a, b] = list(map(type, input().split()))
return a, b
def input3(type=int):
[a, b, c] = list(map(type, input().split()))
return a, b, c
def input_array(type=int):
return list(map(type, input().split()))
def input_string():
s = input()
return list(s)
##############################################################
n, m, ar = 0, 0, []
month_day = []
cum_month_day = []
cum_days = []
def end_here(ind):
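    # Best total for a window of exactly m days that ends on the last day of month `ind`:
    # binary-search the latest month boundary `res` with at least m days up to `ind`,
    # then trim the `besi` surplus days off the front of the window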
low = 0
high = ind
while low <= high:
mid = (low + high) // 2
if cum_days[ind] - cum_days[mid] >= m:
res = mid
low = mid + 1
else:
high = mid - 1
now = cum_days[ind] - cum_days[res]
besi = now - m
# print(ind, res, now, besi)
return (cum_month_day[ind] - cum_month_day[res] - (besi * (besi + 1)) // 2)
def main():
global n, m, ar, month_day, cum_days, cum_month_day
n, m = input2()
ar = input_array()
month_day = [(n * (n+1)) // 2 for n in ar]
month_day += month_day
ar += ar
cum_month_day = [0]
cum_days = [0]
for i in range(n+n):
cum_month_day.append(cum_month_day[-1] + month_day[i])
cum_days.append(cum_days[-1] + ar[i])
# print(ar, month_day, cum_month_day, cum_days)
res = 0
for i in range(n+1, n+n+1):
res = max(res, end_here(i))
print(res)
pass
if __name__ == '__main__':
READ('in.txt')
main()
|
[
"iamdipta@gmail.com"
] |
iamdipta@gmail.com
|
2ac2f93f67aa2523a54140f3d629ab0f15276ab4
|
7df8f84828213b7bdada74f512a123cd08bf4a1c
|
/gui_basic/12_frame.py
|
64a28146781c689f3031f5b69a6e7005e01105c9
|
[] |
no_license
|
Every-J/Python
|
87fd4eb7aeb0389edbbfa4012c64d248d0aff11c
|
cfdc591c67764dd128797f9d7ba644b1ea53e1cb
|
refs/heads/main
| 2023-08-25T03:50:25.501524
| 2021-10-27T01:20:07
| 2021-10-27T01:20:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
from tkinter import *
root = Tk()
root.title("Nado GUI")
root.geometry("640x480") # width x height
Label(root, text="메뉴를 선택해 주세요").pack(side="top")
Button(root, text="주문하기").pack(side="bottom")
# Menu frame
frame_burger = Frame(root, relief="solid", bd=1)
frame_burger.pack(side="left", fill="both", expand=True)
Button(frame_burger, text="햄버거").pack()
Button(frame_burger, text="치즈버거").pack()
Button(frame_burger, text="치킨버거").pack()
# Drinks frame
frame_drink = LabelFrame(root, text="음료")
frame_drink.pack(side="right", fill="both", expand=True)
Button(frame_drink, text="콜라").pack()
Button(frame_drink, text="사이다").pack()
root.mainloop()
|
[
"noreply@github.com"
] |
Every-J.noreply@github.com
|
4a5f20033b2ce926b8c120facc7b1de246135d9c
|
c47e274f6af4d08bff65e360fb8a11b163dc34b2
|
/common/global_constants.py
|
7e184ce065f2d0ce801d87ae0ab50fb3d1e9079c
|
[
"BSD-3-Clause"
] |
permissive
|
nozberkaryaindonesia/ReadableWebProxy
|
6b66994c574dc0a70767397403c04f97bf2d07f0
|
82d14d8dfb23ef135a16f88274c14c7acc1162a5
|
refs/heads/master
| 2022-05-21T20:06:03.707617
| 2017-09-24T09:54:23
| 2017-09-24T09:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,460
|
py
|
GLOBAL_BAD_URLS = [
'//mail.google.com',
'/comments/feed/',
'/embed?',
'/osd.xml',
'/page/page/',
'/wp-json/',
'/wp-login.php',
'/xmlrpc.php',
'?openidserver=1',
'a.wikia-beacon.com',
'accounts.google.com',
'add.my.yahoo.com',
'addtoany.com',
'b.scorecardresearch.com',
'delicious.com',
'digg.com',
'edit.yahoo.com',
'facebook.com',
'fbcdn-',
'feeds.wordpress.com',
'gprofiles.js',
'javascript:void',
'netvibes.com',
'newsgator.com',
'paypal.com',
'pixel.wp.com',
'public-api.wordpress.com',
'r-login.wordpress.com',
'reddit.com',
'stumbleupon.com',
'technorati.com',
'topwebfiction.com',
'twitter.com',
'twitter.com/intent/',
'wretch.cc',
'ws-na.amazon-adsystem.com',
    'www.addtoany.com',
    'www.pinterest.com/pin/',
'www.wattpad.com/login?',
'www.tumblr.com/reblog/',
'www.paypalobjects.com',
# Tumblr can seriously go fuck itself with a rusty stake
'tumblr.com/widgets/',
'www.tumblr.com/login',
'://tumblr.com',
'&share=tumblr',
'/wp-content/plugins/',
'/wp-content/themes/',
'/wp-json/oembed/',
# At least one site (booksie) is serving the favicon with a mime-type
# of "text/plain", which then confuses the absolute crap out of the
# mime-type dispatcher.
# Since I'm not re-serving favicons anyways, just do not fetch them ever.
'favicon.ico',
# Try to not scrape inline images
';base64,',
"www.fashionmodeldirectory.com",
"www.watchingprivatepractice.com",
"Ebonyimages.jupiterimages.com",
# More garbage issues.
'"https',
'#comment-',
'/oembed/1.0/',
'&share=',
'replytocom=',
'?feed=rss2&page_id',
'?share=tumblr',
'?share=facebook',
'chasingadreamtranslations.com/?fp=',
# NFI where /this/ came from
'www.miforcampuspolice.com',
'tracking.feedpress.it',
'www.quantcast.com',
'mailto:',
'javascript:popupWindow(',
'en.blog.wordpress.com',
'counter.yadro.ru',
'/js/js/',
'/css/css/',
'/images/images/',
'ref=dp_brlad_entry',
'https:/www.',
'tumblr.com/oembed/1.0?',
]
GLOBAL_DECOMPOSE_BEFORE = [
{'name' : 'likes-master'}, # Bullshit sharing widgets
{'id' : 'jp-post-flair'},
{'class' : 'post-share-buttons'},
#{'class' : 'commentlist'}, # Scrub out the comments so we don't try to fetch links from them
#{'class' : 'comments'},
#{'id' : 'comments'},
]
GLOBAL_DECOMPOSE_AFTER = []
RSS_SKIP_FILTER = [
"www.baka-tsuki.org",
"re-monster.wikia.com",
'inmydaydreams.com',
'www.fanfiction.net',
'www.booksie.com',
'www.booksiesilk.com',
'www.fictionpress.com',
'storiesonline.net',
'www.fictionmania.tv',
'www.bestories.net',
'www.tgstorytime.com',
'www.nifty.org',
'www.literotica.com',
'pokegirls.org',
'www.asstr.org',
'www.mcstories.com',
'www.novelupdates.com',
'40pics.com',
'#comment-',
'?showComment=',
]
RSS_TITLE_FILTER = [
"by: ",
"comments on: ",
"comment on: ",
"comment on ",
]
# Goooooo FUCK YOURSELF
GLOBAL_INLINE_BULLSHIT = [
"This translation is property of Infinite Novel Translations.",
"This translation is property of Infinite NovelTranslations.",
"If you read this anywhere but at Infinite Novel Translations, you are reading a stolen translation.",
"<Blank>",
"<space>",
"<Blank>",
"<Blank>",
"please read only translator’s websitewww.novitranslation.com",
"please read only translator’s website www.novitranslation.com",
"Please do not host elsewhere but MBC and Yumeabyss",
'Original and most updated translations are from volaretranslations.',
'Please support the translator for Wild Consort by reading on volarenovels!',
'Original and most updated translations are from volaretranslations.',
'Original and most updated translations are from volaretranslations.',
"<StarveCleric>",
'(trytranslations.com at your service!)',
'Please do not host elsewhere but volare and Yumeabyss',
'[Follow the latest chapter at wuxiadream.com]',
'I slid my penis inside her. She squirmed a bit but YOU SICK FUCK STOP STEALING MY TRANSLATIONS', # siiiiigh
'I kissed her sweet anus once more before leaving', # siiiiiiiiiiiiigh
'(Watermark: read this translation only at shinku. xiaoxiaonovels.com)',
"<TLN: If you're reading this novel at any other site than Sousetsuka.com you might be reading an unedited, uncorrected version of the novel.>",
'Original and most updated translations are from volare. If read elsewhere, this chapter has been stolen. Please stop supporting theft.',
'*******If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.*******',
'*******Read the chapters at rinkagetranslation.com. The chapters for this series will NOT be posted anywhere else other than on that site itself. If you are reading this from somewhere else then this is chapter has been stolen.*******',
'If you are reading this on a place other than rinkagetranslation.com, this chapter has been stolen and is neither the most recent or complete chapter.',
"Read The Lazy Swordmaster first on Lightnovelbastion.com (If you're reading this elsewhere, it has been stolen)",
"Read The Lazy Swordmaster on Lightnovelbastion.com",
"Property of © Fantasy-Books.live; outside of it, it is stolen.",
]
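# Illustrative sketch only (not part of the original module): one plausible way
# the skip/title/inline-watermark lists above could be applied. The helper
# names below are hypothetical; the real consumers live elsewhere in the
# scraper.
def _contains_any(text, fragments):
    # True if any blacklist fragment occurs in the given string.
    lowered = text.lower()
    return any(fragment.lower() in lowered for fragment in fragments)

def keep_rss_item(item_url, item_title):
    # Drop feed items whose URL matches RSS_SKIP_FILTER or whose title starts
    # with one of the RSS_TITLE_FILTER prefixes.
    if _contains_any(item_url, RSS_SKIP_FILTER):
        return False
    if any(item_title.lower().startswith(prefix) for prefix in RSS_TITLE_FILTER):
        return False
    return True

def scrub_inline_watermarks(text):
    # Strip the anti-scraping watermark strings listed in GLOBAL_INLINE_BULLSHIT.
    for junk in GLOBAL_INLINE_BULLSHIT:
        text = text.replace(junk, '')
    return text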
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
a43033cd1083b62dfa20f3914123e00835219987
|
c5a004f26bf249f888be3849114dd35dbd24cb24
|
/python/evalrescallers/tests/ten_k_validation_data_test.py
|
c9f2b7b50349d5124180fb1dad48982f96e4202e
|
[
"MIT"
] |
permissive
|
wangdi2014/tb-amr-benchmarking
|
f7cf331608cfe7b9cc8995906d991573323dc87a
|
276f4f7f30639dacc62b3e8e395b2d2ce8675089
|
refs/heads/master
| 2022-03-10T00:41:07.364006
| 2019-11-08T09:37:23
| 2019-11-08T09:37:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,515
|
py
|
import os
import unittest
from evalrescallers import ten_k_validation_data
modules_dir = os.path.dirname(os.path.abspath(ten_k_validation_data.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data', 'ten_k_validation_data')
class TestTenKValidationData(unittest.TestCase):
def test_load_sample_to_res_file(self):
'''test load_sample_to_res_file'''
expected_drugs = {'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide'}
expected_data = {
'ena1': {'Isoniazid': 'n/a', 'Rifampicin': 'S', 'Ethambutol': 'R', 'Pyrazinamide': 'S'},
'ena2': {'Isoniazid': 'S', 'Rifampicin': 'U', 'Ethambutol': 'S', 'Pyrazinamide': 'S'},
}
infile = os.path.join(data_dir, 'load_sample_to_res_file.tsv')
got_drugs, got_data = ten_k_validation_data.load_sample_to_res_file(infile)
self.assertEqual(expected_drugs, got_drugs)
self.assertEqual(expected_data, got_data)
def test_load_sources_file(self):
'''test load_sources_file'''
infile = os.path.join(data_dir, 'load_sources_file.tsv')
expect = {
'ena1': ('source1', 'country1'),
'ena2': ('source1', 'country1'),
'ena3': ('source1', 'country2'),
'ena4': ('source2', 'country1'),
'ena5': ('source2', 'country2'),
}
got = ten_k_validation_data.load_sources_file(infile)
self.assertEqual(expect, got)
def test_sources_file_to_country_counts(self):
'''test sources_file_to_country_counts'''
infile = os.path.join(data_dir, 'sources_file_to_country_counts.tsv')
expect = {
'Country1': {'validate': 3, 'test': 0},
'Country2': {'validate': 1, 'test': 0},
'Germany': {'validate': 0, 'test': 1},
'UK': {'validate': 1, 'test': 2},
}
got = ten_k_validation_data.sources_file_to_country_counts(infile)
self.assertEqual(expect, got)
def test_load_all_data(self):
'''test load_all_data'''
expected_drugs = {'Quinolones', 'Isoniazid', 'Rifampicin', 'Ethambutol', 'Pyrazinamide', 'Amikacin', 'Capreomycin', 'Ciprofloxacin', 'Cycloserine', 'Ethionamide', 'Kanamycin', 'Linezolid', 'Moxifloxacin', 'Ofloxacin', 'PAS', 'Rifabutin', 'Streptomycin'}
got_drugs, got_pheno_validation, got_pheno_test, got_predict = ten_k_validation_data.load_all_data()
self.assertEqual(expected_drugs, got_drugs)
_, expect_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.phenotype.tsv'))
_, expect_predict = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.prediction.tsv'))
_, expect_more_pheno = ten_k_validation_data.load_sample_to_res_file(os.path.join(ten_k_validation_data.data_dir, '10k_validation.extra_phenotypes.tsv'))
expect_samples = set(expect_pheno.keys()).union(set(expect_more_pheno.keys()))
got_samples = set(expect_pheno.keys())
self.assertEqual(expect_samples, got_samples)
for pheno_dict in got_pheno_validation, got_pheno_test:
for sample in pheno_dict:
for d in expect_pheno, expect_more_pheno:
if sample in d:
for k, v in d[sample].items():
self.assertEqual(v, pheno_dict[sample][k])
self.assertEqual(expect_predict, got_predict)
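# Hedged addition (not in the original file): lets the module be run directly
# with `python ten_k_validation_data_test.py`; the project may instead rely on
# a separate test runner such as pytest or nose.
if __name__ == '__main__':
    unittest.main()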
|
[
"martin.g.hunt@gmail.com"
] |
martin.g.hunt@gmail.com
|
fd33d0164c5d4f66bd0a74c18659b7ff64f0543f
|
67494b11d3acadcd90dc5d1364fd399ed3d990a7
|
/ensemble/functions.py
|
c592408a3c445b717aab1509d8af15e3008df2c3
|
[] |
no_license
|
filonik/ensemble
|
83bac3923fe39bd5b7f11f4f31bc07c4567f462f
|
4ce42a7bccff0c124800f6a9923503c7bef75f39
|
refs/heads/master
| 2021-08-16T21:24:40.366517
| 2017-11-20T10:25:46
| 2017-11-20T10:25:46
| 111,370,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import functools as ft
import itertools as it
def identity(x):
return x
def constant(x):
def _constant(*args, **kwargs):
return x
return _constant
def compose(*args):
def _compose(f, g):
def __compose(t):
return f(g(t))
return __compose
return ft.reduce(_compose, args, identity)
def invertible(f, f_inv):
f_inv.inv = f
f.inv = f_inv
return f
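# Illustrative usage sketch (not part of the original module), exercising the
# helpers defined above:
if __name__ == '__main__':
    inc = lambda x: x + 1
    dbl = lambda x: x * 2
    # compose applies right-to-left: compose(f, g)(x) == f(g(x))
    assert compose(inc, dbl)(3) == 7
    # constant ignores its arguments and always returns the captured value
    assert constant(42)('ignored', key='ignored') == 42
    # invertible links a function and its inverse through the .inv attribute
    double = invertible(lambda x: x * 2, lambda x: x / 2)
    assert double.inv(double(5)) == 5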
|
[
"filonik@users.noreply.github.com"
] |
filonik@users.noreply.github.com
|
de2fbae89697eeae55b1ded2dcb4300e460ce8f6
|
c657052e2dab3385d5aba26cf9f0ed1f32882aa8
|
/other/stocks_monitor.py
|
baee685730f2633078d7f83902f7fbea10f03cb5
|
[
"MIT"
] |
permissive
|
mmmonk/crap
|
cb8197cbb5a39f9f532338aee4621a4b4f96bde9
|
96ba81723f043503e7ed2f96ea727b524d22b83f
|
refs/heads/master
| 2021-06-01T13:26:28.405931
| 2020-06-24T10:05:22
| 2020-06-24T10:05:22
| 2,802,903
| 16
| 14
| null | 2015-10-11T06:28:11
| 2011-11-18T14:02:22
|
Python
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
#!/usr/bin/env python
import urllib
import json
import sys
import os
import time
if len(sys.argv) > 1:
symbol = str(sys.argv[1])
path = "%s/.cache/stock_%s.txt" % (os.getenv('HOME'), symbol)
out = ""
if os.path.exists(path) and time.time() - os.stat(path).st_mtime < 600:
out = open(path).read()
else:
a = urllib.urlopen("http://www.google.com/finance/info?q=%s" % (symbol))
b = json.loads(a.read().replace("\n","")[2:])
out = "%s:" % (symbol)
for a in b:
if 'l' in a:
out += "%s" % (a['l'],)
if "el" in a:
out += "/%s" % (a['el'],)
open(path,"w").write(out)
print "%s " % (out,)
|
[
"m.lukaszuk@gmail.com"
] |
m.lukaszuk@gmail.com
|
293a3fd9e19cd4c6af0c68b93b5de2ea6cf5b304
|
88323de7876b0ae43762213c7e36c21f44830ac9
|
/LCS.py
|
5353608ba2cfc53cdb6397910fba17516e282932
|
[] |
no_license
|
subhashissuara/DPAlgo
|
c4679c1dca8b4a523f6c8a009433da9d17e72f4f
|
9807f180d516d79d4c4db387ac8666a484aa5a2f
|
refs/heads/master
| 2023-03-02T17:09:23.561000
| 2021-02-10T22:20:13
| 2021-02-10T22:20:13
| 337,867,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
# -----------------------------------------------------------------------------------------------
# Author: Subhashis Suara
# Algorithm: Longest Common Subsequence
# Definitions:
# X - First Sequence
# Y - Second Sequence
# C[i][j] - Length of the LCS of the prefixes X[:i] and Y[:j] (i runs from 1 to m, j from 1 to n, inclusive)
# B[i][j] - Direction for C[i][j] Element. Can be D - Diagonal, H - Horizontal, V - Vertical
# -----------------------------------------------------------------------------------------------
import sys
# Change the print length for LCS result in terminal
# Enter 0 if you don't want to print in terminal
# Enter -1 if you don't want to limit the print length
LCSPrintLength = 50
# Don't Change
LCSValue = ""
def LCS(X, Y):
m = len(X)
n = len(Y)
B = [[0 for i in range(n + 1)] for j in range(m + 1)]
C = [[0 for i in range(n + 1)] for j in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if (X[i - 1] == Y[j - 1]): # Adjusted for 0 based Indexing
C[i][j] = C[i - 1][j - 1] + 1
B[i][j] = 'D'
elif (C[i - 1][j] > C[i][j - 1]):
C[i][j] = C[i - 1][j]
B[i][j] = 'V'
else:
C[i][j] = C[i][j - 1]
B[i][j] = 'H'
return B, C
def printLCS(X, B, i, j):
if (i == 0 or j == 0):
return
if (B[i][j] == 'D'):
printLCS(X, B, i - 1, j - 1)
# print(X[i - 1], end = "")
global LCSValue
LCSValue += X[i - 1]
elif (B[i][j] == 'V'):
printLCS(X, B, i - 1, j)
else:
printLCS(X, B, i, j - 1)
if __name__ == "__main__":
# Initialize X & Y so they are bound even if the txt files are empty
X = ""
Y = ""
# Making sure user understands the required files
print("\nBefore proceeding, please ensure the following:")
print("- You have a X.txt file, containing the 1st sequence, in the same path as this program.")
print("- You have a Y.txt file, containing the 2nd sequence, in the same path as this program.")
print("- If you have any file called LCS.txt in the same path as this program,")
print(" ensure the data is backed up as the file will be overwrittern.")
print(" If the file doesn't exist, it will be created automatically & contain the result.")
input("\nPress any key to continue...\n")
try:
with open('X.txt', 'r') as XFile:
# Remove Endline & Trailing White Spaces
X = XFile.read().replace('\n', '').strip()
# Remove All White Spaces
X = ''.join(X.split())
if (X == ""):
print("Error: X.txt file is empty! Please enter the relevent 1st sequence in X.txt & try again.\n")
sys.exit()
except FileNotFoundError as fileNotFoundError:
print("Error: File X.txt not found! Please create the file and try again.\n")
sys.exit()
except Exception as error:
print(f"Error: {error}\n")
sys.exit()
try:
with open('Y.txt', 'r') as YFile:
# Remove Endline & Trailing White Spaces
Y = YFile.read().replace('\n', '').strip()
# Remove All White Spaces
Y = ''.join(Y.split())
if (Y == ""):
print("Error: Y.txt file is empty! Please enter the relevent 2nd sequence in Y.txt & try again.\n")
sys.exit()
except FileNotFoundError as fileNotFoundError:
print("Error: File Y.txt not found! Please create the file and try again.\n")
sys.exit()
except Exception as error:
print(f"Error: {error}\n")
sys.exit()
m = len(X)
n = len(Y)
B, C = LCS(X, Y)
printLCS(X, B, m, n)
if (len(LCSValue) > 0):
if (LCSPrintLength != 0):
if (len(LCSValue) > LCSPrintLength and LCSPrintLength > 0):
print(f"LCS for given X & Y sequences: {LCSValue[:LCSPrintLength]}... {len(LCSValue) - LCSPrintLength} more characters. \n")
else:
print(f"LCS for given X & Y sequences: {LCSValue}\n")
print("The result has been saved to LCS.txt file in the same path as this program. Have a great day!\n")
try:
with open('LCS.txt', 'w+') as LCSFile:
LCSFile.write(LCSValue)
except Exception as error:
print(f"Error: {error}\n")
sys.exit()
else:
print("No LCS found from X & Y sequences.\n")
|
[
"subhashis.suara999@gmail.com"
] |
subhashis.suara999@gmail.com
|
9fb4ff9e6f72ce9f9852a26b33c89042ab274ead
|
dde44d2988efb4421a0c211a062e9509b2919865
|
/src/reduce_number_stats.py
|
d0e1b345134bf376c087d029da0fa7e1bda59cfb
|
[] |
no_license
|
tylerjwoods/nfl_game_predictor
|
2c99264c5d2e39c88f74891f4b728661503b9a28
|
a0c8e34e03ca7ec6199429bb8b7a9f634127b2f5
|
refs/heads/master
| 2022-07-29T11:51:05.835676
| 2020-05-18T22:43:42
| 2020-05-18T22:43:42
| 263,088,458
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
import pandas as pd
def clean_team_stats(df):
'''
inputs
------
df: pandas dataframe
returns
------
df: pandas dataframe with cleaned dataset. drops multiple columns from original df
'''
# For first analysis, let's drop most of the columns to make our program run faster
columns_to_drop = ['passCompletions', 'passAvg', 'passYardsPerAtt', 'rushYards', 'passIntPct', 'passLng', \
'pass20Plus', 'pass40Plus', 'sacks_allowed_yards', 'rushAverage', 'rushLng', 'rush1stDowns', 'rush1stDownsPct',\
'rush20Plus','rush40Plus', 'rushFumbles','rec1stDowns','recFumbles','tackleSolo','tackleTotal','tackleAst',\
'sackYds','tacklesForLoss', 'krTD', 'kr20Plus', 'fgMade','field_goal_pct','punt_inside_20_pct','third_down_pct',
'fourth_down_pct','penalties']
df.drop(columns=columns_to_drop,inplace=True)
return df
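# Illustrative usage sketch (not part of the original module). The CSV path and
# the presence of the dropped columns are assumptions made for this example
# only:
#
#   import pandas as pd
#   team_stats = pd.read_csv('data/team_stats.csv')
#   team_stats = clean_team_stats(team_stats)
#   print(team_stats.columns.tolist())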
|
[
"tyjordan.woods@gmail.com"
] |
tyjordan.woods@gmail.com
|