Dataset columns (29 per row):

| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |

---
blob_id: e5672f4fe584c8f4396ae6b8030cbba66b775c5b | directory_id: b48afc357cd9fccdcef52d7be2385db04189324b
path: /sockets/pentesting/prueba.py | content_id: c674741734c8d3f53e9f245731fd08aad2b199cc
detected_licenses: [] | license_type: no_license | repo_name: binarioGH/programas_de_prueba
snapshot_id: 0703cc8565aad6dcb9f48495393897dbeef48ac9 | revision_id: c9cc63b505f26c5720dc7298cdf35b14cba5c067 | branch_name: refs/heads/master
visit_date: 2020-03-06T21:28:23.663063 | revision_date: 2020-02-07T05:07:21 | committer_date: 2020-02-07T05:07:21
github_id: 127,078,188 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 163 | extension: py
content:
```python
#-*-coding: utf-8-*-
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("192.168.0.5",21))
banner = sock.recv(1024)
print(banner)
```
authors: ["lacuentafalsadediegojonas@gmail.com"] | author_id: lacuentafalsadediegojonas@gmail.com
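
A minimal hardened variant of the banner grab above, for reference: same host and port, but with a timeout and a context manager so the socket is always closed (the timeout value is an arbitrary choice, not part of the original).

```python
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.settimeout(5)                      # don't hang forever on a silent host
    sock.connect(("192.168.0.5", 21))       # placeholder target from the file above
    banner = sock.recv(1024)                # many FTP servers greet on connect
    print(banner.decode(errors="replace"))  # decode bytes for readable output
```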

---
blob_id: 965954113fe472d45d3521f7b0dc9b0e9933fad9 | directory_id: fe24b009913d625f8dca547cf5a3956514678e04
path: /plugins/nn/unet_v2/src/py_example.py | content_id: b0fafe457bfb563d99a3f4119fefd0fe77d12af3
detected_licenses: [] | license_type: no_license | repo_name: ai-motive/supervisely
snapshot_id: 6fed135b27afe166627c24bbee811d07053bd0e0 | revision_id: f6ef26cbb8a5f3457f97b2db600a442aa03aa5d2 | branch_name: refs/heads/master
visit_date: 2023-07-13T21:36:00.541608 | revision_date: 2021-08-24T03:05:07 | committer_date: 2021-08-24T03:05:07
github_id: 372,686,196 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-06-01T03:05:10 | gha_created_at: 2021-06-01T03:05:09 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 520 | extension: py
content:
```python
import requests
from requests_toolbelt import MultipartEncoder
if __name__ == '__main__':
content_dict = {}
content_dict['image'] = ("big_image.png", open("/workdir/src/big_image.jpg", 'rb'), 'image/*')
content_dict['mode'] = ("mode", open('/workdir/src/sliding_window_mode_example.json', 'rb'))
encoder = MultipartEncoder(fields=content_dict)
response = requests.post("http://0.0.0.0:5000/model/inference", data=encoder, headers={'Content-Type': encoder.content_type})
print(response.json())
```
authors: ["max@supervise.ly"] | author_id: max@supervise.ly
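
For reference, a sketch of the same streaming upload with the two files opened in a `with` block so the handles are closed deterministically; the endpoint, paths, and field names are the ones hard-coded above.

```python
import requests
from requests_toolbelt import MultipartEncoder

with open("/workdir/src/big_image.jpg", "rb") as img, \
     open("/workdir/src/sliding_window_mode_example.json", "rb") as mode:
    encoder = MultipartEncoder(fields={
        "image": ("big_image.png", img, "image/*"),  # field tuple kept from the original
        "mode": ("mode", mode),
    })
    # MultipartEncoder streams the body, so the image is never read fully into memory.
    response = requests.post("http://0.0.0.0:5000/model/inference", data=encoder,
                             headers={"Content-Type": encoder.content_type})
print(response.json())
```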

---
blob_id: 0bd2b3207f1c166cf6cd840b7f6f2f4ba9ad1f37 | directory_id: ec3090f90b1c3fe7e0550473d719ccc18d130dfe
path: /sample/linear_algebra.py | content_id: 785e43e91f5ce3948d96dc6d183265f7c5269da2
detected_licenses: [] | license_type: no_license | repo_name: EDAriR/Data_Science_from_Scratch
snapshot_id: 349ecb54bc5ebb5520d2d6bbe717bed911dc8177 | revision_id: 471b3fe6d6bcd27f5f617c7784547ddb98989e9b | branch_name: refs/heads/master
visit_date: 2020-03-15T21:46:40.167703 | revision_date: 2018-05-15T15:48:34 | committer_date: 2018-05-15T15:48:34
github_id: 132,361,385 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,922 | extension: py
content:
```python
# -*- coding: iso-8859-15 -*-
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial, reduce
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
"""相應元素相加"""
return [v_i + w_i for v_i, w_i in zip(v, w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
"""相應元素相減"""
return [v_i - w_i for v_i, w_i in zip(v, w)]
def vector_sum(vectors):
"""相應元素總和"""
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
"""c 是一個數值, v 是一個向量"""
return [c * v_i for v_i in v]
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
"""計算出一個向量,其元素值為所有向量相應元素的平均值"""
n = len(vectors)
return scalar_multiply(1 / n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
"""點積 相應元素相乘之後加總"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
#
# functions for working with matrices
#
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
return A[i]
def get_column(A, j):
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0, 0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0, 0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v, w, o), marker='.')
plt.axis('equal')
plt.show()
make_graph_dot_product_as_vector_projection(plt)
```
authors: ["ed_he@syntrontech.com"] | author_id: ed_he@syntrontech.com
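
A quick sanity check of the vector helpers defined in the file above (assuming they are importable from a module named linear_algebra):

```python
from linear_algebra import vector_add, dot, distance, vector_mean  # assumed module name

v, w = [1, 2, 3], [4, 5, 6]
print(vector_add(v, w))     # [5, 7, 9]
print(dot(v, w))            # 1*4 + 2*5 + 3*6 = 32
print(distance(v, w))       # sqrt(3 * 3**2) ~= 5.196
print(vector_mean([v, w]))  # [2.5, 3.5, 4.5]
```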

---
blob_id: f83158efa33a39673f800fe504ec1c01c3306456 | directory_id: 083ca3df7dba08779976d02d848315f85c45bf75
path: /DiagonalTraverse6.py | content_id: 08697eb1bb23cd57f8ecca6a47acfe8dd293814f
detected_licenses: [] | license_type: no_license | repo_name: jiangshen95/UbuntuLeetCode
snapshot_id: 6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94 | revision_id: fa02b469344cf7c82510249fba9aa59ae0cb4cc0 | branch_name: refs/heads/master
visit_date: 2021-05-07T02:04:47.215580 | revision_date: 2020-06-11T02:33:35 | committer_date: 2020-06-11T02:33:35
github_id: 110,397,909 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 671 | extension: py
content:
```python
class Solution:
def findDiagonalOrder(self, matrix: list) -> list:
if not matrix:
return []
m, n = len(matrix), len(matrix[0])
entries = [[] for _ in range(m + n - 1)]
for i in range(m):
for j in range(n):
entries[i + j].append(matrix[i][j])
result = []
        for i in range(len(entries)):
            # even diagonals are read bottom-to-top: i % 2 * 2 - 1 is -1 for even i, +1 for odd i
            result += entries[i][::i % 2 * 2 - 1]
return result
if __name__ == '__main__':
m = int(input())
matrix = []
for i in range(m):
matrix.append([int(num) for num in input().split()])
solution = Solution()
print(solution.findDiagonalOrder(matrix))
```
authors: ["jiangshen95@163.com"] | author_id: jiangshen95@163.com
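
A worked example for findDiagonalOrder above (LeetCode 498): cells are bucketed by anti-diagonal index i + j, and even-numbered diagonals are reversed so the walk zigzags.

```python
matrix = [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]]
# diagonals: [1], [2, 4], [3, 5, 7], [6, 8], [9]; even-indexed ones reversed
print(Solution().findDiagonalOrder(matrix))  # [1, 2, 4, 7, 5, 3, 6, 8, 9]
```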

---
blob_id: 409e3ddf97c77d5f12416e5923d48d09f61b0418 | directory_id: eee4f528b8e3f0ed5a2cfe1359996ecc5293a45a
path: /clickTracker/views.py | content_id: a3516e4689050f26d76d0133118ac6e3010a7327
detected_licenses: [] | license_type: no_license | repo_name: anoncb1754/VideoSoup
snapshot_id: 56baf4cabc2911768cdabab4037c4e73e362d7d0 | revision_id: b7afb1e00f0558c98448fae1f99fd7fe2b257ff5 | branch_name: refs/heads/master
visit_date: 2021-01-19T09:31:13.039227 | revision_date: 2013-04-22T20:10:09 | committer_date: 2013-04-22T20:10:09
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 592 | extension: py
content:
```python
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import Http404
from django.db import DatabaseError  # missing in the original; needed by the except clause below
from datetime import datetime
from clickTracker.models import ClicksTracked
def clickTracker(request):
'''
Does click tracking on post urls
'''
destination = request.GET.get('dst')
post_id = request.GET.get('id')
timestamp = datetime.now()
try:
click = ClicksTracked(post_id=post_id, destination=destination, timestamp=str(timestamp))
click.save()
except DatabaseError:
raise Http404
try:
return HttpResponseRedirect(destination)
except:
raise Http404
```
authors: ["cb1754@cb1754s-MacBook-Air.local"] | author_id: cb1754@cb1754s-MacBook-Air.local

---
blob_id: 22b0d22cc8f6ae16c5bdf42259895c0463786f0e | directory_id: 42aa91a206bd5a685f84e3751732bacc03a2674d
path: /polls/migrations/0001_initial.py | content_id: 8666e22fb7e460b193932bcb82d0e839e859f7a4
detected_licenses: [] | license_type: no_license | repo_name: deepanshubadshah/Django-polls
snapshot_id: e4996ca5a84202cf86a6a26557def8f0abf46e93 | revision_id: 52aeabb614cec46964ed8423ef4dea4ed4a33507 | branch_name: refs/heads/master
visit_date: 2020-08-18T01:46:19.091597 | revision_date: 2019-10-17T20:20:58 | committer_date: 2019-10-17T20:20:58
github_id: 215,733,853 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-10-18T03:02:53 | gha_created_at: 2019-10-17T07:50:19 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,075 | extension: py
content:
```python
# Generated by Django 2.2.6 on 2019-10-16 04:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
],
),
]
```
authors: ["deepanshu16144@iiitd.ac.in"] | author_id: deepanshu16144@iiitd.ac.in

---
blob_id: 944ea00d1ec201dd0709e03276f8bdd7cb76fc26 | directory_id: 8a2842ad67c3388d26688f78fb8bf828ac9912da
path: /app/api/urls.py | content_id: 958b5719b2f93dc31996f5d138ee1268f0d98a15
detected_licenses: [] | license_type: no_license | repo_name: gsi-sandy/django-on-docker
snapshot_id: 2532eedad0be704ac8ebda029067026fdc0c823b | revision_id: e46d4e157243a37a53c003007669cadaac0afcda | branch_name: refs/heads/master
visit_date: 2022-11-30T16:05:00.788144 | revision_date: 2020-08-04T12:50:26 | committer_date: 2020-08-04T12:50:26
github_id: 276,133,260 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 420 | extension: py
content:
```python
from django.urls import include, path
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r'persons', views.PersonViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
```
authors: ["sandy@generalsoftwareinc.com"] | author_id: sandy@generalsoftwareinc.com
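
The router registration above presupposes a PersonViewSet in views.py; a minimal sketch of what it might look like (the Person model and serializer names are assumptions, not taken from this repo):

```python
from rest_framework import viewsets

from .models import Person                 # assumed model
from .serializers import PersonSerializer  # assumed serializer

class PersonViewSet(viewsets.ModelViewSet):
    """CRUD endpoints exposed at /persons/ by the DefaultRouter."""
    queryset = Person.objects.all()         # the router derives the basename from this
    serializer_class = PersonSerializer
```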

---
blob_id: 9cb90f35ed78d85d682a190e8d2c536e11bcdc87 | directory_id: b62a4f4cc42aee1066c591e91a9c83c899329fbc
path: /print_table.py | content_id: e5144da8c4f33bcdb0a69abda3418032e69014b6
detected_licenses: [] | license_type: no_license | repo_name: LiliyaRachkova/TVShows
snapshot_id: f0fff2b23e322fb85882acabf52ed6f8c2cb51de | revision_id: 54068623380bd9c2f8d13f501a072e99b97a5d24 | branch_name: refs/heads/master
visit_date: 2023-06-03T13:09:31.850099 | revision_date: 2021-06-18T19:14:33 | committer_date: 2021-06-18T19:14:33
github_id: 378,075,823 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 600 | extension: py
content:
```python
import psycopg2
import pandas as pd
import numpy as np
TVSHOW = "http://api.tvmaze.com/search/shows?q="
def print_table(cur):
cur.execute("select name, premiered from tvshows order by premiered")
tab = cur.fetchall()
try:
tab = np.array(tab)
df = pd.DataFrame(data=tab, columns=["name", "premiered"])
print(df)
except ValueError:
print("Таблица пуста")
if __name__ == '__main__':
with psycopg2.connect(dbname="postgres", user="postgres", password="example") as conn:
with conn.cursor() as cur:
print_table(cur)
```
authors: ["lili.rachkova@ya.ru"] | author_id: lili.rachkova@ya.ru

---
blob_id: 48fd5cd10a183f7b303fce8d996994720b59e4a7 | directory_id: edb09b9297e960ee3208077c4a15d3497136337b
path: /server.py | content_id: 0df7b753bddf5d55a941a3c722f75c84b7013113
detected_licenses: [] | license_type: no_license | repo_name: Mathes5556/interview
snapshot_id: 5fdf8d807b75e49de351c5a81a2d1f3f6badfbbf | revision_id: 156e3d734ea3883ae529e244117f981800ef09dc | branch_name: refs/heads/master
visit_date: 2021-01-18T20:11:50.634786 | revision_date: 2017-08-17T01:43:57 | committer_date: 2017-08-17T01:43:57
github_id: 100,546,106 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,491 | extension: py
content:
```python
from flask import Flask, request
from traingFlow import wholeTraingFlow
from utils import get_logger
from model import provideRecommendation
import pandas as pd
app = Flask(__name__)
global models  # global variables are bad practice, but this stores the trained models while the server runs
models = []
LOG = get_logger('RECOMENDATION_EXPONEA_LOG.log')  # initialize the logger
@app.route('/')
def about():
'''
Just for ensure server works
:return:
'''
return 'Hello, World!'
@app.route('/train')
def train():
'''
Whole process of training applied on history data
:return:
'''
global models
models = wholeTraingFlow(LOG)
LOG.info("Models for products were sucesfully created!")
return "model was just trained!"
@app.route('/recommendationForUser/', methods=['POST'])
def recommendUser():
'''
based on POST user data get recommendation
:return: list of products which model recommend for given user
'''
global models
if len(models) == 0:
return "Please train models first!"
userJson = request.get_json()
userJsonReadyToDF = {}
for k in userJson:
userJsonReadyToDF[k] = [userJson[k]]
result = provideRecommendation(models, pd.DataFrame.from_dict(userJsonReadyToDF), LOG )
return "recomended products: " + str(result["recommendations"][0])
if __name__ == "__main__":
app.run()
```
authors: ["noreply@github.com"] | author_id: Mathes5556.noreply@github.com
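
A sketch of how a client might drive the two endpoints above; the user fields are invented for illustration, since the expected JSON schema is not shown in this file.

```python
import requests

base = "http://127.0.0.1:5000"
print(requests.get(f"{base}/train").text)  # train first, or the POST below is refused

user = {"age": 34, "country": "SK"}        # hypothetical user attributes
resp = requests.post(f"{base}/recommendationForUser/", json=user)
print(resp.text)
```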

---
blob_id: a47231f66de9ee45eddd9c3bc0cf3ff7e80eff0d | directory_id: 6def27a464d1e7457731a40d268bc9d30bc42ba2
path: /middle_project 最终版/client/game1/game1_client.py | content_id: 2071faeebd181518d4a8457cfe9e8e02d7ee666c
detected_licenses: [] | license_type: no_license | repo_name: liu-xinke/playroom
snapshot_id: 43201c297ef92c294fd295ecf52946ff2c968fb2 | revision_id: 4c44c47fd6dc0c71a34a65023692f34c60ece861 | branch_name: refs/heads/master
visit_date: 2020-03-26T23:58:57.966356 | revision_date: 2018-08-22T01:10:36 | committer_date: 2018-08-22T01:10:36
github_id: 145,583,463 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,840 | extension: py
content:
```python
import sys
sys.path.append(r'./database')
from socket import *
import os
import random
from tkinter import *
from threading import Thread
from time import sleep
import buried_point2
import game1_secondground
# use global variables to pass back the value produced by a button press
# give it an initial sentinel value
i = '123'
name = ''
other = ''
# UI status message
text1 = '正在匹配玩家...'
# client (receives from / sends to the server)
def client():
global i
global text1
global other
s = socket()
try:
s.connect(('0.0.0.0',8080))
except ConnectionRefusedError:
text1 = '无法连接服务器!'
else:
print('waiting...')
while True:
if text1 != '正在匹配玩家...':
text1 = '正在匹配玩家...'
if i == 'q':
break
elif i == '123':
s.send(('connected ').encode())
sleep(0.5)
data = s.recv(1024).decode()
text1 = data
print(data)
if not data or data == '匹配成功':
s.send(name.encode())
try:
other = s.recv(1024).decode().split(' ')[2]
if other == name:
text1 = '不能自己匹配自己噢!'
i = 'q'
break
except:
pass
break
else:
i = "123"
while True:
if i == '123':
pass
else:
print(i)
if i == 'q':
s.send(i.encode())
break
else:
                # on the first choice, send the selection to the server
k = 0
print(i)
ch ={'1':'剪刀','2':'石头','3':'布'}
text1 = '你的选择是 %s \n请等待对方玩家选择' % ch[i]
if k == 0:
s.send(i.encode())
sleep(0.1)
k += 1
game = "猜拳"
w = buried_point2.buried_point(name,game)
                # later clicks are not resent; only the first choice is sent to the server
else:
print('你的选择是',i,'请等待对方玩家选择')
            # receive the server's reply
data = s.recv(1024).decode()
print('receive:',data)
if data[-13:] == '对方已退出\n 游戏结束!':
text1 = '对方已退出\n 游戏结束!'
break
text1 = data
if not data or data== '对方已退出\n 游戏结束!':
break
            # after receiving the opponent's choice, decide the result
if data == "1" or data == "2" or data == "3":
play_multi_result(i,data,s)
            # reset to the initial value
i = '123'
i = '123'
other = ''
# rock-paper-scissors mini-game main logic
# result-judging function
def play_multi_result(you,other,s):
global text1
print(name)
i = you
c = other
if i == '1':
if c == '2':
text1 = '对方选择了石头\n 你输了!'
msg = 'l %s' % name
s.send(msg.encode())
            pass  # a loss earns no points
elif c == '1':
text1 = '对方选择了剪刀\n 平局!'
            pass  # a draw earns no points
else:
text1 = '对方选择了布\n 你赢了!'
msg = 'w %s' % name
s.send(msg.encode())
            # on a win, update the database
elif i == '2':
if c == '3':
text1 = '对方选择了布\n 你输了!'
msg = 'l %s' % name
s.send(msg.encode())
pass
elif c == '2':
text1 = '对方选择了石头\n 平局!'
pass
else:
text1 = '对方选择了剪刀\n 你赢了!'
msg = 'w %s' % name
s.send(msg.encode())
elif i == '3':
if c == '1':
text1 = '对方选择了剪刀\n 你输了!'
msg = 'l %s' % name
s.send(msg.encode())
pass
elif c == '3':
text1 = '对方选择了布\n 平局!'
pass
else:
text1 = '对方选择了石头\n 你赢了!'
msg = 'w %s' % name
s.send(msg.encode())
# game window (GUI) function
def play_windows(Online):
global i
global text1
global other
    # button event handlers
def press1():
global i
if i == '123':
i = '1'
def press2():
global i
if i == '123':
i = '2'
def press3():
global i
if i == '123':
i = '3'
def press4():
global i
try:
Online.config(state=ACTIVE)
except:
pass
if text1 == '对方已退出\n 游戏结束!':
root.destroy()
else:
i = 'q'
root.destroy()
    # refresh the UI in near-real time
def update_ui():
t1.configure(text=text1)
l1.configure(text=('你的对手:\n' + other.center(9)))
root.after(100,update_ui)
    # main window setup
root = Toplevel()
root.title('猜拳小游戏')
root.geometry('400x600+800+250')
    # opponent info area
frame1 = Frame(root,width=400,height=200,bg='yellow')
l1 = Label(frame1,font=('黑体',30))
l1.pack()
frame1.propagate(False)
frame1.pack()
    # game log area
frame2 = Frame(root,width=400,height=100)
t1 = Label(frame2,font=('宋体',30),bg='purple')
t1.pack(expand=YES,fill=BOTH)
frame2.propagate(False)
frame2.pack()
    # game area (choice buttons)
frame3 = Frame(root,width=400,height=250,bg='green')
b1 = Button(frame3,text='剪刀',font=('黑体',25),command=press1).pack(padx=25,side=LEFT)
b2 = Button(frame3,text='石头',font=('黑体',25),command=press2).pack(padx=25,side=LEFT)
b3 = Button(frame3,text='布',font=('黑体',25),command=press3).pack(padx=25,side=LEFT)
frame3.propagate(False)
frame3.pack()
    # quit button
b4 = Button(root,text='退出',font=('黑体',25),command=press4).pack(side=BOTTOM)
update_ui()
root.protocol('WM_DELETE_WINDOW',press4)
root.mainloop()
# thread launcher
def main(who,Online):
global name
name = who
t1 = Thread(target=client)
t1.setDaemon(True)
t1.start()
play_windows(Online)
if __name__ == '__main__':
    main(name, None)  # no 'Online' button exists when run standalone, so pass None; press4 guards access with try/except
```
authors: ["953576288@qq.com"] | author_id: 953576288@qq.com

---
blob_id: bd1c3994e1c7429e1072cf4d027c437ad6f3987a | directory_id: 62a33631aae3af5ae1406e4f63d4591838a569ae
path: /src/apps/shop_managment/serializers/category_serializer.py | content_id: 227d08429e061a8c0e4443e5602ab9e1222ef4e7
detected_licenses: [] | license_type: no_license | repo_name: er5bus/shop-api
snapshot_id: 092a52a198c620becc48506aee6ef4c333f9798f | revision_id: 7feef3959fa3be69fcbbcf9c2f363757035968b4 | branch_name: refs/heads/main
visit_date: 2023-07-26T23:56:54.422996 | revision_date: 2021-09-12T16:33:28 | committer_date: 2021-09-12T16:33:28
github_id: 343,736,798 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,066 | extension: py
content:
```python
import datetime
from rest_framework import serializers
from ..models.category import Category
from core.mixins.serializers import UniqueFieldsMixin
class CategorySerializer(UniqueFieldsMixin):
class Meta:
model = Category
fields = (
'id',
'category_name',
'description',
'created_at',
'is_active',
)
read_only_fields = (
'id',
'created_at',
'is_active',
)
def create(self, validated_data):
current_user = self.context.get('request').user
validated_data['created_by'] = current_user
return super().create(validated_data)
def update(self, instance, validated_data):
current_user = self.context.get('request').user
validated_data['updated_by'] = current_user
validated_data['updated_at'] = datetime.datetime.now()
import pprint
pp = pprint.PrettyPrinter(depth=6)
pp.pprint(validated_data)
return super().update(instance, validated_data)
```
authors: ["rami2sfari@gmail.com"] | author_id: rami2sfari@gmail.com

---
blob_id: 6c88248aa22465ec68fe92b2d27487bb17fc0874 | directory_id: 61283bb32652990d882c6856cdc25868f91088d1
path: /Python/WebDev/Django/Tutorial/users/views.py | content_id: 57308ac2ad847c9dc4f98963a5294785c7964d34
detected_licenses: ["MIT"] | license_type: permissive | repo_name: michaelg29/Programming
snapshot_id: 1fbd03099ad75eeab7f0be9d4ee83cb4e5ef6c3f | revision_id: 7f726fbd8e97fe3d32d58265ab753735d88be3e0 | branch_name: refs/heads/master
visit_date: 2023-03-09T08:05:11.314808 | revision_date: 2023-02-07T18:04:59 | committer_date: 2023-02-07T18:04:59
github_id: 163,780,658 | star_events_count: 5 | fork_events_count: 3
gha_license_id: MIT | gha_event_created_at: 2023-03-06T12:39:52 | gha_created_at: 2019-01-02T01:35:00 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 824 | extension: py
content:
```python
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
return render(request, 'users/profile.html')
"""
message.debug
message.info
message.success
message.warning
message.error
"""
```
authors: ["30534878+michaelg29@users.noreply.github.com"] | author_id: 30534878+michaelg29@users.noreply.github.com

---
blob_id: 8e71e1bca19fca69e10a9eeb6c69b6b561b6e6b1 | directory_id: 26a2e9220b4008d1249ed9ee4e6fa60423985d3a
path: /Blackbox Optimization Techniques/frogger_env/envs/crossing_env_v1.py | content_id: ae10003bc3b391dbec91c58633ca12844e42a901
detected_licenses: [] | license_type: no_license | repo_name: rprasan/Reinforcement-Learning
snapshot_id: 6f9a5d210c3bd8a5d79d128df0af87e618e18a96 | revision_id: 7bca1ffa2b3a4ff729cb75b84c3ee9adbe3010f0 | branch_name: refs/heads/main
visit_date: 2023-08-03T13:43:08.486955 | revision_date: 2023-07-27T21:49:05 | committer_date: 2023-07-27T21:49:05
github_id: 346,599,933 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,824 | extension: py
content:
```python
# Licensing information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the authors.
#
# Authors: Avishek Biswas (avisheb@g.clemson.edu) and Ioannis Karamouzas (ioannis@g.clemson.edu)
import numpy as np
from gym.envs.registration import register
from frogger_env.envs.abstract import AbstractEnv
from frogger_env.agent.road import Road, Vehicle
from frogger_env.agent.frogger_agent import Agent
class CrossingEnv_V1(AbstractEnv):
"""
A modified frogger environment.
The agent has to cross the highway and reach the other side.
"""
"""The reward received when reaching the other side of the highway."""
GOAL_REWARD = 1.0
""" The reward received when colliding with a vehicle. """
COLLISION_REWARD = -0.3
""" The reward associated with how much aligned the agent is to its goal. """
GOAL_ALIGNMENT_REWARD = 0.01
ACTIONS_MAP = np.array([[0, 0],
[0, 1],
[0, -1],
[-1, 0],
[1, 0]])
def default_config(self) -> dict:
config = super().default_config()
config.update({
"observation": {
"lidar":
{
"sensing_distance": 10,
"angle_resolution": 12,
"frame_history": 4,
"flatten_observation": True,
"include_goal_distance": True,
"include_goal_local_coodinates": True
},
"occupancy_grid":
{
"frame_history": 1,
"flatten_observation": True,
"include_goal_distance": True,
"include_goal_local_coodinates": True
},
},
"observation_type": "lidar",
"world_bounds": [0., 0., 50., 50],
"lanes_count": 4,
"vehicles_count": 20, # upper bound
"duration": 60,
"vehicle_spacing": 2.5,
"vehicle_speed": 3.5,
"vehicle_width": 2.,
"random_init": 1,
"bidirectional": 1
})
return config
def _reset(self):
self._create_road()
self._create_agent()
def _create_road(self):
"""
Create a road composed of straight adjacent lanes and populate it with vehicles.
"""
self.road = Road(vehicles=[], lanes=[], np_random=self.np_random,
bidirectional=self.config["bidirectional"])
self.road.generate_lanes(self.config["lanes_count"], length=50.)
for _ in range(self.config["vehicles_count"]):
self.road.generate_random_vehicle(speed=self.config["vehicle_speed"],
lane_id=None,
spacing=self.config["vehicle_spacing"], width=self.config["vehicle_width"])
    def _create_agent(self):
        """
        Create the agent. It can be spawned randomly.
        """
self.agent_spawn = [10 + np.random.rand()*30., self.road.get_first_lane_Y() -4] if self.config["random_init"]\
else [25, self.road.get_first_lane_Y() - 4]
self.goal = [25, self.road.get_last_lane_Y() + 8]
self.agent = Agent(np.array(self.agent_spawn), radius=0.75, goal=self.goal,
action_map=self.ACTIONS_MAP, speed=2,
world_bounds=self.config["world_bounds"])
self.lower_boundary = [self.agent_spawn[0], self.agent_spawn[1] - 2]
def _reward(self, action):
"""
The reward is defined to encourage the agent move towards the goal and cross the highway,
while avoiding collisions.
"""
reward = self.COLLISION_REWARD * int(self.agent.crashed) \
+ self.GOAL_REWARD * int(self.agent.goal_distance < 5) \
+ self.GOAL_ALIGNMENT_REWARD * np.dot(self.agent.velocity_direction,
self.agent.goal_direction)
return reward
def _is_terminal(self):
"""
The episode is over if the agent collides, or the episode duration is met,
or the agent is close to the goal.
"""
return self.agent.crashed or \
(self.time >= self.config["duration"]
and not self.config["manual_control"]) or \
self.agent.goal_distance < 5 or \
self.agent.position[1] < self.lower_boundary[1]
register(
id='frogger-v1',
entry_point='frogger_env.envs:CrossingEnv_V1',
)
```
authors: ["rprasan@clemson.edu"] | author_id: rprasan@clemson.edu
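
A sketch of instantiating the environment registered above, assuming the frogger_env package runs register() on import and follows the classic gym step API (consistent with the gym.envs.registration import in the file):

```python
import gym
import frogger_env  # noqa: F401 -- importing the package is assumed to trigger register()

env = gym.make("frogger-v1")
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()          # random policy, just to exercise the env
    obs, reward, done, info = env.step(action)
env.close()
```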

---
blob_id: 820a35381bbb8b848bc8bc4310a9d3d09d1bfd40 | directory_id: e3f4cf19cd3514a9dd7a758212fb9a0d36d0935d
path: /auth_backend/role/migrations/0003_auto_20181217_0701.py | content_id: 39157942f7c71dc5923ebab2458bb1dcc046448c
detected_licenses: [] | license_type: no_license | repo_name: dbykov/auth-backend
snapshot_id: 236b60faca3c41b66bea51db0235b1a03ae1efd9 | revision_id: 6e57aa91e0d7cce77a17ba61bc07def053756c40 | branch_name: refs/heads/master
visit_date: 2022-05-31T02:11:36.795650 | revision_date: 2019-07-25T12:46:27 | committer_date: 2019-07-25T12:46:27
github_id: 199,646,871 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-05-25T02:19:42 | gha_created_at: 2019-07-30T12:25:07 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 455 | extension: py
content:
```python
# Generated by Django 2.1.3 on 2018-12-17 07:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('role', '0002_auto_20181129_0702'),
]
operations = [
migrations.AlterModelOptions(
name='role',
options={'ordering': ('-id',), 'verbose_name': 'Роль пользователя', 'verbose_name_plural': 'Роли пользователей'},
),
]
```
authors: ["dbykov@mail.ru"] | author_id: dbykov@mail.ru

---
blob_id: a510cfec10e0a86ccf1d7cd7c1232f1a3b41a9f4 | directory_id: c394ed7555ee10b2819f1af4773aaf4703887a96
path: /lab_03/lab_01.py | content_id: 9466abf0f46539126f301dbc4dc49df0f238c937
detected_licenses: [] | license_type: no_license | repo_name: LozovskiAlexey/Computational-Algorithms
snapshot_id: 5ea587e6669a7475f6aca912e24c24a0703e83f5 | revision_id: 62ab440564d152d89e45cc0c30291b74622c9b3b | branch_name: refs/heads/master
visit_date: 2023-02-21T13:28:03.322795 | revision_date: 2021-01-22T09:05:53 | committer_date: 2021-01-22T09:05:53
github_id: 331,891,999 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,109 | extension: py
content:
```python
from math import fabs
def f(x):
return x**2
def print_mtx(m):
print("!!!----------------------------")
for i in m:
for j in i:
if j is not None:
print("{:.3f}".format(j), end=' ')
print()
print("!!!----------------------------")
def print_data(data):
for i in data:
print("x = {:.3f}; f(x) = {:.3f}".format(i[0], i[1]))
def data_input():
data = list()
    left, right, step = map(float, input('Enter "left bound right bound step": ').split())
while left < right + step / 2:
data.append((left, f(left)))
left += step
return data
def point_selection(data, n, x):
# n + 1 points
d_len = len(data)
new_data = list()
if d_len < n + 1:
return None
left = -1
index = 0
right = 1
mins = fabs(x - data[0][0])
count = 0
for i in range(d_len):
if fabs(x - data[i][0]) < mins:
left = i - 1
index = i
right = i + 1
mins = fabs(x - data[i][0])
new_data.append(data[index])
while left != -1 or right != d_len:
if right != d_len:
new_data.append(data[right])
right += 1
count += 1
if count == n:
break
if left != -1:
new_data.insert(0, data[left])
left -= 1
count += 1
if count == n:
break
return sorted(new_data)
def swap_cords(data):
new_data = list()
for i in data:
new_data.append((i[1], i[0]))
return new_data
# interpolation
def get_table(data, n):
m = [[None for j in range(2 + n)] for i in range(n + 1)]
for i in range(n + 1):
m[i][0] = data[i][0]
m[i][1] = data[i][1]
for col in range(n):
for row in range(n - col):
denominator = (m[row + 1 + col][0] - m[row][0])
denominator = denominator if denominator != 0 else 1e-10
m[row][col + 2] = (m[row + 1][col + 1] - m[row][col + 1]) / denominator
return m
def p(table, n, x):
# Pn(x) = f(x0) + (x − x0) · f(x0, x1) + (x − x0)(x − x1) · f(x0, x1, x2) + ...
# +(x − x0)(x − x1) ...(x − xn−1) · f(x0, x1, ..., xn).
mult = 1
Pn = table[0][1]
for i in range(n):
mult *= (x - table[i][0])
Pn += mult * table[0][2 + i]
return Pn
def interpolation(data, n, x):
# print(data)
# print(n)
# print(x)
data = point_selection(data, n, x)
table = get_table(data, n)
#print_mtx(table)
y = p(table, n, x)
return y
# main
def main():
'''
# f = open('input.txt', 'r')
# data = []
# for line in f:
# data.append((float(line.split()[0]), float(line.split()[1])))
# f.close()
'''
main_data = data_input()
print_data(main_data)
    n = int(input("Enter the polynomial degree: "))
    x = float(input("Enter x: "))
data = point_selection(main_data, n, x)
if data is not None:
y = interpolation(data, n, x)
print("f({:.3f}) = {:.3f}".format(x, y))
        # root finding (via inverse interpolation)
flag = 0
if main_data[0][1] == 0:
flag = 1
else:
for i in range(1, len(main_data)):
if main_data[i][1] < 0 and main_data[i-1][1] > 0 or \
main_data[i][1] > 0 and main_data[i-1][1] < 0 or \
main_data[i][1] == 0:
flag = 1
break
if flag:
swap_data = swap_cords(main_data)
data = point_selection(swap_data, n, 0)
root = interpolation(data, n, 0)
print("f({:.3f}) = 0".format(root))
else:
print("Нельзя найти корень")
else:
print("Недотаточно точек, чтобы посчитать полином {:d} степени.".format(n))
if __name__ == '__main__':
main()
```
authors: ["55348265+LozovskiAlexey@users.noreply.github.com"] | author_id: 55348265+LozovskiAlexey@users.noreply.github.com
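
A worked example for the Newton interpolation above: with f(x) = x**2, a degree-2 polynomial reproduces f exactly, so interpolating at x = 2.5 from the tabulated points returns 6.25.

```python
data = [(x, x ** 2) for x in [1.0, 2.0, 3.0, 4.0]]  # (x, f(x)) pairs
# divided differences for nodes 1, 2, 3: f[x0,x1]=3, f[x1,x2]=5, f[x0,x1,x2]=1
print(interpolation(data, 2, 2.5))  # 1 + 1.5*3 + 1.5*0.5*1 = 6.25
```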

---
blob_id: ca28d6c5b515dfda3cbdafb46b37d89a8095da1b | directory_id: b284d59bdf2c01977eb6d80795e2c75cb95b9b2c
path: /danibraz/checkout/validate_error_invoice.py | content_id: bda27a0d3f0ca85671f619873e051cf1c9bc10ab
detected_licenses: ["MIT"] | license_type: permissive | repo_name: CoutinhoElias/danibraz
snapshot_id: 58d27cb30661d06091196cc487a9d902f4f8dac9 | revision_id: b21f3ce3477ded74c901fa377a5b2ac5e68faf36 | branch_name: refs/heads/master
visit_date: 2021-01-20T02:12:30.096953 | revision_date: 2018-04-01T15:52:40 | committer_date: 2018-04-01T15:52:40
github_id: 89,386,992 | star_events_count: 0 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2017-12-01T16:52:47 | gha_created_at: 2017-04-25T17:14:27 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 154 | extension: py
content:
```python
from django.core.exceptions import ValidationError
def validate_quantity(value):
    if not value < 5:
        raise ValidationError("Field must be less than 5.")
```
authors: ["coutinho.elias@gmail.com"] | author_id: coutinho.elias@gmail.com
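
A sketch of wiring the validator above into a model field; the model and field names are hypothetical.

```python
from django.db import models

from danibraz.checkout.validate_error_invoice import validate_quantity

class InvoiceItem(models.Model):
    # full_clean()/ModelForm validation rejects any quantity >= 5
    quantity = models.PositiveIntegerField(validators=[validate_quantity])
```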

---
blob_id: 450788f02ffc268c72c96c8be14bc7828af2e160 | directory_id: cc81637c0cd6e23e0eca70ad00aaa53ef26ebd1d
path: /website/urls.py | content_id: bc690cab54bc82745fbe49f4ea60f6af92eeceef
detected_licenses: [] | license_type: no_license | repo_name: madgeekfiend/django-screens
snapshot_id: 826d5cdbb90175e4c606ef8ed2347ee50092951e | revision_id: d76a0eff62d201404db5d56e55f5612c4f76cc15 | branch_name: refs/heads/master
visit_date: 2021-01-21T05:05:47.304035 | revision_date: 2014-08-07T15:36:48 | committer_date: 2014-08-07T15:36:48
github_id: 22,726,430 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 388 | extension: py
content:
```python
from django.conf.urls import patterns, url
from website import views
urlpatterns = patterns('',
# You know what this file is for
url(r'^upload/', views.upload, name='upload'),
url(r'^review/', views.review, name='review'),
url(r'^(?P<screen_id>\d+)/$', views.view_screen, name='view_screen'),
url(r'^(?P<screen_id>\d+)/comment/$', views.comment, name='comment'),
)
```
authors: ["totalgeek@outlook.com"] | author_id: totalgeek@outlook.com

---
blob_id: 19eaca7ff57991686ef9fc408897335ccb114c66 | directory_id: 54077cef77625f39fd65b1df2b2a0bc82ca04773
path: /back/manage.py | content_id: 61c0db9e2f3a1fadfe07bee429aba8144d128549
detected_licenses: [] | license_type: no_license | repo_name: tabishkhan96/resa
snapshot_id: e43ead182b36a34ed6ef0dee8e1def7152b7676e | revision_id: 5789d7e11a984f70e6509b0e2d82e9f149b55b91 | branch_name: refs/heads/master
visit_date: 2023-03-18T17:59:02.936862 | revision_date: 2017-06-04T15:27:57 | committer_date: 2017-06-04T15:27:57
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 822 | extension: py
content:
```python
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meeting_room_reservation.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
```
authors: ["tryphtik@hotmail.com"] | author_id: tryphtik@hotmail.com

---
blob_id: 24cc74599fb36feb9338f0a3d7920eb7c44d090c | directory_id: 9611f657bbb92d2cc3edd556ea3ffaa702e997f0
path: /utils/__init__.py | content_id: de17b5c9298991340efacc77b8a968e617660208
detected_licenses: [] | license_type: no_license | repo_name: donhilion/JumpAndRun
snapshot_id: 10fdfdcc5fdcd5619b757c3f65e68d2bf4085852 | revision_id: 0308785a51bf61d9a4fec2d8370540df502b8178 | branch_name: refs/heads/master
visit_date: 2021-01-23T08:38:53.808186 | revision_date: 2014-01-28T20:26:53 | committer_date: 2014-01-28T20:26:53
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 25 | extension: py
content:
```python
__author__ = 'Donhilion'
```
authors: ["donhilion@googlemail.com"] | author_id: donhilion@googlemail.com

---
blob_id: aeb3b8ff1f2ad11c5a1eabcd0b4aded91f0a5863 | directory_id: 66dc2116f2c99cb79ad97a5320f99aedecfd9ac5
path: /select/src/median.py | content_id: 1bd5547bf970eae3924f993d37e6e5ab89d240de
detected_licenses: [] | license_type: no_license | repo_name: mregulski/ppt-4-aisd
snapshot_id: 9cee11d7d44df03a7b61ff228f761385349a280b | revision_id: 7a813ea66c7793e5681a9ddffbe1dd9a6a3951af | branch_name: refs/heads/master
visit_date: 2021-01-12T13:38:07.140724 | revision_date: 2017-03-07T22:21:17 | committer_date: 2017-03-07T22:21:17
github_id: 69,969,983 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,992 | extension: py
content:
```python
#!/usr/bin/python3
import random
import argparse
import sys
import math
logging = 0
cmps = 0
def comparison():
global cmps
cmps += 1
return True
def partition(array, start=0, end=-1, depth=0):
if(end == -1):
end = len(array)-1
pivot = array[random.randrange(start, end+1)]
if logging >= 5:
print("{indent}partitioning array[{start}:{end}] around \x1b[1;33m{pivot}\x1b[0m"
.format(indent=" "*depth,
start=start,
end=end+1,
pivot=pivot))
print("{}before: {}".format(" "*depth, array[start:end+1]))
print("{}pivot: {}".format(" "*depth, pivot))
i = start-1
j = end+1
while(comparison() and i < j):
j-=1
while(comparison() and array[j] > pivot):
j-=1
i+=1
while(comparison() and array[i] < pivot):
i+=1
if(comparison() and i < j):
array[i], array[j] = array[j], array[i]
if logging >= 5:
print("{}after (j={}):".format(" "*depth, j), end='')
print_array_marked(array[start:end+1], [j-start])
return j
def random_select(array, k, start=0, end=-1, depth=0):
if(end == -1):
end=len(array)-1
if(start == end):
return array[start]
split = partition(array, start, end, depth+1)
n = split - start + 1
if (comparison() and k <= n):
if logging >=5:
print("{indent}k={k} <= n={n}, searching element #{k} in array[{start}:{end}]"
.format(k=k, n=n, start=start, end=split+1, indent=" "*depth))
return random_select(array, k, start, split, depth+1)
else:
if logging >=5:
print("{indent}k={k} > n={n}, searching element #{i} in array[{start}:{end}]"
.format(k=k, n=n, i=k-n, start=split+1, end=end+1, indent=" "*depth))
return random_select(array, k-n, split+1, end, depth+1)
def select(array, k, depth=0):
global logging
if(len(array) <= 5):
array.sort()
# print('array:', array)
# print("k:", k)
return(array[k-1])
fives = [array[i:i+5] for i in range(0, len(array), 5)]
if(logging >= 5):
i = 0
for five in fives:
print("{}group[{}]:".format(" "*depth, i), five)
i += 1
x = []
ceil = math.ceil(len(array)/5)
# print("ceil: ", ceil)
# print("lenfives:" ,len(fives))
for i in range(ceil):
m_idx = math.ceil(len(fives[i])/2);
# print("m_idx:", m_idx)
x.append(select(fives[i], m_idx))
if(logging >= 1):
print("{}medians:".format(" "*depth), x)
M = select(x, math.ceil(len(x)/2), depth+1)
if(logging >= 1):
print("{}Median-of-medians:".format(" "*depth), M)
P1, P2, P3 = [], [], []
for val in array:
if (comparison() and val < M):
P1.append(val)
elif (comparison() and val == M):
P2.append(val)
else:
P3.append(val)
if logging >= 1:
print("{}P1 < Median ({} elem.):".format(" "*depth, len(P1)), P1)
print("{}P2 = Median ({} elem.):".format(" "*depth, len(P2)), P2)
print("{}P3 > Median ({} elem.):".format(" "*depth, len(P3)), P3)
# print("{}k:".format(" "*depth), k)
if (comparison() and k <= len(P1)):
if logging >= 1:
print("{0}k={1} <= len(P1), searching for element #{1}. in P1".format(" "*depth, k))
return select(P1, k, depth+1)
elif (comparison() and k > len(P1)+len(P2)):
if logging >= 1:
print("{}k={} > len(P1)+len(P2), searching for element #{} in P3"
.format(" "*depth, k, k-len(P1)-len(P2)))
return select(P3, k-len(P1)-len(P2), depth+1)
else:
if logging >= 1:
print("{}len(P1) < k={} < len(P1)+len(P2), returning median ({})"
.format(" "*depth, k, M))
return M
def generate_random(size):
x = []
for i in range(size):
x.append(random.randrange(size*2))
return x
def generate_perm(size):
x = [0]*size
used = [0]*(size)
for i in range(size):
while(True):
val = random.randint(1, size)
x[i] = val
if used[val-1] != 1:
break
used[val-1] = 1
return x
def print_array_marked(array, mark_indices):
print('[', end='')
for i in range(len(array)):
print("{col_start}{var}{col_end}{comma}".format(var=array[i],
col_start="\x1b[1;33m" if i in mark_indices else "",
col_end="\x1b[0m",
comma="," if i < len(array) - 1 else ''), end='')
print(']')
def main(args):
global cmps
array = args.generator(args.size)
if(args.interactive):
print("data:", array)
input("press ENTER to find element #{} deterministically.".format(args.position))
print("\x1b[1melement #{}\x1b[0m: \x1b[1;33m{}\x1b[0m".format(args.position, select(array[:], args.position)))
input("press ENTER to find element #{} randomly.".format(args.position))
print("\x1b[1melement #{}\x1b[0m: \x1b[1;33m{}\x1b[0m".format(args.position, random_select(array[:], args.position)))
input("press ENTER to check with sorted data")
print_array_marked(sorted(array), [args.position-1])
else:
test_count = 10
max_size = 10000
for i in range(test_count):
print("\x1b[1;36mTest #{}\x1b[0m".format(i+1))
rs_out = open('./random.out', 'a+')
s_out = open('./select.out', 'a+')
for size in range(100, max_size+1, 100):
print("\r\x1b[2Ksize: {}...".format(size),end=' ')
array = generate_random(size)
k = random.randrange(size)
cmps = 0
x = random_select(array[:], k)
# print("cmps:",cmps,end=' ')
rs_out.write("{:8d}\t{:8d}\t{:8d}\n".format(size, cmps, k))
cmps = 0
x = select(array[:], k)
s_out.write("{:8d}\t{:8d}\t{:8d}\n".format(size, cmps, k))
# print("cmps:",cmps,end=' ')
print("done.".format(size=size))
rs_out.close()
s_out.close()
if __name__ == '__main__':
def generator(string):
if string in ['perm', 'permutation']:
value = generate_perm
else:
value = generate_random
return value
parser = argparse.ArgumentParser()
parser.add_argument('--size', '-s', type=int, default=20)
parser.add_argument('--position', '-i', type=int, default=10)
parser.add_argument('--generator', '-t', type=generator, default="rand")
parser.add_argument('--logging', '-v', type=int, default=0)
parser.add_argument('--interactive', action='store_const', const=True, default=False)
args = parser.parse_args()
logging = 5 if args.interactive else args.logging
main(args)
```
authors: ["maarcin.regulski@gmail.com"] | author_id: maarcin.regulski@gmail.com
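
A quick check that the two selection routines above agree with plain sorting; k is 1-based, so k=4 asks for the fourth-smallest element.

```python
data = [7, 1, 5, 3, 9, 2, 8, 4, 6]
k = 4
assert select(data[:], k) == sorted(data)[k - 1] == 4
assert random_select(data[:], k) == 4  # pass copies, since partition() mutates in place
```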

---
blob_id: 0f9d370037e854c8b1e43ca32b4093a32a97b926 | directory_id: f36cb3559618915ca36c7abb29a49aec5a972afe
path: /week-3/assignment_1.py | content_id: 260a391cf7677f8498ee685ea6af737d2cbece56
detected_licenses: [] | license_type: no_license | repo_name: snsn/programmers-data-engineering-study
snapshot_id: 3e31b1100625500796b182b796f8f5b0fb377f00 | revision_id: e5f92b1183c4f35e33a84e34bf8aced5fbf65a5d | branch_name: refs/heads/master
visit_date: 2022-12-04T02:47:05.823218 | revision_date: 2020-08-12T05:30:02 | committer_date: 2020-08-12T05:30:02
github_id: 283,785,212 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-08-12T05:30:05 | gha_created_at: 2020-07-30T13:35:49 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,977 | extension: py
content:
```python
# -*- coding: utf-8 -*-
"""assignment-1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1L0TgaTyi_KjfnSa3EjhI5M2ue_6ER3Bd
"""
import sqlalchemy
user = 'peter'
password = 'PeterWoW1!'
sql_conn_str = 'postgresql://{user}:{password}@grepp-data.cduaw970ssvt.ap-northeast-2.redshift.amazonaws.com:5439/dev'.format(
user=user,
password=password
)
sqlalchemy.create_engine(sql_conn_str)
# Commented out IPython magic to ensure Python compatibility.
# %load_ext sql
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT * FROM raw_data.session_timestamp LIMIT 10
# Commented out IPython magic to ensure Python compatibility.
# %sql sql_conn_str
# Commented out IPython magic to ensure Python compatibility.
# %sql postgresql://peter:PeterWoW1!@grepp-data.cduaw970ssvt.ap-northeast-2.redshift.amazonaws.com:5439/dev
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# CREATE TABLE adhoc.peter_channel (
# channel varchar(32) primary key
# );
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# INSERT INTO adhoc.peter_channel VALUES ('FACEBOOK'), ('GOOGLE');
#
# SELECT * FROM adhoc.peter_channel;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# DROP TABLE adhoc.peter_channel;
#
# CREATE TABLE adhoc.peter_channel AS
# SELECT DISTINCT channel
# FROM raw_data.user_session_channel;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT *
# FROM adhoc.peter_channel
# LIMIT 10;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# ALTER TABLE adhoc.peter_channel
# RENAME channel to channelname;
#
# INSERT INTO adhoc.peter_channel VALUES ('TIKTOK');
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT *
# FROM adhoc.peter_channel;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT COUNT(1)
# FROM raw_data.session_timestamp;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT usc.channel, COUNT(1)
# FROM raw_data.user_session_channel usc
# JOIN raw_data.session_timestamp st
# ON st.sessionid = usc.sessionid
# GROUP BY 1;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT COUNT(1)
# FROM raw_data.user_session_channel usc
# WHERE usc.channel in ('Google','Facebook');
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT COUNT(1)
# FROM raw_data.user_session_channel usc
# WHERE channel ilike 'Google' or channel ilike 'Facebook';
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT DISTINCT channel
# FROM raw_data.user_session_channel
# WHERE channel ILIKE '%o%';
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT DISTINCT channel
# FROM raw_data.user_session_channel
# WHERE channel NOT ILIKE '%o%';
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT
# LEN(channelname),
# UPPER(channelname),
# LOWER(channelname),
# LEFT(channelname, 4)
# FROM adhoc.peter_channel;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT EXTRACT(HOUR FROM st.ts), COUNT(1)
# FROM raw_data.user_session_channel usc
# JOIN raw_data.session_timestamp st
# ON st.sessionid = usc.sessionid
# GROUP BY 1
# ORDER BY 2 DESC;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT usc.channel, COUNT(1)
# FROM raw_data.channel c
# JOIN raw_data.user_session_channel usc
# ON usc.channel = c.channelname
# GROUP BY 1;
# Commented out IPython magic to ensure Python compatibility.
# %%sql
#
# SELECT st.ts, usc.channel, ROW_NUMBER() OVER (PARTITION BY usc.userid ORDER BY st.ts)
# FROM raw_data.user_session_channel usc
# JOIN raw_data.session_timestamp st
# ON usc.sessionid = st.sessionid
# WHERE userid = 251
# ORDER BY 1;
```
authors: ["peter@grepp.co"] | author_id: peter@grepp.co

---
blob_id: f16c0eb80ddbde537a25faa6d4acb812b83a763e | directory_id: e6053153c9baa95156f0e7aa700047ea2d26e249
path: /Data/__init__.py | content_id: c586133032f53376ef402302e9f20d5b22bba2a7
detected_licenses: [] | license_type: no_license | repo_name: Songtuan/Show-and-Tell-Model
snapshot_id: d8d6b71fb8cf3274862c63aa324445d8e802ce41 | revision_id: 7b298bd8ab30c9b052d144838ecf022220d41628 | branch_name: refs/heads/master
visit_date: 2020-07-22T07:23:55.489781 | revision_date: 2019-10-14T03:01:24 | committer_date: 2019-10-14T03:01:24
github_id: 207,115,342 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,425 | extension: py
content:
```python
import torch
from torch.utils.data import Dataset
import torchvision.transforms as trn
import h5py
class CaptionDataset(Dataset):
def __init__(self, input_file, transform=None):
        h = h5py.File(input_file, 'r')  # read-only; recent h5py versions require an explicit mode
self.imgs = h['images']
self.captions = h['captions']
self.captions_per_img = h.attrs['captions_per_image']
self.captions_unencode = h['captions_uncode']
assert self.captions.shape[0] // self.imgs.shape[0] == self.captions_per_img
if transform is not None:
self.transform = transform
else:
self.transform = trn.Compose([trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
assert self.imgs.shape[0] * 1 == self.captions.shape[0]
def __getitem__(self, item):
img = self.imgs[item // self.captions_per_img]
img = trn.ToTensor()(img)
if img[img > 1].shape[0] != 0 or img[img < 0].shape[0] != 0:
img = self.transform(img)
img = img.float()
assert img.shape == torch.Size([3, 256, 256])
caption = self.captions[item]
caption = torch.from_numpy(caption).long()
caption_unencode = self.captions_unencode[item]
data = {'image': img, 'caption': caption, 'caption_unencode': caption_unencode}
# data = {'image': img, 'caption': caption}
return data
def __len__(self):
return self.captions.shape[0]
```
authors: ["u6162630@anu.edu.au"] | author_id: u6162630@anu.edu.au
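
A sketch of consuming CaptionDataset with a DataLoader; the HDF5 filename is a placeholder for whatever the upstream preprocessing step writes.

```python
from torch.utils.data import DataLoader

dataset = CaptionDataset("captions.h5")   # hypothetical preprocessed file
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for batch in loader:
    imgs, caps = batch["image"], batch["caption"]  # [B, 3, 256, 256], [B, caption_len]
    break
```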

---
blob_id: a8cb34215357391688c6922cc45ce8c787596bbc | directory_id: 84243e597645fb81d1300ea8ecf7f398f94089da
path: /StackedDateHistogram.py | content_id: d824e847f74eda1da4a93ade6f2456ebda5e230d
detected_licenses: ["MIT"] | license_type: permissive | repo_name: stevezieglerva/plot-wrappers
snapshot_id: 07fc24f1f1e1140d9040034087a294b3c704cc8d | revision_id: e1acd92a0c4452280974e6e28340c849bb63c818 | branch_name: refs/heads/main
visit_date: 2023-05-30T02:08:39.665311 | revision_date: 2021-06-18T20:25:00 | committer_date: 2021-06-18T20:25:00
github_id: 339,087,341 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,294 | extension: py
content:
```python
import json
import pandas as pd
import matplotlib.pyplot as plt
class StackedDateHistogram:
def __init__(
self,
date_column_name,
grouping_column,
value_column,
df,
):
self._max_groupings = 5
self._date_column_name = date_column_name
self._date_period_name = date_column_name
self._grouping_column = grouping_column
self._value_column = value_column
self._aggregation = "sum"
self._chart_type = "area"
self._input_df = df
plt.style.use("seaborn")
# plt.show()
def set_max_groupings(self, max_groupings):
self._max_groupings = max_groupings
def set_aggregation(self, aggregation):
possible_values = ["sum", "count", "unique_count"]
assert (
aggregation in possible_values
), f"aggregation must be one of: {possible_values}"
self._aggregation = aggregation
def set_chart_type(self, chart_type):
possible_values = ["area", "bar"]
assert (
chart_type in possible_values
), f"chart_type must be one of: {possible_values}"
self._chart_type = chart_type
def set_date_period(self, date_format, period):
self._input_df["new_date"] = pd.to_datetime(
self._input_df[self._date_column_name], format=date_format
).dt.to_period(period)
self._date_period_name = "new_date"
print(self._input_df)
def _group_data(self):
if self._aggregation == "sum":
largest_df = (
self._input_df.groupby([self._grouping_column])[self._value_column]
.sum()
.nlargest(self._max_groupings)
.to_frame()
)
largest_categories = largest_df.index.values.tolist()
# print(largest_categories)
filtered_to_largest = self._input_df[
self._input_df[self._grouping_column].isin(largest_categories)
]
new_group = filtered_to_largest.groupby(
[self._date_period_name, self._grouping_column]
)[self._value_column].sum()
if self._aggregation == "count":
largest_df = (
self._input_df.groupby([self._grouping_column])[self._value_column]
.count()
.nlargest(self._max_groupings)
.to_frame()
)
largest_categories = largest_df.index.values.tolist()
# print(largest_categories)
filtered_to_largest = self._input_df[
self._input_df[self._grouping_column].isin(largest_categories)
]
new_group = filtered_to_largest.groupby(
[self._date_period_name, self._grouping_column]
)[self._value_column].count()
if self._aggregation == "unique_count":
largest_df = (
self._input_df.groupby([self._grouping_column])[self._value_column]
.nunique()
.nlargest(self._max_groupings)
.to_frame()
)
largest_categories = largest_df.index.values.tolist()
# print(largest_categories)
filtered_to_largest = self._input_df[
self._input_df[self._grouping_column].isin(largest_categories)
]
new_group = filtered_to_largest.groupby(
[self._date_period_name, self._grouping_column]
)[self._value_column].nunique()
return new_group
def to_json(self):
self._grouped_df = self._group_data().unstack()
json_str = self._grouped_df.to_json()
json_dict = json.loads(json_str)
return json_dict
def save_plot(self, filename):
fig, ax = plt.subplots()
self._grouped_df = self._group_data().unstack()
if self._chart_type == "bar":
self._grouped_df.plot(kind=self._chart_type, stacked=True, width=0.8, ax=ax)
else:
self._grouped_df.plot(kind=self._chart_type, stacked=True, ax=ax)
plt.xticks(rotation=90)
legend = plt.legend(frameon=1)
frame = legend.get_frame()
frame.set_facecolor("white")
plt.tight_layout()
plt.savefig(filename)
```
authors: ["stephen.v.ziegler@gmail.com"] | author_id: stephen.v.ziegler@gmail.com
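
A usage sketch for StackedDateHistogram with made-up data: daily counts per category, drawn as a stacked bar chart.

```python
import pandas as pd

df = pd.DataFrame({
    "date": ["2021-06-01", "2021-06-01", "2021-06-02", "2021-06-02", "2021-06-02"],
    "category": ["a", "b", "a", "a", "b"],
    "value": [1, 2, 3, 4, 5],
})
hist = StackedDateHistogram("date", "category", "value", df)
hist.set_aggregation("count")
hist.set_chart_type("bar")
hist.set_date_period("%Y-%m-%d", "D")  # parse the strings, bucket by day
hist.save_plot("counts_by_category.png")
```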

---
blob_id: fb411458ce67bc348b868b70def5489795c60675 | directory_id: 2442d45c9d98f7175447fd1d62212cbba0604bc8
path: /실전연습/뱀게임/SnakeGame.py | content_id: 7fb21478b91b6f1c19d28ad158083534d4a7dd85
detected_licenses: [] | license_type: no_license | repo_name: yerimroh/Python-Practice
snapshot_id: 5f25db8ae50f89da3a909a37486bfcf7f044bec9 | revision_id: 1faeba80062e4a16ae33673490821d424cd77d1f | branch_name: refs/heads/master
visit_date: 2023-07-31T20:26:07.064031 | revision_date: 2021-09-16T21:20:09 | committer_date: 2021-09-16T21:20:09
github_id: 283,073,517 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,709 | extension: py
content:
```python
import pygame
import sys
from pygame.locals import *
from random import*
pygame.init() # Initializing
#############################################################
# The Node class that composes the snake's body
headSize = 20
class Node(pygame.Rect):
def __init__(self, x, y):
        super().__init__(int(x), int(y), headSize, headSize)  # initialize the Rect base class
##############################################################
# Screen setting
screenWidth = 460
screenHeight = 640
screen =pygame.display.set_mode((screenWidth, screenHeight))
pygame.display.set_caption("Snake Game") # The title of the game
##############################################################
# Other settings
gameFont = pygame.font.Font(None, 30) # Game Font
snakeColor = (55, 120, 120) # color of the snake
clock = pygame.time.Clock() # FPS
##############################################################
# Load sources
background = pygame.image.load("sources\\background.png")
eatingSound = pygame.mixer.Sound("sources\\eating sound effect.wav")
apple = pygame.image.load("sources\\apple.png")
appleSize = apple.get_rect().size
appleWidth = appleSize[0]
appleHeight = appleSize[1]
appleX = randrange(5, screenWidth - appleWidth)
appleY = randrange(5, screenHeight - appleHeight)
#############################################################
# Snake Setting
snakeSpeed = 0.3
toX = 0
toY = 0
totalApple = 0 # keep track of the number of apples that the snake got
# Snake Head
head = Node((screenWidth / 2 - headSize / 2), (screenHeight / 2 - headSize / 2)) # Create head
# Snake Body
nodes = []
nodes.append(head) # add head to the list before the game loop
direction = None
# method that will grow body each time the snake obtains the apple
def growBody():
if direction == "LEFT":
newNode = Node(nodes[len(nodes) - 1].left + headSize, nodes[len(nodes) - 1].top)
nodes.append(newNode)
elif direction == "RIGHT":
newNode = Node(nodes[len(nodes) - 1].left - headSize, nodes[len(nodes) - 1].top)
nodes.append(newNode)
elif direction == "UP":
newNode = Node(nodes[len(nodes) - 1].left, nodes[len(nodes) - 1].top + headSize)
nodes.append(newNode)
elif direction == "DOWN":
newNode = Node(nodes[len(nodes) - 1].left, nodes[len(nodes) - 1].top - headSize)
nodes.append(newNode)
##############################################################
# Game Loop
isRunning = True
isGameOver = False
while(isRunning):
dt = clock.tick(30) # FPS = 30
for event in pygame.event.get():
if event.type == pygame.QUIT:
isRunning = False
# Handle keyboard inputs
if event.type == pygame.KEYDOWN and isGameOver == False:
if event.key == pygame.K_RIGHT:
toX += snakeSpeed
toY = 0
direction = "RIGHT"
elif event.key == pygame.K_LEFT:
toX -= snakeSpeed
toY = 0
direction = "LEFT"
elif event.key == pygame.K_UP:
toY -= snakeSpeed
toX = 0
direction = "UP"
elif event.key == pygame.K_DOWN:
toY += snakeSpeed
toX = 0
direction = "DOWN"
# When the player let go of the keyboard (do not move)
if event.type == pygame.KEYUP and isGameOver == False:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
toX = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
toY = 0
if isGameOver == False and event.type == pygame.KEYDOWN:
# store the movement to trace the path the head took
newX = []
newY = []
for i in range(0, len(nodes) - 1):
newX.append(nodes[i].left)
newY.append(nodes[i].top)
# move around the head
head.left += int(toX * dt)
head.top += int(toY * dt)
# make the body to follow the trace of the path
for j in range(0, len(newX)):
nodes[j + 1].left = newX[j]
nodes[j + 1].top = newY[j]
######################################################################
# Handle game events
# If the snake collides with any of the edges of the screen
if head.left > screenWidth - headSize or head.left < -5:
isGameOver = True
background = pygame.image.load("sources\\gameover.png")
    elif head.top > screenHeight - headSize or head.top < -5:  # vertical bounds check (head.top, not head.left)
isGameOver = True
background = pygame.image.load("sources\\gameover.png")
appleRect = apple.get_rect() # apple
appleRect.left = appleX
appleRect.top = appleY
# when the snake obtains the apple
if head.colliderect(appleRect):
pygame.mixer.Sound.play(eatingSound)
appleX = randrange(5, screenWidth - appleWidth)
appleY = randrange(5, screenHeight - appleHeight)
totalApple += 1
growBody()
# When the snake head collides with its body
for node in nodes:
if head.colliderect(node) and nodes.index(node) > 4:
isGameOver = True
background = pygame.image.load("sources\\gameover.png")
#####################################################################
# Draw components on the screen
screen.blit(background, (0, 0)) # background
if isGameOver == False:
for node in nodes:
pygame.draw.rect(screen, snakeColor, node) # draw snake
screen.blit(apple, (appleX, appleY)) # draw apple
pygame.display.update() # update the screen each time
# Exit out from the game
pygame.time.delay(1000)
pygame.quit()
```
authors: ["yerimmie1125@gmail.com"] | author_id: yerimmie1125@gmail.com

---
blob_id: a9a87a3073f5e5b306e21351d62078b946fb85c8 | directory_id: 36724b5400430baea62119bf3c91fe2cc2cb61e7
path: /apps/mensura/migrations/0001_initial.py | content_id: 13218e40b736ec489c83f76d84e882e00c899c53
detected_licenses: [] | license_type: no_license | repo_name: cerbeerza/Peritos
snapshot_id: 20883e476b83bddbe7f735fe7918f107ee812795 | revision_id: 96b10520fbd4d373bc731492ef97c156b8efc6b3 | branch_name: refs/heads/master
visit_date: 2021-01-16T18:24:00.921061 | revision_date: 2019-09-03T20:02:24 | committer_date: 2019-09-03T20:02:24
github_id: 100,069,043 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 858 | extension: py
content:
```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-21 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MensuraGeneral',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('periodo', models.CharField(max_length=4)),
('num_mensura', models.IntegerField()),
('promedio', models.FloatField()),
('empresa', models.CharField(max_length=40)),
('situacion', models.CharField(max_length=20)),
('usuario_id', models.CharField(max_length=10)),
],
),
]
```
authors: ["cerbeerza@gmail.com"] | author_id: cerbeerza@gmail.com

---
blob_id: 243a9a8513dbd98f7277254c647f9c346def832a | directory_id: ac5bc9956bf97beb0135384f1592b59c12ffe1bb
path: /arview.py | content_id: 1cdbed837b1130114515dc37a4fbc930927b03ca
detected_licenses: ["MIT"] | license_type: permissive | repo_name: hirax/restaurants_viewer
snapshot_id: 7e89b404d8be68d74794e5ed095f51b5fd57f6e0 | revision_id: eae056893941cf39fa87640bffea72a8aed47c4a | branch_name: refs/heads/master
visit_date: 2020-07-21T16:32:51.652150 | revision_date: 2019-06-03T23:30:10 | committer_date: 2019-06-03T23:30:10
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,194 | extension: py
content:
```python
# coding: utf-8
import json
import time
from enum import IntFlag
from math import pi
import location
import numpy
import requests
import ui
from numpy import sin, cos
from objc_util import *
class SCNVector3(Structure):
_fields_ = [('x', c_float), ('y', c_float), ('z', c_float)]
load_framework('SceneKit')
load_framework('ARKit')
with open('token.txt') as f:
API_KEY = f.read().replace('\n', '')
URL = 'http://webservice.recruit.co.jp/hotpepper/gourmet/v1/?key={0}&lat={1}&lng={2}&range=5&format=json'
byou = 25.2
scale = 40
W = 4
L = 100
H = 4
class_list = [
'NSError', 'SCNScene', 'ARSCNView',
'ARWorldTrackingConfiguration',
'ARSession', 'UIViewController',
'ARPlaneAnchor', 'SCNView', 'SCNBox',
'SCNText', 'SCNNode',
'SCNLight', 'SCNCamera',
'SCNAction',
'SCNTransaction',
'UIFont',
'SCNSphere', 'SCNFloor',
'SCNLookAtConstraint',
'SCNPhysicsShape',
'SCNPhysicsBody',
'UIColor', 'NSObject'
]
NSError, SCNScene, ARSCNView, ARWorldTrackingConfiguration, \
ARSession, UIViewController, ARPlaneAnchor, SCNView, SCNBox, \
SCNText, SCNNode, SCNLight, SCNCamera, SCNAction, SCNTransaction, \
UIFont, SCNSphere, SCNFloor, SCNLookAtConstraint, \
SCNPhysicsShape, SCNPhysicsBody, UIColor, NSObject = map(ObjCClass, class_list)
deepskyblue = UIColor.color(red=0.0, green=191.0, blue=255.0, alpha=1.0)
rotate_action = SCNAction.rotateByX_y_z_duration_(0, pi * 2, 0, 10)
up = SCNAction.moveByX_y_z_duration_(0, 30, 0, 3)
down = SCNAction.moveByX_y_z_duration_(0, -30, 0, 3)
up_down = SCNAction.sequence_([up, down])
scene_view = None
class ARWorldAlignment(IntFlag):
ARWorldAlignmentGravity = 0
ARWorldAlignmentGravityAndHeading = 1
ARWorldAlignmentCamera = 2
class ARPlaneDetection(IntFlag):
ARPlaneDetectionNone = 0
ARPlaneDetectionHorizontal = 1 << 0
ARPlaneDetectionVertical = 1 << 1
class ARSessionRunOptions(IntFlag):
ARSessionRunOptionsNone = 0
ARSessionRunOptionResetTracking = 1 << 0
ARSessionRunOptionRemoveExistingAnchors = 1 << 1
def get_location():
location.start_updates()  # start updating GPS data
gps_data = location.get_location()  # get the current GPS data
location.stop_updates()  # stop updating GPS data
return gps_data['latitude'], gps_data['longitude']
def get_restaurants(_lat, _lng):
"""緯度: lat 経度: lng"""
response = requests.get(URL.format(API_KEY, _lat, _lng))
result = json.loads(response.text)
lat_lng = []
for restaurant in result['results']['shop']:
lat = float(restaurant['lat'])
lng = float(restaurant['lng'])
lat_lng.append((lat, lng, restaurant['name']))
r = []
for lat, lng, name in lat_lng:
r2 = []
difference = (_lat - lat) * 3600
r2.append(int(difference * byou))
difference = (lng - _lng) * 3600
r2.append(int(difference * byou))
r2.append(name)
r.append(r2)
return r
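# A hedged usage sketch (an addition, not in the original; the coordinates are
# illustrative assumptions). get_restaurants returns per-shop offsets from the
# current position, in arc-seconds scaled by `byou`, plus the shop name:
#
# shops = get_restaurants(35.681236, 139.767125)
# shops[0]  # -> [z_offset, x_offset, 'shop name']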
def createARSceneView(x, y, w, h, debug=True):
v = ARSCNView.alloc().initWithFrame_((CGRect(CGPoint(x, y), CGSize(w, h))))
v.setShowsStatistics_(debug)
return v
@on_main_thread
def run(ar_session):
ar_configuration = ARWorldTrackingConfiguration.alloc().init()
ar_configuration.setPlaneDetection_(ARPlaneDetection.ARPlaneDetectionHorizontal)
ar_configuration.setWorldAlignment_(
ARWorldAlignment.ARWorldAlignmentGravity)
ar_session.runWithConfiguration_options_(ar_configuration,
ARSessionRunOptions.ARSessionRunOptionResetTracking | ARSessionRunOptions.ARSessionRunOptionRemoveExistingAnchors)
time.sleep(0.5)
def CustomViewController_viewWillAppear_(_self, _cmd, animated):
return
def CustomViewController_viewWillDisappear_(_self, _cmd, animated):
session = scene_view.session()
session.pause()
def MyARSCNViewDelegate_renderer_didAdd_for_(_self, _cmd, scenerenderer, node, anchor):
if not isinstance(anchor, ARPlaneAnchor):
return
def MyARSCNViewDelegate_session_didFailWithError_(_self, _cmd, _session, _error):
print('error', _error, _cmd, _session)
err_obj = ObjCInstance(_error)
print(err_obj)
def convert_round(x, z, r):
cosr = cos(r)
sinr = sin(r)
X = cosr * x - sinr * z
Z = sinr * x + cosr * z
return X, Z
def get_text(text, x, y, z):
text_mesh = SCNText.textWithString_extrusionDepth_(text, 3.0)
text_mesh.setFlatness_(0.2)
text_mesh.setChamferRadius_(0.4)
text_mesh.setFont_(UIFont.fontWithName_size_('HoeflerText-Black', 15))
bbox_min, bbox_max = SCNVector3(), SCNVector3()
text_mesh.getBoundingBoxMin_max_(byref(bbox_min), byref(bbox_max), restype=None,
argtypes=[POINTER(SCNVector3), POINTER(SCNVector3)])
text_width = bbox_max.x - bbox_min.x
text_node = SCNNode.nodeWithGeometry_(text_mesh)
text_node.setCastsShadow_(True)
text_container = SCNNode.node()
text_container.addChildNode_(text_node)
text_container.setPosition_((x, y, z))
text_container.runAction(SCNAction.repeatActionForever(SCNAction.group([rotate_action, up_down])))
text_node.setPosition_((-text_width / 2, 0, 0))
return text_container
def add_restaurants(root_node, round_num):
restaurants = get_restaurants(*get_location())
if round_num == 90.0 or round_num == 0:
r = 0
elif round_num < 90:
if round_num < 45:
r = round_num + (45 - round_num)
else:
r = 45 + round_num * 2
else:
r = round_num
for restaurant in restaurants:
box = SCNBox.boxWithWidth_height_length_chamferRadius_(W, L, H, 0)
box_node = SCNNode.nodeWithGeometry_(box)
x, z = restaurant[1], restaurant[0]
if r:
x, z = convert_round(x, z, r)
box_node.setPosition_((x, 25, z))
box_node.runAction(SCNAction.repeatActionForever(rotate_action))
a = numpy.array([0, 0])
b = numpy.array(restaurant[:2])
u = b - a
length = numpy.linalg.norm(u)
if length < 100:
box.material().setColor_(deepskyblue.CGColor())
else:
box.material().setColor_(UIColor.blueColor().CGColor())
name = str(restaurant[2])
metal = '{} meters'.format(int(length))
root_node.addChildNode_(
get_text('{0}\n{1}'.format(name, metal.center(len(name))), x - 6, 25, z - 6))
root_node.addChildNode_(box_node)
class MyARView(ui.View):
def __init__(self):
super().__init__()
self.flex = 'WH'
@on_main_thread
def initialize(self, round_num):
global scene_view
screen = ui.get_screen_size()
# set up the scene
scene = SCNScene.scene()
# set up the view delegate
methods = [MyARSCNViewDelegate_renderer_didAdd_for_, MyARSCNViewDelegate_session_didFailWithError_]
protocols = ['ARSCNViewDelegate']
MyARSCNViewDelegate = create_objc_class('MyARSCNViewDelegate', NSObject, methods=methods, protocols=protocols)
delegate = MyARSCNViewDelegate.alloc().init()
# set up the scene view
scene_view = createARSceneView(0, 0, screen.width, screen.height)
scene_view.scene = scene
scene_view.setDelegate_(delegate)
# set up the view controller
methods = [CustomViewController_viewWillAppear_, CustomViewController_viewWillDisappear_]
protocols = []
CustomViewController = create_objc_class('CustomViewController', UIViewController, methods=methods,
protocols=protocols)
cvc = CustomViewController.alloc().init()
cvc.view = scene_view
# initial setup
self_objc = ObjCInstance(self)
self_objc.nextResponder().addChildViewController_(cvc)
self_objc.addSubview_(scene_view)
cvc.didMoveToParentViewController_(self_objc)
# start the AR session
run(scene_view.session())
root_node = scene.rootNode()
scene_view = SCNView.alloc().initWithFrame_options_(((0, 0), (400, 400)), None).autorelease()
scene_view.setAutoresizingMask_(18)
scene_view.setAllowsCameraControl_(True)
# light source settings
light_node = SCNNode.node()
light_node.setPosition_((1.5, 1.5, 1.5))
light = SCNLight.light()
light.setType_('omni')
light.setCastsShadow_(True)
light_node.setLight_(light)
# camera settings
camera = SCNCamera.camera()
camera_node = SCNNode.node()
camera_node.setCamera(camera)
camera_node.setPosition((0, 2, 0))
# add child nodes to the root node
root_node.addChildNode_(camera_node)
root_node.addChildNode_(light_node)
add_restaurants(root_node, round_num)
def will_close(self):
session = scene_view.session()
session.pause()
if __name__ == '__main__':
v = MyARView()
v.present('full_screen', hide_title_bar=True, orientations=['portrait'])
v.initialize(0)
|
[
"noreply@github.com"
] |
hirax.noreply@github.com
|
92574a41f24fa70f4056ae2050c2c690a412eac4
|
5e5d5fd3d6da5191c426679f3c3ff34a53bf65f9
|
/tests/conftest.py
|
23b7134ac9a4bd87b2089dd90dace00f62feeaa4
|
[
"MIT"
] |
permissive
|
Locotar/anerp
|
36f0e5b1b6e6b7b3ab9b821fba7d8008e185b6dc
|
784d990071b6b380bfd532db76f242af3a98e4e4
|
refs/heads/master
| 2022-04-26T13:25:17.865775
| 2016-02-01T06:12:18
| 2016-02-01T06:12:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# -*- coding: utf-8 -*-
'''Defines fixtures available to all tests.'''
import pytest
from webtest import TestApp
from anerp.app import create_app
from anerp.lib.database import db as _db
from anerp.settings import TestConfig
from .factories import UserFactory
@pytest.yield_fixture(scope='function')
def app():
'''An application for the tests.'''
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
'''A Webtest app.'''
return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
'''A database for the tests.'''
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture
def user(db):
'''A user for the tests.'''
user = UserFactory(password='myprecious')
db.session.commit()
return user
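# A minimal sketch (an addition, not part of the original suite) of a test that
# could consume these fixtures; check_password is assumed to come from the
# project's User model:
#
# def test_user_password(user):
#     assert user.check_password('myprecious')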
|
[
"fernandojr.ifcg@live.com"
] |
fernandojr.ifcg@live.com
|
57bbdb4ebc42d7134e424b6f16fcbb7fa1ec8d7a
|
6d03a06a80910d13023949b4297aaa1ac348980b
|
/cifar10.py
|
2ac72eecceae47c5cb7993ef3fe24343da264ce4
|
[] |
no_license
|
strike60/cifar10-NIN
|
0f6d83ef3b80689f65c3a5e48ab48662ef44641f
|
259f81801c7bbd58211994e4281db2d771aa69f3
|
refs/heads/master
| 2021-01-25T14:10:43.958757
| 2018-03-05T02:11:01
| 2018-03-05T02:11:01
| 123,663,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,385
|
py
|
import tensorflow as tf
import cifar10_input
import os
import numpy as np
import re
import sys
import tarfile
from six.moves import urllib
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer(
'batch_size', 128, """Number of images to process in a batch""")
tf.app.flags.DEFINE_string(
'data_dir', '../cifar10_data', """Path to the CIFAR-10 data directory.""")
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
MOVING_AVERAGE_DECAY = 0.9999
NUM_EPOCHS_PER_DECAY = 300
LEARNING_RATE_DECAY_FACTOR = 0.9
INITIAL_LEARNING_RATE = 0.0001
conv_weight_decay = 0.00005
TOWER_NAME = 'tower'
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
var = tf.get_variable(
name, shape, initializer=initializer, dtype=tf.float32)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(
stddev=stddev, dtype=tf.float32))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(
data_dir=data_dir, batch_size=FLAGS.batch_size)
return images, labels
def inputs(eval_data):
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(
eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size)
return images, labels
def inference(images, keep_prob):
# images size is [batch,32,32,3]
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 3, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# conv1 size is [batch,32,32,192]
with tf.variable_scope('cccp1') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 192, 160], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [160], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp1)
# cccp1 size is [batch,32,32,160]
with tf.variable_scope('cccp2') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 160, 96], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(cccp1, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp2)
# cccp2 size is [batch,32,32,96]
with tf.variable_scope('pool1') as scope:
pool1 = tf.nn.max_pool(cccp2, ksize=[1, 3, 3, 1], strides=[
1, 2, 2, 1], padding="SAME", name='pool1')
# pool1 size is [batch,16,16,96]
with tf.variable_scope('dropout1') as scope:
dropout = tf.nn.dropout(pool1, keep_prob, name='dropout1')
# dropout size is [batch,16,16,96]
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[5, 5, 96, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(dropout, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# conv2 size is [batch,16,16,192]
with tf.variable_scope('cccp3') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 192, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(conv2, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp3 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp3)
# cccp3 size is [batch,16,16,192]
with tf.variable_scope('cccp4') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 192, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(cccp3, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp4 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp4)
# cccp4 size is [batch,16,16,192]
with tf.variable_scope('pool2') as scope:
pool2 = tf.nn.max_pool(cccp4, ksize=[1, 3, 3, 1], strides=[
1, 2, 2, 1], padding="SAME", name='pool2')
# pool2 size is [batch,8,8,192]
with tf.variable_scope('dropout2') as scope:
dropout2 = tf.nn.dropout(pool2, keep_prob, name='dropout2')
# dropout2 size is [batch,8,8,192]
with tf.variable_scope('conv3') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[3, 3, 192, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(dropout2, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv3)
# conv3 size is [batch,8,8,192]
with tf.variable_scope('cccp5') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 192, 192], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu(
'biases', [192], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp5 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp5)
# cccp5 size is [batch,8,8,192]
with tf.variable_scope('cccp6') as scope:
kernel = _variable_with_weight_decay(
'weights', shape=[1, 1, 192, 10], stddev=5e-2, wd=conv_weight_decay)
conv = tf.nn.conv2d(cccp5, kernel, [1, 1, 1, 1], padding="SAME")
biases = _variable_on_cpu('biases', [10], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
cccp6 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(cccp6)
# cccp6 size is [batch,8,8,10]
logits = tf.reduce_mean(cccp6, [1, 2])
return logits
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
for l in losses + [total_loss]:
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN/FLAGS.batch_size
decay_steps = int(num_batches_per_epoch*NUM_EPOCHS_PER_DECAY)
lr = tf.train.exponential_decay(
INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
tf.summary.scalar('learning_rate', lr)
loss_averages_op = _add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.AdamOptimizer(lr)
grads = opt.compute_gradients(total_loss)
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variable_averages_op = variable_averages.apply(
tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variable_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
# if __name__ == "__main__":
# a = np.ones([10, 32, 32, 3], dtype=np.float32)
# c = tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 9], dtype=tf.int32)
# a_ = tf.constant(a)
# b = inference(a_, 1.0)  # keep_prob=1.0 disables dropout
# d = tf.nn.in_top_k(b, c, 1)
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# print(sess.run(b))
# print(np.sum(sess.run(d)))
|
[
"xiangdanqi60@gmail.com"
] |
xiangdanqi60@gmail.com
|
899597fd8353a77813a37816a08ac1651bfc58e9
|
f6cd0b0ffa6638318ca7f23077245b6379286975
|
/mod/wotstat/res/scripts/client/gui/mods/wot_stat/asyncRsponce.py
|
8b7981fccaf2463bfccc4205a87b462a16e84607
|
[
"MIT"
] |
permissive
|
Steelwall2014/wot-stat
|
54ddfea0597252e4d3b1cfc2a122b7dc5c5f34e1
|
0439c56cfdc0ef6e9c3011850e624d6712daaa77
|
refs/heads/main
| 2023-07-04T02:12:55.787612
| 2021-08-11T01:00:21
| 2021-08-11T01:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
# -*- coding: utf-8 -*-
import BigWorld
import threading
import urllib
import urllib2
json_headers = {'Content-type': 'application/json',
'Accept': 'application/json'}
def get_async(url, data=None, callback=None):
request_async(url, data, get, callback)
def post_async(url, data=None, callback=None):
request_async(url, data, post, callback)
def request_async(url, data, method, callback):
event = threading.Event()
runner = threading.Thread(target=run,
args=(event, url, data, method, callback))
runner.start()
event.wait()
def run(event, url, data, method, callback):
event.set()
result = method(url, data)
if callback:
callback(result)
def get(url, data):
    if data:
        params = urllib.urlencode(data)
        url = '?'.join([url, params])
    return urllib2.urlopen(url).read()
def post(url, data):
if data:
req = urllib2.Request(url, data, headers=json_headers)
return urllib2.urlopen(req).read()
else:
return urllib2.urlopen(url).read()
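# A hedged usage sketch (an addition; the URL and handler below are
# illustrative assumptions, not part of the mod):
#
# def on_response(body):
#     print(body)
#
# get_async('http://example.com/stats', {'player': 'id'}, callback=on_response)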
|
[
"soprachev@mail.ru"
] |
soprachev@mail.ru
|
64a0114f9c5ff58208a57a2598eee7dcbe253ab4
|
3e1b705d7e2d771a13bbb29d5c9e0279ec7922e9
|
/kmeans-7.py
|
adf5c14c0ae62424d646af97b1df100408ee5beb
|
[] |
no_license
|
Chandu-K/machinelearninginpython
|
77c51a04afc833f0ae26df6579fa0c09b6457d0d
|
324b0f3566959c2ea7d2b484ffd812e5d6357cfb
|
refs/heads/master
| 2020-03-22T03:27:54.535129
| 2018-07-02T10:17:54
| 2018-07-02T10:17:54
| 139,433,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
from sklearn.cluster import KMeans
X= np.array([[1,2],[1.5,1.8],[5,8],[8,8],[1,0.6],[9,11]])
#plt.scatter(X[:,0],X[:,1],s=150)
#plt.show()
clf=KMeans(n_clusters=2)
clf.fit(X)
centroids =clf.cluster_centers_
labels=clf.labels_
colors=["g.","r.","c.","b.","k.","o."]
for i in range(len(X)):
plt.plot(X[i][0],X[i][1],colors[labels[i]], markersize=100)
plt.scatter(centroids[:,0],centroids[:,1], marker='x',s=150,linewidth=5)
plt.show()
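# A brief sketch (an addition, not in the original): the fitted model can also
# classify new points; the coordinates below are illustrative.
new_points = np.array([[2, 2], [8, 9]])
print(clf.predict(new_points))  # prints the cluster index of each point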
|
[
"chandu.kota.pk@gmail.com"
] |
chandu.kota.pk@gmail.com
|
6103a26c7c46df208b2a6c1245e86efe9f687c8c
|
ea532c855f5372888f91c60d95a51b89bf091749
|
/controllers/userTable.py
|
6cb95b1faef8c340cf5e0c6ea6303f8f31382602
|
[] |
no_license
|
ndohertyjr/the_professor
|
383f04b42e1ccd6e06fca8a41387f23ac5b15e05
|
c2bec1423af4a415f67e9d99105110655205fb3f
|
refs/heads/main
| 2023-09-02T00:57:44.766728
| 2021-10-31T20:27:19
| 2021-10-31T20:27:19
| 382,406,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,902
|
py
|
# Database controller class
from model.database import get_db
from sqlite3 import Error
"""
Controls access to the user table in the database
"""
'''
CREATE FUNCTION
'''
# Verify user does not exist in table, then add user
def add_user(user_id, username, role, points):
db = get_db()
cursor = db.cursor()
query = ''' INSERT INTO users(id,username,role,points)
VALUES(?,?,?,?) '''
user = (user_id, username, role, points)
if user_exists(cursor, user_id):
print("User already exists")
else:
try:
cursor.execute(query, user)
print("User added!")
db.commit()
except Error as e:
print(e, "Failed to add user to DB")
if db:
db.close()
'''
READ FUNCTIONS
'''
# Get user id from username
def get_user_id(username):
db = get_db()
cursor = db.cursor()
query = ''' SELECT id FROM users WHERE username= ? '''
cursor.execute(query, (username,))
user_id = cursor.fetchone()[0]
print(user_id)
db.close()
return user_id
# Return all the usernames in the table sorted alphabetically
def get_all_usernames():
db = get_db()
cursor = db.cursor()
query = ''' SELECT username FROM users ORDER BY username COLLATE NOCASE ASC'''
cursor.execute(query)
all_users = []
for username in cursor.fetchall():
all_users.append(username[0])
db.close()
return all_users
# Get username by querying the users unique id
def get_username(user_id):
db = get_db()
cursor = db.cursor()
query = ''' SELECT username FROM users WHERE id=?'''
cursor.execute(query, (user_id,))
username = cursor.fetchone()[0]
db.close()
return username
# Get user's current participation points based on query of their unique id
def get_user_points(user_id):
db = get_db()
cursor = db.cursor()
query = ''' SELECT points FROM users WHERE id=?'''
cursor.execute(query, (user_id,))
points = cursor.fetchone()[0]
db.close()
return points
'''
UPDATE FUNCTIONS
'''
# updates the username associated with user id
def update_user_name(user_id, new_username):
db = get_db()
cursor = db.cursor()
update_query = ''' UPDATE users SET username = ? WHERE id = ? '''
updated_info = new_username, user_id
try:
cursor.execute(update_query, updated_info)
db.commit()
print("Username changed for user", user_id, "to", new_username)
except Error as e:
print(e, " ***Update username query failed***")
finally:
if db:
db.close()
# Update user points based on a value change
def update_user_points(user_id, points_val_change):
db = get_db()
cursor = db.cursor()
current_points = get_user_points(user_id)
current_points += points_val_change
update_query = ''' UPDATE users SET points = ? WHERE id = ? '''
update_values = current_points, user_id
try:
cursor.execute(update_query, update_values)
db.commit()
print("Point value updated!")
except Error as e:
print(e, "Update failed!")
finally:
if db:
db.close()
'''
DELETE FUNCTION
'''
# Delete user record by querying id
def delete_user(user_id):
db = get_db()
cursor = db.cursor()
delete_query = ''' DELETE FROM users WHERE id = ? '''
try:
cursor.execute(delete_query, (user_id,))
db.commit()
print("User deleted!")
except Error as e:
print(e, "Delete failed!")
finally:
if db:
db.close()
'''
TERTIARY FUNCTIONS
'''
# Validation function to confirm if user exists in DB
def user_exists(cursor, user_id):
query = ''' SELECT id FROM users WHERE id=? '''
cursor.execute(query, (user_id,))
result = cursor.fetchone()
if result:
return True
else:
return False
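# A hedged usage sketch (an addition; the id, username, and point values are
# illustrative assumptions, not from the original project):
#
# add_user(1234, 'new_student', 'student', 0)
# update_user_points(1234, 5)
# print(get_user_points(1234))  # -> 5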
|
[
"51974523+ndohertyjr@users.noreply.github.com"
] |
51974523+ndohertyjr@users.noreply.github.com
|
24d6c9ac7d8ee3f90c92b2f6116de6365a619256
|
858943227f43c2b6ad387a31120b86235152a0fb
|
/reporting/basic/username_to_hash_id_reports.py
|
6c10f486862315a415b4a7f0355da5b81b775083
|
[
"MIT"
] |
permissive
|
qjyzwlz/edx_data_research
|
2e14db206a5fef9a41c7a6470bf16591298f6dd9
|
6fb22ae7a40a1c531887a109a9e5aa4cc02cd189
|
refs/heads/master
| 2021-01-16T21:50:21.922901
| 2015-06-07T14:47:49
| 2015-06-07T14:47:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
'''
In this script, we take a csv report as input, map usernames
to their hash ids, and return a new csv report
Usage:
python username_to_hash_id_reports.py db_name csv_report
'''
import sys
import csv
from common.base_edx import EdXConnection
from common.generate_csv_report import CSV
db_name = sys.argv[1]
# Change name of collection as required
connection = EdXConnection(db_name, 'user_id_map' )
collection = connection.get_access_to_collection()
with open(sys.argv[2]) as f:
headers = next(f)
reader = csv.reader(f)
data = [row for row in reader]
result = []
for row in data:
cursor = collection['user_id_map'].find_one({'username' : row[0]})
hash_id = cursor['hash_id']
user_id = cursor['id']
result.append([row[0], user_id, hash_id] + row[1:])
input_file, extension = sys.argv[2].split('.')
output = CSV(result, [headers.split(',')[0],'User ID','User Hash ID'] + headers.split(',')[1:], output_file=input_file+'_username_anon.'+extension)
output.generate_csv()
|
[
"uehtesham90@gmail.com"
] |
uehtesham90@gmail.com
|
f8d6a6d886bf761a7a3d19afa1fc71b998ed7ede
|
3d3bfef2045fcadb15033f4d291d9fd8deb42286
|
/Uploader/serializer.py
|
7eed05788a14086778be098d62ea117004a8a0f6
|
[] |
no_license
|
dheerajnishad/django-interior-design-api
|
778e6cf8b302b96cf0b70f93404b3d3571028540
|
103481613e40b45dda4eab3d50853ce2be1bb9d8
|
refs/heads/master
| 2023-03-31T20:32:54.116498
| 2021-03-26T14:46:40
| 2021-03-26T14:46:40
| 351,811,361
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from rest_framework import serializers
from Uploader.models import ProjectImageUploader
class ProjectImageUploadSerializer(serializers.ModelSerializer):
class Meta:
model = ProjectImageUploader
fields = "__all__"
|
[
"dheerajnishad22@gmail.com"
] |
dheerajnishad22@gmail.com
|
8dc48a952db7cb203c4fca96f582f6db69b40e10
|
db2d1f36f348576d96b10f151f51c820d19b671c
|
/memory.py
|
682d98e5dcbb02a20b61aa08e1c49e52c84d2c48
|
[] |
no_license
|
kq2/Games
|
aa269ad43ea86ba0df09f699655fcd02f9089155
|
0908455c584205fcaaacb1f208582456e1072471
|
refs/heads/master
| 2021-01-01T03:49:48.799541
| 2016-10-03T01:04:20
| 2016-10-03T01:04:20
| 56,762,106
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,515
|
py
|
"""
Game Memory
"""
import math
import random
import kq2tile
import kq2grid
import kq2gui
import kq2animation
TILE_SIZE = 100, 100
CELL_SIZE = 106, 106
ANGLE_ANIMATION = [
0.16, 0.31, 0.45, 0.58, 0.7,
0.81, 0.91, 1.0, 1.08, 1.15,
1.14, 1.12, 1.09, 1.05, 1
]
def random_colors(num_color):
"""
Return given number of random paired colors.
"""
ans = []
while len(ans) < num_color:
color, _ = kq2tile.random_color()
ans.append(color)
ans.append(color)
random.shuffle(ans)
return ans
def valid_click(pos, tile):
"""
Return True if the click is valid for the Memory game.
"""
animation = tile.get_animation()
return (not animation.is_moving()
and animation.is_front()
and tile.has_pos(pos))
def new_tile(tile, tile_color):
"""
Add animation to new tile.
"""
animation = kq2animation.Flipping(0, tile_color, tile_color)
tile.set_animation(animation)
def reset_tile(tile):
"""
Add animation to show tile's color then hide.
"""
animation = tile.get_animation()
animation.set_back_color(tile.get_color())
animation.move(math.pi)
animation.move(0, ANGLE_ANIMATION)
def flip_tile(tile, angle, is_vel=False):
"""
Add animation to flip tile.
"""
animation = tile.get_animation()
animation.move(angle, ANGLE_ANIMATION, is_vel)
class Game(kq2grid.Grid, kq2gui.Game):
"""
Memory game.
"""
def __init__(self, rows, cols):
"""
Initialize a game with tiles.
"""
kq2grid.Grid.__init__(self, rows, cols)
self.score = 0
self.exposed_tiles = []
tile_color = 'White'
for row, col in self:
tile = kq2tile.Tile(row, col, CELL_SIZE,
TILE_SIZE, tile_color)
tile.set_border_color(tile_color)
self.set_tile(row, col, tile)
new_tile(tile, tile_color)
def reset(self):
"""
Override to reset each tile's color and animation.
"""
self.score = 0
self.exposed_tiles = []
self.get_gui().update_score(self.score)
colors = random_colors(len(self))
for row, col in self:
tile = self.get_tile(row, col)
tile.set_color(colors.pop())
reset_tile(tile)
def click(self, pos):
"""
Click on a tile.
"""
row, col = kq2tile.pos2cell(pos, CELL_SIZE)
tile = self.get_tile(row, col)
if tile and valid_click(pos, tile):
click_on_left = pos[0] < tile.get_center()[0]
angle = -math.pi if click_on_left else math.pi
self.flip(tile, angle)
def flip(self, tile, angle):
"""
Flip a tile. Main game logic.
"""
# if 2 tiles are already exposed, flip them back if
# they have different colors.
if len(self.exposed_tiles) == 2:
tile1 = self.exposed_tiles.pop()
tile2 = self.exposed_tiles.pop()
if tile1.get_color() != tile2.get_color():
flip_tile(tile1, 0)
flip_tile(tile2, 0)
# if 1 or 0 tile exposed, flip it to expose
if len(self.exposed_tiles) < 2:
flip_tile(tile, angle, is_vel=True)
self.exposed_tiles.append(tile)
self.score += 1
self.get_gui().update_score(self.score)
def draw(self, canvas):
"""
Draw all tiles on canvas.
"""
for row, col in self:
self.get_tile(row, col).draw(canvas)
class GUI(kq2gui.GUI):
"""
Memory game GUI.
"""
def __init__(self, gui, game):
"""
Initialize a game GUI, encapsulating the game and a real GUI,
so that the real GUI can be easily replaced.
"""
kq2gui.GUI.__init__(self, gui, game, 'Memory',
CELL_SIZE[0] * game.get_cols(),
CELL_SIZE[1] * game.get_rows(),
'Black')
self.set_mouse_click_handler(self.click)
self.label = self.add_label('0')
self.start_frame()
def click(self, pos):
"""
Mouse click handler.
"""
self.get_game().click(pos)
def update_score(self, score):
"""
Update score on GUI.
"""
self.label.set_text(str(score))
def run(gui):
"""
Start a game.
"""
game = Game(3, 3)
GUI(gui, game).new_game()
|
[
"kq2@rice.edu"
] |
kq2@rice.edu
|
2cfec2b157b72c86df79a7c60561062d66386896
|
a57cc7eb409c4dfac08753544e6fdce5842e8c30
|
/bought/admin.py
|
de206e3cf9f1a41819f39c68939d9210acbfcb1d
|
[] |
no_license
|
satizz/satiz_gallery
|
4ef307fdd54dc1e698a045141b7a5581fd858b7f
|
b7518a14b57bc78cb7bb05b3393b6a3ae8681327
|
refs/heads/master
| 2021-01-10T04:05:08.468337
| 2016-03-11T07:43:02
| 2016-03-11T07:43:02
| 52,650,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
from django.contrib import admin
from bought.models import item
# Register your models here.
class itemAdmin(admin.ModelAdmin):
fieldsets = [
('Item Details',{'fields':[]} ),
(None, {'fields':['name']}),
(None,{'fields':['quan']}),
(None,{'fields':['rate']}),
(None,{'fields':['date']}),
('status',{'fields':['status']}),
]
list_filter = ['date']
search_fields = ['name']
list_display = ('name','quan','rate','total','status')
admin.site.register(item, itemAdmin)
|
[
"sat.nep001@gmail.com"
] |
sat.nep001@gmail.com
|
290bb219c322d38a5d7f352adeacb26b906d1b1c
|
e767f21e012d76038086935754a2099e3a161aa5
|
/figure_scripts/Malezieux_CellRep_Fig3_kinetics.py
|
4ec5d479ab2a24fa7122178bf48fa102a17a2fa6
|
[] |
no_license
|
MerylMalezieux/Malezieux_CellRep_2020
|
99344888c47c11f2321dee907b9a428a539f4750
|
dac469bf9eb28134132cc81d9ee99fed2d391668
|
refs/heads/master
| 2022-11-26T18:22:29.985598
| 2020-05-27T15:14:08
| 2020-05-27T15:14:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,479
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 15:31:32 2019
@author: Ashley
"""
# Manuscript Malezieux, Kees, Mulle submitted to Cell Reports
# Figure 3
# Description: onset and offset kinetics of dVm with theta
# %% import modules
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from itertools import compress
import matplotlib as mpl
# %% definitions
# eta = event triggered averages. CHANGE: nans instead of removing events
# VERSION: sample window is more likely to be the same in different recordings
def prepare_eta(signal, ts, event_times, win):
samp_period = np.round((ts[1] - ts[0]), decimals=3)
win_npts = [np.round(np.abs(win[0])/samp_period).astype(int),
np.round(np.abs(win[1])/samp_period).astype(int)]
# win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
# ts[ts < ts[0] + np.abs(win[1])].size]
et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
et_signal = np.empty(0)
if event_times.size > 0:
if signal.ndim == 1:
et_signal = np.zeros((et_ts.size, event_times.size))
for i in np.arange(event_times.size):
if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
et_signal[:, i] = np.nan*np.ones(et_ts.size)
else:
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
elif signal.ndim == 2:
et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
for i in np.arange(event_times.size):
if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
et_signal[:, :, i] = np.nan*np.ones([signal.shape[0], et_ts.size])
else:
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
(ind + win_npts[1])]
return et_signal, et_ts
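# A toy usage sketch (an addition; the signal and event times are illustrative):
#
# ts = np.arange(0, 10, 0.001)                      # 1 kHz timebase
# sig = np.sin(2 * np.pi * ts)
# etas, eta_ts = prepare_eta(sig, ts, np.array([3.0, 7.0]), [-0.5, 0.5])
# etas.shape  # -> (1000, 2): one 1 s window per event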
# eta = event triggered averages.
# VERSION: sample window is more likely to be the same in different recordings
# VERSION: Keep good part of signal, put nans when recording ends within the window
# only remove events where there is no recording for the dVm comparison windows
# Note: only for 1d signals
def prepare_eta_keep(signal, ts, event_times, win, dVm_win):
samp_period = np.round((ts[1] - ts[0]), decimals=3)
win_npts = [np.round(np.abs(win[0])/samp_period).astype(int),
np.round(np.abs(win[1])/samp_period).astype(int)]
#et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
et_ts = np.arange(win[0], win[1], samp_period)
et_signal = np.empty(0)
# pad signal and ts with nans at front and end
signal = np.concatenate((np.full(win_npts[0], np.nan), signal,
np.full(win_npts[1], np.nan)), axis=None)
ts_pad = np.concatenate((et_ts[:win_npts[0]]+ts[0], ts,
et_ts[win_npts[0]:]+ts[-1]), axis=None)
if event_times.size > 0:
et_signal = np.zeros((et_ts.size, event_times.size))
for i in np.arange(event_times.size):
if np.logical_or((event_times[i]+dVm_win[0]<ts[0]), (event_times[i]+dVm_win[1]>ts[-1])):
et_signal[:, i] = np.nan*np.ones(et_ts.size)
else:
#ind = np.argmin(np.abs(ts-event_times[i])) + win_npts[0]
ind = np.searchsorted(ts_pad, event_times[i])
et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
return et_signal, et_ts
# eta = event triggered averages: Version: skip events too close to edge
# VERSION: sample window is more likely to be the same in different recordings
def prepare_eta_skip(signal, ts, event_times, win):
samp_period = np.round((ts[1] - ts[0]), decimals=3)
win_npts = [np.round(np.abs(win[0])/samp_period).astype(int),
np.round(np.abs(win[1])/samp_period).astype(int)]
# win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
# ts[ts < ts[0] + np.abs(win[1])].size]
et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
et_signal = np.empty(0)
if event_times.size > 0:
# remove any events that are too close to the beginning or end of recording
if event_times[0]+win[0] < ts[0]:
event_times = event_times[1:]
if event_times[-1]+win[1] > ts[-1]:
event_times = event_times[:-1]
if signal.ndim == 1:
et_signal = np.zeros((et_ts.size, event_times.size))
for i in np.arange(event_times.size):
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
elif signal.ndim == 2:
et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
for i in np.arange(event_times.size):
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
(ind + win_npts[1])]
return et_signal, et_ts
# eta = event triggered averages
# this code is for point processes, but times instead of inds
def prepare_eta_times(pt_times, event_times, win):
et_signal = []
if (pt_times.size > 0) & (event_times.size > 0):
# find pt_times that occur within window of each event_time
for i in np.arange(event_times.size):
ts_section = pt_times[(pt_times > event_times[i] + win[0]) &
(pt_times < event_times[i] + win[1])]
ts_section = ts_section - event_times[i]
et_signal.append(ts_section)
else:
et_signal = [np.empty(0) for k in np.arange(event_times.size)]
return et_signal
# definition for self_calculated variance
def MADAM(data_pts, descriptor):
v = np.sum(np.abs(data_pts-descriptor))/data_pts.size
return v
# %% Load data
dataset_folder = (r'C:\Users\akees\Documents\Ashley\Papers\MIND 1\Cell Reports\Dryad upload\Dataset')
cell_files = os.listdir(dataset_folder)
data = [{} for k in np.arange(len(cell_files))]
for i in np.arange(len(cell_files)):
full_file = os.path.join(dataset_folder, cell_files[i])
data[i] = np.load(full_file, allow_pickle=True).item()
states = [{'state':'theta', 'bef':-2.5, 'aft':0.5, 'samp_time':2,
't_win':[-30, 30], 'd_win':[-4, 12]},
{'state':'LIA', 'bef':-4, 'aft':-1, 'samp_time':2,
't_win':[-30, 30], 'd_win':[-4, 12]},
{'state':'run_theta', 'bef':-2.5, 'aft':0.5, 'samp_time':2,
't_win':[-30, 30], 'd_win':[-4, 12]},
{'state':'nonrun_theta', 'bef':-2.5, 'aft':0.5, 'samp_time':2,
't_win':[-30, 30], 'd_win':[-4, 12]}]
# %% process data
# make a dictionary to hold values collapsed over all cells
events = [{} for k in np.arange(len(states))]
# find Vm0, dVm and significance for each run, excluding when Ih is changed
for l in np.arange(len(states)):
all_c_p = np.empty(0)
all_Ih = np.empty(0)
all_Vm0 = np.empty(0)
all_dVm = np.empty(0)
all_dVm_p = np.empty(0)
for i in np.arange(len(data)):
samp_freq = 1/(data[i]['Vm_ds_ts'][1] - data[i]['Vm_ds_ts'][0])
num_ind = int(states[l]['samp_time']*samp_freq)
# find index of dIh_times
dIh_ind = data[i]['dIh_times']*samp_freq
dIh_ind = dIh_ind.astype(int)
c_p = np.zeros(data[i][states[l]['state']+'_start'].size)
Ih = np.zeros(data[i][states[l]['state']+'_start'].size)
Vm0 = np.zeros(data[i][states[l]['state']+'_start'].size)
dVm = np.zeros(data[i][states[l]['state']+'_start'].size)
dVm_p = np.zeros(data[i][states[l]['state']+'_start'].size)
for j in np.arange(data[i][states[l]['state']+'_start'].size):
# find indices
bef_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['state']+'_start'][j] + states[l]['bef'])))
aft_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['state']+'_start'][j] + states[l]['aft'])))
# put nan if times are straddling a time when dIh is changed
dIh_true = np.where((dIh_ind > bef_ind) & (dIh_ind < aft_ind + num_ind))[0]
if dIh_true.size > 0:
Ih[j] = np.nan
Vm0[j] = np.nan
dVm[j] = np.nan
dVm_p[j] = np.nan
else:
if np.logical_or(l==0, l==1):
c_p[j] = data[i][states[l]['state']+'_cell_p']
else:
c_p[j] = data[i]['theta_cell_p']
Ih_ind = np.searchsorted(data[i]['Vm_Ih_ts'],
data[i][states[l]['state']+'_start'][j])
Ih[j] = data[i]['Vm_Ih'][Ih_ind]
# test whether Vm values are significantly different
# Welch's t-test: normal, unequal variances, independent samp
t, p = stats.ttest_ind(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind],
data[i]['Vm_ds'][aft_ind:aft_ind+num_ind],
equal_var=False, nan_policy='omit')
dVm_p[j] = p
if (np.nanmean(data[i]['Vm_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmean(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind])) > 0:
Vm0[j] = np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmax(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
else:
Vm0[j] = np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmin(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
data[i][states[l]['state']+'_c_p'] = c_p
data[i][states[l]['state']+'_Ih'] = Ih
data[i][states[l]['state']+'_Vm0'] = Vm0
data[i][states[l]['state']+'_dVm'] = dVm
data[i][states[l]['state']+'_dVm_p'] = dVm_p
all_c_p = np.append(all_c_p, c_p)
all_Ih = np.append(all_Ih, Ih)
all_Vm0 = np.append(all_Vm0, Vm0)
all_dVm = np.append(all_dVm, dVm)
all_dVm_p = np.append(all_dVm_p, dVm_p)
events[l]['c_p'] = all_c_p
events[l]['Ih'] = all_Ih
events[l]['Vm0'] = all_Vm0
events[l]['dVm'] = all_dVm
events[l]['dVm_p'] = all_dVm_p
# add windows triggered by start of some brain states
for l in np.arange(len(states)):
for i in np.arange(len(data)):
dVm_win = [states[l]['bef'], states[l]['aft']+states[l]['samp_time']]
t_Vm, t_ts = prepare_eta_keep(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
data[i][states[l]['state']+'_start'],
states[l]['t_win'], dVm_win)
t_sp = prepare_eta_times(data[i]['sp_times'],
data[i][states[l]['state']+'_start'],
states[l]['t_win'])
data[i][states[l]['state']+'_Vm'] = t_Vm
data[i][states[l]['state']+'_sp'] = t_sp
states[l]['t_ts'] = t_ts
## add windows triggered by offset of some brain states
#for l in np.arange(len(states)):
# for i in np.arange(len(data)):
# dVm_win = [states[l]['bef'], states[l]['aft']+states[l]['samp_time']]
# t_Vm, t_ts = prepare_eta_keep(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
# data[i][states[l]['state']+'_stop'],
# states[l]['t_win'], dVm_win)
# t_sp = prepare_eta_times(data[i]['sp_times'],
# data[i][states[l]['state']+'_start'],
# states[l]['t_win'])
# data[i][states[l]['state']+'_Vm'] = t_Vm
# data[i][states[l]['state']+'_sp'] = t_sp
# states[l]['t_ts'] = t_ts
# add triggered windows to event dictionary
# VERSION: only removes events with all nans (not any nans)
for l in np.arange(len(events)):
raster_sp = []
psth_sp = np.empty(0)
Vm = np.empty((states[l]['t_ts'].shape[0], 0))
duration = np.empty(0)
cell_id = np.empty(0)
for i in np.arange(len(data)):
cell_psth_sp = np.empty(0)
if data[i][states[l]['state']+'_start'].size > 0:
Vm = np.append(Vm, data[i][states[l]['state']+'_Vm'], axis=1)
duration = np.append(duration, (data[i][states[l]['state']+'_stop'] -
data[i][states[l]['state']+'_start']))
if isinstance(data[i]['cell_id'], str):
ind = data[i]['cell_id'].find('_')
cell_int = int(data[i]['cell_id'][:ind])*np.ones(data[i][states[l]['state']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
else:
cell_int = data[i]['cell_id']*np.ones(data[i][states[l]['state']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
for j in np.arange(data[i][states[l]['state']+'_start'].size):
psth_sp = np.append(psth_sp, data[i][states[l]['state']+'_sp'][j])
cell_psth_sp = np.append(cell_psth_sp, data[i][states[l]['state']+'_sp'][j])
raster_sp.append(data[i][states[l]['state']+'_sp'][j])
data[i][states[l]['state']+'_psth_sp'] = cell_psth_sp
# remove nans
no_nan = np.logical_and([~np.isnan(Vm).all(axis=0)],
[~np.isnan(events[l]['Vm0'])]).flatten()
events[l]['Vm'] = Vm[:, no_nan]
events[l]['cell_id'] = cell_id[no_nan]
events[l]['duration'] = duration[no_nan]
events[l]['raster_sp'] = list(compress(raster_sp, no_nan))
events[l]['c_p'] = events[l]['c_p'][no_nan]
events[l]['Ih'] = events[l]['Ih'][no_nan]
events[l]['Vm0'] = events[l]['Vm0'][no_nan]
events[l]['dVm'] = events[l]['dVm'][no_nan]
events[l]['dVm_p'] = events[l]['dVm_p'][no_nan]
# normalize the Vm to start of the event-triggered window
for l in np.arange(len(events)):
start_ind = np.sum(states[l]['t_ts'] < states[l]['bef'])
stop_ind = np.sum(states[l]['t_ts'] < (states[l]['bef'] + states[l]['samp_time']))
comp = np.mean(events[l]['Vm'][start_ind:stop_ind], axis=0)
events[l]['norm_Vm'] = events[l]['Vm'] - comp
# for each event, label with -1 (hyp), 0 (no), 1 (dep)
for l in np.arange(len(events)):
dVm_type = np.zeros(events[l]['dVm'].size)
dVm_type[np.logical_and(events[l]['dVm']<0, events[l]['dVm_p']<0.05)] = -1
dVm_type[np.logical_and(events[l]['dVm']>0, events[l]['dVm_p']<0.05)] = 1
events[l]['dVm_type'] = dVm_type
# take the norm_Vm matrices and realign them to the offsets
for l in np.arange(len(events)):
d_win = -1*np.array(states[l]['d_win'][::-1])
win_ind = np.searchsorted(states[l]['t_ts'], d_win)
states[l]['t_ts_off'] = states[l]['t_ts'][win_ind[0]:win_ind[1]]
ind0 = np.searchsorted(states[l]['t_ts'], 0)
off_norm_Vm = np.full((int(np.diff(win_ind)), events[l]['duration'].size), np.nan)
for j in np.arange(events[l]['duration'].size):
shift_ind = np.searchsorted(states[l]['t_ts'], events[l]['duration'][j]) - ind0
off_norm_Vm[:, j] = events[l]['norm_Vm'][win_ind[0]+shift_ind:win_ind[1]+shift_ind, j]
events[l]['off_norm_Vm'] = off_norm_Vm
# %% make average spectrograms
#l = 0
#spec_win = [-3, 3]
#
## triggered z_Sxx for theta onset
#t_Sxx, t_spec_ts = prepare_eta_skip(data[0]['z_Sxx'], data[0]['spec_ts'],
# data[0]['theta_start'], spec_win)
#t_num_pts_spec = t_spec_ts.shape[0]
#f_num_pts = data[0]['z_Sxx'].shape[0]
#all_t_Sxx = np.empty([f_num_pts, t_num_pts_spec, 0])
#for i in np.arange(len(data)):
# t_Sxx, t_spec_ts = prepare_eta_skip(data[i]['z_Sxx'], data[i]['spec_ts'],
# data[i]['theta_start'], spec_win)
# if t_Sxx.size > 0:
# all_t_Sxx = np.append(all_t_Sxx, t_Sxx, axis=2)
#mean_on_Sxx = np.mean(all_t_Sxx, axis=2)
## triggered z_Sxx for theta offset
#t_Sxx, t_spec_ts = prepare_eta_skip(data[0]['z_Sxx'], data[0]['spec_ts'],
# data[0]['theta_stop'], spec_win)
#t_num_pts_spec = t_spec_ts.shape[0]
#f_num_pts = data[0]['z_Sxx'].shape[0]
#all_t_Sxx = np.empty([f_num_pts, t_num_pts_spec, 0])
#for i in np.arange(len(data)):
# t_Sxx, t_spec_ts = prepare_eta_skip(data[i]['z_Sxx'], data[i]['spec_ts'],
# data[i]['theta_stop'], spec_win)
# if t_Sxx.size > 0:
# all_t_Sxx = np.append(all_t_Sxx, t_Sxx, axis=2)
#mean_off_Sxx = np.mean(all_t_Sxx, axis=2)
# %% set figure parameters
# set colors
# states
c_run_theta = [0.398, 0.668, 0.547]
c_nonrun_theta = [0.777, 0.844, 0.773]
c_LIA = [0.863, 0.734, 0.582]
# response type
c_hyp = [0.184, 0.285, 0.430]
c_dep = [0.629, 0.121, 0.047]
c_no = [1, 1, 1]
# dependent variables
c_sp = [0.398, 0.461, 0.703]
c_Vm = [0.398, 0.461, 0.703]
# other
c_lgry = [0.75, 0.75, 0.75]
c_mgry = [0.5, 0.5, 0.5]
c_dgry = [0.25, 0.25, 0.25]
c_wht = [1, 1, 1]
c_blk = [0, 0, 0]
c_bwn = [0.340, 0.242, 0.125]
c_lbwn = [0.645, 0.484, 0.394]
c_grn = [0.148, 0.360, 0.000]
c_dVm = [c_hyp, c_mgry, c_dep]
c_state = [c_run_theta, c_LIA, c_mgry]
# set style defaults
mpl.rcParams['font.size'] = 8
mpl.rcParams['savefig.dpi'] = 1200
mpl.rcParams['lines.linewidth'] = 1.5
mpl.rcParams['font.sans-serif'] = "Arial"
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.linewidth'] = 1
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['boxplot.whiskerprops.linestyle'] = '-'
mpl.rcParams['patch.force_edgecolor'] = True
mpl.rcParams['patch.facecolor'] = 'b'
# set figure output folder
fig_folder = r'C:\Users\akees\Documents\Ashley\Figures\2020-05_Paper_MIND1\Fig3'
# %% Make figures
dVm = ['hyp', 'no', 'dep']
# plot the dVm color plot, events organized by event duration
# version: blue/red for hyp/dep; hyp/dep/no events plotted on separated figures
l = 0
m = 0
fig, ax = plt.subplots(1, 1, figsize=[1.8, 3.45])
# transpose the norm Vm
norm_Vm = np.transpose(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1])
duration = events[l]['duration'][events[l]['dVm_type'] == m-1]
# set order
order = np.flip(np.argsort(duration), axis=0)
p = ax.pcolormesh(states[l]['t_ts'], np.arange(order.size),
norm_Vm[order], cmap='RdBu_r', vmin=-5, vmax=5)
ax.scatter(duration[order], np.arange(order.size)+0.5,
color=c_blk, s=1)
ax.scatter(np.zeros(order.size), np.arange(order.size)+0.5,
color=c_blk, s=1)
ax.axis('tight')
ax.set_xlim(states[l]['d_win'])
ax.set_xticks([-4, 0, 4, 8, 12])
ax.set_yticks([order.size-1])
ax.set_yticklabels([order.size])
ax.set_ylim([0, order.size-1])
ax.set_ylabel('events', verticalalignment='center')
ax.yaxis.set_label_coords(-0.1, 0.5, transform=None)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.set_xlabel('time relative to theta\nonset (s)')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, 'Vm_color_' + dVm[m] + '.png'),
transparent=True)
m = 2
fig, ax = plt.subplots(1, 1, figsize=[1.8, 2.1])
# transpose the norm Vm
norm_Vm = np.transpose(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1])
duration = events[l]['duration'][events[l]['dVm_type'] == m-1]
# set order
order = np.flip(np.argsort(duration), axis=0)
p = ax.pcolormesh(states[l]['t_ts'], np.arange(order.size),
norm_Vm[order], cmap='RdBu_r', vmin=-5, vmax=5)
ax.scatter(duration[order], np.arange(order.size)+0.5,
color=c_blk, s=1)
ax.scatter(np.zeros(order.size), np.arange(order.size)+0.5,
color=c_blk, s=1)
ax.axis('tight')
ax.set_xlim(states[l]['d_win'])
ax.set_xticks([-4, 0, 4, 8, 12])
ax.set_yticks([order.size-1])
ax.set_yticklabels([order.size])
ax.set_ylim([0, order.size-1])
ax.set_ylabel('events', verticalalignment='center')
ax.yaxis.set_label_coords(-0.1, 0.5, transform=None)
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True)
ax.set_xlabel('time relative to theta\nonset (s)')
# add a scale bar for the colors
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="10%", pad=0.1)
cb = plt.colorbar(p, cax=cax, orientation="horizontal", ticks=[-5, 5])
cb.set_label(r'$\Delta$'+' Vm (mV)', labelpad=-22)
axcb = cb.ax
axcb.tick_params(bottom=False, top=True, labelbottom=False, labeltop=True)
#axcb.text(0, 15, r'$\Delta$'+' Vm (mV)', rotation=0, horizontalalignment='center')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, 'Vm_color_' + dVm[m] + '.png'),
transparent=True)
#%% make figures for on/offset kinetics
## avg spectrograms and on/offset kinetics
# VERSION: hyp and dep traces on same axis
# onset kinetics
fig, ax = plt.subplots(2, 2, figsize=[3.4, 3.5], sharex='col', sharey='row',
gridspec_kw = {'height_ratios':[1, 2]})
## average spectrogram - onset
#im = ax[0, 0].pcolormesh(t_spec_ts, data[0]['f'][data[0]['f']<16],
# mean_on_Sxx[data[0]['f']<16],
# shading='flat', cmap='viridis', vmin=-0.5, vmax=0.5)
#ax[0, 0].axvline(0, linestyle='--', color=c_blk)
#ax[0, 0].axis('tight')
#ax[0, 0].set_yticks([8, 16])
#ax[0, 0].spines['top'].set_visible(True)
#ax[0, 0].spines['right'].set_visible(True)
#divider = make_axes_locatable(ax[0, 0])
#cax = divider.append_axes("top", size="10%", pad=0.1)
#cb = plt.colorbar(im, cax=cax, orientation="horizontal", ticks=[-0.5, 0.5])
#cb.set_label('power (z)', labelpad=-21)
#axcb = cb.ax
#axcb.tick_params(bottom=False, top=True, labelbottom=False, labeltop=True)
## average spectrogram - offset
#im = ax[0, 1].pcolormesh(t_spec_ts, data[0]['f'][data[0]['f']<16],
# mean_off_Sxx[data[0]['f']<16],
# shading='flat', cmap='viridis', vmin=-0.5, vmax=0.5)
#ax[0, 1].axvline(0, linestyle='--', color=c_blk)
#ax[0, 1].axis('tight')
#ax[0, 1].set_yticks([8, 16])
#ax[0, 1].spines['top'].set_visible(True)
#ax[0, 1].spines['right'].set_visible(True)
#divider = make_axes_locatable(ax[0, 1])
#cax = divider.append_axes("top", size="10%", pad=0.1)
#cb = plt.colorbar(im, cax=cax, orientation="horizontal", ticks=[-0.5, 0.5])
#cb.set_label('power (z)', labelpad=-21)
#axcb = cb.ax
#axcb.tick_params(bottom=False, top=True, labelbottom=False, labeltop=True)
# average hyp - onset
m = 0
mean_Vm = np.nanmean(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1)
sem_Vm = stats.sem(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1, nan_policy='omit')
ax[1, 0].fill_between(states[l]['t_ts'], (mean_Vm + sem_Vm), (mean_Vm - sem_Vm),
facecolor=c_hyp, linewidth=0, alpha=0.5, zorder=1)
ax[1, 0].plot(states[0]['t_ts'], mean_Vm, color=c_hyp, zorder=4)
ax[1, 0].axhline(0, linestyle='--', color=c_blk)
ax[1, 0].axvline(0, linestyle='--', color=c_blk)
# average hyp - offset
m = 0
mean_Vm = np.nanmean(events[l]['off_norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1)
sem_Vm = stats.sem(events[l]['off_norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1, nan_policy='omit')
ax[1, 1].fill_between(states[0]['t_ts_off'], (mean_Vm + sem_Vm), (mean_Vm - sem_Vm),
facecolor=c_hyp, linewidth=0, alpha=0.5, zorder=1)
ax[1, 1].plot(states[0]['t_ts_off'], mean_Vm, color=c_hyp, zorder=4)
ax[1, 1].axhline(0, linestyle='--', color=c_blk)
ax[1, 1].axvline(0, linestyle='--', color=c_blk)
# average dep - onset
m = 2
mean_Vm = np.nanmean(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1)
sem_Vm = stats.sem(events[l]['norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1, nan_policy='omit')
ax[1, 0].fill_between(states[l]['t_ts'], (mean_Vm + sem_Vm), (mean_Vm - sem_Vm),
facecolor=c_dep, linewidth=0, alpha=0.5, zorder=1)
ax[1, 0].plot(states[0]['t_ts'], mean_Vm, color=c_dep, zorder=4)
ax[1, 0].axhline(0, linestyle='--', color=c_blk)
ax[1, 0].axvline(0, linestyle='--', color=c_blk)
# average dep - offset
m = 2
mean_Vm = np.nanmean(events[l]['off_norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1)
sem_Vm = stats.sem(events[l]['off_norm_Vm'][:, events[l]['dVm_type'] == m-1], axis=1, nan_policy='omit')
ax[1, 1].fill_between(states[l]['t_ts_off'], (mean_Vm + sem_Vm), (mean_Vm - sem_Vm),
facecolor=c_dep, linewidth=0, alpha=0.5, zorder=1)
ax[1, 1].plot(states[l]['t_ts_off'], mean_Vm, color=c_dep, zorder=4)
ax[1, 1].axhline(0, linestyle='--', color=c_blk)
ax[1, 1].axvline(0, linestyle='--', color=c_blk)
# format
ax[1, 0].set_xlim([-2, 1.5])
ax[1, 1].set_xlim([-1.5, 2])
ax[1, 0].set_xticks([-2, -1, 0, 1])
ax[1, 1].set_xticks([-1, 0, 1, 2])
ax[1, 0].set_ylim([-3.6, 3.1])
ax[0, 1].tick_params(left=False, right=True)
ax[1, 1].spines['left'].set_visible(False)
ax[1, 1].spines['right'].set_visible(True)
ax[1, 1].tick_params(left=False, right=True)
ax[0, 0].set_ylabel('Hz', rotation=0, verticalalignment='center')
ax[1, 0].set_ylabel(r'$\Delta$'+' Vm (mV)')
ax[1, 0].set_xlabel('time relative to theta\nonset (s)')
ax[1, 1].set_xlabel('time relative to theta\noffset (s)')
ax[0, 0].set_yticks([8, 16])
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, 'avg_on_off_v2.png'),
transparent=True)
|
[
"noreply@github.com"
] |
MerylMalezieux.noreply@github.com
|
cf3e55341e073c593654b0ce4d048e66abc3427b
|
0e950f5d08149dd1e0e4380964144287a8edc6e7
|
/steps/step11.py
|
432fbdd184225fd543c0eaf5bf2e9a1b1120cafe
|
[] |
no_license
|
shotakikuchi/deeplearning-from-scratch3
|
ff65e88216a6332e1679d04b66cc17ec87711ea0
|
ac09afc2d00064d7327d19d6a4de6096215430e2
|
refs/heads/master
| 2022-07-19T08:20:07.182011
| 2020-05-24T12:34:01
| 2020-05-24T12:34:01
| 266,511,741
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
import numpy as np
import unittest
class Variable:
def __init__(self, data):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError(f'{type(data)} is not supported')
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
if self.grad is None:
self.grad = np.ones_like(self.data)
funcs = [self.creator]
while funcs:
f = funcs.pop()  # get a function
x, y = f.input, f.output  # get the function's input and output
x.grad = f.backward(y.grad)  # call the function's backward method
if x.creator is not None:
    funcs.append(x.creator)  # add the preceding function to the list
class Function:
def __call__(self, inputs):
xs = [x.data for x in inputs]
ys = self.forward(xs)
outputs = [Variable(as_array(y)) for y in ys]
for output in outputs:
# memorize parent creator to output
output.set_creator(self)
# memorize inputs
self.inputs = inputs
# memorize outputs
self.outputs = outputs
return outputs
def forward(self, x):
raise NotImplementedError()
def backward(self, x):
raise NotImplementedError()
class Add(Function):
def forward(self, xs):
x0, x1 = xs
y = x0 + x1
return (y,)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
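
# A minimal usage sketch (an assumed example, not part of the original file):
# Function.__call__ consumes a list of Variables and returns a list of output
# Variables, so the single result of Add must be unpacked. backward() is not
# exercised here because, as noted above, it still assumes
# single-input/single-output functions at this stage.
if __name__ == '__main__':
    xs = [Variable(np.array(2)), Variable(np.array(3))]
    f = Add()
    ys = f(xs)  # ys is a list containing one Variable
    y = ys[0]
    print(y.data)  # -> 5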

# --- password_generator/settings.py (repo: dcepticonMan/django3-password-generator) ---

"""
Django settings for password_generator project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mb%2u&a1$5hpy#n2ms-dx+d9-o9c9)ol#5*@p@h0)_teqid287'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'generator',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'password_generator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'password_generator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
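
# Hedged hardening sketch (not part of the generated settings file): in
# production, prefer sourcing the secret key from the environment, falling
# back to the development key above only when DJANGO_SECRET_KEY is unset.
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)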

# --- test/test_tensor_creation_ops.py (repo: BBIGCat111/pytorch) ---

import torch
import numpy as np
import sys
import math
import warnings
import unittest
from itertools import product, combinations, combinations_with_replacement, permutations
import random
from torch.testing._internal.common_utils import (
TestCase, run_tests, do_test_empty_full, TEST_WITH_ROCM, suppress_warnings,
torch_to_numpy_dtype_dict, slowTest, TEST_SCIPY, IS_MACOS, IS_PPC,
IS_WINDOWS)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
onlyCPU, largeTensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA, dtypesIfCPU)
# TODO: refactor tri_tests_args, _compare_trilu_indices, run_additional_tri_tests
from torch.testing._internal.common_methods_invocations import (
tri_tests_args, _compare_trilu_indices, run_additional_tri_tests)
# TODO: replace with make_tensor
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
x = torch.tensor((), dtype=dtype, device=device)
else:
if dtype.is_floating_point or dtype.is_complex:
# work around torch.randn not being implemented for bfloat16
if dtype == torch.bfloat16:
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
x[torch.randn(*shape) > 0.5] = float('nan')
x[torch.randn(*shape) > 0.5] = float('inf')
x[torch.randn(*shape) > 0.5] = float('-inf')
elif with_extremal and dtype.is_complex:
x[torch.randn(*shape) > 0.5] = complex('nan')
x[torch.randn(*shape) > 0.5] = complex('inf')
x[torch.randn(*shape) > 0.5] = complex('-inf')
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
else:
x = torch.randint(15, 100, shape, dtype=dtype, device=device)
return x
# TODO: replace with make_tensor
def _rand_shape(dim, min_size, max_size):
shape = []
for i in range(dim):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
# Test suite for tensor creation ops
#
# Includes creation functions like torch.eye, random creation functions like
# torch.rand, and *like functions like torch.ones_like.
# DOES NOT INCLUDE view ops, which are tested in TestViewOps (currently in
# test_torch.py) OR numpy interop (which is also still tested in test_torch.py)
#
# See https://pytorch.org/docs/master/torch.html#creation-ops
class TestTensorCreation(TestCase):
exact_dtype = True
@onlyCPU
@dtypes(torch.float)
def test_diag_embed(self, device, dtype):
x = torch.arange(3 * 4, dtype=dtype, device=device).view(3, 4)
result = torch.diag_embed(x)
expected = torch.stack([torch.diag(r) for r in x], 0)
self.assertEqual(result, expected)
result = torch.diag_embed(x, offset=1, dim1=0, dim2=2)
expected = torch.stack([torch.diag(r, 1) for r in x], 1)
self.assertEqual(result, expected)
def test_cat_mem_overlap(self, device):
x = torch.rand((1, 3), device=device).expand((6, 3))
y = torch.rand((3, 3), device=device)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.cat([y, y], out=x)
@onlyOnCPUAndCUDA
def test_vander(self, device):
x = torch.tensor([1, 2, 3, 5], device=device)
self.assertEqual((0, 0), torch.vander(torch.tensor([]), 0).shape)
with self.assertRaisesRegex(RuntimeError, "N must be non-negative."):
torch.vander(x, N=-1)
with self.assertRaisesRegex(RuntimeError, "x must be a one-dimensional tensor."):
torch.vander(torch.stack((x, x)))
@onlyOnCPUAndCUDA
@dtypes(torch.bool, torch.uint8, torch.int8, torch.short, torch.int, torch.long,
torch.float, torch.double,
torch.cfloat, torch.cdouble)
def test_vander_types(self, device, dtype):
if dtype is torch.uint8:
# Note: no negative uint8 values
X = [[1, 2, 3, 5], [0, 1 / 3, 1, math.pi, 3 / 7]]
elif dtype is torch.bool:
# Note: see https://github.com/pytorch/pytorch/issues/37398
# for why this is necessary.
X = [[True, True, True, True], [False, True, True, True, True]]
elif dtype in [torch.cfloat, torch.cdouble]:
X = [[1 + 1j, 1 + 0j, 0 + 1j, 0 + 0j],
[2 + 2j, 3 + 2j, 4 + 3j, 5 + 4j]]
else:
X = [[1, 2, 3, 5], [-math.pi, 0, 1 / 3, 1, math.pi, 3 / 7]]
N = [None, 0, 1, 3]
increasing = [False, True]
for x, n, inc in product(X, N, increasing):
numpy_dtype = torch_to_numpy_dtype_dict[dtype]
pt_x = torch.tensor(x, device=device, dtype=dtype)
np_x = np.array(x, dtype=numpy_dtype)
pt_res = torch.vander(pt_x, increasing=inc) if n is None else torch.vander(pt_x, n, inc)
np_res = np.vander(np_x, n, inc)
self.assertEqual(
pt_res,
torch.from_numpy(np_res),
atol=1e-3,
rtol=0,
exact_dtype=False)
def test_cat_all_dtypes_and_devices(self, device):
for dt in torch.testing.get_all_dtypes():
x = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
expected1 = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 0), expected1)
expected2 = torch.tensor([[1, 2, 1, 2], [3, 4, 3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 1), expected2)
def test_fill_all_dtypes_and_devices(self, device):
for dt in torch.testing.get_all_dtypes():
for x in [torch.tensor((10, 10), dtype=dt, device=device),
torch.empty(10000, dtype=dt, device=device)]: # large tensor
numel = x.numel()
bound = 100 if dt in (torch.uint8, torch.int8) else 2000
for n in range(-bound, bound, bound // 10):
x.fill_(n)
self.assertEqual(x, torch.tensor([n] * numel, dtype=dt, device=device))
self.assertEqual(dt, x.dtype)
def test_roll(self, device):
numbers = torch.arange(1, 9, device=device)
single_roll = numbers.roll(1, 0)
expected = torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device)
self.assertEqual(single_roll, expected, msg="{} did not equal expected result".format(single_roll))
roll_backwards = numbers.roll(-2, 0)
expected = torch.tensor([3, 4, 5, 6, 7, 8, 1, 2], device=device)
self.assertEqual(roll_backwards, expected, msg="{} did not equal expected result".format(roll_backwards))
data = numbers.view(2, 2, 2)
rolled = data.roll(1, 0)
expected = torch.tensor([5, 6, 7, 8, 1, 2, 3, 4], device=device).view(2, 2, 2)
self.assertEqual(expected, rolled, msg="{} did not equal expected result: {}".format(rolled, expected))
data = data.view(2, 4)
# roll a loop until back where started
loop_rolled = data.roll(2, 0).roll(4, 1)
self.assertEqual(data, loop_rolled, msg="{} did not equal the original: {}".format(loop_rolled, data))
# multiple inverse loops
self.assertEqual(data, data.roll(-20, 0).roll(-40, 1))
self.assertEqual(torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device), numbers.roll(1, 0))
# test non-contiguous
# strided equivalent to numbers.as_strided(size=(4, 2), stride=(1, 4))
strided = numbers.view(2, 4).transpose(0, 1)
self.assertFalse(strided.is_contiguous(), "this test needs a non-contiguous tensor")
expected = torch.tensor([4, 8, 1, 5, 2, 6, 3, 7]).view(4, 2)
rolled = strided.roll(1, 0)
self.assertEqual(expected, rolled,
msg="non contiguous tensor rolled to {} instead of {} ".format(rolled, expected))
# test roll with no dimension specified
expected = numbers.roll(1, 0).view(2, 4)
self.assertEqual(expected, data.roll(1), msg="roll with no dims should flatten and roll.")
self.assertEqual(expected, data.roll(1, dims=None), msg="roll with no dims should flatten and roll.")
# test roll over multiple dimensions
expected = torch.tensor([[7, 8, 5, 6], [3, 4, 1, 2]], device=device)
double_rolled = data.roll(shifts=(2, -1), dims=(1, 0))
self.assertEqual(double_rolled, expected,
msg="should be able to roll over two dimensions, got {}".format(double_rolled))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=()))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=1))
# shifts/dims should align
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1, 2), dims=(1,)))
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1,), dims=(1, 2)))
# test bool tensor
t = torch.zeros(6, dtype=torch.bool, device=device)
t[0] = True
t[3] = True
self.assertEqual(torch.tensor([False, True, False, False, True, False]), t.roll(1, 0))
# test complex tensor
t = torch.tensor([1, 2 + 1j, 3.5, 4. + 2j, 5j, 6.], device=device)
t[0] = 1 + 0.5j
t[3] = 4.
expected = torch.tensor([6., 1 + 0.5j, 2 + 1j, 3.5, 4., 5j], device=device)
self.assertEqual(expected, t.roll(1, 0))
@slowTest
def test_triu_tril(self, device):
def gen_mask(shape, diagonal, device, upper):
mask = torch.zeros(*shape[-2:]).byte()
for i in range(shape[-2]):
for j in range(shape[-1]):
cond = j - i < diagonal if upper else j - i > diagonal
if cond:
mask[i, j] = 1
return mask.expand(*shape).to(device)
torch_functions = {True: torch.triu, False: torch.tril}
numpy_functions = {True: np.triu, False: np.tril}
# TODO: remove this when bool and half are supported for torch.where
def bool_half_compat_where(pred, true_tensor, false_tensor, dtype):
if dtype == torch.bool or dtype == torch.half:
return torch.where(pred.byte(), true_tensor.byte(), false_tensor.byte()).to(dtype=dtype)
else:
return torch.where(pred, true_tensor, false_tensor)
def run_test(shape, device, diagonal, dtype):
x = torch.empty(*shape, device=device, dtype=dtype).fill_(2)
for upper in [True, False]:
# normal test with mask
torch_tri_func = torch_functions[upper]
res1 = torch_tri_func(x, diagonal=diagonal)
res2 = torch.empty(0, device=device, dtype=dtype)
torch_tri_func(x, diagonal=diagonal, out=res2)
exp_mask = gen_mask(shape, diagonal, device, upper)
expected = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x, dtype)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertEqual(expected, res1, atol=0, rtol=0)
# non-contiguous and expanded tensors test
if 0 not in shape:
for s in range(-len(shape), -1):
# non-contiguous tensors
x_nc = x.clone().transpose(s, s + 1)
exp_mask = gen_mask(x_nc.size(), diagonal, device, upper)
if 1 not in shape:
assert not x_nc.is_contiguous(), "x is intentionally non-contiguous"
exp_nc = bool_half_compat_where(exp_mask, torch.tensor(0).type_as(x), x_nc, dtype)
self.assertEqual(torch_tri_func(x_nc, diagonal), exp_nc, atol=0, rtol=0)
x_nc_is_contiguous = x_nc.is_contiguous()
if upper:
self.assertEqual(x_nc.triu_(diagonal), exp_nc, atol=0, rtol=0)
else:
self.assertEqual(x_nc.tril_(diagonal), exp_nc, atol=0, rtol=0)
self.assertTrue(x_nc.is_contiguous() == x_nc_is_contiguous,
"contiguity of x_nc should not be changed")
# expanded tensors
expanded_size = (x.size(0),) + x.size()
x_expanded = x.clone().expand(*expanded_size)
if x.size(0) != 1:
assert 0 in x_expanded.stride(), "x intentionally has 0 in its stride"
output = torch_tri_func(x_expanded, diagonal)
self.assertEqual(output, expected.expand(expanded_size), atol=0, rtol=0)
if x.size(0) != 1:
self.assertTrue(0 in x_expanded.stride(),
"geometry of x_expanded should be the same")
if upper:
self.assertEqual(output, x_expanded.triu_(diagonal), atol=0, rtol=0)
else:
self.assertEqual(output, x_expanded.tril_(diagonal), atol=0, rtol=0)
# numpy test
numpy_tri_func = numpy_functions[upper]
self.assertEqual(numpy_tri_func(x.to('cpu').numpy(), diagonal), res1.cpu().numpy())
diagonals = [-2, -1, 0, 1, 2]
shapes = [(3, 3), (5, 3, 3), (7, 5, 3, 3), # square matrices
(7, 3), (5, 7, 3), (7, 5, 7, 3), # fat matrices
(3, 7), (5, 3, 7), (7, 5, 3, 7), # thin matrices
(3, 0), (0, 3, 3), (3, 3, 0, 0), # no numel matrices
(3, 1), (5, 3, 1), (7, 5, 3, 1), # very fat matrices
(1, 3), (5, 1, 3), (7, 5, 1, 3), # very thin matrices
(1, 3, 3, 3), (3, 1, 3, 3, 3)] # unsqueezed batch dimensions
dtypes = [dtype for dtype in torch.testing.get_all_dtypes() if dtype != torch.bfloat16]
for s, d, dtype in product(shapes, diagonals, dtypes):
run_test(s, device, d, dtype)
def test_diagflat(self, device):
dtype = torch.float32
# Basic sanity test
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
# Test offset
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
# Test where input has more than one dimension
x = torch.randn((2, 3, 4), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Noncontig input
x = torch.randn((2, 3, 4), dtype=dtype, device=device).transpose(2, 0)
self.assertFalse(x.is_contiguous())
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Complex number support
result = torch.diagflat(torch.ones(4, dtype=torch.complex128))
expected = torch.eye(4, dtype=torch.complex128)
self.assertEqual(result, expected)
def test_block_diag(self, device):
def block_diag_workaround(*arrs):
arrs_expanded = []
for a in arrs:
if a.dim() == 2:
arrs_expanded.append(a)
elif a.dim() == 1:
arrs_expanded.append(a.expand(1, a.size(0)))
elif a.dim() == 0:
arrs_expanded.append(a.expand(1, 1))
shapes = torch.tensor([a.shape for a in arrs_expanded], device=device)
out = torch.zeros(
torch.sum(shapes, dim=0).tolist(),
dtype=arrs_expanded[0].dtype,
device=device
)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs_expanded[i]
r += rr
c += cc
return out
tensors = [
torch.rand((2, 2), device=device),
torch.rand((2, 3), device=device),
torch.rand(10, device=device),
torch.rand((8, 1), device=device),
torch.rand(1, device=device)[0]
]
result = torch.block_diag(*tensors)
result_check = block_diag_workaround(*tensors)
self.assertEqual(result, result_check)
tensor = torch.rand(1, device=device)[0]
result = torch.block_diag(tensor)
result_check = tensor.expand(1, 1)
self.assertEqual(result, result_check)
tensor = torch.rand(10, device=device)
result = torch.block_diag(tensor)
result_check = tensor.expand(1, tensor.size(0))
self.assertEqual(result, result_check)
result = torch.block_diag()
result_check = torch.empty(1, 0, device=device)
self.assertEqual(result, result_check)
self.assertEqual(result.device.type, 'cpu')
test_dtypes = [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128
]
# Test pairs of different dtypes
for dtype1 in test_dtypes:
for dtype2 in test_dtypes:
a = torch.tensor(1, device=device, dtype=dtype1)
b = torch.tensor(2, device=device, dtype=dtype2)
result = torch.block_diag(a, b)
result_dtype = torch.result_type(a, b)
result_check = torch.tensor([[1, 0], [0, 2]], device=device, dtype=result_dtype)
self.assertEqual(result, result_check)
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 1 has 3 dimensions"
):
torch.block_diag(torch.tensor(5), torch.tensor([[[6]]]))
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 0 has 4 dimensions"
):
torch.block_diag(torch.tensor([[[[6]]]]))
if device != 'cpu':
with self.assertRaisesRegex(
RuntimeError,
(
"torch.block_diag: input tensors must all be on the same device."
" Input 0 is on device cpu and input 1 is on device "
)
):
torch.block_diag(torch.ones(2, 2).cpu(), torch.ones(2, 2, device=device))
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_block_diag_scipy(self, device):
import scipy.linalg
scipy_tensors_list = [
[
1,
[2],
[],
[3, 4, 5],
[[], []],
[[6], [7.3]]
],
[
[[1, 2], [3, 4]],
[1]
],
[
[[4, 9], [7, 10]],
[4.6, 9.12],
[1j + 3]
],
[]
]
expected_torch_types = [
torch.float32,
torch.int64,
torch.complex64,
torch.float32
]
expected_scipy_types = [
torch.float64,
# windows scipy block_diag returns int32 types
torch.int32 if IS_WINDOWS else torch.int64,
torch.complex128,
torch.float64
]
for scipy_tensors, torch_type, scipy_type in zip(scipy_tensors_list, expected_torch_types, expected_scipy_types):
torch_tensors = [torch.tensor(t, device=device) for t in scipy_tensors]
torch_result = torch.block_diag(*torch_tensors)
self.assertEqual(torch_result.dtype, torch_type)
scipy_result = torch.tensor(
scipy.linalg.block_diag(*scipy_tensors),
device=device
)
self.assertEqual(scipy_result.dtype, scipy_type)
scipy_result = scipy_result.to(torch_type)
self.assertEqual(torch_result, scipy_result)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex(self, device, dtype):
real = torch.tensor([1, 2], device=device, dtype=dtype)
imag = torch.tensor([3, 4], device=device, dtype=dtype)
z = torch.complex(real, imag)
complex_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
self.assertEqual(torch.tensor([1.0 + 3.0j, 2.0 + 4.0j], dtype=complex_dtype), z)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_polar(self, device, dtype):
abs = torch.tensor([1, 2, -3, -4.5, 1, 1], device=device, dtype=dtype)
angle = torch.tensor([math.pi / 2, 5 * math.pi / 4, 0, -11 * math.pi / 6, math.pi, -math.pi],
device=device, dtype=dtype)
z = torch.polar(abs, angle)
complex_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
self.assertEqual(torch.tensor([1j, -1.41421356237 - 1.41421356237j, -3,
-3.89711431703 - 2.25j, -1, -1],
dtype=complex_dtype),
z, atol=1e-5, rtol=1e-5)
@onlyOnCPUAndCUDA
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64,
torch.float16, torch.complex64, torch.complex128, torch.bool)
def test_torch_complex_floating_dtype_error(self, device, dtype):
for op in (torch.complex, torch.polar):
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=dtype)
error = r"Expected both inputs to be Float or Double tensors but " \
r"got [A-Za-z]+ and [A-Za-z]+"
with self.assertRaisesRegex(RuntimeError, error):
op(a, b)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex_same_dtype_error(self, device, dtype):
def dtype_name(dtype):
return 'Float' if dtype == torch.float32 else 'Double'
for op in (torch.complex, torch.polar):
other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=other_dtype)
error = "Expected object of scalar type {} but got scalar type " \
"{} for second argument".format(dtype_name(dtype),
dtype_name(other_dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b)
@onlyOnCPUAndCUDA
@dtypes(torch.float32, torch.float64)
def test_torch_complex_out_dtype_error(self, device, dtype):
def dtype_name(dtype):
return 'Float' if dtype == torch.float32 else 'Double'
def complex_dtype_name(dtype):
return 'ComplexFloat' if dtype == torch.complex64 else 'ComplexDouble'
for op in (torch.complex, torch.polar):
a = torch.tensor([1, 2], device=device, dtype=dtype)
b = torch.tensor([3, 4], device=device, dtype=dtype)
out = torch.zeros(2, device=device, dtype=dtype)
expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
error = "Expected object of scalar type {} but got scalar type " \
"{} for argument 'out'".format(
complex_dtype_name(expected_dtype), dtype_name(dtype))
with self.assertRaisesRegex(RuntimeError, error):
op(a, b, out=out)
def test_cat_empty_legacy(self, device):
# FIXME: this is legacy behavior and should be removed
# when we support empty tensors with arbitrary sizes
dtype = torch.float32
x = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
empty = torch.randn((0,), dtype=dtype, device=device)
res1 = torch.cat([x, empty], dim=1)
res2 = torch.cat([empty, x], dim=1)
self.assertEqual(res1, res2)
res1 = torch.cat([empty, empty], dim=1)
self.assertEqual(res1, empty)
with self.assertRaisesRegex(RuntimeError,
'non-empty list of Tensors'):
torch.cat([], dim=1)
def test_cat_empty(self, device):
dtype = torch.float32
x = torch.randn((4, 3, 32, 32), dtype=dtype, device=device)
empty = torch.randn((4, 0, 32, 32), dtype=dtype, device=device)
res1 = torch.cat([x, empty], dim=1)
res2 = torch.cat([empty, x], dim=1)
self.assertEqual(res1, res2)
res1 = torch.cat([empty, empty], dim=1)
self.assertEqual(res1, empty)
# check non-legacy-behavior (sizes don't match)
empty = torch.randn((4, 0, 31, 32), dtype=dtype, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
# check non-legacy-behavior (dimensions don't match)
empty = torch.randn((4, 0), dtype=dtype, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, empty], dim=1))
self.assertRaises(RuntimeError, lambda: torch.cat([empty, x], dim=1))
def test_cat_out(self, device):
x = torch.zeros((0), device=device)
y = torch.randn((4, 6), device=device)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 0"):
torch.cat([x, y], dim=0, out=x)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 1"):
torch.cat([x, y], dim=0, out=y)
z = torch.zeros((4, 6), device=device)
with self.assertRaisesRegex(
RuntimeError, r"unsupported operation:.* input tensor 1"):
torch.cat([y, z], out=z[:2, :])
w = y.view(-1).clone()
a = torch.cat([w[:2], w[4:6]])
b = torch.cat([w[:2], w[4:6]], out=w[6:10])
self.assertEqual(a, b)
self.assertEqual(w[:6], y.view(-1)[:6])
# Case:
# Reference: https://github.com/pytorch/pytorch/issues/49878
for dim in [0, 1]:
x = torch.zeros((10, 5, 2), device=device)
random_length = random.randint(1, 4)
y = x.narrow(dim, 0, x.shape[dim] - random_length)
val = torch.full_like(y[0], 3., device=device)
if dim == 0:
self.assertTrue(y.is_contiguous())
else:
self.assertFalse(y.is_contiguous())
torch.cat((val[None],) * y.shape[0], dim=0, out=y)
expected_y = torch.cat((val[None],) * y.shape[0], dim=0)
expected_x = torch.zeros((10, 5, 2), device=device)
if dim == 0:
expected_x[:x.shape[dim] - random_length, :, :] = expected_y
elif dim == 1:
expected_x[:, :x.shape[dim] - random_length, :] = expected_y
self.assertEqual(y, expected_y)
self.assertEqual(x, expected_x)
def test_cat_out_channels_last(self, device):
x = torch.randn((4, 3, 8, 8))
y = torch.randn(x.shape)
res1 = torch.cat((x, y))
z = res1.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), out=z)
self.assertEqual(res1, res2)
@onlyCPU
def test_cat_in_channels_last(self, device):
for dim in range(4):
x = torch.randn((4, 15, 8, 8), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y), dim=dim)
x = x.clone().contiguous(memory_format=torch.channels_last)
y = y.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), dim=dim)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(res1, res2)
# Size larger than grain size.
x = torch.randn((4, 15, 256, 256), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y), dim=dim)
x = x.clone().contiguous(memory_format=torch.channels_last)
y = y.clone().contiguous(memory_format=torch.channels_last)
res2 = torch.cat((x, y), dim=dim)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(res1, res2)
@onlyCUDA
def test_cat_preserve_channels_last(self, device):
x = torch.randn((4, 3, 8, 8), device=device)
y = torch.randn(x.shape, device=device)
res1 = torch.cat((x, y))
res2 = torch.cat((x.contiguous(memory_format=torch.channels_last), y.contiguous(memory_format=torch.channels_last)))
self.assertEqual(res1, res2)
self.assertTrue(res2.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
@deviceCountAtLeast(2)
def test_cat_different_devices(self, devices):
cuda0 = torch.randn((3, 3), device=devices[0])
cuda1 = torch.randn((3, 3), device=devices[1])
with self.assertRaisesRegex(RuntimeError,
"input tensors must be on the same device"):
torch.cat((cuda0, cuda1))
cpu = torch.randn(3, 3)
with self.assertRaisesRegex(RuntimeError,
"input tensors must be on the same device"):
torch.cat((cuda0, cpu))
with self.assertRaisesRegex(RuntimeError,
"input tensors must be on the same device"):
torch.cat((cpu, cuda0))
# TODO: reconcile with other cat tests
# TODO: Compare with a NumPy reference instead of CPU
@onlyCUDA
def test_cat(self, device):
SIZE = 10
for dim in range(-3, 3):
pos_dim = dim if dim >= 0 else 3 + dim
x = torch.rand(13, SIZE, SIZE, device=device).transpose(0, pos_dim)
y = torch.rand(17, SIZE, SIZE, device=device).transpose(0, pos_dim)
z = torch.rand(19, SIZE, SIZE, device=device).transpose(0, pos_dim)
res1 = torch.cat((x, y, z), dim)
self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
x = torch.randn(20, SIZE, SIZE, device=device)
self.assertEqual(torch.cat(torch.split(x, 7)), x)
self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
y = torch.randn(1, SIZE, SIZE, device=device)
z = torch.cat([x, y])
self.assertEqual(z.size(), (21, SIZE, SIZE))
# TODO: update this test to compare against NumPy instead of CPU
@onlyCUDA
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_device_rounding(self, device, dtype):
# test half-to-even
a = [-5.8, -3.5, -2.3, -1.5, -0.5, 0.5, 1.5, 2.3, 3.5, 5.8]
res = [-6., -4., -2., -2., 0., 0., 2., 2., 4., 6.]
a_tensor = torch.tensor(a, device=device).round()
res_tensor = torch.tensor(res, device='cpu')
self.assertEqual(a_tensor, res_tensor)
    # Note: This test fails on XLA since its test cases are created by empty_strided, which
    # doesn't support overlapping sizes/strides in the XLA impl
    @onlyOnCPUAndCUDA
    def test_like_fn_stride_propagation_vs_tensoriterator_unary_op(self, device):
        # Test like functions against a tensoriterator-based unary operator (exp) to
        # make sure the tensor returned by a like function follows the same stride
        # propagation rule as tensoriterator does for unary operators. The like
        # function's output strides are always computed on the CPU side, so there
        # is no need to test GPU here.
def compare_helper_(like_fn, t):
te = torch.exp(t)
tl = like_fn(t)
self.assertEqual(te.stride(), tl.stride())
self.assertEqual(te.size(), tl.size())
like_fns = [
lambda t, **kwargs: torch.zeros_like(t, **kwargs),
lambda t, **kwargs: torch.ones_like(t, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
lambda t, **kwargs: torch.randn_like(t, **kwargs),
lambda t, **kwargs: torch.rand_like(t, **kwargs),
lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
lambda t, **kwargs: torch.empty_like(t, **kwargs)]
# dense non-overlapping tensor,
# non-dense non-overlapping sliced tensor
# non-dense non-overlapping gapped tensor
# non-dense non-overlapping 0 strided tensor
# non-dense overlapping general tensor
# non-dense overlapping sliced tensor
# non-dense overlapping gapped tensor
# non-dense overlapping 0 strided tensor
# non-dense overlapping equal strides
tset = (
torch.randn(4, 3, 2, device=device),
torch.randn(4, 3, 2, device=device)[:, :, ::2],
torch.empty_strided((4, 3, 2), (10, 3, 1), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 0, 3), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 2), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (4, 2, 1), device=device)[:, :, ::2].fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 1), device=device).fill_(1.0),
torch.empty_strided((4, 1, 1, 2), (10, 0, 0, 2), device=device).fill_(1.0),
torch.empty_strided((4, 2, 3), (10, 3, 3), device=device).fill_(1.0))
for like_fn in like_fns:
for t in tset:
for p in permutations(range(t.dim())):
tp = t.permute(p)
compare_helper_(like_fn, tp)
def _test_special_stacks(self, dim, at_least_dim, torch_fn, np_fn, device, dtype):
# Test error for non-tuple argument
t = torch.randn(10)
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn(t)
# Test error for a single array
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn((t))
# Test 0-D
num_tensors = random.randint(1, 5)
input_t = [torch.tensor(random.uniform(0, 10), device=device, dtype=dtype) for i in range(num_tensors)]
actual = torch_fn(input_t)
expected = np_fn([input.cpu().numpy() for input in input_t])
self.assertEqual(actual, expected)
for ndims in range(1, 5):
base_shape = list(_rand_shape(ndims, min_size=1, max_size=5))
for i in range(ndims):
shape = list(base_shape)
num_tensors = random.randint(1, 5)
torch_input = []
# Create tensors with shape being different along one axis only
for param in range(num_tensors):
shape[i] = random.randint(1, 5)
torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))
# Determine if input tensors have valid dimensions.
valid_dim = True
for k in range(len(torch_input) - 1):
for tdim in range(ndims):
# Test whether all tensors have the same shape except in concatenating dimension
# Unless the number of dimensions is less than the corresponding at_least function dimension
# Since the original concatenating dimension would shift after applying at_least and would no
# longer be the concatenating dimension
if (ndims < at_least_dim or tdim != dim) and torch_input[k].size()[tdim] != torch_input[k + 1].size()[tdim]:
valid_dim = False
# Special case for hstack is needed since hstack works differently when ndims is 1
if valid_dim or (torch_fn is torch.hstack and ndims == 1):
# Valid dimensions, test against numpy
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch_fn(torch_input)
expected = np_fn(np_input)
self.assertEqual(actual, expected)
else:
# Invalid dimensions, test for error
with self.assertRaisesRegex(RuntimeError, "Sizes of tensors must match except in dimension"):
torch_fn(torch_input)
with self.assertRaises(ValueError):
np_input = [input.cpu().numpy() for input in torch_input]
np_fn(np_input)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes(include_bfloat16=False) +
torch.testing.get_all_complex_dtypes()))
def test_hstack_column_stack(self, device, dtype):
ops = ((torch.hstack, np.hstack), (torch.column_stack, np.column_stack))
for torch_op, np_op in ops:
self._test_special_stacks(1, 1, torch_op, np_op, device, dtype)
# Test torch.column_stack with combinations of 1D and 2D tensors input
one_dim_tensor = torch.arange(0, 10).to(dtype=dtype, device=device)
two_dim_tensor = torch.arange(0, 100).to(dtype=dtype, device=device).reshape(10, 10)
inputs = two_dim_tensor, one_dim_tensor, two_dim_tensor, one_dim_tensor
torch_result = torch.column_stack(inputs)
np_inputs = [input.cpu().numpy() for input in inputs]
np_result = np.column_stack(np_inputs)
self.assertEqual(np_result,
torch_result)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes(include_bfloat16=False) +
torch.testing.get_all_complex_dtypes()))
def test_vstack_row_stack(self, device, dtype):
ops = ((torch.vstack, np.vstack), (torch.row_stack, np.row_stack))
for torch_op, np_op in ops:
self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
for i in range(5):
# Test dimension change for 1D tensor of size (N) and 2D tensor of size (1, N)
n = random.randint(1, 10)
input_a = _generate_input((n,), dtype, device, with_extremal=False)
input_b = _generate_input((1, n), dtype, device, with_extremal=False)
torch_input = [input_a, input_b]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch_op(torch_input)
expected = np_op(np_input)
self.assertEqual(actual, expected)
@onlyOnCPUAndCUDA
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes(include_bfloat16=False) +
torch.testing.get_all_complex_dtypes()))
def test_dstack(self, device, dtype):
self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
for i in range(5):
# Test dimension change for 1D tensor of size (N), 2D tensor of size (1, N), and 3D tensor of size (1, N, 1)
n = random.randint(1, 10)
input_a = _generate_input((n,), dtype, device, with_extremal=False)
input_b = _generate_input((1, n), dtype, device, with_extremal=False)
input_c = _generate_input((1, n, 1), dtype, device, with_extremal=False)
torch_input = [input_a, input_b, input_c]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch.dstack(torch_input)
expected = np.dstack(np_input)
self.assertEqual(actual, expected)
# Test dimension change for 2D tensor of size (M, N) and 3D tensor of size (M, N, 1)
m = random.randint(1, 10)
n = random.randint(1, 10)
input_a = _generate_input((m, n), dtype, device, with_extremal=False)
input_b = _generate_input((m, n, 1), dtype, device, with_extremal=False)
torch_input = [input_a, input_b]
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch.dstack(torch_input)
expected = np.dstack(np_input)
self.assertEqual(actual, expected)
@dtypes(torch.int32, torch.int64)
def test_large_linspace(self, device, dtype):
start = torch.iinfo(dtype).min
end = torch.iinfo(dtype).max & ~0xfff
steps = 15
x = torch.linspace(start, end, steps, dtype=dtype, device=device)
self.assertGreater(x[1] - x[0], (end - start) / steps)
@dtypes(torch.float32, torch.float64)
def test_unpack_double(self, device, dtype):
# Reference: https://github.com/pytorch/pytorch/issues/33111
vals = (2 ** 24 + 1, 2 ** 53 + 1,
np.iinfo(np.int64).max, np.iinfo(np.uint64).max, np.iinfo(np.uint64).max + 1,
-1e500, 1e500)
for val in vals:
t = torch.tensor(val, dtype=dtype, device=device)
a = np.array(val, dtype=torch_to_numpy_dtype_dict[dtype])
self.assertEqual(t, torch.from_numpy(a))
def _float_to_int_conversion_helper(self, vals, device, dtype):
a = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
t = torch.tensor(vals, device=device, dtype=torch.float).to(dtype)
self.assertEqual(torch.from_numpy(a), t.cpu())
# Checks that float->integer casts don't produce undefined behavior errors.
# Note: In C++, casting from a floating value to an integral dtype
# is undefined if the floating point value is not within the integral
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy has the same behavior.
@onlyOnCPUAndCUDA
@unittest.skipIf(IS_MACOS, "Test is broken on MacOS, see https://github.com/pytorch/pytorch/issues/38752")
    @unittest.skipIf(IS_PPC, "Test is broken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_finite(self, device, dtype):
min = torch.finfo(torch.float).min
max = torch.finfo(torch.float).max
# Note: CUDA max float -> integer conversion is divergent on some dtypes
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2, max)
if self.device_type == 'cuda':
if torch.version.hip:
# HIP min float -> int64 conversion is divergent
vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
else:
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
self._float_to_int_conversion_helper(vals, device, dtype)
# Note: CUDA will fail this test on most dtypes, often dramatically.
@onlyCPU
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_nonfinite(self, device, dtype):
vals = (float('-inf'), float('inf'), float('nan'))
self._float_to_int_conversion_helper(vals, device, dtype)
# TODO: re-enable this test
@unittest.skipIf(True, "real and imag not implemented for complex")
@onlyOnCPUAndCUDA
def test_complex_type_conversions(self, device):
dtypes = [torch.float, torch.complex64, torch.complex128]
for from_type in dtypes:
for to_type in dtypes:
from_tensor = torch.randn(4, dtype=from_type, device=device)
to_tensor = from_tensor.to(to_type)
if from_type.is_complex and not to_type.is_complex:
self.assertEqual(torch.real(from_tensor), to_tensor, exact_dtype=False)
elif not from_type.is_complex and to_type.is_complex:
self.assertEqual(from_tensor, torch.real(to_tensor), exact_dtype=False)
self.assertEqual(torch.zeros_like(torch.imag(to_tensor)), torch.imag(to_tensor), exact_dtype=False)
else:
self.assertEqual(from_tensor, to_tensor, exact_dtype=False)
@slowTest
@onlyCPU
def test_cat_big(self, device):
SIZE1 = 6500
SIZE2 = 4500
concat_list = []
concat_list.append(torch.ones((SIZE1, 1024 * 512), dtype=torch.uint8, device=device))
concat_list.append(torch.ones((SIZE2, 1024 * 512), dtype=torch.uint8, device=device))
result = torch.cat(concat_list)
self.assertEqual(result.size(0), SIZE1 + SIZE2)
@onlyCPU
def test_cat_bad_input_sizes(self, device):
x = torch.randn(2, 1, device=device)
y = torch.randn(2, 1, 1, device=device)
z = torch.randn(2, 1, 1, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, y, z]))
x = torch.randn(2, 1, 2, device=device)
y = torch.randn(2, 1, 1, device=device)
z = torch.randn(2, 2, 1, device=device)
self.assertRaises(RuntimeError, lambda: torch.cat([x, y, z], dim=1))
@onlyCPU
@dtypes(torch.half, torch.double, torch.int)
def test_cat2(self, device, dtype):
SIZE = 10
for dim in range(-3, 3):
pos_dim = dim if dim >= 0 else 3 + dim
x = torch.randint(low=-100, high=100, size=(13, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
y = torch.randint(low=-100, high=100, size=(17, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
z = torch.randint(low=-100, high=100, size=(19, SIZE, SIZE), device=device).to(dtype).transpose(0, pos_dim)
res1 = torch.cat((x, y, z), dim)
self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
x = torch.randint(low=-100, high=100, size=(20, SIZE, SIZE), device=device).to(dtype)
self.assertEqual(torch.cat(torch.split(x, 7)), x)
self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
y = torch.randint(low=-100, high=100, size=(1, SIZE, SIZE), device=device).to(dtype)
z = torch.cat([x, y])
self.assertEqual(z.size(), (21, SIZE, SIZE))
self.assertRaises(RuntimeError, lambda: torch.cat([]))
self.assertRaisesRegex(TypeError, 'got None', lambda: torch.cat([x, None]))
@onlyCPU
def test_cat_scalars(self, device):
x = torch.tensor(0, device=device)
y = torch.tensor(1, device=device)
with self.assertRaisesRegex(RuntimeError, 'zero-dimensional.*cannot be concatenated'):
torch.cat([x, y])
def test_zeros_dtype_out_match(self, device):
d = torch.tensor((2, 3), device=device, dtype=torch.double)
self.assertRaises(RuntimeError, lambda: torch.zeros((2, 3), device=device, dtype=torch.float32, out=d))
# TODO: update to work on CUDA, too
@onlyCPU
def test_trilu_indices(self, device):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args)
run_additional_tri_tests(self, 'cpu')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1), torch.tril_indices(3, 3))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1), torch.triu_indices(3, 3))
# test stride 0 cases
x = torch.ones(
3, 1, 3, 3, dtype=torch.long, device='cpu', layout=torch.strided)
output = x.triu(2).expand(3, 3, 3, 3)
b = x.clone().expand(3, 3, 3, 3)
self.assertEqual(b.triu(2), output)
self.assertRaises(RuntimeError, lambda: b.triu_(2))
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack(self, device):
for dtype in (torch.half, torch.double, torch.int):
x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
for dim in range(4):
res = torch.stack((x, y, z), dim)
res_neg = torch.stack((x, y, z), dim - 4)
expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
self.assertEqual(res, res_neg)
self.assertEqual(res.size(), expected_size)
self.assertEqual(res.select(dim, 0), x, atol=0, rtol=0)
self.assertEqual(res.select(dim, 1), y, atol=0, rtol=0)
self.assertEqual(res.select(dim, 2), z, atol=0, rtol=0)
# TODO: update to work on CUDA, too
@onlyCPU
def test_stack_out(self, device):
for dtype in (torch.half, torch.double, torch.int):
x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
for dim in range(4):
expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
res_out = x.new(expected_size)
res_neg_out = x.new(expected_size)
res_out_dp = res_out.data_ptr()
res_out_neg_dp = res_neg_out.data_ptr()
torch.stack((x, y, z), dim, out=res_out)
torch.stack((x, y, z), dim - 4, out=res_neg_out)
self.assertEqual(res_out, res_neg_out)
self.assertEqual(res_out.size(), expected_size)
self.assertEqual(res_out_dp, res_out.data_ptr())
self.assertEqual(res_out_neg_dp, res_neg_out.data_ptr())
self.assertEqual(res_out.select(dim, 0), x, atol=0, rtol=0)
self.assertEqual(res_out.select(dim, 1), y, atol=0, rtol=0)
self.assertEqual(res_out.select(dim, 2), z, atol=0, rtol=0)
def test_repeat_interleave(self, device):
x = torch.tensor([0, 1, 2, 3], device=device)
expected = torch.tensor([1, 2, 2, 3, 3, 3], device=device)
self.assertEqual(torch.repeat_interleave(x), expected)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.arange(4, device=device).reshape(2, 2))
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.arange(4.0, device=device))
with self.assertRaises(RuntimeError):
torch.repeat_interleave(torch.tensor([1, 2, -1, 3, 4], device=device))
y = torch.tensor([[1, 2], [3, 4]], device=device)
y1_v1 = torch.repeat_interleave(y, 2)
y1_v2 = torch.repeat_interleave(y, torch.tensor(2, device=device))
y1_v3 = torch.repeat_interleave(y, torch.tensor([2], device=device))
y1_expect = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4], device=device)
self.assertEqual(y1_v1, y1_expect)
self.assertEqual(y1_v2, y1_expect)
self.assertEqual(y1_v3, y1_expect)
y2 = torch.repeat_interleave(y, 3, dim=1)
y2_expect = torch.tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]], device=device)
self.assertEqual(y2, y2_expect)
y3 = torch.repeat_interleave(y, torch.tensor([1, 2], device=device), dim=0)
y3_expect = torch.tensor([[1, 2],
[3, 4],
[3, 4]], device=device)
self.assertEqual(y3, y3_expect)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(y, torch.tensor([1, 2, 3], device=device), dim=0)
with self.assertRaises(RuntimeError):
torch.repeat_interleave(y, torch.arange(9, device=device).reshape(3, 3), dim=0)
# test zero sized dimension
x = torch.zeros((5, 0), device=device)
y = torch.repeat_interleave(x, repeats=3, dim=1)
self.assertEqual(y, x.new_zeros(5, 0, device=device))
x = torch.tensor([], dtype=torch.int64, device=device)
y = torch.repeat_interleave(x, x)
self.assertEqual(y, x)
    # TODO: update to work on CUDA, too
@onlyCPU
def test_new_methods_requires_grad(self, device):
size = (10,)
test_cases = [
# method name, args
('new_full', [size, 1]),
('new_empty', [size]),
('new_zeros', [size]),
]
for method_name, args in test_cases:
x = torch.randn(size)
for requires_grad in [True, False]:
x_new = x.__getattribute__(method_name)(*args, requires_grad=requires_grad)
self.assertEqual(x_new.requires_grad, requires_grad)
x = torch.randint(10, size)
with self.assertRaisesRegex(
RuntimeError,
r'Only Tensors of floating point and complex dtype can require gradients'):
x_new = x.__getattribute__(method_name)(*args, requires_grad=True)
# TODO: update to work on CUDA, too?
@onlyCPU
def test_tensor_from_sequence(self, device):
class MockSequence(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, item):
raise TypeError
class GoodMockSequence(MockSequence):
def __getitem__(self, item):
return self.lst[item]
bad_mock_seq = MockSequence([1.0, 2.0, 3.0])
good_mock_seq = GoodMockSequence([1.0, 2.0, 3.0])
with self.assertRaisesRegex(ValueError, 'could not determine the shape'):
torch.Tensor(bad_mock_seq)
self.assertEqual(torch.Tensor([1.0, 2.0, 3.0]), torch.Tensor(good_mock_seq))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_simple_scalar_cast(self, device):
ok = [torch.Tensor([1.5]), torch.zeros(1, 1, 1, 1)]
ok_values = [1.5, 0]
not_ok = map(torch.Tensor, [[], [1, 2], [[1, 2], [3, 4]]])
for tensor, value in zip(ok, ok_values):
self.assertEqual(int(tensor), int(value))
self.assertEqual(float(tensor), float(value))
self.assertEqual(complex(tensor), complex(value))
self.assertEqual(complex(torch.tensor(1.5j)), 1.5j)
for tensor in not_ok:
self.assertRaises(ValueError, lambda: int(tensor))
self.assertRaises(ValueError, lambda: float(tensor))
self.assertRaises(ValueError, lambda: complex(tensor))
self.assertRaises(RuntimeError, lambda: float(torch.tensor(1.5j)))
self.assertRaises(RuntimeError, lambda: int(torch.tensor(1.5j)))
# TODO: update to work on CUDA, too?
@onlyCPU
def test_offset_scalar_cast(self, device):
x = torch.Tensor([1, 2, 3])
y = x[2:]
self.assertEqual(int(y), 3)
def test_meshgrid(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid([a, b, c])
self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
expected_grid_b = torch.tensor([[[1, 1],
[2, 2],
[3, 3]]], device=device)
expected_grid_c = torch.tensor([[[1, 2],
[1, 2],
[1, 2]]], device=device)
self.assertTrue(grid_a.equal(expected_grid_a))
self.assertTrue(grid_b.equal(expected_grid_b))
self.assertTrue(grid_c.equal(expected_grid_c))
self.assertTrue(grid_a2.equal(expected_grid_a))
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
def test_cartesian_prod(self, device):
a = torch.tensor([1], device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
prod = torch.cartesian_prod(a, b, c)
expected = torch.tensor(list(product([a], b, c)), device=device)
self.assertEqual(expected, prod)
# test 0 size input
d = torch.empty(0, dtype=b.dtype, device=device)
prod = torch.cartesian_prod(a, b, c, d)
expected = torch.empty(0, 4, dtype=b.dtype, device=device)
self.assertEqual(expected, prod)
# test single input
prod = torch.cartesian_prod(b)
self.assertEqual(b, prod)
def test_combinations(self, device):
a = torch.tensor([1, 2, 3], device=device)
c = torch.combinations(a, r=1)
expected = torch.tensor(list(combinations(a, r=1)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=1, with_replacement=True)
expected = torch.tensor(list(combinations_with_replacement(a, r=1)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a)
expected = torch.tensor(list(combinations(a, r=2)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, with_replacement=True)
expected = torch.tensor(list(combinations_with_replacement(a, r=2)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=3)
expected = torch.tensor(list(combinations(a, r=3)), device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=4)
expected = torch.empty(0, 4, dtype=a.dtype, device=device)
self.assertEqual(c, expected)
c = torch.combinations(a, r=5)
expected = torch.empty(0, 5, dtype=a.dtype, device=device)
self.assertEqual(c, expected)
        # test empty input
a = torch.empty(0, device=device)
c1 = torch.combinations(a)
c2 = torch.combinations(a, with_replacement=True)
expected = torch.empty(0, 2, dtype=a.dtype, device=device)
self.assertEqual(c1, expected)
self.assertEqual(c2, expected)
def test_linlogspace_mem_overlap(self, device):
x = torch.rand(1, device=device).expand(10)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.linspace(1, 10, 10, out=x)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.logspace(1, 10, 10, out=x)
def test_ctor_with_numpy_array(self, device):
correct_dtypes = [
np.double,
np.float,
np.float16,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
np.bool,
]
incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'
incorrect_dtypes = [incorrect_byteorder + t for t in ['d', 'f']]
for dtype in correct_dtypes:
array = np.array([1, 2, 3, 4], dtype=dtype)
# Upcast
tensor = torch.DoubleTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
# Downcast (sometimes)
tensor = torch.FloatTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
tensor = torch.HalfTensor(array).to(device)
for i in range(len(array)):
self.assertEqual(tensor[i], array[i])
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_random(self, device, dtype):
# This test is flaky with p<=(2/(ub-lb))^200=6e-36
t = torch.empty(200, dtype=dtype, device=device)
lb = 1
ub = 4
t.fill_(-1)
t.random_(lb, ub)
self.assertEqual(t.min(), lb)
self.assertEqual(t.max(), ub - 1)
t.fill_(-1)
t.random_(ub)
self.assertEqual(t.min(), 0)
self.assertEqual(t.max(), ub - 1)
def test_random_bool(self, device):
size = 2000
t = torch.empty(size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
t.fill_(True)
t.random_()
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
def test_random_from_to_bool(self, device):
size = 2000
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
min_val = 0
max_val = 1
froms = [int64_min_val, -42, min_val - 1, min_val, max_val, max_val + 1, 42]
tos = [-42, min_val - 1, min_val, max_val, max_val + 1, 42, int64_max_val]
for from_ in froms:
for to_ in tos:
t = torch.empty(size, dtype=torch.bool, device=device)
if to_ > from_:
if not (min_val <= from_ <= max_val):
self.assertRaisesRegex(
RuntimeError,
"from is out of bounds",
lambda: t.random_(from_, to_)
)
elif not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
t.random_(from_, to_)
range_ = to_ - from_
delta = 1
self.assertTrue(from_ <= t.to(torch.int).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.int).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
def test_random_full_range(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype == torch.double:
fp_limit = 2**53
elif dtype == torch.float:
fp_limit = 2**24
elif dtype == torch.half:
fp_limit = 2**11
elif dtype == torch.bfloat16:
fp_limit = 2**8
else:
fp_limit = 0
t = torch.empty(size, dtype=dtype, device=device)
if dtype in [torch.float, torch.double, torch.half, torch.bfloat16]:
from_ = int(max(-fp_limit, int64_min_val))
to_inc_ = int(min(fp_limit, int64_max_val))
else:
from_ = int(max(torch.iinfo(dtype).min, int64_min_val))
to_inc_ = int(min(torch.iinfo(dtype).max, int64_max_val))
range_ = to_inc_ - from_ + 1
t.random_(from_, None)
delta = max(1, alpha * range_)
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_inc_ - delta) < t.to(torch.double).max() <= to_inc_)
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
def test_random_from_to(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype in [torch.float, torch.double, torch.half]:
min_val = int(max(torch.finfo(dtype).min, int64_min_val))
max_val = int(min(torch.finfo(dtype).max, int64_max_val))
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.bfloat16:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.uint8:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, -42, min_val - 1, min_val, 42, max_val, max_val + 1]
tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
elif dtype == torch.int64:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val]
else:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1]
tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
if dtype == torch.double:
fp_limit = 2**53
elif dtype == torch.float:
fp_limit = 2**24
elif dtype == torch.half:
fp_limit = 2**11
elif dtype == torch.bfloat16:
fp_limit = 2**8
else:
fp_limit = 0
for from_ in froms:
for to_ in tos:
t = torch.empty(size, dtype=dtype, device=device)
if to_ > from_:
if not (min_val <= from_ <= max_val):
self.assertRaisesRegex(
RuntimeError,
"from is out of bounds",
lambda: t.random_(from_, to_)
)
elif not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
if dtype.is_floating_point and (
not (-fp_limit <= from_ <= fp_limit) or not (-fp_limit <= (to_ - 1) <= fp_limit)):
if not (-fp_limit <= from_ <= fp_limit):
self.assertWarnsRegex(UserWarning, "from is out of bounds",
lambda: t.random_(from_, to_))
if not (-fp_limit <= (to_ - 1) <= fp_limit):
self.assertWarnsRegex(UserWarning, "to - 1 is out of bounds",
lambda: t.random_(from_, to_))
else:
t.random_(from_, to_)
range_ = to_ - from_
delta = max(1, alpha * range_)
if dtype == torch.bfloat16:
# Less strict checks because of rounding errors
# TODO investigate rounding errors
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
else:
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
def test_random_to(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype in [torch.float, torch.double, torch.half]:
min_val = int(max(torch.finfo(dtype).min, int64_min_val))
max_val = int(min(torch.finfo(dtype).max, int64_max_val))
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.bfloat16:
min_val = int64_min_val
max_val = int64_max_val
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.uint8:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
elif dtype == torch.int64:
min_val = int64_min_val
max_val = int64_max_val
tos = [-42, 0, 42, max_val]
else:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
from_ = 0
for to_ in tos:
t = torch.empty(size, dtype=dtype, device=device)
if to_ > from_:
if not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
t.random_(to_)
range_ = to_ - from_
delta = max(1, alpha * range_)
if dtype == torch.bfloat16:
# Less strict checks because of rounding errors
# TODO investigate rounding errors
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
else:
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
@dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
def test_random_default(self, device, dtype):
size = 2000
alpha = 0.1
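        # With no arguments, random_ fills floating point tensors with integers
        # in [0, 2**(mantissa bits + 1)] and integral tensors with their full
        # non-negative range; to_inc below is that inclusive upper bound.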
if dtype == torch.float:
to_inc = 1 << 24
elif dtype == torch.double:
to_inc = 1 << 53
elif dtype == torch.half:
to_inc = 1 << 11
elif dtype == torch.bfloat16:
to_inc = 1 << 8
else:
to_inc = torch.iinfo(dtype).max
t = torch.empty(size, dtype=dtype, device=device)
t.random_()
self.assertTrue(0 <= t.to(torch.double).min() < alpha * to_inc)
self.assertTrue((to_inc - alpha * to_inc) < t.to(torch.double).max() <= to_inc)
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_empty_full(self, device):
torch_device = torch.device(device)
device_type = torch_device.type
if device_type == 'cpu':
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
if device_type == 'cuda':
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, None)
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
# TODO: this test should be updated
@suppress_warnings
@onlyOnCPUAndCUDA
@deviceCountAtLeast(1)
def test_tensor_device(self, devices):
device_type = torch.device(devices[0]).type
if device_type == 'cpu':
self.assertEqual('cpu', torch.tensor(5).device.type)
self.assertEqual('cpu',
torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
self.assertEqual('cpu',
torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
self.assertEqual('cpu',
torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
if device_type == 'cuda':
self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
self.assertEqual('cuda:0',
str(torch.tensor(5, dtype=torch.int64, device=0).device))
self.assertEqual('cuda:0',
str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
self.assertEqual('cuda:0',
str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
for device in devices:
with torch.cuda.device(device):
device_string = 'cuda:' + str(torch.cuda.current_device())
self.assertEqual(device_string,
str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
with self.assertRaises(RuntimeError):
torch.tensor(5).cuda('cpu')
with self.assertRaises(RuntimeError):
torch.tensor(5).cuda('cpu:0')
if len(devices) > 1:
self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(5, dtype=torch.int64, device=1).device))
self.assertEqual('cuda:1',
str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
device='cuda:1').device))
self.assertEqual('cuda:1',
str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_as_strided_neg(self, device):
error = r'as_strided: Negative strides are not supported at the ' \
r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(3, 3, device=device), (1, 1), (2, -1))
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(14, device=device), (2,), (-11,))
# TODO: this test should be updated
def test_zeros(self, device):
res1 = torch.zeros(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.zeros(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[False, False], [False, False]],
device=device, dtype=torch.bool)
self.assertEqual(boolTensor, expected)
halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
self.assertEqual(halfTensor, expected)
bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
self.assertEqual(bfloat16Tensor, expected)
complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
self.assertEqual(complexTensor, expected)
# TODO: this test should be updated
def test_zeros_out(self, device):
shape = (3, 4)
out = torch.zeros(shape, device=device)
torch.zeros(shape, device=device, out=out)
# change the dtype, layout, device
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, dtype=torch.int64, out=out)
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
# leave them the same
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, dtype=out.dtype, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, layout=torch.strided, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, out=out))
# TODO: this test should be updated
def test_ones(self, device):
res1 = torch.ones(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.ones(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
# test boolean tensor
res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
self.assertEqual(res1, expected)
# TODO: this test should be updated
@onlyCPU
def test_constructor_dtypes(self, device):
default_type = torch.Tensor().type()
self.assertIs(torch.Tensor().dtype, torch.get_default_dtype())
self.assertIs(torch.uint8, torch.ByteTensor.dtype)
self.assertIs(torch.float32, torch.FloatTensor.dtype)
self.assertIs(torch.float64, torch.DoubleTensor.dtype)
torch.set_default_tensor_type('torch.FloatTensor')
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.FloatStorage, torch.Storage)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.get_default_dtype())
self.assertIs(torch.DoubleStorage, torch.Storage)
torch.set_default_tensor_type(torch.FloatTensor)
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.FloatStorage, torch.Storage)
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.assertIs(torch.float32, torch.get_default_dtype())
self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
self.assertIs(torch.cuda.FloatStorage, torch.Storage)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.get_default_dtype())
self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
# don't support integral or sparse default types.
self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
self.assertRaises(TypeError, lambda: torch.set_default_dtype(torch.int64))
# don't allow passing dtype to set_default_tensor_type
self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
torch.set_default_tensor_type(default_type)
# TODO: this test should be updated
@onlyCPU
def test_constructor_device_legacy(self, device):
self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
x = torch.randn((3,), device='cpu')
self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
if torch.cuda.is_available():
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
default_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
torch.set_default_tensor_type(default_type)
x = torch.randn((3,), device='cuda')
self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory(self, device):
# TODO: This test probably doesn't make too much sense now that
# torch.tensor has been established for a while; it makes more
# sense to test the legacy behavior in terms of the new behavior
expected = torch.Tensor([1, 1])
# test data
res1 = torch.tensor([1, 1])
self.assertEqual(res1, expected, exact_dtype=False)
res1 = torch.tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = torch.tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = torch.tensor(expected, dtype=torch.int)
        self.assertEqual(res2, expected, exact_dtype=False)
        self.assertIs(torch.int, res2.dtype)
# test copy with numpy
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
a = np.array([5.]).astype(dtype)
res1 = torch.tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
# test boolean tensor
a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
self.assertEqual(a, b)
c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
self.assertEqual(a, c)
d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
self.assertEqual(e, d)
f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
self.assertEqual(e, f)
int64_max = torch.iinfo(torch.int64).max
int64_min = torch.iinfo(torch.int64).min
float64_max = torch.finfo(torch.float64).max
float64_min = torch.finfo(torch.float64).min
g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
self.assertEqual(e, g_1)
g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
self.assertEqual(e, g_2)
g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
self.assertEqual(e, g_3)
h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
self.assertEqual(h, i)
j = torch.tensor((True, True, True, True), dtype=torch.bool)
k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
self.assertEqual(j, k)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory_copy_var(self, device):
def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
if data_ptr is None:
data_ptr = copy.data_ptr
self.assertEqual(copy, source, exact_dtype=False)
self.assertTrue(copy.is_leaf == is_leaf)
self.assertTrue(copy.requires_grad == requires_grad)
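            # Note: `copy.data_ptr` here is the bound method, not its return
            # value; bound methods compare equal only when bound to the very
            # same object, so this succeeds exactly when `copy` is the same
            # tensor object the `data_ptr` argument was taken from (the
            # no-copy torch.as_tensor case below).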
self.assertTrue(copy.data_ptr == data_ptr)
source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
# test torch.tensor()
check_copy(torch.tensor(source), True, False)
check_copy(torch.tensor(source, requires_grad=False), True, False)
check_copy(torch.tensor(source, requires_grad=True), True, True)
# test tensor.new_tensor()
copy = torch.randn(1)
check_copy(copy.new_tensor(source), True, False)
check_copy(copy.new_tensor(source, requires_grad=False), True, False)
check_copy(copy.new_tensor(source, requires_grad=True), True, True)
# test torch.as_tensor()
check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr) # not copy
check_copy(torch.as_tensor(source, dtype=torch.float), False, True) # copy and keep the graph
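    # A minimal sketch of the aliasing rule exercised above (illustrative only):
    #   src = torch.randn(3, requires_grad=True)
    #   torch.as_tensor(src) is src                 # same dtype/device: no copy
    #   torch.as_tensor(src, dtype=torch.float)     # dtype change: differentiable copy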
# TODO: this test should be updated
@onlyCPU
def test_tensor_factory_type_inference(self, device):
def test_inference(default_dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(default_dtype)
default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
self.assertIs(default_dtype, torch.tensor(()).dtype)
self.assertIs(default_dtype, torch.tensor(5.).dtype)
self.assertIs(torch.int64, torch.tensor(5).dtype)
self.assertIs(torch.bool, torch.tensor(True).dtype)
self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)
self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
            if np.array(5).dtype == np.int64:  # numpy's default int, which can be 4 bytes (e.g. on Windows)
self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
else:
self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
torch.set_default_dtype(saved_dtype)
test_inference(torch.float64)
test_inference(torch.float32)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_new_tensor(self, device):
expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
# test data
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1, expected)
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = expected.new_tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertEqual(res2, expected, exact_dtype=False)
self.assertIs(torch.int, res2.dtype)
# test copy with numpy
a = np.array([5.])
res1 = torch.tensor(a)
res1 = res1.new_tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
if torch.cuda.device_count() >= 2:
expected = expected.cuda(1)
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
res2 = expected.new_tensor(expected)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int)
            self.assertIs(torch.int, res2.dtype)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
            self.assertIs(torch.int, res2.dtype)
self.assertEqual(res2.get_device(), 0)
res1 = expected.new_tensor(1)
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor(1, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
# TODO: this test should be updated
@onlyCPU
def test_as_tensor(self, device):
# from python data
x = [[0, 1], [2, 3]]
self.assertEqual(torch.tensor(x), torch.as_tensor(x))
self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
# python data with heterogeneous types
z = [0, 'torch']
with self.assertRaisesRegex(TypeError, "invalid data type"):
torch.tensor(z)
torch.as_tensor(z)
# python data with self-referential lists
z = [0]
z += [z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
z = [[1, 2], z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
# from tensor (doesn't copy unless type is different)
y = torch.tensor(x)
self.assertIs(y, torch.as_tensor(y))
self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
if torch.cuda.is_available():
self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
y_cuda = y.to('cuda')
self.assertIs(y_cuda, torch.as_tensor(y_cuda))
self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
        # from numpy (doesn't copy)
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
n = np.random.rand(5, 6).astype(dtype)
n_astensor = torch.as_tensor(n)
self.assertEqual(torch.tensor(n), n_astensor)
n_astensor[0][0] = 25.7
self.assertEqual(torch.tensor(n), n_astensor)
# changing dtype causes copy
n = np.random.rand(5, 6).astype(np.float32)
n_astensor = torch.as_tensor(n, dtype=torch.float64)
self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
n_astensor[0][1] = 250.8
self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
# changing device causes copy
if torch.cuda.is_available():
n = np.random.randn(5, 6)
n_astensor = torch.as_tensor(n, device='cuda')
self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
n_astensor[0][2] = 250.9
self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
# TODO: this test should be updated
@suppress_warnings
def test_range(self, device):
res1 = torch.range(0, 1, device=device)
res2 = torch.tensor((), device=device)
torch.range(0, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Check range for non-contiguous tensors.
x = torch.zeros(2, 3, device=device)
torch.range(0, 3, device=device, out=x.narrow(1, 1, 2))
res2 = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)
self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative step
res1 = torch.tensor((1, 0), device=device, dtype=torch.float32)
res2 = torch.tensor((), device=device)
torch.range(1, 0, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Equal bounds
res1 = torch.ones(1, device=device)
res2 = torch.tensor((), device=device)
torch.range(1, 1, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
torch.range(1, 1, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
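    # Unlike torch.arange, the deprecated torch.range includes its endpoint
    # (so range(0, 1) above yields [0., 1.]) and emits a deprecation warning,
    # which test_range_warning below checks for.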
# TODO: this test should be updated
def test_range_warning(self, device):
with warnings.catch_warnings(record=True) as w:
torch.range(0, 10, device=device)
self.assertEqual(len(w), 1)
# TODO: this test should be updated
@onlyCPU
def test_arange(self, device):
res = torch.tensor(range(10000))
res1 = torch.arange(0, 10000) # Use a larger number so vectorized code can be triggered
res2 = torch.tensor([], dtype=torch.int64)
torch.arange(0, 10000, out=res2)
self.assertEqual(res, res1, atol=0, rtol=0)
self.assertEqual(res, res2, atol=0, rtol=0)
# Vectorization on non-contiguous tensors
res = torch.rand(3, 3, 300000).to(torch.int64)
res = res.permute(2, 0, 1)
torch.arange(0, 300000 * 3 * 3, out=res)
self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3))
# Check arange with only one argument
res1 = torch.arange(10)
res2 = torch.arange(0, 10)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Check arange for non-contiguous tensors.
x = torch.zeros(2, 3)
torch.arange(0, 4, out=x.narrow(1, 1, 2))
res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative step
res1 = torch.Tensor((1, 0))
res2 = torch.Tensor()
torch.arange(1, -1, -1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Equal bounds
res1 = torch.ones(1)
res2 = torch.Tensor()
torch.arange(1, 0, -1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
torch.arange(1, 2, 1, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# FloatTensor
res1 = torch.arange(0.6, 0.89, 0.1, out=torch.FloatTensor())
self.assertEqual(res1, [0.6, 0.7, 0.8])
res1 = torch.arange(1, 10, 0.3, out=torch.FloatTensor())
self.assertEqual(res1.size(0), 30)
self.assertEqual(res1[0], 1)
self.assertEqual(res1[29], 9.7)
# DoubleTensor
res1 = torch.arange(0.6, 0.89, 0.1, out=torch.DoubleTensor())
self.assertEqual(res1, [0.6, 0.7, 0.8])
res1 = torch.arange(1, 10, 0.3, out=torch.DoubleTensor())
self.assertEqual(res1.size(0), 30)
self.assertEqual(res1[0], 1)
self.assertEqual(res1[29], 9.7)
        # Bool input matching NumPy semantics
r = torch.arange(True)
self.assertEqual(r[0], 0)
r2 = torch.arange(False)
self.assertEqual(len(r2), 0)
self.assertEqual(r.dtype, torch.int64)
self.assertEqual(r2.dtype, torch.int64)
# Check that it's exclusive
r = torch.arange(0, 5)
self.assertEqual(r.min(), 0)
self.assertEqual(r.max(), 4)
self.assertEqual(r.numel(), 5)
r = torch.arange(0, 5, 2)
self.assertEqual(r.min(), 0)
self.assertEqual(r.max(), 4)
self.assertEqual(r.numel(), 3)
r1 = torch.arange(0, 5 + 1e-6)
# NB: without the dtype, we'll infer output type to be int64
r2 = torch.arange(0, 5, dtype=torch.float32)
r3 = torch.arange(0, 5 - 1e-6)
self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
self.assertEqual(r2, r3, atol=0, rtol=0)
r1 = torch.arange(10, -1 + 1e-6, -1)
# NB: without the dtype, we'll infer output type to be int64
r2 = torch.arange(10, -1, -1, dtype=torch.float32)
r3 = torch.arange(10, -1 - 1e-6, -1)
self.assertEqual(r1, r2, atol=0, rtol=0)
self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
        # Test rounding errors
line = torch.zeros(size=(1, 49))
self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
self.assertEqual(line.shape, [50])
x = torch.empty(1).expand(10)
self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
msg = "unsupported range"
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf')))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf')))
for device in torch.testing.get_all_device_types():
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
# check with step size
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
self.assertRaisesRegex(
RuntimeError, "overflow",
lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
            # check that the output shape stays consistent for step sizes prone to floating point rounding
d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
self.assertEqual(d.shape[0], 800)
# TODO: this test should be updated
@onlyCPU
def test_arange_inference(self, device):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float32)
# end only
self.assertIs(torch.float32, torch.arange(1.).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
self.assertIs(torch.int64, torch.arange(1).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
# start, end, [step]
self.assertIs(torch.float32, torch.arange(1., 3).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
self.assertIs(torch.float32,
torch.arange(torch.tensor(1),
torch.tensor(3, dtype=torch.int16),
torch.tensor(1., dtype=torch.float64)).dtype)
self.assertIs(torch.int64, torch.arange(1, 3).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
self.assertIs(torch.int64,
torch.arange(torch.tensor(1),
torch.tensor(3),
torch.tensor(1, dtype=torch.int16)).dtype)
torch.set_default_dtype(saved_dtype)
def test_empty_strided(self, device):
for shape in [(2, 3, 4), (0, 2, 0)]:
# some of these cases are pretty strange, just verifying that if as_strided
# allows them then empty_strided can as well.
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
empty_strided = torch.empty_strided(shape, strides, device=device)
# as_strided checks the storage size is big enough to support such a strided tensor;
# instead of repeating this calculation, we just use empty_strided which does the same
# calculation when setting the storage size.
as_strided = torch.empty(empty_strided.storage().size(),
device=device).as_strided(shape, strides)
self.assertEqual(empty_strided.shape, as_strided.shape)
self.assertEqual(empty_strided.stride(), as_strided.stride())
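    # A minimal sketch of the storage-size rule both ops share (illustrative
    # only): for numel > 0, a tensor with sizes s and strides st needs a
    # storage of at least 1 + sum((s[i] - 1) * st[i]) elements.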
def test_new_empty_strided(self, device):
def _test(sizes, strides, dtype):
x = torch.zeros(5, 5, dtype=dtype, device=device)
result = x.new_empty_strided(sizes, strides)
expected = torch.empty_strided(sizes, strides, dtype=x.dtype, device=x.device)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.stride(), expected.stride())
self.assertEqual(result.dtype, expected.dtype)
self.assertEqual(result.device, expected.device)
_test([2, 3], [3, 1], torch.float)
_test([5, 3], [0, 1], torch.int)
_test([], [], torch.float)
# Some really weird cases
for shape in [(2, 3, 4), (0, 2, 0)]:
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
_test(shape, strides, torch.float)
def test_strided_mismatched_stride_shape(self, device):
for shape, strides in [((1, ), ()), ((1, 2), (1, ))]:
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided(shape, strides)
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided_(shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
def test_eye(self, device):
for dtype in torch.testing.get_all_dtypes():
if dtype == torch.bfloat16:
continue
            # Test that a RuntimeError is raised when either n or m is negative
for n, m in ((-1, 1), (1, -1), (-1, -1)):
with self.assertRaisesRegex(RuntimeError, 'must be greater or equal to'):
torch.eye(n, m, device=device, dtype=dtype)
# Test when the `m` parameter is not provided
for n in (3, 5, 7):
res1 = torch.eye(n, device=device, dtype=dtype)
naive_eye = torch.zeros(n, n, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, out=res2)
self.assertEqual(res1, res2)
for n, m in product([3, 5, 7], repeat=2):
# Construct identity using diagonal and fill
res1 = torch.eye(n, m, device=device, dtype=dtype)
naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, m, out=res2)
self.assertEqual(res1, res2)
@precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
@dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False) +
torch.testing.get_all_complex_dtypes()))
def test_linspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.linspace(start, end, steps, device=device, dtype=dtype)
a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertTrue(t[0].item() == a[0])
self.assertTrue(t[steps - 1].item() == a[steps - 1])
def _test_linspace_logspace_complex_helper(self, torch_fn, np_fn, device, dtype):
start = torch.randn(1, dtype=dtype).item()
end = (start + torch.randn(1, dtype=dtype) + random.randint(5, 15)).item()
def test_fn(torch_fn, numpy_fn, steps):
t = torch_fn(start, end, steps, device=device)
a = numpy_fn(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
test_fn(torch.linspace, np.linspace, steps)
@dtypes(torch.complex64)
def test_linspace_vs_numpy_complex(self, device, dtype):
self._test_linspace_logspace_complex_helper(torch.linspace, np.linspace,
device, dtype)
@dtypes(torch.complex64)
def test_logspace_vs_numpy_complex(self, device, dtype):
self._test_linspace_logspace_complex_helper(torch.logspace, np.logspace,
device, dtype)
@precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
@dtypes(*torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False))
def test_logspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375
end = .0315315723419189453125
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.logspace(start, end, steps, device=device, dtype=dtype)
a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertEqual(t[0], a[0])
self.assertEqual(t[steps - 1], a[steps - 1])
def _linspace_logspace_warning_helper(self, op, device, dtype):
with self.maybeWarnsRegex(UserWarning, "Not providing a value for .+"):
op(0, 10, device=device, dtype=dtype)
@dtypes(torch.float)
def test_linspace_steps_warning(self, device, dtype):
self._linspace_logspace_warning_helper(torch.linspace, device, dtype)
@dtypes(torch.float)
def test_logspace_steps_warning(self, device, dtype):
self._linspace_logspace_warning_helper(torch.logspace, device, dtype)
@onlyCUDA
@largeTensorTest('16GB')
def test_range_factories_64bit_indexing(self, device):
bigint = 2 ** 31 + 1
t = torch.arange(bigint, dtype=torch.long, device=device)
self.assertEqual(t[-1].item(), bigint - 1)
del t
t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 1)
del t
t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 2)
del t
@onlyOnCPUAndCUDA
def test_tensor_ctor_device_inference(self, device):
torch_device = torch.device(device)
values = torch.tensor((1, 2, 3), device=device)
# Tests tensor and as_tensor
        # Note: construction warnings are deliberately suppressed below
for op in (torch.tensor, torch.as_tensor):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertEqual(op(values).device, torch_device)
self.assertEqual(op(values, dtype=torch.float64).device, torch_device)
if self.device_type == 'cuda':
with torch.cuda.device(device):
self.assertEqual(op(values.cpu()).device, torch.device('cpu'))
# Tests sparse ctor
indices = torch.tensor([[0, 1, 1],
[2, 0, 1],
[2, 1, 0]], device=device)
sparse_size = (3, 3, 3)
sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
self.assertEqual(sparse_default.device, torch_device)
sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
self.assertEqual(sparse_with_dtype.device, torch_device)
if self.device_type == 'cuda':
with torch.cuda.device(device):
sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
sparse_size, dtype=torch.float64)
self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
@onlyOnCPUAndCUDA
@precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
@dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
@dtypesIfCPU(torch.float, torch.double, torch.long)
def test_signal_window_functions(self, device, dtype):
import scipy.signal as signal
def test(name, kwargs):
torch_method = getattr(torch, name + '_window')
if not dtype.is_floating_point:
with self.assertRaisesRegex(RuntimeError, r'floating point'):
torch_method(3, dtype=dtype)
return
for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
for periodic in [True, False]:
res = torch_method(size, periodic=periodic, **kwargs, device=device, dtype=dtype)
# NB: scipy always returns a float64 result
ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
self.assertEqual(res, ref, exact_dtype=False)
with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
torch_method(3, layout=torch.sparse_coo)
self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
self.assertFalse(torch_method(3).requires_grad)
for window in ['hann', 'hamming', 'bartlett', 'blackman']:
test(window, kwargs={})
        for _ in range(50):
test('kaiser', kwargs={'beta': random.random() * 30})
def test_tensor_factories_empty(self, device):
# ensure we can create empty tensors from each factory function
shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
for shape in shapes:
for dt in torch.testing.get_all_dtypes():
self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
if dt == torch.bool:
self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
elif dt.is_complex:
self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
else:
self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
if dt not in {torch.double, torch.float, torch.half, torch.bfloat16, torch.complex64, torch.complex128}:
self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
if dt == torch.double or dt == torch.float or dt.is_complex:
self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual((0,), torch.arange(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.randperm(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
self.assertEqual((0,), torch.hann_window(0, device=device).shape)
self.assertEqual((0,), torch.kaiser_window(0, device=device).shape)
self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
def test_tensor_factory_gpu_type_inference(self, device):
saved_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
torch.set_default_dtype(torch.float32)
self.assertIs(torch.float32, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_dtype(torch.float64)
self.assertIs(torch.float64, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
torch.set_default_tensor_type(saved_type)
@onlyCUDA
def test_tensor_factory_gpu_type(self, device):
saved_type = torch.Tensor().type()
torch.set_default_tensor_type(torch.cuda.FloatTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float32, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
x = torch.zeros((5, 5))
self.assertIs(torch.float64, x.dtype)
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(saved_type)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
self.assertEqual(cpu_tensor, device_tensor)
@onlyCUDA
def test_arange_bfloat16(self, device):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 4, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_half=False))
@dtypesIfCUDA(*torch.testing.get_all_dtypes(include_bool=False, include_half=True))
def test_linspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.linspace(_from, to, 137, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# small tensor
self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
# large tensor
if dtype not in (torch.int8, torch.uint8):
self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))
# Vectorization on non-contiguous tensors
if dtype not in (torch.int8, torch.uint8): # int8 and uint8 are too small for this test
res = torch.rand(3, 3, 1000, device=device).to(dtype)
res = res.permute(2, 0, 1)
torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))
self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
# steps = 1
self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
# steps = 0
self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)
# Check linspace for generating the correct output for each dtype.
start = 0 if dtype == torch.uint8 else -100
expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
# If on GPU, allow for minor error depending on dtype.
tol = 0.
if device != 'cpu':
if dtype == torch.half:
tol = 1e-1
elif dtype == torch.float:
tol = 1e-5
elif dtype == torch.double:
tol = 1e-10
self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)
        # Check linspace when start > end.
self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
torch.tensor((2, 1, 0), device=device, dtype=dtype),
atol=0, rtol=0)
# Check for race condition (correctness when applied on a large tensor).
if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
1000000, device=device, dtype=dtype)
if dtype.is_complex:
cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
else:
cond = y[:-1] < y[1:]
correct = all(cond)
self.assertTrue(correct)
# Check linspace for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def _test_linspace_logspace_deduction_helper(self, fn, device):
for start, end in [(1, 2), (1., 2), (1., -2.), (1j, 2j), (0., 2j), (1j, 2)]:
dtype = torch.float32
if isinstance(start, complex) or isinstance(end, complex):
dtype = torch.cfloat
if dtype == torch.cfloat:
# TODO(kshitij12345): Fix unnecessary warning
# Reference: https://github.com/pytorch/pytorch/issues/53171
with self.assertWarnsRegex(UserWarning,
"As either `start` or `stop` is complex"):
self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
else:
self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
def test_linspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.linspace, device)
def test_logspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.logspace, device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
    # Our linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
def test_linspace_device_vs_cpu(self, device, dtype):
self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
def test_linspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
    # Compares logspace with base=2, device vs. cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_device_vs_cpu(self, device, dtype):
self._test_logspace(device, dtype, steps=10)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_base2(self, device, dtype):
self._test_logspace_base2(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_logspace(device, dtype, steps=steps)
self._test_logspace_base2(device, dtype, steps=steps)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_half=False, include_complex=False))
@dtypesIfCUDA(*((torch.testing.get_all_int_dtypes() + [torch.float32, torch.float16, torch.bfloat16])
if TEST_WITH_ROCM
else torch.testing.get_all_dtypes(include_bool=False, include_half=True, include_complex=False)))
def test_logspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)
# Check precision - start, stop and base are chosen to avoid overflow
# steps is chosen so that step size is not subject to rounding error
# a tolerance is needed for gpu tests due to differences in computation
atol = None
rtol = None
if self.device_type == 'cpu':
atol = 0
rtol = 0
self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
atol=atol, rtol=rtol)
# Check non-default base=2
self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype) * 2)
self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
torch.tensor((1, 2, 4), device=device, dtype=dtype))
        # Check logspace when start > end.
self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)
        # Check logspace for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyOnCPUAndCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(dtype)
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
torch.set_default_dtype(prev_default)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
    # Check that the warning about a non-writable numpy array is suppressed
    # when a copy of it is being created.
    # See issue #47160
def test_tensor_from_non_writable_numpy(self, device):
with warnings.catch_warnings(record=True) as w:
a = np.arange(5.)
a.flags.writeable = False
t = torch.tensor(a)
self.assertEqual(len(w), 0)
# Class for testing random tensor creation ops, like torch.randint
class TestRandomTensorCreation(TestCase):
exact_dtype = True
# TODO: add torch.complex64, torch.complex128
@dtypes(torch.float, torch.double)
def test_normal(self, device, dtype):
def helper(self, device, dtype, ptype, t_transform, std_transform):
q = torch.empty(100, 100, dtype=dtype, device=device)
q.normal_()
self.assertEqual(t_transform(q).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(q).std(), std_transform(1), atol=0.2, rtol=0)
q.normal_(2, 3)
self.assertEqual(t_transform(q).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(q).std(), std_transform(3), atol=0.3, rtol=0)
q = torch.empty(100, 100, dtype=dtype, device=device)
q_row1 = q[0:1].clone()
q[99:100].normal_()
self.assertEqual(t_transform(q[99:100]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(q[99:100]).std(), std_transform(1), atol=0.2, rtol=0)
self.assertEqual(t_transform(q[0:1]).clone(), t_transform(q_row1))
mean = torch.empty(100, 100, dtype=dtype, device=device)
mean[:50].fill_(ptype(0))
mean[50:].fill_(ptype(1))
std = torch.empty(100, 100, dtype=torch.float, device=device)
std[:, :50] = 4
std[:, 50:] = 1
r = torch.normal(mean)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(mean, 3)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
r.fill_(42)
torch.normal(mean, 3, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(2, std)
self.assertFalse(r.dtype.is_complex)
self.assertEqual(str(r.device), device)
self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
r.fill_(42)
torch.normal(2, std, out=r)
self.assertFalse(r.dtype.is_complex)
self.assertEqual(str(r.device), device)
self.assertEqual(r.mean(), 2, atol=0.2, rtol=0)
self.assertEqual(r[:, :50].std(), 4, atol=0.3, rtol=0)
self.assertEqual(r[:, 50:].std(), 1, atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(mean, std)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
torch.normal(mean, std, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r[:50]).mean(), 0, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[50:]).mean(), 1, atol=0.2, rtol=0)
self.assertEqual(t_transform(r[:, :50]).std(), std_transform(4), atol=0.3, rtol=0)
self.assertEqual(t_transform(r[:, 50:]).std(), std_transform(1), atol=0.2, rtol=0)
r.fill_(42)
r = torch.normal(2, 3, (100, 100), dtype=dtype, device=device)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
r.fill_(42)
torch.normal(2, 3, (100, 100), dtype=dtype, device=device, out=r)
self.assertEqual(r.dtype, dtype)
self.assertEqual(str(r.device), device)
self.assertEqual(t_transform(r).mean(), 2, atol=0.3, rtol=0)
self.assertEqual(t_transform(r).std(), std_transform(3), atol=0.3, rtol=0)
if dtype.is_complex:
helper(self, device, dtype, lambda x: complex(x, x),
lambda t: torch.real(t).to(torch.float), lambda mean: mean / math.sqrt(2))
helper(self, device, dtype, lambda x: complex(x, x),
lambda t: torch.imag(t).to(torch.float), lambda mean: mean / math.sqrt(2))
self.assertRaisesRegex(
RuntimeError, "normal expects standard deviation to be non-complex",
lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device)))
out = torch.empty(100, 100, dtype=dtype, device=device)
self.assertRaisesRegex(
RuntimeError, "normal expects standard deviation to be non-complex",
lambda: torch.normal(0, torch.empty(100, 100, dtype=dtype, device=device), out=out))
else:
helper(self, device, dtype, lambda x: x, lambda t: t, lambda mean: mean)
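# A minimal standalone sketch (not part of the test suite; assumes only that
# torch is importable, shapes are illustrative) of the torch.normal overloads
# exercised above: a tensor mean with a scalar std, a scalar mean with a
# tensor std, elementwise mean/std tensors, and scalars with an explicit size.
import torch
mean = torch.zeros(4)
std = torch.ones(4) * 2
print(torch.normal(mean, 3).shape)       # tensor mean, scalar std -> shape (4,)
print(torch.normal(1.0, std).shape)      # scalar mean, tensor std -> shape (4,)
print(torch.normal(mean, std).shape)     # elementwise mean/std    -> shape (4,)
print(torch.normal(2, 3, (2, 2)).shape)  # scalar mean/std + explicit size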
@dtypes(torch.float, torch.double, torch.half)
@dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
def test_uniform_from_to(self, device, dtype):
size = 2000
alpha = 0.1
float_min = torch.finfo(torch.float).min
float_max = torch.finfo(torch.float).max
double_min = torch.finfo(torch.double).min
double_max = torch.finfo(torch.double).max
if dtype == torch.bfloat16:
min_val = -3.389531389251535e+38
max_val = 3.389531389251535e+38
else:
min_val = torch.finfo(dtype).min
max_val = torch.finfo(dtype).max
values = [double_min, float_min, -42, 0, 42, float_max, double_max]
for from_ in values:
for to_ in values:
t = torch.empty(size, dtype=dtype, device=device)
if not (min_val <= from_ <= max_val) or not (min_val <= to_ <= max_val):
pass
elif to_ < from_:
self.assertRaisesRegex(
RuntimeError,
"uniform_ expects to return",
lambda: t.uniform_(from_, to_)
)
elif to_ - from_ > max_val:
self.assertRaisesRegex(
RuntimeError,
"uniform_ expects to-from",
lambda: t.uniform_(from_, to_)
)
else:
t.uniform_(from_, to_)
range_ = to_ - from_
if not (dtype == torch.bfloat16) and not (
dtype == torch.half and device == 'cpu') and not torch.isnan(t).all():
delta = alpha * range_
double_t = t.to(torch.double)
if range_ == 0:
self.assertTrue(double_t.min() == from_)
self.assertTrue(double_t.max() == to_)
elif dtype == torch.half:
self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
self.assertTrue((to_ - delta) <= double_t.max() <= to_)
else:
self.assertTrue(from_ <= double_t.min() <= (from_ + delta))
self.assertTrue((to_ - delta) <= double_t.max() < to_)
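# A small illustrative sketch (assumes plain torch on CPU with float32) of the
# range arithmetic the test above guards: uniform_ accepts a valid range but
# rejects a span wider than the dtype's representable maximum.
import torch
fmax = torch.finfo(torch.float).max
t = torch.empty(8)
t.uniform_(-1.0, 1.0)          # ok: valid range
try:
    t.uniform_(-fmax, fmax)    # to - from = 2 * fmax overflows float32
except RuntimeError as e:
    print('rejected as expected:', e)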
def test_random_neg_values(self, device):
SIZE = 10
signed_dtypes = [torch.double, torch.float, torch.long, torch.int, torch.short]
for dtype in signed_dtypes:
res = torch.rand(SIZE, SIZE).to(device=device, dtype=dtype)
res.random_(-10, -1)
self.assertLessEqual(res.max().item(), 9)
self.assertGreaterEqual(res.min().item(), -10)
# TODO: this test should be updated
@onlyCPU
def test_randint_inference(self, device):
size = (2, 1)
for args in [(3,), (1, 3)]: # (low,) and (low, high)
self.assertIs(torch.int64, torch.randint(*args, size=size).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype)
self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype)
out = torch.empty(size, dtype=torch.float32)
self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype)
self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype)
out = torch.empty(size, dtype=torch.int64)
self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype)
self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype)
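# A minimal sketch (CPU assumption, illustrative sizes) of the dtype-inference
# precedence the test above checks: an explicit dtype wins, then the out
# tensor's dtype, and the default is torch.int64.
import torch
print(torch.randint(3, (2,)).dtype)                       # torch.int64 (default)
print(torch.randint(3, (2,), dtype=torch.float32).dtype)  # torch.float32 (explicit)
out = torch.empty((2,), dtype=torch.float32)
print(torch.randint(3, (2,), out=out).dtype)              # torch.float32 (from out)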
# TODO: this test should be updated
@onlyCPU
def test_randint(self, device):
SIZE = 100
def seed(generator):
if generator is None:
torch.manual_seed(123456)
else:
generator.manual_seed(123456)
return generator
for generator in (None, torch.Generator()):
generator = seed(generator)
res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator)
res2 = torch.empty((), dtype=torch.int64)
generator = seed(generator)
torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2)
generator = seed(generator)
res3 = torch.randint(6, (SIZE, SIZE), generator=generator)
res4 = torch.empty((), dtype=torch.int64)
generator = seed(generator)
torch.randint(6, (SIZE, SIZE), out=res4, generator=generator)
self.assertEqual(res1, res2)
self.assertEqual(res1, res3)
self.assertEqual(res1, res4)
self.assertEqual(res2, res3)
self.assertEqual(res2, res4)
self.assertEqual(res3, res4)
self.assertTrue((res1 < 6).all().item())
self.assertTrue((res1 >= 0).all().item())
@dtypes(torch.half, torch.float, torch.bfloat16, torch.double,
torch.complex32, torch.complex64, torch.complex128)
def test_randn(self, device, dtype):
SIZE = 100
for size in [0, SIZE]:
torch.manual_seed(123456)
res1 = torch.randn(size, size, dtype=dtype, device=device)
res2 = torch.tensor([], dtype=dtype, device=device)
torch.manual_seed(123456)
torch.randn(size, size, out=res2)
self.assertEqual(res1, res2)
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_rand(self, device, dtype):
SIZE = 100
for size in [0, SIZE]:
torch.manual_seed(123456)
res1 = torch.rand(size, size, dtype=dtype, device=device)
res2 = torch.tensor([], dtype=dtype, device=device)
torch.manual_seed(123456)
torch.rand(size, size, out=res2)
self.assertEqual(res1, res2)
@slowTest
def test_randperm(self, device):
if device == 'cpu':
rng_device = None
else:
rng_device = [device]
# Test core functionality. On CUDA, for small n, randperm is offloaded to CPU instead. For large n, randperm is
# executed on GPU.
for n in (100, 50000, 100000):
# Ensure both integer and floating-point numbers are tested. Half follows an execution path that is
# different from others on CUDA.
for dtype in (torch.long, torch.half, torch.float):
if n > 2049 and dtype == torch.half: # Large n for torch.half will raise an exception, do not test here.
continue
with torch.random.fork_rng(devices=rng_device):
res1 = torch.randperm(n, dtype=dtype, device=device)
res2 = torch.empty(0, dtype=dtype, device=device)
torch.randperm(n, out=res2, dtype=dtype, device=device)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Default type is long
for n in (100, 10000):
self.assertEqual(torch.randperm(n, device=device).dtype, torch.long)
# randperm of 0 elements is an empty tensor
res1 = torch.randperm(0)
res2 = torch.tensor(5, dtype=dtype, device=device)
torch.randperm(0, out=res2)
self.assertEqual(res1.numel(), 0)
self.assertEqual(res2.numel(), 0)
# Test exceptions when n is too large for a floating point type
for dtype, small_n, large_n in ((torch.half, 2**11 + 1, 2**11 + 2),
(torch.float, 2**24 + 1, 2**24 + 2),
(torch.double, 2**25, # 2**53 + 1 is too large to run
2**53 + 2)):
res = torch.empty(0, dtype=dtype, device=device)
torch.randperm(small_n, out=res) # No exception expected
self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device))
# Test non-contiguous tensors
for n in (4, 5, 6, 10, 20):
non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t()
self.assertFalse(non_contiguous_tensor.is_contiguous())
with torch.random.fork_rng(devices=rng_device):
res = torch.randperm(n, dtype=torch.long, device=device)
torch.randperm(n, out=non_contiguous_tensor)
self.assertEqual(non_contiguous_tensor, res)
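# A minimal sketch (CPU-only assumption) of the fork_rng pattern used above:
# forking the RNG state lets two randperm calls see identical seeds without
# disturbing the global generator afterwards.
import torch
with torch.random.fork_rng():
    torch.manual_seed(0)
    a = torch.randperm(5)
with torch.random.fork_rng():
    torch.manual_seed(0)
    b = torch.randperm(5)
print(torch.equal(a, b))  # True: same seed inside each fork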
# Test exceptions when device and generator types are incompatible
@onlyCUDA
def test_randperm_device_compatibility(self, device):
cuda_gen = torch.Generator(device='cuda')
cpu_gen = torch.Generator(device='cpu')
for n in (0, 3, 100, 30000):
regex = 'Expected a .* generator device but found .*'
cuda_t = torch.tensor(n, device='cuda')
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cuda', generator=cpu_gen, out=cuda_t))
cpu_t = torch.tensor(n, device='cpu')
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, device='cpu', generator=cuda_gen, out=cpu_t))
self.assertRaisesRegex(RuntimeError, regex, lambda: torch.randperm(n, generator=cuda_gen)) # implicitly on CPU
# Class for testing *like ops, like torch.ones_like
class TestLikeTensorCreation(TestCase):
exact_dtype = True
# TODO: this test should be updated
def test_ones_like(self, device):
expected = torch.ones(100, 100, device=device)
res1 = torch.ones_like(expected)
self.assertEqual(res1, expected)
# test boolean tensor
expected = torch.tensor([True, True], device=device, dtype=torch.bool)
res1 = torch.ones_like(expected)
self.assertEqual(res1, expected)
# TODO: this test should be updated
@onlyCPU
def test_empty_like(self, device):
x = torch.autograd.Variable(torch.Tensor())
y = torch.autograd.Variable(torch.randn(4, 4))
z = torch.autograd.Variable(torch.IntTensor([1, 2, 3]))
for a in (x, y, z):
self.assertEqual(torch.empty_like(a).shape, a.shape)
self.assertEqualTypeString(torch.empty_like(a), a)
def test_zeros_like(self, device):
expected = torch.zeros((100, 100,), device=device)
res1 = torch.zeros_like(expected)
self.assertEqual(res1, expected)
@deviceCountAtLeast(2)
def test_zeros_like_multiple_device(self, devices):
expected = torch.zeros(100, 100, device=devices[0])
x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
output = torch.zeros_like(x)
self.assertEqual(output, expected)
@deviceCountAtLeast(2)
def test_ones_like_multiple_device(self, devices):
expected = torch.ones(100, 100, device=devices[0])
x = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
output = torch.ones_like(x)
self.assertEqual(output, expected)
# Full-like precedence is the explicit dtype then the dtype of the "like"
# tensor.
@onlyOnCPUAndCUDA
def test_full_like_inference(self, device):
size = (2, 2)
like = torch.empty((5,), device=device, dtype=torch.long)
self.assertEqual(torch.full_like(like, 1.).dtype, torch.long)
self.assertEqual(torch.full_like(like, 1., dtype=torch.complex64).dtype,
torch.complex64)
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())
if __name__ == '__main__':
run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4af06ebfc8fef42f9edfc1ec9452157f468d98fa
|
7a86aabeae1071c09573dde886d7d31b472cbd35
|
/intro_to_cs/pycharm/exercise1_sct.py
|
816ee3843065c66f4ffbb397182600d983e170f2
|
[] |
no_license
|
kkrugler/codecademy-validator
|
7453f1f82e6488aecb959af0f9d3a8b05eca2ee2
|
fe5749cfb12705e0c16f7060111dd4e9b4ffc9ba
|
refs/heads/master
| 2021-01-21T21:40:01.819302
| 2016-05-17T15:43:43
| 2016-05-17T15:43:43
| 29,620,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,195
|
py
|
import re
printed_lines = CC.prints()
def check_text(expected, actual, desc, is_describe_expected=True):
last_char_desc = None
if (type(actual) != str):
return '''Your %s is not even a String.''' % desc
if (expected[-1] == '.'):
last_char_desc = 'period'
elif (expected[-1] == '!'):
last_char_desc = 'exclamation point'
if ( (last_char_desc)
and (actual == expected[:-1])):
return '''It looks like you forgot the %s at the end of your %s.''' % (last_char_desc, desc)
if (actual.find('  ') > 0):
return '''Your %s contains two spaces in a row.
Check its construction over carefully to avoid this problem.''' % desc
if (actual.startswith(' ')):
return '''Your %s starts with a space.
Check its construction over carefully to avoid this problem.''' % desc
if (actual.endswith(' ')):
return '''Your %s ends with a space.
Check its construction over carefully to avoid this problem.''' % desc
case_warning = ''
if (actual.lower() == expected.lower()):
case_warning = ''' The difference is only a question of uppercase vs. lowercase,
so check your text over carefully.'''
if (actual == expected):
return True
# Although the following error message is not always grammatically
# correct (since the first sentence doesn't end in a period),
# that period was confusing students, who assumed it was part
# of the expected string.
if (is_describe_expected):
return '''Your %s was "%s" instead of "%s"%s''' % (desc, actual, expected, case_warning)
return '''Your %s was incorrect.%s''' % (desc, case_warning)
def check_prediction(expected, name, line, prediction_pattern, no_match_msg, section=None):
actual = globals().get(name)
if (not (name in globals())):
return '''You seem to have modified the program somehow so that
it no longer assigns %s to anything.
Click the Reset Code button and start over.''' % name
assignment_re = re.compile(r'^([^ \t=]+)[ \t]*=([^=].*)?$')
assignment_match = assignment_re.match(line)
if ( (assignment_match)
and (assignment_match.groups()[0] == name)):
prediction_re = re.compile(prediction_pattern)
if (not prediction_re.match(line)):
return no_match_msg
if (type(expected) == str):
return check_text(expected, actual, name, False)
if (expected != actual):
if (section):
return '''One of the predictions in your %s set
was incorrect.''' % section
else:
return '''Your %s was not correct.''' % name
return True
def check_int_prediction(expected, name, line, section=None):
no_match_msg = '''You must assign %s to a single Integer literal value.
No "re-computing" your prediction!''' % name
return check_prediction(expected,
name,
line,
r'^[^ \t=]+[ \t]*=[ \t]*(\+|-)?\d+[ \t]*(#.*)?$',
no_match_msg,
section)
def check_str_prediction(expected, name, line, section=None):
no_match_msg = '''You must assign %s to a single String literal value.
No "re-computing" your prediction!''' % name
return check_prediction(expected,
name,
line,
r'^[^ \t=]+[ \t]*=[ \t]*(\'|")[^+%]+[ \t]*(#.*)?$',
no_match_msg,
section)
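# A small self-contained illustration (not part of the grader) of the
# assignment patterns the helpers above enforce: a bare integer literal vs.
# a bare string literal on the right-hand side of "name = ...".
import re
int_re = re.compile(r'^[^ \t=]+[ \t]*=[ \t]*(\+|-)?\d+[ \t]*(#.*)?$')
str_re = re.compile(r'^[^ \t=]+[ \t]*=[ \t]*(\'|")[^+%]+[ \t]*(#.*)?$')
print(bool(int_re.match('answer_1 = 15')))         # True: literal integer
print(bool(int_re.match('answer_1 = 10 + 5')))     # False: computed value
print(bool(str_re.match("answer_3 = 'red'")))      # True: literal string
print(bool(str_re.match("answer_3 = 'a' + 'b'")))  # False: '+' is rejected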
if (error):
return """You broke the code with your changes so that it is
no longer valid Python syntax. The cryptic error message to the
right will identify the first line that Python didn't like.
You can try to fix the error you introduced, or just click the
Reset Code button and start over."""
code_lines = code.splitlines()
line_number = 0
for line in code_lines:
line_number += 1
result = check_int_prediction(15,
'answer_1',
line)
if (result != True):
return result
result = check_int_prediction(0,
'answer_2',
line)
if (result != True):
return result
result = check_str_prediction('red',
'answer_3',
line)
if (result != True):
return result
result = check_str_prediction('blue',
'answer_4',
line)
if (result != True):
result = check_str_prediction('red',
'answer_4',
line)
if (result != True):
return result
result = check_str_prediction('increase',
'answer_5',
line)
if (result != True):
return result
result = check_str_prediction('yes',
'answer_6',
line)
if (result != True):
return result
result = check_int_prediction(10,
'answer_7',
line)
if (result != True):
return result
result = check_str_prediction('num_cats',
'answer_8_name',
line)
if (result != True):
return result
result = check_int_prediction(17,
'answer_8_value',
line)
if (result != True):
return result
result = check_str_prediction('num_cats',
'answer_9_name',
line)
if (result != True):
return result
result = check_int_prediction(18,
'answer_9_value',
line)
if (result != True):
return result
result = check_int_prediction(8,
'answer_10',
line)
if (result != True):
return result
result = check_str_prediction('yes',
'answer_11',
line)
if (result != True):
return result
result = check_str_prediction('increase, game.py:5',
'answer_12',
line)
if (result != True):
return result
result = check_int_prediction(8,
'answer_13',
line)
if (result != True):
return result
result = check_str_prediction('num_cats',
'answer_14_name',
line)
if (result != True):
return result
result = check_int_prediction(20,
'answer_14_value',
line)
if (result != True):
return result
result = check_int_prediction(8,
'answer_15',
line)
if (result != True):
return result
result = check_int_prediction(9,
'answer_16',
line)
if (result != True):
return result
result = check_str_prediction('no',
'answer_17',
line)
if (result != True):
return result
result = check_int_prediction(11,
'answer_18',
line)
if (result != True):
return result
result = check_str_prediction('4. 1',
'answer_19',
line)
if (result != True):
return result
result = check_int_prediction(25,
'answer_20',
line)
if (result != True):
return result
return True
|
[
"Schmed@TransPac.com"
] |
Schmed@TransPac.com
|
b1d924e93f89ee833b35e7cf845dd9470032aca3
|
4749276f3075c477598eba4be32db32e23226f86
|
/numpy_and_plt/plt.py
|
09aa76bb99b53358aec3532b4d8b5083121abbd5
|
[] |
no_license
|
hangdragon/DNN
|
1eca7268f72e412cca4624e3236736e3f22d7921
|
1bc80d067a74168d8eaf452e54eb2f1e97a62036
|
refs/heads/master
| 2022-07-31T18:04:47.551383
| 2020-05-25T13:34:45
| 2020-05-25T13:34:45
| 266,782,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,173
|
py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
class Perceptron_tf :
def architecture(self):
########## Enter the number of layers (layer_number) ###########
self.number_of_layers = int(input('Enter the desired number of layers (an integer of 1 or more!) : '))
self.number_of_nodes = [[] for i in range(self.number_of_layers + 1)] # list made to hold the node counts per stage: x,y for an SLP, x,h1,y,... for a DLP.
# In x,h1,y: x is the number of training-data nodes, h1 the number of nodes in hidden layer 1, y the number of output-data nodes.
# The number_of_nodes list holds one 'scalar' (the node count) per stage. Its length is number_of_layers + 1!
########## Initialize the number of hidden layers and the node counts of the input, output and hidden layers ############
if self.number_of_layers == 1:
print('You selected {}.\nEnter the node counts of the training data and the output data.'.format('SLP'))
for i in range(len(self.number_of_nodes)):
if i == 0:
self.number_of_nodes[i] = int(input('$Number of training-data nodes >>>'))-1
elif i == self.number_of_layers:
self.number_of_nodes[i] = int(input('$Number of output-data nodes >>>'))
elif self.number_of_layers == 2:
print('You selected {}.\nEnter the node counts of the training data, the hidden-layer nodes and the output data.'.format('DLP'))
for i in range(len(self.number_of_nodes)):
if i == 0:
self.number_of_nodes[i] = int(input('$Number of training-data nodes >>>'))-1
elif i == self.number_of_layers:
self.number_of_nodes[i] = int(input('$Number of output-data nodes >>>'))
else:
self.number_of_nodes[i] = int(input('$Number of hidden-layer nodes >>>'))-1
else:
print('You selected a network with {} hidden layers: an {}.'.format(self.number_of_layers - 1, 'MLP'))
print('\nEnter the node counts of the training data, each hidden layer and the output data.')
for i in range(len(self.number_of_nodes)):
if i == 0:
self.number_of_nodes[i] = int(input('$Number of training-data nodes >>>'))-1
elif i == self.number_of_layers:
self.number_of_nodes[i] = int(input('$Number of output-data nodes >>>'))
else:
self.number_of_nodes[i] = int(input(f'$Number of nodes in hidden layer {i} >>>'))-1
print(f'\nThe node counts are as follows: {self.number_of_nodes}')
############ Enter the number of training data ############
self.number_of_training_data = int(input('\nEnter the desired number of training data : ')) # ex) n = 4 for (0,0),(0,1),(1,0),(1,1)
def __init__(self): # The constructor declares the member variables and performs the architecture step.
self.number_of_layers = 0 # number of layers
self.number_of_nodes = [] # node counts per layer for the chosen architecture
self.number_of_training_data = 0 # number of training data points, which is also the number of labels
self.architecture() # run the architecture member function to initialize the three variables above!
self.number_of_input_node = self.number_of_nodes[0]
self.number_of_output_node = self.number_of_nodes[-1]
self.t_data = np.zeros((self.number_of_training_data, self.number_of_input_node)) # input training data
self.x_ = tf.placeholder(tf.float32,[None,self.number_of_input_node],name = 't_data')
self.label = np.zeros((self.number_of_training_data, self.number_of_output_node)) # output training data (the labels)
self.y_ = tf.placeholder(tf.float32, [None, self.number_of_output_node], name='label')
self.weight_for_layers = [[] for i in range(self.number_of_layers)] # list to hold one weight matrix per layer
self.bias_for_layers = [[] for i in range(self.number_of_layers)] # list to hold one bias vector per layer
self.y_est_for_layers = [[] for i in range(self.number_of_layers)] # list to hold one output (y) vector per layer
self.y_est_final = []
self.cost = 0 # final error (cost) function
########### Enter the learning rate ############
self.lr = float(input('\nEnter the desired learning rate : '))
self.select_ftn = None
def initialize(self):
########## Enter the training data (the x vectors) ############
print('\nEnter the training data one by one. (input example : for (0,0) type >>>0 0)\n')
for i in range(self.number_of_training_data):
self.t_data[i] = list(map(float, input(f'Enter training datum {(i + 1)} with {self.number_of_input_node} elements >>').strip().split()))
self.t_data = np.array(self.t_data,dtype=np.float32)
#self.x_ = tf.placeholder(tf.float32,[None,self.number_of_input_node],name = 't_data')
########## Enter the labels (the y vectors) ############
print('\nEnter the labels one by one. (input example : for (1,2) type >>>1 2)\n')
for j in range(self.number_of_training_data):
self.label[j] = list(map(float, input(f'Enter label {(j + 1)} with {self.number_of_output_node} elements >>').strip().split()))
self.label = np.array(self.label, dtype=np.float32)
#self.y_ = tf.placeholder(tf.float32, [None, self.number_of_output_node], name='label')
########### Decide here whether the activation ftn is sigmoid or relu ###########
print('\nBefore initializing the weight vectors you first have to choose the activation ftn.')
self.select_ftn = input('Choose the activation ftn.\nex) type sigmoid for sigmoid, relu for relu, leaky_relu for leaky_relu.\n\n>>>')
if self.select_ftn == 'sigmoid':
########## sigmoid -> initialize the weight vectors (w vectors) with a Gaussian ############
for iter in range(len(self.weight_for_layers)):
print('#######Weight initialization of layer {}#######'.format(iter + 1)) # to initialize the weight vector of each neuron
print(f'\nThe weight vector of neuron {iter + 1} has {self.number_of_nodes[iter] * self.number_of_nodes[iter + 1]} elements.')
print(f'Initializing the weight vectors of neuron {iter + 1} with the Xavier method using a Gaussian distribution.')
self.weight_for_layers[iter] = tf.Variable(tf.random_normal((self.number_of_nodes[iter], self.number_of_nodes[iter + 1]),0,np.sqrt(2/((self.number_of_nodes[iter]+self.number_of_nodes[iter + 1])))))
self.bias_for_layers[iter] = tf.Variable(tf.random_normal([self.number_of_nodes[iter + 1]], 0, np.sqrt(2 / ((self.number_of_nodes[iter] + self.number_of_nodes[iter + 1])))))
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.sigmoid(tf.matmul(self.x_, self.weight_for_layers[0]) + self.bias_for_layers[0])
else:
self.y_est_for_layers[iter] = tf.nn.sigmoid(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
elif self.select_ftn == 'relu':
########## relu -> initialize the weight vectors (w vectors) with He Initialization ############
for iter in range(len(self.weight_for_layers)):
print('#######Weight initialization of layer {}#######'.format(iter + 1)) # to initialize the weight vector of each neuron
print(f'\nThe weight vector of neuron {iter + 1} has {self.number_of_nodes[iter] * self.number_of_nodes[iter + 1]} elements.')
print(f'Initializing the weight vectors of neuron {iter + 1} with He Initialization.')
self.weight_for_layers[iter] = tf.Variable(tf.random_normal((self.number_of_nodes[iter], self.number_of_nodes[iter + 1]),0,np.sqrt(2/self.number_of_nodes[iter])))
self.bias_for_layers[iter] = tf.Variable(tf.random_normal([self.number_of_nodes[iter + 1]], 0, np.sqrt(2 / self.number_of_nodes[iter])))
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.relu(tf.matmul(self.x_, self.weight_for_layers[0]) + self.bias_for_layers[0])
elif iter == self.number_of_layers-1 : # the last neuron always uses sigmoid
self.y_est_for_layers[iter] = tf.sigmoid(tf.matmul(self.y_est_for_layers[iter - 1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
else:
self.y_est_for_layers[iter] = tf.nn.relu(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
elif self.select_ftn == 'leaky_relu':
########## leaky_relu -> initialize the weight vectors (w vectors) with He Initialization ############
for iter in range(len(self.weight_for_layers)):
print('#######Weight initialization of layer {}#######'.format(iter + 1)) # to initialize the weight vector of each neuron
print(f'\nThe weight vector of neuron {iter + 1} has {self.number_of_nodes[iter] * self.number_of_nodes[iter + 1]} elements.')
print(f'Initializing the weight vectors of neuron {iter + 1} with He Initialization.')
self.weight_for_layers[iter] = tf.Variable(tf.random_normal((self.number_of_nodes[iter], self.number_of_nodes[iter + 1]),0, np.sqrt(2 / self.number_of_nodes[iter])))
self.bias_for_layers[iter] = tf.Variable(tf.random_normal([self.number_of_nodes[iter + 1]], 0, np.sqrt(2 / self.number_of_nodes[iter])))
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.leaky_relu(tf.matmul(self.x_, self.weight_for_layers[0]) + self.bias_for_layers[0],0.0001)
elif iter == self.number_of_layers-1 : # the last neuron always uses sigmoid
self.y_est_for_layers[iter] = tf.sigmoid(tf.matmul(self.y_est_for_layers[iter - 1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
else:
self.y_est_for_layers[iter] = tf.nn.leaky_relu(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter],0.0001)
self.y_est_final = self.y_est_for_layers[-1]
self.cost = tf.reduce_sum(((self.y_ - self.y_est_final) ** 2))
def feed_forward_and_gradient_back_propagation(self,loop_number=20000) :
train = tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(self.cost)
predicted = tf.cast(self.y_est_final > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, self.y_), dtype=tf.float32))
# =================================================
with tf.Session() as sess: # The session is only valid inside this block; once the block is left, the session's data can no longer be accessed.
sess.run(tf.global_variables_initializer())
for step in range(loop_number):
sess.run(train, feed_dict={self.x_: self.t_data, self.y_: self.label})
if step % 100 == 0:
print(step, sess.run(self.cost, feed_dict={self.x_: self.t_data, self.y_: self.label}), sess.run(self.weight_for_layers))
h, c, a = sess.run([self.y_, predicted, accuracy], feed_dict={self.x_: self.t_data, self.y_: self.label})
print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
print(sess.run(self.weight_for_layers), sess.run(self.bias_for_layers)) # Nothing above explicitly assigns anything to W and b, yet they get updated (trained) because the cost inside the train op finds W and b on its own and trains them.
self.weight_for_layers = sess.run(self.weight_for_layers) # self.weight must be set to the session-run values before the session closes; otherwise the weird pre-training weights would remain.
self.bias_for_layers = sess.run(self.bias_for_layers) # same for the biases!
def testing(self):
sigma = float(input('Enter the standard deviation of the Gaussian noise (enter 0 to follow a binary distribution) : '))
test_x = np.zeros((1, self.number_of_input_node)) # input test data
for i in range(len(test_x[0])):
test_x[0][i] = np.random.randint(2) + np.random.normal(0,sigma)# assign 0 or 1 with equal probability (binary).
# On top of that, add a Gaussian random variable with mean 0 and variance sigma^2 based on the entered standard deviation.
test_x = np.array(test_x,dtype=np.float32)
####### Run the procedure through the perceptron with the converged w vectors and obtain y_test - done! #######
if self.select_ftn == 'sigmoid':
########## sigmoid -> feed-forward with the trained weight vectors (w vectors) ############
for iter in range(len(self.weight_for_layers)):
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.sigmoid(tf.matmul(test_x, self.weight_for_layers[0]) + self.bias_for_layers[0])
else:
self.y_est_for_layers[iter] = tf.nn.sigmoid(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
elif self.select_ftn == 'relu':
########## relu -> feed-forward with the trained weight vectors (w vectors) ############
for iter in range(len(self.weight_for_layers)):
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.relu(tf.matmul(test_x, self.weight_for_layers[0]) + self.bias_for_layers[0])
else:
self.y_est_for_layers[iter] = tf.nn.relu(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
elif self.select_ftn == 'leaky_relu':
########## leaky_relu -> feed-forward with the trained weight vectors (w vectors) ############
for iter in range(len(self.weight_for_layers)):
if iter == 0 :
self.y_est_for_layers[0] = tf.nn.leaky_relu(tf.matmul(test_x, self.weight_for_layers[0]) + self.bias_for_layers[0])
else:
self.y_est_for_layers[iter] = tf.nn.leaky_relu(tf.matmul(self.y_est_for_layers[iter-1], self.weight_for_layers[iter]) + self.bias_for_layers[iter])
self.y_est_final = self.y_est_for_layers[-1]
predicted = tf.cast(self.y_est_final > 0.5, dtype=tf.float32)
self.cost = tf.reduce_sum(((self.y_ - self.y_est_final) ** 2))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(f'Feeding test datum {test_x} gives the estimate y = {sess.run(predicted,feed_dict={self.x_:test_x})}')
perceptron = Perceptron_tf()
perceptron.initialize()
perceptron.feed_forward_and_gradient_back_propagation()
perceptron.testing()
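# A short numeric sketch (illustrative fan values only) of the two
# initialization scales used above: Xavier std = sqrt(2 / (fan_in + fan_out))
# for sigmoid, He std = sqrt(2 / fan_in) for (leaky) relu.
import numpy as np
fan_in, fan_out = 2, 3
print('Xavier std:', np.sqrt(2 / (fan_in + fan_out)))  # ~0.632
print('He std    :', np.sqrt(2 / fan_in))              # 1.0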
|
[
"hanjiyong@HANui-MacBookPro.local"
] |
hanjiyong@HANui-MacBookPro.local
|
53f90f9fd678ba7dd9efc74fe19bb7a39c50362f
|
1fe113a1521d65b5067956437219aade3f5954d3
|
/expressivity/midiio/RawInstreamFile.py
|
f9c40e1578a690fbf47bbe9a6dfe25348b80d7c3
|
[] |
no_license
|
bjvanderweij/expressivity
|
b7c00b935da88d51cb3532e960e93b2c9d455976
|
579108611d9f7201b5047444f6bd7973bee302a2
|
refs/heads/master
| 2016-09-10T19:43:50.299929
| 2015-04-05T20:41:11
| 2015-04-05T20:41:11
| 1,846,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,908
|
py
|
# -*- coding: ISO-8859-1 -*-
# standard library imports
from struct import unpack
# custom import
from midiio.DataTypeConverters import readBew, readVar, varLen
class RawInstreamFile:
"""
It parses and reads data from an input file. It takes care of big
endianness, and keeps track of the cursor position. The midi parser
only reads from this object. Never directly from the file.
"""
def __init__(self, infile=''):
"""
If 'file' is a string we assume it is a path and read from
that file.
If it is a file descriptor we read from the file, but we don't
close it.
Midi files are usually pretty small, so it should be safe to
copy them into memory.
"""
if infile:
if isinstance(infile, str):
infile = open(infile, 'rb')
self.data = infile.read()
infile.close()
else:
# don't close the file descriptor
self.data = infile.read()
else:
self.data = b''  # empty bytes, matching the binary ('rb') reads above
# start at beginning ;-)
self.cursor = 0
# setting up data manually
def setData(self, data=''):
"Sets the data from a string."
self.data = data
# cursor operations
def setCursor(self, position=0):
"Sets the absolute position if the cursor"
self.cursor = position
def getCursor(self):
"Returns the value of the cursor"
return self.cursor
def moveCursor(self, relative_position=0):
"Moves the cursor to a new relative position"
self.cursor += relative_position
# native data reading functions
def nextSlice(self, length, move_cursor=1):
"Reads the next text slice from the raw data, with length"
c = self.cursor
slc = self.data[c:c+length]
if move_cursor:
self.moveCursor(length)
return slc
def readBew(self, n_bytes=1, move_cursor=1):
"""
Reads n bytes of data from the current cursor position.
Moves cursor if move_cursor is true
"""
return readBew(self.nextSlice(n_bytes, move_cursor))
def readVarLen(self):
"""
Reads a variable length value from the current cursor position.
Always moves the cursor past the bytes actually consumed.
"""
MAX_VARLEN = 4 # max number of bytes a varlen value can occupy
var = readVar(self.nextSlice(MAX_VARLEN, 0))
# only move cursor the actual bytes in varlen
self.moveCursor(varLen(var))
return var
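# A standalone sketch (independent of midiio; assumes Python 3 bytes) of the
# MIDI variable-length quantity readVarLen consumes: 7 value bits per byte,
# with the high bit set on every byte except the last.
def _vlq_demo(data=b'\x81\x48'):
    value = 0
    for byte in data:
        value = (value << 7) | (byte & 0x7F)  # append 7 payload bits
        if not byte & 0x80:                   # high bit clear -> last byte
            break
    return value  # 0x81 0x48 decodes to (1 << 7) | 0x48 = 200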
if __name__ == '__main__':
test_file = 'test/midifiles/minimal.mid'
fis = RawInstreamFile(test_file)
print(fis.nextSlice(len(fis.data)))
test_file = 'test/midifiles/cubase-minimal.mid'
cubase_minimal = open(test_file, 'rb')
fis2 = RawInstreamFile(cubase_minimal)
print(fis2.nextSlice(len(fis2.data)))
cubase_minimal.close()
|
[
"bjvanderweij@gmail.com"
] |
bjvanderweij@gmail.com
|
12647e19ddbcc77e4aa3c3ad0e1ebd5240dfb434
|
299ffd64164158ee111a250884c3788cc2ffd983
|
/backend/zoom_24987/settings.py
|
bd6f2b9ba71e741b359d63319c516d67959dedc4
|
[] |
no_license
|
crowdbotics-apps/zoom-24987
|
eaebe40b8bfc363750075f69946c94c32250a618
|
fcefda700a3dcae95410edf518e17ebacf51f69a
|
refs/heads/master
| 2023-03-21T00:50:54.786881
| 2021-03-12T01:53:33
| 2021-03-12T01:53:33
| 346,899,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,096
|
py
|
"""
Django settings for zoom_24987 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'zoom_24987.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'zoom_24987.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
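# A minimal sketch (hypothetical values) of the environment this settings
# module reads via django-environ; only SECRET_KEY is strictly required,
# everything else falls back to a default above.
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   DATABASE_URL=postgres://user:pass@localhost:5432/zoom_24987
#   SENDGRID_USERNAME=apikey
#   SENDGRID_PASSWORD=secret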
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
708723aa3ef6416f3039260ae36ae594d3543ed8
|
d64cf6fbb39ddc42a0dd7c73fb970eca458c584d
|
/system/indy-node-tests/TestAdHocSuite.py
|
08d89d2443e91fc9002bea83d3d447bc7285601d
|
[
"Apache-2.0"
] |
permissive
|
AYCH-Inc/aych.hyperindy.autest
|
81173b28314ff9e6e74adaea6c60f961e002c858
|
8486267e45c362a28843e64634c6a5f0ea0edb9e
|
refs/heads/master
| 2021-04-09T23:27:22.308251
| 2020-02-17T12:49:29
| 2020-02-17T12:49:29
| 248,891,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,680
|
py
|
import pytest
from system.utils import *
import docker
from random import choice
@pytest.mark.usefixtures('docker_setup_and_teardown')
@pytest.mark.usefixtures('check_no_failures_fixture')
class TestAdHocSuite:
@pytest.mark.nodes_num(4)
@pytest.mark.asyncio
# staging net issue (INDY-2233)
async def test_rotate_bls_and_get_txn(
self, pool_handler, wallet_handler, get_default_trustee, nodes_num
):
docker_client = docker.from_env()
trustee_did, _ = get_default_trustee
steward_did, steward_vk = await did.create_and_store_my_did(
wallet_handler, json.dumps({'seed': '000000000000000000000000Steward4'})
)
await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=3)
for i in range(10):
# rotate bls keys for Node4
res1 = docker_client.containers.list(
filters={'name': 'node4'}
)[0].exec_run(
['init_bls_keys', '--name', 'Node4'], user='indy'
)
bls_key, bls_key_pop = res1.output.decode().splitlines()
bls_key, bls_key_pop = bls_key.split()[-1], bls_key_pop.split()[-1]
data = json.dumps(
{
'alias': 'Node4',
'blskey': bls_key,
'blskey_pop': bls_key_pop
}
)
req = await ledger.build_node_request(steward_did, '4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA', data)
res2 = json.loads(
await ledger.sign_and_submit_request(pool_handler, wallet_handler, steward_did, req)
)
assert res2['op'] == 'REPLY'
# write txn
await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did)
# get txn
req = await ledger.build_get_txn_request(None, 'DOMAIN', 10)
res3 = json.loads(await ledger.submit_request(pool_handler, req))
assert res3['result']['seqNo'] is not None
# check that pool is ok
await ensure_all_nodes_online(pool_handler, wallet_handler, trustee_did)
await ensure_ledgers_are_in_sync(pool_handler, wallet_handler, trustee_did)
await ensure_state_root_hashes_are_in_sync(pool_handler, wallet_handler, trustee_did)
@pytest.mark.asyncio
# SN-7
async def test_drop_states(
self, payment_init, pool_handler, wallet_handler, get_default_trustee,
initial_token_minting, initial_fees_setting
):
libsovtoken_payment_method = 'sov'
trustee_did, _ = get_default_trustee
address2 = await payment.create_payment_address(wallet_handler, libsovtoken_payment_method, '{}')
# mint tokens
address = initial_token_minting
# set fees
print(initial_fees_setting)
# set auth rule for schema
req = await ledger.build_auth_rule_request(trustee_did, '101', 'ADD', '*', None, '*',
json.dumps(
{
'constraint_id': 'OR',
'auth_constraints': [
{
'constraint_id': 'ROLE',
'role': '0',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {'fees': 'add_schema_250'}
},
{
'constraint_id': 'ROLE',
'role': '2',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {'fees': 'add_schema_250'}
},
{
'constraint_id': 'ROLE',
'role': '101',
'sig_count': 1,
'need_to_be_owner': False,
'metadata': {'fees': 'add_schema_250'}
}
]
}
)
)
res1 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res1)
assert res1['op'] == 'REPLY'
# write schema with fees
source1, _ = await get_payment_sources(pool_handler, wallet_handler, address)
schema_id, schema_json = await anoncreds.issuer_create_schema(
trustee_did, random_string(5), '1.0', json.dumps(['name', 'age'])
)
req = await ledger.build_schema_request(trustee_did, schema_json)
req_with_fees_json, _ = await payment.add_request_fees(
wallet_handler, trustee_did, req, json.dumps([source1]), json.dumps(
[{'recipient': address, 'amount': 750 * 100000}]
), None
)
res2 = json.loads(
await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req_with_fees_json)
)
print(res2)
assert res2['op'] == 'REPLY'
# send payment
source2, _ = await get_payment_sources(pool_handler, wallet_handler, address)
req, _ = await payment.build_payment_req(
wallet_handler, trustee_did, json.dumps([source2]), json.dumps(
[{"recipient": address2, "amount": 500 * 100000}, {"recipient": address, "amount": 250 * 100000}]
), None
)
res3 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
print(res3)
assert res3['op'] == 'REPLY'
# stop Node7 -> drop token state -> start Node7
node7 = NodeHost(7)
node7.stop_service()
time.sleep(3)
for _ledger in ['pool', 'domain', 'config', 'sovtoken']:
print(node7.run('rm -rf /var/lib/indy/sandbox/data/Node7/{}_state'.format(_ledger)))
time.sleep(3)
node7.start_service()
# check that pool is ok
await ensure_all_nodes_online(pool_handler, wallet_handler, trustee_did)
await ensure_ledgers_are_in_sync(pool_handler, wallet_handler, trustee_did)
await ensure_state_root_hashes_are_in_sync(pool_handler, wallet_handler, trustee_did)
# write some txns
await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=10)
# send another payment
source3, _ = await get_payment_sources(pool_handler, wallet_handler, address)
req, _ = await payment.build_payment_req(
wallet_handler, trustee_did, json.dumps([source3]), json.dumps(
[{"recipient": address2, "amount": 125 * 100000}, {"recipient": address, "amount": 125 * 100000}]
), None
)
res4 = json.loads(await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req))
assert res4['op'] == 'REPLY'
# check again that pool is ok
await ensure_all_nodes_online(pool_handler, wallet_handler, trustee_did)
await ensure_ledgers_are_in_sync(pool_handler, wallet_handler, trustee_did)
await ensure_state_root_hashes_are_in_sync(pool_handler, wallet_handler, trustee_did)
@pytest.mark.parametrize(
'demote_count, promote_count',
[
(1, 5),
(100, 5),
(100, 1)
]
)
@pytest.mark.asyncio
async def test_misc_redundant_demotions_promotions(
self, pool_handler, wallet_handler, get_default_trustee, payment_init, initial_token_minting, nodes_num,
demote_count, promote_count
):
trustee_did, _ = get_default_trustee
pool_info = get_pool_info('1')
node_list = ['Node{}'.format(x) for x in range(1, nodes_num + 1)]
address = initial_token_minting
fees = await fees_setter(pool_handler, wallet_handler, trustee_did, 'sov')
# find primary
primary, primary_alias, primary_did = await get_primary(pool_handler, wallet_handler, trustee_did)
# select random node
node_to_demote = choice(node_list)
# demote it
demote_tasks = []
for i in range(demote_count):
task = demote_node(pool_handler, wallet_handler, trustee_did, node_to_demote, pool_info[node_to_demote])
demote_tasks.append(task)
await asyncio.gather(*demote_tasks, return_exceptions=True)
await pool.refresh_pool_ledger(pool_handler)
# make sure VC is done
new_primary = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary)
new_primary_name = 'Node{}'.format(new_primary)
# demote new primary
demote_tasks = []
for i in range(demote_count):
task = demote_node(
pool_handler, wallet_handler, trustee_did, new_primary_name, pool_info[new_primary_name]
)
demote_tasks.append(task)
await asyncio.gather(*demote_tasks, return_exceptions=True)
await pool.refresh_pool_ledger(pool_handler)
# make sure VC is done
super_new_primary = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, new_primary)
# write txn with fees
req = await ledger.build_attrib_request(trustee_did, trustee_did, None, None, random_string(256))
await add_fees_and_send_request(pool_handler, wallet_handler, trustee_did, address, req, fees['attrib'])
# promote both nodes back simultaneously
promote_tasks = []
for i in range(promote_count):
task1 = promote_node(pool_handler, wallet_handler, trustee_did, node_to_demote, pool_info[node_to_demote])
promote_tasks.append(task1)
task2 = promote_node(
pool_handler, wallet_handler, trustee_did, new_primary_name, pool_info[new_primary_name]
)
promote_tasks.append(task2)
await asyncio.gather(*promote_tasks, return_exceptions=True)
await pool.refresh_pool_ledger(pool_handler)
# make sure VC is done
await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, super_new_primary)
await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did, nyms_count=10)
await ensure_pool_is_okay(pool_handler, wallet_handler, trustee_did)
@pytest.mark.parametrize(
'iterations, nyms_count',
[
(1, 10),
(5, 10),
(5, 1)
]
)
@pytest.mark.asyncio
async def test_misc_cyclic_demotions_promotions(
self, pool_handler, wallet_handler, get_default_trustee, payment_init, initial_token_minting, nodes_num,
iterations, nyms_count
):
trustee_did, _ = get_default_trustee
pool_info = get_pool_info('1')
node_list = ['Node{}'.format(x) for x in range(1, nodes_num + 1)]
address = initial_token_minting
fees = await fees_setter(pool_handler, wallet_handler, trustee_did, 'sov')
for _ in range(iterations):
# find primary
primary, primary_alias, primary_did = await get_primary(pool_handler, wallet_handler, trustee_did)
# select random node
node_to_demote = choice(node_list)
# demote it
await demote_node(pool_handler, wallet_handler, trustee_did, node_to_demote, pool_info[node_to_demote])
await pool.refresh_pool_ledger(pool_handler)
# make sure VC is done
new_primary = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary)
# make sure pool works
await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did, nyms_count=nyms_count)
# write txn with fees
req = await ledger.build_attrib_request(trustee_did, trustee_did, None, None, random_string(256))
await add_fees_and_send_request(pool_handler, wallet_handler, trustee_did, address, req, fees['attrib'])
# promote node back
await promote_node(pool_handler, wallet_handler, trustee_did, node_to_demote, pool_info[node_to_demote])
await pool.refresh_pool_ledger(pool_handler)
# make sure VC is done
await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, new_primary)
# make sure pool works
await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did, nyms_count=nyms_count)
await ensure_pool_is_okay(pool_handler, wallet_handler, trustee_did)
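# An illustrative sketch (hypothetical helper; json and indy-sdk's ledger
# module come in via `from system.utils import *` above) of the NODE
# transaction data the demote_node/promote_node helpers build: an empty
# 'services' list demotes a validator, ['VALIDATOR'] promotes it back.
async def demote_node_sketch(pool_handler, wallet_handler, trustee_did, alias, dest):
    data = json.dumps({'alias': alias, 'services': []})  # [] -> demoted
    req = await ledger.build_node_request(trustee_did, dest, data)
    return json.loads(
        await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, req)
    )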
|
[
"vladimir.shishkin@dsr-corporation.com"
] |
vladimir.shishkin@dsr-corporation.com
|
8bf7f50220f7b1f42f0f502c85f94c86400cf9ff
|
dd04c22836edefe77eeeaedc92662f09fc66c09c
|
/examples/indicators/moving_averages/indicators_ema.py
|
5026aa9e5c6ff4ebb42b3b5090945de3300e86db
|
[
"MIT"
] |
permissive
|
MyBourse/trading-technical-indicators
|
0d7e3e86a274f3ed50290072e2d716416a8d485d
|
908e93018b3aa8ba9bee099ce9f1813ea64c6d72
|
refs/heads/master
| 2022-04-14T13:27:01.261453
| 2020-02-09T12:31:07
| 2020-02-09T12:31:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
'''
File name: indicators_ema.py
Example code related to the tradingti.indicators package.
EMA Trading Indicator.
Author: Vasileios Saveris
email: vsaveris@gmail.com
License: MIT
Date last modified: 26.01.2020
Python Version: 3.6
'''
import pandas as pd
from tradingti.indicators import EMA
# Avoid the matplotlib FutureWarning about datetime converters
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Read data from csv file. Set the index to the correct column (dates column)
df = pd.read_csv('../data/sample_data.csv', parse_dates = True, index_col = 0)
# Calculate the EMA indicator
ema = EMA(df[df.index >= '2011-01-01'], span_periods = [200])
# Save the plot of the calculated Technical Indicator
ema.getTiPlot().savefig('../figures/indicators_ema_200_example.png')
print('- Graph ../figures/indicators_ema_200_example.png saved.')
# Calculate the EMA indicator for the default span periods (short term ema = 26,
# long term ema = 200)
ema = EMA(df[df.index >= '2011-01-01'])
# Save the plot of the calculated Technical Indicator
ema.getTiPlot().savefig('../figures/indicators_ema_50_200_example.png')
print('- Graph ../figures/indicators_ema_50_200_example.png saved.')
# Get EMA calculated data
print('\nEMA data:\n', ema.getTiData())
# Get EMA value for a specific date
print('\nEMA value at 2012-09-06:', ema.getTiValue('2012-09-06'))
# Get the most recent EMA value
print('\nEMA value at', df.index[0], ':', ema.getTiValue())
# Get signal from EMA
print('\nSignal:', ema.getSignal())
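# A short pure-pandas sketch (independent of tradingti; illustrative prices)
# of the recurrence an EMA implements:
#   ema_t = alpha * price_t + (1 - alpha) * ema_{t-1}, with alpha = 2 / (span + 1).
prices = pd.Series([10.0, 11.0, 12.0, 11.5])
span = 3  # alpha = 2 / (3 + 1) = 0.5
print(prices.ewm(span=span, adjust=False).mean())
# step by step: 10.0, 0.5*11 + 0.5*10 = 10.5, 0.5*12 + 0.5*10.5 = 11.25, ...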
|
[
"vsaveris@gmail.com"
] |
vsaveris@gmail.com
|
3737ae2448b4558433d4e71caef1f5ebcd5b024c
|
c8b819d5e728e30d4d796a5c6821421e01529302
|
/djProject/apps/tasks/migrations/0004_auto__add_comment.py
|
06648e54504d10f9f6aa913ad137b245ad927135
|
[
"BSD-3-Clause"
] |
permissive
|
devsar/djProject
|
729e42ab8799ec648810eb7151c08cf4102efbbd
|
503fb9dd06c304f0450977bf4bac87d238b71626
|
refs/heads/master
| 2020-05-20T06:41:33.500733
| 2011-08-01T02:08:32
| 2011-08-01T02:08:32
| 1,985,397
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,233
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Comment'
db.create_table('tasks_comment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tasks.Task'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('comment', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('tasks', ['Comment'])
def backwards(self, orm):
# Deleting model 'Comment'
db.delete_table('tasks_comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'feed': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
'sprints.sprint': {
'Meta': {'object_name': 'Sprint'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
'tasks.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'tasks.log': {
'Meta': {'object_name': 'Log'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']"})
},
'tasks.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']"})
},
'tasks.task': {
'Meta': {'object_name': 'Task'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'estimated': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tasks.Task']", 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'remaining': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'spend': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'sprint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sprints.Sprint']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '2'})
}
}
complete_apps = ['tasks']
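For orientation, the frozen 'tasks.task' entry above corresponds to a model along these lines. This is a hypothetical reconstruction from the migration freeze, not the project's actual models.py; the field names and options come from the dict, everything else is assumed (South-era Django, so no on_delete arguments):
import datetime
from django.db import models

class Task(models.Model):
    description = models.TextField(max_length=255)
    estimated = models.DecimalField(null=True, max_digits=4, decimal_places=2, blank=True)
    owner = models.ForeignKey('auth.User', null=True, blank=True)
    parent = models.ForeignKey('self', null=True, blank=True)
    priority = models.CharField(max_length=2)
    project = models.ForeignKey('projects.Project')
    remaining = models.DecimalField(null=True, max_digits=4, decimal_places=2, blank=True)
    spend = models.DecimalField(null=True, max_digits=4, decimal_places=2, blank=True)
    sprint = models.ForeignKey('sprints.Sprint', null=True, blank=True)
    status = models.CharField(default='N', max_length=2)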
|
[
"sebastian@devsar.com"
] |
sebastian@devsar.com
|
172af6bb4452711f78a3c0202568c7e899d5c577
|
238e46a903cf7fac4f83fa8681094bf3c417d22d
|
/VTK/vtk_7.1.1_x64_Debug/lib/python2.7/site-packages/twisted/words/test/test_oscar.py
|
b2035d1afb15fa8f92969c10f1ffd8f6810ac5bf
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] |
permissive
|
baojunli/FastCAE
|
da1277f90e584084d461590a3699b941d8c4030b
|
a3f99f6402da564df87fcef30674ce5f44379962
|
refs/heads/master
| 2023-02-25T20:25:31.815729
| 2021-02-01T03:17:33
| 2021-02-01T03:17:33
| 268,390,180
| 1
| 0
|
BSD-3-Clause
| 2020-06-01T00:39:31
| 2020-06-01T00:39:31
| null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.oscar}.
"""
from twisted.trial.unittest import TestCase
from twisted.words.protocols.oscar import encryptPasswordMD5
class PasswordTests(TestCase):
"""
Tests for L{encryptPasswordMD5}.
"""
def test_encryptPasswordMD5(self):
"""
L{encryptPasswordMD5} hashes the given password and key and returns a
string suitable to use to authenticate against an OSCAR server.
"""
self.assertEqual(
encryptPasswordMD5('foo', 'bar').encode('hex'),
'd73475c370a7b18c6c20386bcf1339f2')
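A portability note: str.encode('hex') exists only on Python 2; the 'hex' codec was removed from str/bytes in Python 3. Under Python 3 the same assertion would be written with bytes.hex(), assuming encryptPasswordMD5 returns bytes (a hedged sketch, not part of the original file):
        self.assertEqual(
            encryptPasswordMD5(b'foo', b'bar').hex(),
            'd73475c370a7b18c6c20386bcf1339f2')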
|
[
"l”ibaojunqd@foxmail.com“"
] |
libaojunqd@foxmail.com
|
6ff915ccb108c93545ca357fe9633d124c416a8e
|
f120d4902578e9d531a9e3a6701a9ea4030c7c80
|
/Python_NLP_backend/news/realnews.py
|
963dfa9f0415ccd6eae813f3ce041e0f6aeb2b73
|
[] |
no_license
|
mjosephan2/baermaster
|
89e5589a04b03b59b868f3e6eb97d318420d423e
|
cfda4cb5a37a55303abb0e4470c249ffd321d965
|
refs/heads/master
| 2020-08-13T11:43:33.543997
| 2019-09-26T10:57:32
| 2019-09-26T10:57:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,107
|
py
|
import newsapi
from newsapi import NewsApiClient
import numpy as np
import pandas as pd
import time
import scipy.sparse as ss
import matplotlib.pyplot as plt
import csv
import collections
import string
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import re
from sklearn.feature_extraction.text import CountVectorizer
from nltk import ngrams
import logging
import spacy
from nltk.stem.wordnet import WordNetLemmatizer
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# Init
#q='bitcoin',
# /v2/top-headlines
class news_text():
    # NOTE: both statements below execute once, at class-definition (import)
    # time, so every instance shares the same fetched batch of articles.
    newsapi = NewsApiClient(api_key='1ca90686b682467a97477cdef14ef436')
    everything = newsapi.get_everything(sources='financial-post', language='en')
def assign_data(self):
completearticles=[]
articles=[]
titles=[]
urls=[]
imgurls=[]
dictionaries=self.everything["articles"]
for dic in dictionaries:
text=(dic["content"])
completearticles.append(dic["title"]+". "+text)
articles.append(text)
titles.append(dic["title"])
urls.append(dic["url"])
imgurls.append(dic["urlToImage"])
self.completearticles=completearticles
self.articles=articles
self.titles=titles
self.urls=urls
self.imgurls=imgurls
def return_articles(self):
return(self.articles)
def return_titles(self):
return(self.titles)
def return_urls(self):
return(self.urls)
def return_imgurls(self):
return(self.imgurls)
    # preprocessing step before converting to vectors
    def preprocess_text(self, text):
        # NewsAPI truncates article bodies with a trailing "… [+N chars]"
        # marker: keep only the text before it, normalise whitespace, and
        # drop the final (possibly cut-off) word.
        textlist = text.split("… [+")
        text2 = textlist[0]
        text2 = text2.replace("\r", " ")
        text2 = text2.replace("\n", " ")
        textlist = text2.split(" ")
        textlist = [text for text in textlist if text != ""]
        textlist = textlist[:len(textlist)-1]
        text = " ".join(textlist)
text = text.lower()
tokenizer = RegexpTokenizer(r'\w+') #tokenize words
tokens = tokenizer.tokenize(text)
punctuation = list(string.punctuation)
stoplist = stopwords.words('english')
stoplist = set(stoplist) #like a list, but can use hash table
tokens = [WordNetLemmatizer().lemmatize(token) for token in tokens] #lemmatize all tokens
tokens = [w for w in tokens if not w.isdigit()] #remove digits
tokens = [w for w in tokens if len(w)>2] #remove words having 2 or less chars
tokens = [w for w in tokens if not w in punctuation] #remove punctuations
tokens = [w for w in tokens if not w in stoplist] #remove stopwords
# stemmed = [sno.stem(words) for words in filtered_words]
return (" ".join(tokens)) #remove large sentence with all purified words
def return_processed_texts(self):
articles=np.array(self.completearticles)
processed_articles=[self.preprocess_text(text) for text in articles]
return(processed_articles)
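The cleanup pipeline in preprocess_text (lowercase, regex tokenize, lemmatize, then drop digits, short tokens, punctuation and stopwords) can be exercised on its own. A minimal sketch, assuming the NLTK 'stopwords' and 'wordnet' corpora are already downloaded:
import string
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer

def clean(text):
    # Same filter chain as preprocess_text, without the NewsAPI-specific bits.
    tokens = RegexpTokenizer(r'\w+').tokenize(text.lower())
    stop = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(t) for t in tokens]
    return " ".join(t for t in tokens
                    if not t.isdigit() and len(t) > 2 and t not in stop)

print(clean("Markets rallied 3 days in a row, say the analysts."))
# -> roughly: "market rallied day row say analyst" (exact output depends on
#    the installed WordNet data)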
|
[
"jason.chowcs#gmail.com"
] |
jason.chowcs#gmail.com
|
e2874dfd6f8aca2ed3abbd73ad1d33af2537a36c
|
b2d5ab7f1b7d2cebd25a2a4ec4a1f3d834ec442c
|
/1044.py
|
52fdf47bdf58592f204eae61aa2b69745642c29a
|
[] |
no_license
|
henriqueparaguassu/uri-online-judge
|
d3a1663e9f054a6c8f9255606576492f0710f7c1
|
793579a3c31d5577283dcb3c209eb279fa76f673
|
refs/heads/main
| 2023-08-05T12:17:42.099688
| 2021-09-16T00:54:59
| 2021-09-16T00:54:59
| 406,966,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
n = input()
A = n.split()
x = int(A[0])
y = int(A[1])
if (x % y == 0) or (y % x == 0):
print('Sao Multiplos')
else:
print('Nao sao Multiplos')
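The same check can be written more compactly with unpacking; a hypothetical equivalent of the accepted solution above:
x, y = map(int, input().split())
print('Sao Multiplos' if x % y == 0 or y % x == 0 else 'Nao sao Multiplos')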
|
[
"henriquevoglerparaguassu@gmail.com"
] |
henriquevoglerparaguassu@gmail.com
|
ab6f3a4d3ab74a2ba6c4d84ce083ce4ccbe64a61
|
25c86c9a28308ae3237dacc0d59c204cfc393003
|
/app/maps/migrations/0014_auto_20170611_2126.py
|
1bc96f9c5b37bd7995f1a28364418200cae01811
|
[
"MIT"
] |
permissive
|
bladekp/DroniadaDjangoDronekitAPP
|
ec6002eef27706413340e1c2c4afcd92f094939a
|
2a829ee5f1cc718b501ae315e812433b8ef49293
|
refs/heads/master
| 2021-12-14T17:25:19.893776
| 2021-12-06T16:54:57
| 2021-12-06T16:54:57
| 91,502,139
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-11 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0013_auto_20170611_2123'),
]
operations = [
migrations.AlterField(
model_name='beacon',
name='altitude',
field=models.BigIntegerField(default=0.0),
),
migrations.AlterField(
model_name='beacon',
name='latitude',
field=models.BigIntegerField(default=0.0),
),
migrations.AlterField(
model_name='beacon',
name='longitude',
field=models.BigIntegerField(default=0.0),
),
]
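Worth noting: BigIntegerField(default=0.0) pairs a float default with an integer column, and integer storage truncates fractional GPS coordinates. If fractional degrees are the intent, a later migration could switch to DecimalField; a hypothetical alternative model, not part of this repo:
from django.db import models

class Beacon(models.Model):
    # DecimalField keeps fractional degrees; an integer column would
    # silently truncate e.g. 52.2297 down to 52.
    latitude = models.DecimalField(max_digits=9, decimal_places=6, default=0)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, default=0)
    altitude = models.DecimalField(max_digits=9, decimal_places=2, default=0)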
|
[
"bladekp@ee.pw.edu.pl"
] |
bladekp@ee.pw.edu.pl
|
4e0d695cc5dadb7408ed67f7b27413687749c8f5
|
f994051a71b2a6fe5ba1357bc9da9b38d5843e26
|
/react_frontend/views.py
|
bbd03c3b92037a5c37c21486493ad11c1f80726e
|
[] |
no_license
|
mohsam97/complaint-management-portal
|
ed6be88668764f24db506c15f827f864e9ea0cba
|
0314a18af297637f6f7107c58d9cce2850a48fc4
|
refs/heads/master
| 2023-07-04T13:33:40.040202
| 2021-08-07T17:44:53
| 2021-08-07T17:44:53
| 393,752,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
from django.shortcuts import render
def index(request):
return render(request, "frontend/index.html")
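For the view to be reachable it still needs a URL route; a minimal sketch of the matching urls.py (assumed, not included in this file):
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='index'),
]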
|
[
"mohsam97@gmail.com"
] |
mohsam97@gmail.com
|
42e731c4a27671293b4759fbeac2b6d133bfb510
|
85e6d039205878475e05957219aa6fb32b04b086
|
/28-implement-strstr.py
|
34ad216b73d8add1e35e450064f5b8b85f47dec5
|
[
"MIT"
] |
permissive
|
LGX95/leetcode
|
e4e3fd98527a8559b60beb572b1f728b97357c14
|
a25813975beca8e24e8b0c920d6e2ef488c848da
|
refs/heads/master
| 2023-01-28T13:00:16.624801
| 2020-12-07T15:17:58
| 2020-12-07T15:17:58
| 292,799,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
"""Question: https://leetcode.com/problems/implement-strstr/
"""
from util import print_vars
class Solution:
    def strStr(self, haystack: str, needle: str) -> int:
        # Brute-force scan: O((n - m + 1) * m) slice comparisons in the worst case.
        if haystack == "" and needle == "":
            return 0
        for i in range(len(haystack) - len(needle) + 1):
            if haystack[i:i + len(needle)] == needle:
                return i
        return -1
if __name__ == "__main__":
haystack = "hello"
needle = "ll"
output = Solution().strStr(haystack, needle)
print_vars(haystack, needle, output)
assert output == 2
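Since Python's built-in str.find implements the same contract (first index, or -1 when absent), it can serve as a quick oracle for the brute-force version; a small sanity check, not part of the original file:
cases = [("hello", "ll"), ("aaaaa", "bba"), ("", ""), ("abc", "")]
for h, n in cases:
    assert Solution().strStr(h, n) == h.find(n), (h, n)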
|
[
"ligengxin95@gmail.com"
] |
ligengxin95@gmail.com
|
c58a2bd5c02ec9417df7aefbfc85f7b4a8906bb1
|
c44a2871e1fc79f91d195acfac4642fc22f9017a
|
/MachineLearning/StockPricePredictor/LinRegLearner.py
|
0e79c3d9a71234cfc2cd427296ccbfa854c65482
|
[] |
no_license
|
mkumble/codestack
|
299155657a6310f70080acc9c9e2755a1e7f6c31
|
a3ef98b8c8563082fa7e618cb1e06db52e02c413
|
refs/heads/master
| 2020-05-18T23:34:15.833632
| 2015-04-22T05:13:20
| 2015-04-22T05:13:20
| 23,770,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
#!/usr/bin/env python
__author__ = "Mithun Kumble"
import numpy
import scipy.spatial.distance
from scipy.spatial import cKDTree
import math, random, sys, bisect, time
import cProfile, pstats
from CommonUtils import calculateRMSError
from DataHandler import getflatcsv
class LinRegLearner:
def __init__(self):
"""
Initialize the variables
"""
self.Xtrain = None
self.Ytrain = None
self.coeff = None
self.res = None
def addEvidence(self,Xtrain,Ytrain=None):
"""
Trains the Linear Regression Learner from the XTrain values
"""
self.Xtrain = Xtrain
self.Ytrain = Ytrain
        xTrainIdentityMatrix = numpy.hstack([self.Xtrain, numpy.ones((len(self.Xtrain), 1))])
        # Solve the least-squares system once; the solution vector holds the
        # two feature coefficients followed by the intercept term.
        solution = numpy.linalg.lstsq(xTrainIdentityMatrix, Ytrain)[0]
        self.coeff = numpy.zeros(2)
        self.coeff[0] = solution[0]
        self.coeff[1] = solution[1]
        self.res = solution[2]
def query(self,XTest):
"""
Retrieves the predicted Y values based on the input XTest values
"""
Y = numpy.dot(XTest, self.coeff) + self.res
return Y
def testLinRegLearner(fname):
"""
The function testLinRegLearner does the following things:
i) Creates a Linear Regression Learner
ii) Trains the learner using about 60% of the data
iii Tests the learner using 40% of the data - Calculates the Root Mean Square Error, Correlation Coefficient for the predicted values.
"""
learner = LinRegLearner()
    data = getflatcsv(fname)
    split = int(0.6 * len(data))  # slice indices must be integers in modern NumPy
    xTrainData = data[0:split, 0:2]
    yTrainData = data[0:split, 2:3]
    xTest = data[split:len(data), 0:2]
    learner.addEvidence(xTrainData, yTrainData)
    yResult = learner.query(xTest)
    yActual = data[split:len(data), 2]
rmse = calculateRMSError(yResult,yActual)
corrCoeff= numpy.corrcoef(yResult, yActual)[0,1]
return rmse,corrCoeff,yActual,yResult
fname = "data-classification-prob.csv"
rmse,corrCoeff,yActual,yResult = testLinRegLearner(fname)
print "\n\tLearner: Linear Regression Learner"
print "\t\tFile Name: "+fname
print "\t\tRMS Error = "+str(rmse)
print "\t\tCorrelation Coefficient = "+str(corrCoeff)+"\n\n"
fname = "data-ripple-prob.csv"
rmse,corrCoeff,yActual,yResult = testLinRegLearner(fname)
print "\n\tLearner: Linear Regression Learner"
print "\t\tFile Name: "+fname
print "\t\tRMS Error = "+str(rmse)
print "\t\tCorrelation Coefficient = "+str(corrCoeff)+"\n\n"
|
[
"mkumble@gmail.com"
] |
mkumble@gmail.com
|
a4198b01c54441278045f5049ab837b159422e89
|
f8ff804676002a23ec59b470a41c784a2ad9d1b8
|
/z3_toolbox.py
|
20d209942d1bd0943c29625656aa722082d7ac7c
|
[] |
no_license
|
NickF0211/2542project-pre-study
|
ebce207987d2a405dd14cb388b6d21ffc030a4ab
|
29d80d0ab2f79dda92c8c1e709bdb13f7fc5e3e3
|
refs/heads/master
| 2023-04-24T08:26:27.578385
| 2021-05-09T00:14:29
| 2021-05-09T00:14:29
| 342,433,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from z3 import *
def atMostOne(candidates):
    """Encode 'at most one of candidates is true': if the head is true, none
    of the rest may be true; otherwise recurse on the rest of the list."""
    if candidates == []:
        return True
    else:
        head = candidates[0]
        rst = candidates[1:]
        return And(Implies(head, Not(Or(rst))), Implies(Not(head), atMostOne(rst)))
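The recursive encoding above grows quadratically with the candidate list. z3's built-in pseudo-boolean constraint expresses the same thing directly; a small sketch:
from z3 import Bools, Solver, AtMost

a, b, c = Bools('a b c')
s = Solver()
s.add(AtMost(a, b, c, 1))  # at most one of a, b, c may be true
s.add(a)
print(s.check())           # sat: any model must set b and c to False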
|
[
"lkbjhgfd@hotmail.com"
] |
lkbjhgfd@hotmail.com
|
1dfed182f707b1230da4ea92963aa1a4bc0ae4ce
|
a1dee1218ca6d948e6c9ba5f5bc299357fa17cf3
|
/tests/utils-tests.py
|
9da2d628f93fc7eb9ecaabb7bd718234ae31dca7
|
[
"MIT"
] |
permissive
|
koaning/wallholla
|
27d3a7cf310ec093f0b37db9d3bf9e025f44ee09
|
6641928dab90b19f02de9a8bfecac88d773e35ae
|
refs/heads/master
| 2021-03-29T02:05:19.133413
| 2018-06-01T12:15:58
| 2018-06-01T12:15:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
import unittest
from utils import make_pretrained_filenames
class MakeFileNameTestCase(unittest.TestCase):
def test_basic_usage1(self):
res = make_pretrained_filenames("catdog", "random", "mobilenet", 200, (100, 100))
outcome = [
'catdog-mobilenet-random-200-100x100-train-data.npy',
'catdog-mobilenet-random-200-100x100-train-label.npy',
'catdog-mobilenet-random-200-100x100-valid-data.npy',
'catdog-mobilenet-random-200-100x100-valid-label.npy']
for i in range(4):
self.assertEqual(res[i], outcome[i])
def test_basic_usage2(self):
res = make_pretrained_filenames("dogcatz", "random", "foofoo", 200, (100, 100))
outcome = [
'dogcatz-foofoo-random-200-100x100-train-data.npy',
'dogcatz-foofoo-random-200-100x100-train-label.npy',
'dogcatz-foofoo-random-200-100x100-valid-data.npy',
'dogcatz-foofoo-random-200-100x100-valid-label.npy']
for i in range(4):
self.assertEqual(res[i], outcome[i])
def test_basic_usage3(self):
res = make_pretrained_filenames("catdog", "random", "mobilenet", 20000, (5, 5))
outcome = [
'catdog-mobilenet-random-20000-5x5-train-data.npy',
'catdog-mobilenet-random-20000-5x5-train-label.npy',
'catdog-mobilenet-random-20000-5x5-valid-data.npy',
'catdog-mobilenet-random-20000-5x5-valid-label.npy']
for i in range(4):
self.assertEqual(res[i], outcome[i])
if __name__ == '__main__':
unittest.main()
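The three tests pin down the filename scheme completely: dataset-model-split-count-WxH plus a train/valid by data/label suffix. One implementation consistent with every assertion above (hypothetical; the real utils.make_pretrained_filenames may differ internally):
def make_pretrained_filenames(dataset, split, model, n_img, size):
    # size is a (width, height) tuple, rendered as "WxH" in the stem.
    stem = "{}-{}-{}-{}-{}x{}".format(dataset, model, split, n_img, size[0], size[1])
    return [stem + "-{}-{}.npy".format(part, kind)
            for part in ("train", "valid")
            for kind in ("data", "label")]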
|
[
"vincentwarmerdam@Vincents-MacBook-Pro.local"
] |
vincentwarmerdam@Vincents-MacBook-Pro.local
|
c66c0543322bb8f8a5580de0e145f2b559ec397a
|
9bbb7685f7a85f505784543694cb94431326c83b
|
/tests/test_install.py
|
2901da1126623c5f614a826cb5bad1443040b8dd
|
[
"Apache-2.0"
] |
permissive
|
hunter-packages/fruit
|
88248cb71b7fedc455ebdd7ac3624dfd8f030331
|
71d9ada48f7bf1749ce2889250955404582a7c6b
|
refs/heads/hunter-3.1.1
| 2020-03-27T13:03:06.281313
| 2018-08-29T11:29:11
| 2018-08-29T11:29:11
| 146,586,928
| 1
| 2
|
Apache-2.0
| 2018-08-29T11:29:12
| 2018-08-29T10:58:43
|
C++
|
UTF-8
|
Python
| false
| false
| 33,914
|
py
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#include "test_common.h"
struct X;
struct Annotation1 {};
using XAnnot1 = fruit::Annotated<Annotation1, X>;
'''
@pytest.mark.parametrize('XParamInChildComponent,XParamInRootComponent', [
('X', 'X'),
('X', 'const X'),
('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X>'),
('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>'),
])
def test_success(XParamInChildComponent, XParamInRootComponent):
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
fruit::Component<XParamInChildComponent> getChildComponent() {
return fruit::createComponent()
.registerProvider<XParamInChildComponent()>([]() { return X(5); });
}
fruit::Component<XParamInRootComponent> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent);
}
int main() {
fruit::Injector<XParamInRootComponent> injector(getRootComponent);
X x = injector.get<XParamInRootComponent>();
Assert(x.n == 5);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('XParamInChildComponent,XParamInRootComponent', [
('const X', 'X'),
('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X>'),
])
def test_install_error_child_component_provides_const(XParamInChildComponent, XParamInRootComponent):
source = '''
struct X {};
fruit::Component<XParamInChildComponent> getChildComponent();
fruit::Component<XParamInRootComponent> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent);
}
'''
expect_compile_error(
'NonConstBindingRequiredButConstBindingProvidedError<XParamInRootComponent>',
'The type T was provided as constant, however one of the constructors/providers/factories in this component',
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('ProvidedXParam,RequiredXParam', [
('X', 'X'),
('X', 'const X'),
('const X', 'const X'),
('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, X>'),
('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>'),
('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, const X>'),
])
def test_with_requirements_success(ProvidedXParam, RequiredXParam):
ProvidedXParamWithoutConst = ProvidedXParam.replace('const ', '')
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Y {
X x;
Y(X x): x(x) {}
};
fruit::Component<fruit::Required<RequiredXParam>, Y> getChildComponent1() {
return fruit::createComponent()
.registerProvider<Y(RequiredXParam)>([](X x) { return Y(x); });
}
fruit::Component<ProvidedXParam> getChildComponent2() {
return fruit::createComponent()
.registerProvider<ProvidedXParamWithoutConst()>([]() { return X(5); });
}
fruit::Component<Y> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2);
}
int main() {
fruit::Injector<Y> injector(getRootComponent);
Y y = injector.get<Y>();
Assert(y.x.n == 5);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('ProvidedXParam,RequiredXParam', [
('const X', 'X'),
('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X>'),
])
def test_with_requirements_error_only_nonconst_provided(ProvidedXParam, RequiredXParam):
source = '''
struct X {};
struct Y {};
fruit::Component<fruit::Required<RequiredXParam>, Y> getChildComponent1();
fruit::Component<ProvidedXParam> getChildComponent2();
fruit::Component<Y> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2);
}
'''
expect_compile_error(
'NonConstBindingRequiredButConstBindingProvidedError<RequiredXParam>',
'The type T was provided as constant, however one of the constructors/providers/factories in this component',
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('ProvidedXParam,RequiredXParam', [
('const X', 'X'),
('fruit::Annotated<Annotation1, const X>', 'fruit::Annotated<Annotation1, X>'),
])
def test_with_requirements_error_only_nonconst_provided_reversed_install_order(ProvidedXParam, RequiredXParam):
source = '''
struct X {};
struct Y {};
fruit::Component<fruit::Required<RequiredXParam>, Y> getChildComponent1();
fruit::Component<ProvidedXParam> getChildComponent2();
fruit::Component<Y> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent2)
.install(getChildComponent1);
}
'''
expect_compile_error(
'NonConstBindingRequiredButConstBindingProvidedError<RequiredXParam>',
'The type T was provided as constant, however one of the constructors/providers/factories in this component',
COMMON_DEFINITIONS,
source,
locals())
def test_with_requirements_not_specified_in_child_component_error():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Y {
X x;
Y(X x): x(x) {}
};
fruit::Component<fruit::Required<X>, Y> getParentYComponent() {
return fruit::createComponent()
.registerProvider([](X x) { return Y(x); });
}
// We intentionally don't have fruit::Required<X> here, we want to test that this results in an error.
fruit::Component<Y> getYComponent() {
return fruit::createComponent()
.install(getParentYComponent);
}
'''
expect_compile_error(
'NoBindingFoundError<X>',
'No explicit binding nor C::Inject definition was found for T',
COMMON_DEFINITIONS,
source)
@pytest.mark.parametrize('XAnnot,ConstXAnnot', [
('X', 'const X'),
('fruit::Annotated<Annotation1, X>', 'fruit::Annotated<Annotation1, const X>'),
])
def test_install_requiring_nonconst_then_install_requiring_const_ok(XAnnot, ConstXAnnot):
source = '''
struct X {};
struct Y {};
struct Z {};
fruit::Component<fruit::Required<XAnnot>, Y> getChildComponent1() {
return fruit::createComponent()
.registerConstructor<Y()>();
}
fruit::Component<fruit::Required<ConstXAnnot>, Z> getChildComponent2() {
return fruit::createComponent()
.registerConstructor<Z()>();
}
fruit::Component<Y, Z> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2)
.registerConstructor<XAnnot()>();
}
int main() {
fruit::Injector<Y, Z> injector(getRootComponent);
injector.get<Y>();
injector.get<Z>();
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_install_requiring_nonconst_then_install_requiring_const_declaring_const_requirement_error():
source = '''
struct X {};
struct Y {};
struct Z {};
fruit::Component<fruit::Required<X>, Y> getChildComponent1();
fruit::Component<fruit::Required<const X>, Z> getChildComponent2();
fruit::Component<fruit::Required<const X>, Y, Z> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2);
}
'''
expect_compile_error(
'ConstBindingDeclaredAsRequiredButNonConstBindingRequiredError<X>',
'The type T was declared as a const Required type in the returned Component, however',
COMMON_DEFINITIONS,
source,
locals())
def test_install_requiring_const_then_install_requiring_nonconst_ok():
source = '''
struct X {};
struct Y {};
struct Z {};
fruit::Component<fruit::Required<const X>, Y> getChildComponent1() {
return fruit::createComponent()
.registerConstructor<Y()>();
}
fruit::Component<fruit::Required<X>, Z> getChildComponent2() {
return fruit::createComponent()
.registerConstructor<Z()>();
}
fruit::Component<Y, Z> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2)
.registerConstructor<X()>();
}
int main() {
fruit::Injector<Y, Z> injector(getRootComponent);
injector.get<Y>();
injector.get<Z>();
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_install_requiring_const_then_install_requiring_nonconst_declaring_const_requirement_error():
source = '''
struct X {};
struct Y {};
struct Z {};
fruit::Component<fruit::Required<const X>, Y> getChildComponent1();
fruit::Component<fruit::Required<X>, Z> getChildComponent2();
fruit::Component<fruit::Required<const X>, Y, Z> getRootComponent() {
return fruit::createComponent()
.install(getChildComponent1)
.install(getChildComponent2);
}
'''
expect_compile_error(
'ConstBindingDeclaredAsRequiredButNonConstBindingRequiredError<X>',
'The type T was declared as a const Required type in the returned Component, however',
COMMON_DEFINITIONS,
source,
locals())
def test_install_with_args_success():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&) {
return true;
}
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&) {
return 0;
}
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg, Arg) {
return fruit::createComponent()
.registerProvider([]() { return X(5); });
}
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{}, 15);
}
int main() {
fruit::Injector<X> injector(getComponent);
X x = injector.get<X>();
Assert(x.n == 5);
}
'''
expect_success(COMMON_DEFINITIONS, source)
def test_install_with_args_error_not_move_constructible():
source = '''
struct Arg {
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = delete;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg::Arg\(Arg&&\).'
+ '|error: call to deleted constructor of .Arg.'
+ '|.Arg::Arg\(Arg &&\).: cannot convert argument 1 from .std::_Tuple_val<_This>. to .const Arg &.',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_move_constructible_with_conversion():
source = '''
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = delete;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg::Arg\(Arg&&\).'
+ '|error: call to deleted constructor of .Arg.'
+ '|.Arg::Arg\(Arg &&\).: cannot convert argument 1 from .std::_Tuple_val<_This>. to .int.',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_copy_constructible():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg() = default;
Arg(const Arg&) = delete;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg::Arg\(const Arg&\).'
+ '|error: call to deleted constructor of .Arg.'
+ '|error C2280: .Arg::Arg\(const Arg &\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_copy_constructible_with_conversion():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = delete;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg::Arg\(const Arg&\).'
+ '|error: call to deleted constructor of .Arg.'
+ '|error C2280: .Arg::Arg\(const Arg &\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_move_assignable():
source = '''
struct Arg {
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = delete;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg& Arg::operator=\(Arg&&\).'
+ '|error: overload resolution selected deleted operator .=.'
+ '|error C2280: .Arg &Arg::operator =\(Arg &&\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_move_assignable_with_conversion():
source = '''
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = delete;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg& Arg::operator=\(Arg&&\).'
+ '|error: overload resolution selected deleted operator .=.'
+ '|error C2280: .Arg &Arg::operator =\(Arg &&\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_copy_assignable():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = delete;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg& Arg::operator=\(const Arg&\).'
+ '|error: overload resolution selected deleted operator .=.'
+ '|error C2280: .Arg &Arg::operator =\(const Arg &\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_copy_assignable_with_conversion():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = delete;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: use of deleted function .Arg& Arg::operator=\(const Arg&\).'
+ '|error: overload resolution selected deleted operator .=.'
+ '|error C2280: .Arg &Arg::operator =\(const Arg &\).: attempting to reference a deleted function',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_equality_comparable():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: no match for .operator==. \(operand types are .const Arg. and .const Arg.\)'
+ '|error: invalid operands to binary expression \(.const Arg. and .const Arg.\)'
+ '|error C2676: binary .==.: .const Arg. does not define this operator',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_equality_comparable_with_conversion():
source = '''
struct X {
int n;
X(int n) : n(n) {}
};
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
namespace std {
template <>
struct hash<Arg> {
size_t operator()(const Arg&);
};
}
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: no match for .operator==. \(operand types are .const Arg. and .const Arg.\)'
+ '|error: invalid operands to binary expression \(.const Arg. and .const Arg.\)'
+ '|error C2676: binary .==.: .const Arg. does not define this operator',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_hashable():
source = '''
struct Arg {
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), Arg{});
}
'''
expect_generic_compile_error(
'error: use of deleted function .std::hash<Arg>::hash\(\).'
+ '|error: call to implicitly-deleted default constructor of .std::hash<Arg>.'
+ '|error: invalid use of incomplete type .struct std::hash<Arg>.'
+ '|error: implicit instantiation of undefined template .std::(__1::)?hash<Arg>.'
+ '|error C2338: The C\+\+ Standard doesn.t provide a hash for this type.'
+ '|error C2064: term does not evaluate to a function taking 1 arguments',
COMMON_DEFINITIONS,
source)
def test_install_with_args_error_not_hashable_with_conversion():
source = '''
struct Arg {
Arg(int) {}
Arg() = default;
Arg(const Arg&) = default;
Arg(Arg&&) = default;
Arg& operator=(const Arg&) = default;
Arg& operator=(Arg&&) = default;
};
bool operator==(const Arg&, const Arg&);
fruit::Component<X> getParentComponent(int, std::string, Arg);
fruit::Component<X> getComponent() {
return fruit::createComponent()
.install(getParentComponent, 5, std::string("Hello"), 15);
}
'''
expect_generic_compile_error(
'error: use of deleted function .std::hash<Arg>::hash\(\).'
+ '|error: call to implicitly-deleted default constructor of .std::hash<Arg>.'
+ '|error: invalid use of incomplete type .struct std::hash<Arg>.'
+ '|error: implicit instantiation of undefined template .std::(__1::)?hash<Arg>.'
+ '|error C2338: The C\+\+ Standard doesn.t provide a hash for this type.'
+ '|error C2064: term does not evaluate to a function taking 1 arguments',
COMMON_DEFINITIONS,
source)
@pytest.mark.parametrize('XAnnot', [
'X',
'fruit::Annotated<Annotation1, X>',
])
def test_install_component_functions_deduped(XAnnot):
source = '''
struct X {};
X x;
fruit::Component<> getComponent() {
return fruit::createComponent()
.addInstanceMultibinding<XAnnot, X>(x);
}
fruit::Component<> getComponent2() {
return fruit::createComponent()
.install(getComponent);
}
fruit::Component<> getComponent3() {
return fruit::createComponent()
.install(getComponent);
}
fruit::Component<> getComponent4() {
return fruit::createComponent()
.install(getComponent2)
.install(getComponent3);
}
int main() {
fruit::Injector<> injector(getComponent4);
// We test multibindings because the effect on other bindings is not user-visible (that only affects
// performance).
std::vector<X*> multibindings = injector.getMultibindings<XAnnot>();
Assert(multibindings.size() == 1);
Assert(multibindings[0] == &x);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('XAnnot', [
'X',
'fruit::Annotated<Annotation1, X>',
])
def test_install_component_functions_deduped_across_normalized_component(XAnnot):
source = '''
struct X {};
X x;
fruit::Component<> getComponent() {
return fruit::createComponent()
.addInstanceMultibinding<XAnnot, X>(x);
}
fruit::Component<> getComponent2() {
return fruit::createComponent()
.install(getComponent);
}
fruit::Component<> getComponent3() {
return fruit::createComponent()
.install(getComponent);
}
int main() {
fruit::NormalizedComponent<> normalizedComponent(getComponent2);
fruit::Injector<> injector(normalizedComponent, getComponent3);
// We test multibindings because the effect on other bindings is not user-visible (that only affects
// performance).
std::vector<X*> multibindings = injector.getMultibindings<XAnnot>();
Assert(multibindings.size() == 1);
Assert(multibindings[0] == &x);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('XAnnot', [
'X',
'fruit::Annotated<Annotation1, X>',
])
def test_install_component_functions_with_args_deduped(XAnnot):
source = '''
struct X {};
X x;
fruit::Component<> getComponent(int) {
return fruit::createComponent()
.addInstanceMultibinding<XAnnot, X>(x);
}
fruit::Component<> getComponent2() {
return fruit::createComponent()
.install(getComponent, 1);
}
fruit::Component<> getComponent3() {
return fruit::createComponent()
.install(getComponent, 1);
}
fruit::Component<> getComponent4() {
return fruit::createComponent()
.install(getComponent2)
.install(getComponent3);
}
int main() {
fruit::Injector<> injector(getComponent4);
// We test multibindings because the effect on other bindings is not user-visible (that only affects
// performance).
std::vector<X*> multibindings = injector.getMultibindings<XAnnot>();
Assert(multibindings.size() == 1);
Assert(multibindings[0] == &x);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('XAnnot', [
'X',
'fruit::Annotated<Annotation1, X>',
])
def test_install_component_functions_different_args_not_deduped(XAnnot):
source = '''
struct X {};
X x;
fruit::Component<> getComponent(int) {
return fruit::createComponent()
.addInstanceMultibinding<XAnnot, X>(x);
}
fruit::Component<> getComponent2() {
return fruit::createComponent()
.install(getComponent, 1);
}
fruit::Component<> getComponent3() {
return fruit::createComponent()
.install(getComponent, 2);
}
fruit::Component<> getComponent4() {
return fruit::createComponent()
.install(getComponent2)
.install(getComponent3);
}
int main() {
fruit::Injector<> injector(getComponent4);
// We test multibindings because the effect on other bindings is not user-visible (it only affects
// performance).
std::vector<X*> multibindings = injector.getMultibindings<XAnnot>();
Assert(multibindings.size() == 2);
Assert(multibindings[0] == &x);
Assert(multibindings[1] == &x);
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
def test_install_component_functions_loop():
source = '''
struct X {};
struct Y {};
struct Z {};
// X -> Y -> Z -> Y
fruit::Component<X> getXComponent();
fruit::Component<Y> getYComponent();
fruit::Component<Z> getZComponent();
fruit::Component<X> getXComponent() {
return fruit::createComponent()
.registerConstructor<X()>()
.install(getYComponent);
}
fruit::Component<Y> getYComponent() {
return fruit::createComponent()
.registerConstructor<Y()>()
.install(getZComponent);
}
fruit::Component<Z> getZComponent() {
return fruit::createComponent()
.registerConstructor<Z()>()
.install(getYComponent);
}
int main() {
fruit::Injector<X> injector(getXComponent);
(void)injector;
}
'''
expect_runtime_error(
'Component installation trace \(from top-level to the most deeply-nested\):\n'
+ '(class )?fruit::Component<(struct )?X> ?\((__cdecl)?\*\)\((void)?\)\n'
+ '<-- The loop starts here\n'
+ '(class )?fruit::Component<(struct )?Y> ?\((__cdecl)?\*\)\((void)?\)\n'
+ '(class )?fruit::Component<(struct )?Z> ?\((__cdecl)?\*\)\((void)?\)\n'
+ '(class )?fruit::Component<(struct )?Y> ?\((__cdecl)?\*\)\((void)?\)\n',
COMMON_DEFINITIONS,
source,
locals())
def test_install_component_functions_different_arguments_loop_not_reported():
source = '''
struct X {};
struct Y {};
struct Z {};
// X -> Y(1) -> Z -> Y(2)
fruit::Component<X> getXComponent();
fruit::Component<Y> getYComponent(int);
fruit::Component<Z> getZComponent();
fruit::Component<X> getXComponent() {
return fruit::createComponent()
.registerConstructor<X()>()
.install(getYComponent, 1);
}
fruit::Component<Y> getYComponent(int n) {
if (n == 1) {
return fruit::createComponent()
.registerConstructor<Y()>()
.install(getZComponent);
} else {
return fruit::createComponent()
.registerConstructor<Y()>();
}
}
fruit::Component<Z> getZComponent() {
return fruit::createComponent()
.registerConstructor<Z()>()
.install(getYComponent, 2);
}
int main() {
fruit::Injector<X> injector(getXComponent);
injector.get<X>();
}
'''
expect_success(
COMMON_DEFINITIONS,
source,
locals())
if __name__ == '__main__':
main(__file__)
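A recurring pattern in this file is joining per-compiler regex branches with '|' so a single expectation matches the GCC, Clang, and MSVC wordings of the same diagnostic. A minimal sketch of that matching style (the helper names here are illustrative, not the fruit_test_common API):
import re

# Each alternative targets one compiler's phrasing of the same error.
pattern = ('error: use of deleted function'       # GCC
           '|error: call to deleted constructor'  # Clang
           '|error C2280')                        # MSVC

stderr = "foo.cpp:3:5: error: call to deleted constructor of 'Arg'"
assert re.search(pattern, stderr) is not None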
|
[
"poletti.marco@gmail.com"
] |
poletti.marco@gmail.com
|
e82872e0749461c4e0d26b538971dd6af10c902e
|
6ec710d4577e1e06b3c57fd49ac88e9bce1b82ed
|
/venv/bin/pygubu-designer
|
13d69bca40150a647a9232994f85fdecdc5e4aff
|
[] |
no_license
|
TandyTnd/electricidad
|
a35cbf1b0618e763d1355c8adfdf7654f44f5420
|
7344ef1e2ab2c91f8c3149f4e124ea884d290ac6
|
refs/heads/master
| 2023-04-07T16:27:22.877466
| 2020-01-10T20:42:38
| 2020-01-10T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
#!/home/tndrdesk/PycharmProjects/electricidad/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pygubudesigner.main import start_pygubu
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(start_pygubu())
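The re.sub call strips the setuptools console-script suffix so argv[0] carries the bare command name; a quick demonstration of the pattern's behaviour:
import re

for argv0 in ('pygubu-designer-script.py', 'pygubu-designer.exe', 'pygubu-designer'):
    print(re.sub(r'(-script\.pyw?|\.exe)?$', '', argv0))
# all three print: pygubu-designer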
|
[
"A01654827@itesm.mx"
] |
A01654827@itesm.mx
|
|
da57267020f99209add791345266dff1d04c191c
|
39b3c5b822244c970be17da4bba52e394bc3f258
|
/TIDALDL-PY/tidal_dl/download.py
|
0b5ad83ea871d1f2b51d7231654eb4568654169b
|
[
"Apache-2.0"
] |
permissive
|
ModProg/Tidal-Media-Downloader
|
897c6ec725818050c43e70b38143847afd174c48
|
50b08a3f68426d765a943049fc921ef178083929
|
refs/heads/master
| 2022-11-19T17:01:32.866966
| 2020-07-03T11:32:57
| 2020-07-03T11:32:57
| 276,871,019
| 0
| 0
|
Apache-2.0
| 2020-07-03T10:18:30
| 2020-07-03T10:18:29
| null |
UTF-8
|
Python
| false
| false
| 32,358
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : download.py
@Time : 2019/02/27
@Author : Yaron Huang
@Version : 1.0
@Contact : yaronhuang@qq.com
@Desc : Download Function
'''
import sys
import os
import codecs
from datetime import datetime
from aigpy import pathHelper
# from tidal_dl import netHelper
from aigpy import netHelper
from aigpy import fileHelper
from aigpy import cmdHelper
from aigpy import systemHelper
# from tidal_dl.ffmpegHelper import FFmpegTool
from aigpy.ffmpegHelper import FFmpegTool
from aigpy.cmdHelper import myinput, myinputInt
from aigpy.threadHelper import ThreadTool
from aigpy.progressHelper import ProgressTool
from tidal_dl.check import CheckTool
from tidal_dl.tidal import TidalTool
from tidal_dl.tidal import TidalConfig
from tidal_dl.tidal import TidalAccount
from tidal_dl.decryption import decrypt_security_token
from tidal_dl.decryption import decrypt_file
from tidal_dl.printhelper import printChoice, printErr, printSUCCESS, printWarning, printInfo
class Download(object):
def __init__(self, threadNum=3):
self.config = TidalConfig()
self.tool = TidalTool()
self.thread = ThreadTool(int(threadNum))
self.ffmpeg = FFmpegTool(mergerTimeout=45)
self.progress = ProgressTool(100)
self.check = CheckTool()
self.showpro = False
if self.config.showprogress == 'True':
self.showpro = True
pathHelper.mkdirs(self.config.outputdir + "/Album/")
pathHelper.mkdirs(self.config.outputdir + "/Playlist/")
pathHelper.mkdirs(self.config.outputdir + "/Video/")
pathHelper.mkdirs(self.config.outputdir + "/Favorite/")
    def __isNeedDownload(self, path, url):
        # Re-download only when the local copy is missing or smaller than the
        # remote file (a cheap skip/resume heuristic based on file sizes).
        curSize = fileHelper.getFileSize(path)
        if curSize <= 0:
            return True
        netSize = netHelper.getFileSize(url)
        if curSize >= netSize:
            return False
        return True
    # download track thread
def __thradfunc_dl(self, paraList):
count = 1
printRet = True
pstr = paraList['title'] + "(Download Err!)"
redownload = True
needDl = True
bIsSuccess = False
albumInfo = None
index = None
coverpath = None
err = None
ignoreCertificate = False
if 'redownload' in paraList:
redownload = paraList['redownload']
if 'retry' in paraList:
count = count + paraList['retry']
if 'show' in paraList:
printRet = paraList['show']
if 'album' in paraList:
albumInfo = paraList['album']
if 'index' in paraList:
index = paraList['index']
if 'coverpath' in paraList:
coverpath = paraList['coverpath']
if redownload is False:
needDl = self.__isNeedDownload(paraList['path'], paraList['url'])
# DEBUG
# self.tool.setTrackMetadata(paraList['trackinfo'], paraList['path'], albumInfo, index, coverpath)
showprogress = False
if int(self.config.threadnum) <= 1 and self.showpro:
showprogress = True
Contributors = self.tool.getTrackContributors(paraList['trackinfo']['id'])
if needDl:
try:
while count > 0:
count = count - 1
check, err = netHelper.downloadFileRetErr(paraList['url'], paraList['path']+'.part', showprogress=showprogress, stimeout=20, ignoreCertificate=ignoreCertificate)
if check is True:
if paraList['key'] == '':
# unencrypted -> just move into place
os.replace(paraList['path']+'.part', paraList['path'])
break
else:
# encrypted -> decrypt and remove encrypted file
key, nonce = decrypt_security_token(paraList['key'])
decrypt_file(paraList['path']+'.part', paraList['path'], key, nonce)
os.remove(paraList['path']+'.part')
break
else:
ignoreCertificate = True
if check:
bIsSuccess = True
if self.tool.isNeedCovertToM4a(paraList['path']):
if paraList['codec'] == 'ac4':
printInfo(14, 'Skip convert to m4a(AC4-Codec).')
elif paraList['codec'] == 'mha1':
printInfo(14, 'Skip convert to m4a(MHA1-Codec).')
else:
paraList['path'] = self.tool.covertMp4toM4a(paraList['path'])
self.tool.setTrackMetadata(paraList['trackinfo'], paraList['path'], albumInfo, index, coverpath, Contributors)
pstr = paraList['title']
except Exception as e:
printErr(14, str(e) + " while downloading " + paraList['url'])
else:
pstr = paraList['title']
bIsSuccess = True
if printRet:
if(bIsSuccess):
printSUCCESS(14, pstr)
else:
if err is None:
errmsg = "Unknow!" + paraList['url']
else:
errmsg = str(err) + '!' + paraList['url']
printErr(14, pstr + ' ' + errmsg)
return
    # create album output dir
    def __creatAlbumDir(self, albumInfo, quality='LOW'):
        # create output dir
title = pathHelper.replaceLimitChar(albumInfo['title'], '-')
author = pathHelper.replaceLimitChar(albumInfo['artist']['name'], '-')
# add year
if self.config.addyear != 'No':
if self.config.addyear == 'Before':
title = '[' + str(datetime.strptime(albumInfo['releaseDate'], '%Y-%m-%d').year) + '] '+title
elif self.config.addyear == 'After':
title = title+' [' + str(datetime.strptime(albumInfo['releaseDate'], '%Y-%m-%d').year) + ']'
else:
title = title
# add albumid labels
if self.config.addAlbumidbeforefolder == 'True':
title = '[' + str(albumInfo['id']) + '] ' + title
# add quality[M] labels and explicit[E] labels
flag = ''
if 'audioQuality' in albumInfo and albumInfo['audioQuality'] == 'HI_RES' and quality == 'HI_RES':
flag = 'M'
if 'explicit' in albumInfo and albumInfo['explicit']:
flag += 'E'
if flag != '':
title = '[' + flag + '] '+ title
targetDir = self.config.outputdir + "/Album/" + author + '/' + title
targetDir = os.path.abspath(targetDir)
pathHelper.mkdirs(targetDir)
        # create volumes dir
count = 1
numOfVolumes = int(albumInfo['numberOfVolumes'])
if numOfVolumes > 1:
while count < numOfVolumes + 1:
volumeDir = targetDir + "/CD" + str(count)
pathHelper.mkdirs(volumeDir)
count = count + 1
return targetDir
def _getSongExtension(self, downloadUrl):
if downloadUrl.find('.flac?') != -1:
return '.flac'
if downloadUrl.find('.m4a?') != -1:
return '.m4a'
if downloadUrl.find('.mp4?') != -1:
return '.mp4'
return '.m4a'
def _IsExplicitString(self, IsExplicit):
String = None
if IsExplicit:
String = 'Explicit'
return String
def __getAlbumSongSavePath(self, targetDir, albumInfo, item, extension):
if extension is None:
extension = ".m4a"
seq = self.tool.getIndexStr(item['trackNumber'], albumInfo['numberOfTracks'])
if self.config.addhyphen == 'True':
seq += '- '
if self.config.artistbeforetitle == 'True':
seq += pathHelper.replaceLimitChar(albumInfo['artist']['name'], '-') + ' - '
name = seq + pathHelper.replaceLimitChar(item['title'], '-')
fileExplicit = self._IsExplicitString(item['explicit'])
# if self.config.addhyphen == 'True':
# name = seq + '- ' + pathHelper.replaceLimitChar(item['title'], '-')
if self.config.addexplicit == "True" and fileExplicit is not None:
name = name + " - " + fileExplicit
seq = item['volumeNumber']
path = targetDir + "/"
if int(albumInfo['numberOfVolumes']) > 1:
path += 'CD' + str(seq) + "/"
maxlen = 255
if systemHelper.isLinux():
maxlen = 4090
# truncate filename when it's longer than system's
# filename limit which is 255
len_sum = len(path) + len(name) + len(extension)
if len_sum > maxlen:
diff = maxlen - len_sum
name = name[: len(name) + diff]
filePath = path + name + extension
return filePath
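    # A standalone sketch of the truncation rule above (hypothetical helper,
    # shown as comments only):
    #     overflow = len(path) + len(name) + len(extension) - maxlen
    #     if overflow > 0:
    #         name = name[:len(name) - overflow]
    #     return path + name + extension
    # e.g. path='/music/', name='x' * 300, extension='.m4a' clamps the full
    # file path to exactly 255 characters.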
def __getExistFiles(self, paths):
ret = []
for item in paths:
if os.path.isfile(item):
ret.append(item)
return ret
    def __getVideoResolutionIndex(self, reslist):
        # Entries look like "1280x720,..."; collect the heights and return the
        # index of the first stream whose height fits the configured maximum,
        # falling back to the lowest resolution when none fits.
        array = []
        for item in reslist:
            subs = item.split('x')
            subs = subs[1].split(',')
            array.append(int(subs[0]))
        cmp = int(self.config.resolution)
        ret = 0
        for item in array:
            if cmp >= item:
                return ret
            ret += 1
        return len(array) - 1
def downloadAlbum(self, album_id=None, redl_flag=None):
while_count = 9999
while while_count > 0:
while_count -= 1
if album_id is not None:
while_count = 0
sID = album_id
else:
print("----------------ALBUM------------------")
sID = printChoice("Enter AlbumID(Enter '0' go back):", True, 0)
if sID == 0:
return
aAlbumInfo = self.tool.getAlbum(sID)
if self.tool.errmsg != "":
printErr(0, "Get AlbumInfo Err! " + self.tool.errmsg)
continue
print("[Title] %s" % (aAlbumInfo['title']))
print("[SongNum] %s\n" % (aAlbumInfo['numberOfTracks']))
# Get Tracks
aAlbumTracks = self.tool.getAlbumTracks(sID)
if self.tool.errmsg != "":
printErr(0, "Get AlbumTracks Err!" + self.tool.errmsg)
continue
aAlbumVideos = self.tool.getAlbumVideos(sID)
            # Create OutputDir
targetDir = self.__creatAlbumDir(aAlbumInfo, self.config.quality)
# write msg
string = self.tool.convertAlbumInfoToString(aAlbumInfo, aAlbumTracks)
with codecs.open(targetDir + "/AlbumInfo.txt", 'w', 'utf-8') as fd:
fd.write(string)
# download cover
coverPath = targetDir + '/' + pathHelper.replaceLimitChar(aAlbumInfo['title'], '-') + '.jpg'
if aAlbumInfo['cover'] is not None:
coverUrl = self.tool.getAlbumArtworkUrl(aAlbumInfo['cover'])
netHelper.downloadFile(coverUrl, coverPath)
# check exist files
redownload = True
if redl_flag is None:
existFiles = pathHelper.getDirFiles(targetDir)
for item in existFiles:
if '.txt' in item:
continue
if '.jpg' in item:
continue
check = printChoice("Some tracks already exist. Redownload?(y/n):")
if not cmdHelper.isInputYes(check):
redownload = False
break
else:
redownload = redl_flag
# download album tracks
for item in aAlbumTracks['items']:
streamInfo = self.tool.getStreamUrl(str(item['id']), self.config.quality)
if self.tool.errmsg != "" or not streamInfo:
printErr(14, item['title'] + "(Get Stream Url Err!" + self.tool.errmsg + ")")
continue
fileType = self._getSongExtension(streamInfo['url'])
filePath = self.__getAlbumSongSavePath(targetDir, aAlbumInfo, item, fileType)
paraList = {'album': aAlbumInfo,
'redownload': redownload,
'title': item['title'],
'trackinfo': item,
'url': streamInfo['url'],
'path': filePath,
'retry': 3,
'key': streamInfo['encryptionKey'],
'coverpath': coverPath,
'codec': streamInfo['codec']}
self.thread.start(self.__thradfunc_dl, paraList)
# wait all download thread
self.thread.waitAll()
self.tool.removeTmpFile(targetDir)
# remove cover
if self.config.savephoto != 'True':
pathHelper.remove(coverPath)
# download video
for item in aAlbumVideos:
item = item['item']
filePath = targetDir + '/' + pathHelper.replaceLimitChar(item['title'], '-') + ".mp4"
filePath = os.path.abspath(filePath)
if os.access(filePath, 0):
os.remove(filePath)
try:
resolutionList, urlList = self.tool.getVideoResolutionList(item['id'])
selectIndex = self.__getVideoResolutionIndex(resolutionList)
if self.ffmpeg.mergerByM3u8_Multithreading2(urlList[int(selectIndex)], filePath, showprogress=self.showpro):
printSUCCESS(14, item['title'])
else:
printErr(14, item['title'])
except:
printErr(14, item['title'])
# return
return
def downloadArtistAlbum(self, includeSingles=True, artistID=None):
while True:
print("-------------ARTIST ALBUM--------------")
if artistID is not None:
sID = artistID
else:
sID = printChoice("Enter Artist ID(Enter '0' go back):", True, 0)
if sID == 0:
return
array = self.tool.getArtistAlbum(sID, includeSingles)
if self.tool.errmsg != "":
printErr(0, "Get AlbumList Err! " + self.tool.errmsg)
continue
redownload = True
if artistID is None:
check = printChoice("Skip downloaded files?(y/n):")
if cmdHelper.isInputYes(check):
redownload = False
for index, item in enumerate(array):
print("----Album[{0}/{1}]----".format(index+1, len(array)))
self.downloadAlbum(item['id'], redownload)
if artistID is not None:
# Break out of the function if we are only downloading one artist's albums
return
def downloadTrack(self, track_id=None):
while_count = 9999
while while_count > 0:
while_count -= 1
if track_id is not None:
while_count = 0
sID = track_id
else:
print("----------------TRACK------------------")
sID = printChoice("Enter TrackID(Enter '0' go back):", True, 0)
if sID == 0:
return
aTrackInfo = self.tool.getTrack(sID)
if self.tool.errmsg != "":
printErr(0, "Get TrackInfo Err! " + self.tool.errmsg)
return
aAlbumInfo = self.tool.getAlbum(aTrackInfo['album']['id'])
if self.tool.errmsg != "":
printErr(0, "Get TrackInfo Err! " + self.tool.errmsg)
return
# t = self.tool.getTrackContributors(sID)
print("[AlbumTitle ] %s" % (aAlbumInfo['title']))
print("[TrackTitle ] %s" % (aTrackInfo['title']))
print("[Duration ] %s" % (aTrackInfo['duration']))
print("[TrackNumber] %s" % (aTrackInfo['trackNumber']))
print("[Explicit ] %s" % (aAlbumInfo['explicit']))
# print("[Version ] %s\n" % (aTrackInfo['version']))
            # Create OutputDir
targetDir = self.__creatAlbumDir(aAlbumInfo, self.config.quality)
# download cover
coverPath = targetDir + '/' + pathHelper.replaceLimitChar(aAlbumInfo['title'], '-') + '.jpg'
if aAlbumInfo['cover'] is not None:
coverUrl = self.tool.getAlbumArtworkUrl(aAlbumInfo['cover'])
netHelper.downloadFile(coverUrl, coverPath)
# download
streamInfo = self.tool.getStreamUrl(sID, self.config.quality)
if self.tool.errmsg != "" or not streamInfo:
printErr(14, aTrackInfo['title'] + "(Get Stream Url Err!" + self.tool.errmsg + ")")
continue
print("[Codec ] %s" % (streamInfo['codec']))
fileType = self._getSongExtension(streamInfo['url'])
filePath = self.__getAlbumSongSavePath(targetDir, aAlbumInfo, aTrackInfo, fileType)
paraList = {'album': aAlbumInfo,
'title': aTrackInfo['title'],
'trackinfo': aTrackInfo,
'url': streamInfo['url'],
'path': filePath,
'retry': 3,
'key': streamInfo['encryptionKey'],
'coverpath': coverPath,
'codec': streamInfo['codec']}
self.thread.start(self.__thradfunc_dl, paraList)
# wait all download thread
self.thread.waitAll()
self.tool.removeTmpFile(targetDir)
# remove cover
if self.config.savephoto != 'True':
pathHelper.remove(coverPath)
return
def downloadVideo(self, video_id=None):
flag = True
while flag:
targetDir = self.config.outputdir + "/Video/"
if video_id is None:
print("----------------VIDEO------------------")
sID = printChoice("Enter VideoID(Enter '0' go back):", True, 0)
if sID == 0:
return
else:
flag = False
sID = video_id
aVideoInfo = self.tool.getVideo(sID)
if self.tool.errmsg != "":
printErr(0, "Get VideoInfo Err! " + self.tool.errmsg)
continue
print("[Title ] %s" % (aVideoInfo['title']))
print("[Duration ] %s" % (aVideoInfo['duration']))
print("[TrackNumber] %s" % (aVideoInfo['trackNumber']))
print("[Type ] %s\n" % (aVideoInfo['type']))
# get resolution
index = 0
resolutionList, urlList = self.tool.getVideoResolutionList(sID)
if self.tool.errmsg != "":
printErr(14, self.tool.errmsg)
continue
index = self.__getVideoResolutionIndex(resolutionList)
path = targetDir + "/" + pathHelper.replaceLimitChar(aVideoInfo['title'], '-') + ".mp4"
path = os.path.abspath(path)
if os.access(path, 0):
os.remove(path)
if self.ffmpeg.mergerByM3u8_Multithreading2(urlList[int(index)], path, True):
printSUCCESS(14, aVideoInfo['title'])
else:
printErr(14, aVideoInfo['title'])
return
def downloadPlaylist(self, playlist_id=None):
while True:
targetDir = self.config.outputdir + "/Playlist/"
if playlist_id is None:
print("--------------PLAYLIST-----------------")
sID = printChoice("Enter PlayListID(Enter '0' go back):")
if sID == '0':
return
else:
sID = playlist_id
aPlaylistInfo, aItemInfo = self.tool.getPlaylist(sID)
if self.tool.errmsg != "":
printErr(0, "Get PlaylistInfo Err! " + self.tool.errmsg)
return
print("[Title] %s" % (aPlaylistInfo['title']))
print("[Type] %s" % (aPlaylistInfo['type']))
print("[NumberOfTracks] %s" % (aPlaylistInfo['numberOfTracks']))
print("[NumberOfVideos] %s" % (aPlaylistInfo['numberOfVideos']))
print("[Duration] %s\n" % (aPlaylistInfo['duration']))
# Create output dir
targetDir = targetDir + pathHelper.replaceLimitChar(aPlaylistInfo['title'], '-')
targetDir = os.path.abspath(targetDir).strip()
pathHelper.mkdirs(targetDir)
# write msg
string = self.tool.convertPlaylistInfoToString(aPlaylistInfo, aItemInfo)
with codecs.open(targetDir + "/PlaylistInfo.txt", 'w', 'utf-8') as fd:
fd.write(string)
# download cover
coverPath = targetDir + '/' + pathHelper.replaceLimitChar(aPlaylistInfo['title'], '-') + '.jpg'
coverUrl = self.tool.getPlaylistArtworkUrl(aPlaylistInfo['uuid'])
check = netHelper.downloadFile(coverUrl, coverPath)
# download track
bBreakFlag = False
bFirstTime = True
errIndex = []
index = 0
while bBreakFlag is False:
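# Retry loop: the first pass downloads every track; later passes only re-queue the indices whose files failed the post-download check.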
self.check.clear()
index = 0
tmpcoverpath = []
for item in aItemInfo:
type = item['type']
item = item['item']
if type != 'track':
continue
index = index + 1
if bFirstTime is False:
if self.check.isInErr(index - 1, errIndex) == False:
continue
streamInfo = self.tool.getStreamUrl(str(item['id']), self.config.quality)
# streamInfo = self.tool.getStreamUrl(str(item['id']), 'DOLBY_ATMOS')
if self.tool.errmsg != "" or not streamInfo:
printErr(14, item['title'] + "(Get Stream Url Err!!" + self.tool.errmsg + ")")
continue
aAlbumInfo = self.tool.getAlbum(item['album']['id'])
fileType = self._getSongExtension(streamInfo['url'])
# change targetDir
targetDir2 = targetDir
if self.config.plfile2arfolder == "True":
targetDir2 = self.__creatAlbumDir(aAlbumInfo, self.config.quality)
filePath = self.__getAlbumSongSavePath(targetDir2, aAlbumInfo, item, fileType)
paraList = {'album': aAlbumInfo, 'title': item['title'], 'trackinfo': item,
'url': streamInfo['url'], 'path': filePath, 'retry': 3, 'key': streamInfo['encryptionKey'], 'codec': streamInfo['codec']}
else:
seq = self.tool.getIndexStr(index, len(aItemInfo))
filePath = targetDir2 + '/' + seq + " "+ pathHelper.replaceLimitChar(item['title'], '-') + fileType
paraList = {'album': aAlbumInfo, 'index': index, 'title': item['title'], 'trackinfo': item,
'url': streamInfo['url'], 'path': filePath, 'retry': 3, 'key': streamInfo['encryptionKey'], 'codec': streamInfo['codec']}
try:
coverPath = targetDir2 + '/' + pathHelper.replaceLimitChar(aAlbumInfo['title'], '-') + '.jpg'
coverUrl = self.tool.getAlbumArtworkUrl(aAlbumInfo['cover'])
netHelper.downloadFile(coverUrl, coverPath)
paraList['coverpath'] = coverPath
tmpcoverpath.append(coverPath)
except:
cmdHelper.myprint("Could not download artwork for '{}'".format(
item['title']), cmdHelper.TextColor.Red, None)
if self.config.onlym4a == "True":
self.check.addPath(filePath.replace(".mp4", ".m4a"))
else:
self.check.addPath(filePath)
self.thread.start(self.__thradfunc_dl, paraList)
self.thread.waitAll()
self.tool.removeTmpFile(targetDir)
# remove cover
if self.config.savephoto != 'True':
for item in tmpcoverpath:
pathHelper.remove(item)
bBreakFlag = True
bFirstTime = False
# check
isErr, errIndex = self.check.checkPaths()
if isErr:
check = printChoice("[Err]\t\t" + str(len(errIndex)) + " Tracks Download Failed.Try Again?(y/n):")
if check == 'y' or check == 'Y':
bBreakFlag = False
# download video
for item in aItemInfo:
type = item['type']
item = item['item']
if type != 'video':
continue
filePath = targetDir + '/' + pathHelper.replaceLimitChar(item['title'], '-') + ".mp4"
filePath = os.path.abspath(filePath)
if os.access(filePath, 0):
os.remove(filePath)
videoID = item['id']
resolutionList, urlList = self.tool.getVideoResolutionList(videoID)
if urlList is None:
printErr(14, item['title'] + '(' + self.tool.errmsg + ')')
else:
selectIndex = self.__getVideoResolutionIndex(resolutionList)
if self.ffmpeg.mergerByM3u8_Multithreading2(urlList[int(selectIndex)], filePath, showprogress=self.showpro):
printSUCCESS(14, item['title'])
else:
printErr(14, item['title'] + "(Download Or Merger Err!)")
if playlist_id is not None:
return
return
def downloadFavorite(self):
targetDir = self.config.outputdir + "/Favorite/"
pathHelper.mkdirs(targetDir)
trackList, videoList = self.tool.getFavorite(self.config.userid)
if self.tool.errmsg != "":
printErr(0, "Get FavoriteList Err! " + self.tool.errmsg)
return
print("[NumberOfTracks] %s" % (len(trackList)))
print("[NumberOfVideos] %s" % (len(videoList)))
# download track
for item in trackList:
item = item['item']
streamInfo = self.tool.getStreamUrl(str(item['id']), self.config.quality)
if self.tool.errmsg != "" or not streamInfo:
printErr(14, item['title'] + "(Get Stream Url Err!!" + self.tool.errmsg + ")")
continue
fileType = self._getSongExtension(streamInfo['url'])
filePath = targetDir + '/' + pathHelper.replaceLimitChar(item['title'], '-') + fileType
aAlbumInfo = self.tool.getAlbum(item['album']['id'])
paraList = {'album': aAlbumInfo, 'title': item['title'], 'trackinfo': item,
'url': streamInfo['url'], 'path': filePath, 'retry': 3, 'key': streamInfo['encryptionKey'], 'codec': streamInfo['codec']}
self.thread.start(self.__thradfunc_dl, paraList)
self.thread.waitAll()
# download video
for item in videoList:
item = item['item']
filePath = targetDir + '/' + pathHelper.replaceLimitChar(item['title'], '-') + ".mp4"
filePath = os.path.abspath(filePath)
if os.access(filePath, 0):
os.remove(filePath)
resolutionList, urlList = self.tool.getVideoResolutionList(item['id'])
selectIndex = self.__getVideoResolutionIndex(resolutionList)
if self.ffmpeg.mergerByM3u8_Multithreading2(urlList[int(selectIndex)], filePath, showprogress=self.showpro):
printSUCCESS(14, item['title'])
else:
printErr(14, item['title'])
return
def downloadUrl(self, link):
stype, sid = self.tool.parseLink(link)
if stype is None or sid is None:
return
if stype == "album":
print("----------------ALBUM------------------")
self.downloadAlbum(sid)
elif stype == "track":
print("----------------TRACK------------------")
self.downloadTrack(sid)
elif stype == "video":
print("----------------VIDEO------------------")
self.downloadVideo(sid)
elif stype == "playlist":
print("--------------PLAYLIST-----------------")
self.downloadPlaylist(sid)
elif stype == "artist":
print("----------------ARTIST-----------------")
self.downloadArtistAlbum(self.config.includesingle == "True", sid)
def downloadByFile(self, path):
if not os.path.exists(path):
return
arr = self.tool.parseFile(path)
print("----------------FILE------------------")
print("[Number of albums] %s" % (len(arr['album'])))
print("[Number of artists] %s" % (len(arr['artist'])))
print("[Number of tracks] %s" % (len(arr['track'])))
print("[Number of videos] %s" % (len(arr['video'])))
print("[Number of URLs] %s" % (len(arr['url'])))
if len(arr['album']) > 0:
redownload = True
check = printChoice("Skip downloaded files?(y/n):")
if cmdHelper.isInputYes(check):  # match downloadArtistAlbum: answering yes skips already-downloaded files
redownload = False
for index, item in enumerate(arr['album']):
print("----Album[{0}/{1}]----".format(index+1, len(arr['album'])))
print("[ID] %s" % (item))
self.downloadAlbum(item, redownload)
for index, item in enumerate(arr['artist']):
print(index)
print("----Artist[{0}/{1}]----".format(index+1, len(arr['artist'])))
print("[ID] %s" % (item))
includeSingles = self.config.includesingle == "True"
self.downloadArtistAlbum(includeSingles, item)
for index, item in enumerate(arr['track']):
print("----Track[{0}/{1}]----".format(index+1, len(arr['track'])))
print("[ID] %s" % (item))
self.downloadTrack(item)
for index, item in enumerate(arr['video']):
print("----Video[{0}/{1}]----".format(index+1, len(arr['video'])))
print("[ID] %s" % (item))
self.downloadVideo(item)
for index, item in enumerate(arr['url']):
print("----Url[{0}/{1}]----".format(index+1, len(arr['url'])))
print("[link] %s" % (item))
stype, sid = self.tool.parseLink(item)
if stype is None or sid is None:
printErr(14, "Link can't be parsed!")
continue
print("[ID] %s" % (sid))
if stype == "album":
print("[Type] %s" % ("album"))
self.downloadAlbum(sid)
if stype == "track":
print("[Type] %s" % ("track"))
self.downloadTrack(sid)
if stype == "video":
print("[Type] %s" % ("video"))
self.downloadVideo(sid)
|
[
"392309221@qq.com"
] |
392309221@qq.com
|
dee56192c665947a5c40981b9f530c8e07040c27
|
bef915f5c24958737f9bbecb5ed51b485bb86384
|
/pddlstream/algorithms/satisfaction2.py
|
6239d323c4b3db1b1423818b6ba4985be04d0375
|
[
"MIT"
] |
permissive
|
aiyi2099/pddlstream
|
c7757764d066e95fc533e9f7ce318bfbe935f6c5
|
2efd66351f9f2ae875d3b3629a49d9c22fa54896
|
refs/heads/master
| 2020-04-25T21:39:42.959640
| 2019-02-25T23:46:54
| 2019-02-25T23:46:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,272
|
py
|
from __future__ import print_function
from pddlstream.algorithms.algorithm import parse_stream_pddl, evaluations_from_init
from pddlstream.algorithms.common import SolutionStore
from pddlstream.algorithms.downward import make_domain, make_predicate, add_predicate, make_axiom
from pddlstream.algorithms.recover_optimizers import retrace_instantiation, combine_optimizers
from pddlstream.algorithms.reorder import reorder_stream_plan
from pddlstream.algorithms.scheduling.postprocess import reschedule_stream_plan
#from pddlstream.algorithms.skeleton import SkeletonQueue
from pddlstream.algorithms.skeleton2 import SkeletonQueue
from pddlstream.algorithms.scheduling.utils import partition_external_plan
from pddlstream.language.constants import is_parameter, get_length, partition_facts
from pddlstream.language.conversion import revert_solution, \
evaluation_from_fact, replace_expression, get_prefix, get_args
from pddlstream.language.object import Object, OptimisticObject
from pddlstream.language.optimizer import UNSATISFIABLE
from pddlstream.language.stream import Stream
from pddlstream.language.function import Function, Predicate
from pddlstream.language.statistics import write_stream_statistics, compute_plan_effort
from pddlstream.utils import INF, get_mapping, elapsed_time, str_from_object, safe_zip
from pddlstream.algorithms.reorder import get_partial_orders
from pddlstream.utils import get_connected_components, grow_component, adjacent_from_edges, incoming_from_edges
import time
BIND_ACTION = 'bindings'
def obj_from_existential_expression(parent): # obj_from_value_expression
return replace_expression(parent, lambda o: OptimisticObject
.from_opt(o, o) if is_parameter(o) else Object.from_value(o))
def create_domain(goal_facts):
domain = make_domain()
for fact in goal_facts: # TODO: consider removing this annoying check
name = get_prefix(fact)
parameters = ['?x{}'.format(i) for i in range(len(get_args(fact)))]
add_predicate(domain, make_predicate(name, parameters))
return domain
def plan_functions(functions, externals):
external_from_function = {}
for external in filter(lambda e: isinstance(e, Function), externals):
assert external.function not in external_from_function
external_from_function[external.function] = external
function_plan = set()
for term in functions:
if get_prefix(term) not in external_from_function:
raise ValueError('{} is not implemented'.format(get_prefix(term)))
external = external_from_function[get_prefix(term)]
instance = external.get_instance(get_args(term))
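# A function instance is assumed to yield exactly one optimistic result, hence the single-element unpack below.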
[result] = instance.next_optimistic()
function_plan.add(result)
print('Function plan:', str_from_object(function_plan))
return function_plan
def get_parameters(goal_facts):
return {o for f in goal_facts for o in get_args(f) if isinstance(o, OptimisticObject)}
def extract_streams(evaluations, externals, goal_facts):
streams = list(filter(lambda e: isinstance(e, Stream), externals))
free_parameters = get_parameters(goal_facts)
visited_facts = set()
stream_results = []
for fact in goal_facts:
# TODO: prune results that already exceed effort limit
retrace_instantiation(fact, streams, evaluations, free_parameters, visited_facts, stream_results)
print('Streams:', stream_results)
# TODO: express some of this pruning using effort (e.g. unlikely to sample bound value)
return stream_results
def get_optimistic_cost(function_plan):
return sum([0.] + [result.value for result in function_plan
if type(result.external) == Function])
def bindings_from_plan(plan_skeleton, action_plan):
if action_plan is None:
return None
bindings = {}
for (name1, args1), (name2, args2) in safe_zip(plan_skeleton, action_plan):
assert name1 == name2
parameter_names = [o.value for o in args1]
bindings.update(get_mapping(parameter_names, args2))
return bindings
##################################################
def create_disable_axiom(external_plan):
# TODO: express constraint mutexes upfront
stream_plan, _ = partition_external_plan(external_plan)
#print(stream_plan)
parameters = []
preconditions = [result.stream_fact for result in stream_plan]
derived = (UNSATISFIABLE,)
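# The axiom derives UNSATISFIABLE whenever all stream facts of this plan hold, forcing the planner to propose a structurally different skeleton next time.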
# TODO: add parameters in the event that the same skeleton can be blocked twice
return make_axiom(parameters, preconditions, derived)
def compute_failed_indices(skeleton):
failed_indices = set()
for binding in skeleton.root.post_order():
result = binding.next_result
if (result is not None) and result.instance.num_calls and (not result.instance.successes):
failed_indices.add(binding.stream_indices[0])
#assert not binding.children
return sorted(failed_indices)
def current_failed_cluster(binding):
failed_index = binding.stream_indices[0]
assert 1 <= binding.stream_attempts[0]
failed_result = binding.skeleton.stream_plan[failed_index]
successful_results = [result for i, result in enumerate(binding.skeleton.stream_plan)
if i not in binding.stream_indices]
stream_plan = successful_results + [failed_result]
partial_orders = get_partial_orders(stream_plan)
# All connected components
#return get_connected_components(stream_plan, partial_orders)
# Only the failed connected component
return [grow_component([failed_result], adjacent_from_edges(partial_orders))]
def current_failure_contributors(binding):
# Alternatively, find unsuccessful streams in cluster and add ancestors
failed_index = binding.stream_indices[0]
assert 1 <= binding.stream_attempts[0]
failed_result = binding.skeleton.stream_plan[failed_index]
failed_indices = compute_failed_indices(binding.skeleton) # Use last index?
partial_orders = get_partial_orders(binding.skeleton.stream_plan)
incoming = incoming_from_edges(partial_orders)
failed_ancestors = grow_component([failed_result], incoming)
for index in reversed(failed_indices):
if index == failed_index:
continue
result = binding.skeleton.stream_plan[index]
ancestors = grow_component([result], incoming)
if ancestors & failed_ancestors:
failed_ancestors.update(ancestors)
return [failed_ancestors]
def extract_disabled_clusters(queue):
clusters = set()
for skeleton in queue.skeletons:
# TODO: include costs within clustering?
# What is goal is to be below a cost threshold?
# In satisfaction, no need because costs are fixed
# Make stream_facts for externals to prevent use of the same ones
# This ordering is why it's better to put likely to fail first
# Branch on the different possible binding outcomes
# TODO: consider a nonlinear version of this that evaluates out of order
# Need extra sampling effort to identify infeasible subsets
# Treat unevaluated optimistically, as in always satisfiable
# Need to keep streams with outputs to connect if downstream is infeasible
# TODO: prune streams that always have at least one success
# TODO: CSP identification of irreducible unsatisfiable subsets
# TODO: take into consideration if a stream is enumerated to mark as a hard failure
# Decompose down optimizers
#cluster_plans = [skeleton.stream_plan]
partial_orders = get_partial_orders(skeleton.stream_plan)
cluster_plans = get_connected_components(skeleton.stream_plan, partial_orders)
binding = skeleton.best_binding
if not binding.is_bound():
# TODO: block if cost sensitive to possibly get cheaper solutions
#cluster_plans = current_failed_cluster(binding)
cluster_plans = current_failure_contributors(binding)
for cluster_plan in cluster_plans:
clusters.add(frozenset(cluster_plan))
return clusters
def are_dominated(clusters1, clusters2):
return all(any(c1 <= c2 for c2 in clusters2) for c1 in clusters1)
##################################################
def constraint_satisfaction(stream_pddl, stream_map, init, terms, stream_info={},
costs=True, max_cost=INF, success_cost=INF, max_time=INF, max_effort=INF,
max_skeletons=INF, search_sample_ratio=1, verbose=True, **search_args):
# Approaches
# 1) Existential quantification of bindings in goal conditions
# 2) Backtrack useful streams and then schedule. Create arbitrary outputs for not mentioned.
# 3) Construct all useful streams and then associate outputs with bindings
# Useful stream must satisfy at least one fact. How should these assignments be propagated though?
# Make an action that maps each stream result to unbound values?
# TODO: include functions again for cost-sensitive satisfaction
# TODO: convert init into streams to bind certain facts
# TODO: investigate constraint satisfaction techniques for binding instead
# TODO: could also instantiate all possible free parameters even if not useful
# TODO: effort that is a function of the number of output parameters (degrees of freedom)
# TODO: use a CSP solver instead of a planner internally
# TODO: max_iterations?
if not terms:
return {}, 0, init
constraints, negated, functions = partition_facts(set(map(obj_from_existential_expression, terms)))
if not costs:
functions = []
evaluations = evaluations_from_init(init)
goal_facts = set(filter(lambda f: evaluation_from_fact(f) not in evaluations, constraints))
free_parameters = sorted(get_parameters(goal_facts))
externals = parse_stream_pddl(stream_pddl, stream_map, stream_info)
stream_results = extract_streams(evaluations, externals, goal_facts)
function_plan = plan_functions(negated + functions, externals)
plan_skeleton = [(BIND_ACTION, free_parameters)]
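# The plan skeleton is a single pseudo-action over the free parameters; solving the problem amounts to finding a binding for them.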
cost = get_optimistic_cost(function_plan)
if max_cost < cost:
return None, INF, init
# TODO: detect connected components
# TODO: eagerly evaluate fully bound constraints
# TODO: consider other results if this fails
domain = create_domain(goal_facts)
init_evaluations = evaluations.copy()
store = SolutionStore(evaluations, max_time=max_time, success_cost=success_cost, verbose=verbose)
queue = SkeletonQueue(store, domain, disable=False)
num_iterations = search_time = sample_time = 0
last_clusters = set()
last_success = True
while not store.is_terminated():
num_iterations += 1
start_time = time.time()
print('\nIteration: {} | Skeletons: {} | Skeleton Queue: {} | Evaluations: {} | '
'Cost: {:.3f} | Search Time: {:.3f} | Sample Time: {:.3f} | Total Time: {:.3f}'.format(
num_iterations, len(queue.skeletons), len(queue),
len(evaluations), store.best_cost, search_time, sample_time, store.elapsed_time()))
external_plan = None
if len(queue.skeletons) < max_skeletons:
clusters = extract_disabled_clusters(queue)
domain.axioms[:] = [create_disable_axiom(cluster) for cluster in clusters]
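# Rebuild the disable axioms every iteration so clusters that failed sampling immediately derive UNSATISFIABLE during replanning.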
dominated = are_dominated(last_clusters, clusters)
last_clusters = clusters
planner = 'ff-astar' # TODO: toggle within reschedule_stream_plan
#if last_success or not dominated: # Could also keep a history of results
stream_plan = reschedule_stream_plan(init_evaluations, goal_facts, domain, stream_results,
unique_binding=True, unsatisfiable=True,
max_effort=max_effort, planner=planner, **search_args)
if stream_plan is not None:
external_plan = reorder_stream_plan(combine_optimizers(
init_evaluations, stream_plan + list(function_plan)))
print('Stream plan ({}, {:.3f}): {}'.format(
get_length(external_plan), compute_plan_effort(external_plan), external_plan))
last_success = (external_plan is not None)
search_time += elapsed_time(start_time)
# Once a constraint added for a skeleton, it should only be relaxed
start_time = time.time()
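# Allocate sampling time proportional to the accumulated search time (search_sample_ratio); sample without limit when the last search failed.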
if last_success: # Only works if create_disable_axioms never changes
allocated_sample_time = (search_sample_ratio * search_time) - sample_time
else:
allocated_sample_time = INF
queue.process(external_plan, plan_skeleton, cost=cost,
complexity_limit=INF, max_time=allocated_sample_time)
sample_time += elapsed_time(start_time)
if not last_success and not queue:
break
# TODO: exhaustively compute all plan skeletons and add to queue within the focused algorithm
write_stream_statistics(externals, verbose)
action_plan, cost, facts = revert_solution(store.best_plan, store.best_cost, evaluations)
bindings = bindings_from_plan(plan_skeleton, action_plan)
return bindings, cost, facts
|
[
"caelan@mit.edu"
] |
caelan@mit.edu
|
22db28b27bcda667767fa13f454db3c18a2c3a11
|
9d3171d191914bb980f8fea2b895de79d9893db6
|
/scikits/statsmodels/stats/tests/test_diagnostic.py
|
ac1e07b146b8b7f9f439869919dcbdbfd6620fb7
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
takluyver/statsmodels
|
2d3ba11035501bd1e35f23daf27bca823eec2cb5
|
3f1aeba98cd4bc2f326f9c18c34e66c396be99cf
|
refs/heads/master
| 2023-06-19T18:51:28.464440
| 2012-02-29T16:24:25
| 2012-02-29T16:24:25
| 3,585,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,983
|
py
|
# -*- coding: utf-8 -*-
"""Tests for Regression Diagnostics and Specification Tests
Created on Thu Feb 09 13:19:47 2012
Author: Josef Perktold
License: BSD-3
currently all tests are against R
"""
import os
import numpy as np
from numpy.testing import (assert_, assert_almost_equal, assert_equal,
assert_approx_equal)
from scikits.statsmodels.regression.linear_model import OLS, GLSAR
from scikits.statsmodels.tools.tools import add_constant
from scikits.statsmodels.datasets import macrodata
import scikits.statsmodels.sandbox.panel.sandwich_covariance as sw
import scikits.statsmodels.stats.diagnostic as smsdia
#import scikits.statsmodels.sandbox.stats.diagnostic as smsdia
import scikits.statsmodels.stats.outliers_influence as oi
cur_dir = os.path.abspath(os.path.dirname(__file__))
def compare_t_est(sp, sp_dict, decimal=(14, 14)):
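# decimal = (digits for the statistic, digits for the pvalue) when comparing against the R reference dict.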
assert_almost_equal(sp[0], sp_dict['statistic'], decimal=decimal[0])
assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=decimal[1])
def notyet_atst():
d = macrodata.load().data
realinv = d['realinv']
realgdp = d['realgdp']
realint = d['realint']
endog = realinv
exog = add_constant(np.c_[realgdp, realint],prepend=True)
res_ols1 = OLS(endog, exog).fit()
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint], prepend=True)
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate], prepend=True)
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
#the following were done accidentally with res_ols1 in R,
#with original Greene data
params = np.array([-272.3986041341653, 0.1779455206941112,
0.2149432424658157])
cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,
37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,
-0.0104687835998635, 37.012804668757, -0.0104687835998635,
21.16037144168061]).reshape(3,3, order='F')
cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,
54.81079621448568, -0.350751446329901, 6.953380432635583e-05,
-0.01268990195095196, 54.81079621448564, -0.01268990195095195,
22.92512402151113]).reshape(3,3, order='F')
#goldfeld-quandt
het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,
pvalue=1.246141976112324e-30, distr='f')
het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)
het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,
pvalue=1.246141976112324e-30, distr='f')
#goldfeld-quandt, fraction = 0.5
het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,
pvalue=2.154956842194898e-33, distr='f')
gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(13, 14))
assert_equal(gq[-1], 'increasing')
harvey_collier = dict(stat=2.28042114041313, df=199,
pvalue=0.02364236161988260, distr='t')
#hc = harvtest(fm, order.by=ggdp , data = list())
harvey_collier_2 = dict(stat=0.7516918462158783, df=199,
pvalue=0.4531244858006127, distr='t')
##################################
class TestDiagnosticG(object):
def __init__(self):
d = macrodata.load().data
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint], prepend=True)
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate], prepend=True)
exogg3 = add_constant(np.c_[gs_l_realgdp], prepend=True)
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
self.res = res_ols
self.res2 = res_ols2
self.res3 = res_ols3
self.endog = self.res.model.endog
self.exog = self.res.model.exog
def test_basic(self):
#mainly to check I got the right regression
#> mkarray(fm$coefficients, "params")
params = np.array([-9.48167277465485, 4.3742216647032,
-0.613996969478989])
assert_almost_equal(self.res.params, params, decimal=14)
def test_hac(self):
res = self.res
#> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)
#> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE)
#> mkarray(nw, "cov_hac_4")
cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,
-0.0597207976835705, -0.3133096102522685, 0.1081011690351306,
0.000389440793564336, -0.0597207976835705, 0.000389440793564339,
0.0862118527405036]).reshape(3,3, order='F')
#> mkarray(nw2, "cov_hac_10")
cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,
-0.03958300024627573, -0.2871560199899845, 0.1049107028987101,
0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961,
0.0985539340694839]).reshape(3,3, order='F')
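# Sanity check below: the returned HAC standard errors should equal the square roots of the covariance diagonal.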
cov, bse_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
assert_almost_equal(cov, cov_hac_4, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
cov, bse_hac = sw.cov_hac_simple(res, nlags=10, use_correction=False)
assert_almost_equal(cov, cov_hac_10, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
def test_het_goldfeldquandt(self):
#TODO: test options missing
#> gq = gqtest(fm, alternative='greater')
#> mkhtest_f(gq, 'het_gq_greater', 'f')
het_gq_greater = dict(statistic=0.5313259064778423,
pvalue=0.9990217851193723,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='less')
#> mkhtest_f(gq, 'het_gq_less', 'f')
het_gq_less = dict(statistic=0.5313259064778423,
pvalue=0.000978214880627621,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided', 'f')
het_gq_two_sided = dict(statistic=0.5313259064778423,
pvalue=0.001956429761255241,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, fraction=0.1, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_01', 'f')
het_gq_two_sided_01 = dict(statistic=0.5006976835928314,
pvalue=0.001387126702579789,
parameters=(88, 87), distr='f')
#> gq = gqtest(fm, fraction=0.5, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_05', 'f')
het_gq_two_sided_05 = dict(statistic=0.434815645134117,
pvalue=0.004799321242905568,
parameters=(48, 47), distr='f')
endogg, exogg = self.endog, self.exog
#tests
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(14, 14))
assert_equal(gq[-1], 'increasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='decreasing')
compare_t_est(gq, het_gq_less, decimal=(14, 14))
assert_equal(gq[-1], 'decreasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO: forcing the same split as R 202-90-90-1=21
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO other options ???
def test_het_breush_pagan(self):
res = self.res
bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,
parameters=(2,), distr='f')
bp = smsdia.het_breushpagan(res.resid, res.model.exog)
compare_t_est(bp, bptest, decimal=(13, 13))
def test_het_white(self):
res = self.res
#TODO: regressiontest compare with Greene or Gretl or Stata
hw = smsdia.het_white(res.resid, res.model.exog)
hw_values = (33.503722896538441, 2.9887960597830259e-06,
7.7945101228430946, 1.0354575277704231e-06)
assert_almost_equal(hw, hw_values)
def test_het_arch(self):
#> library(FinTS)
#> at = ArchTest(residuals(fm), lags=4)
#> mkhtest(at, 'archtest_4', 'chi2')
archtest_4 = dict(statistic=3.43473400836259,
pvalue=0.487871315392619, parameters=(4,),
distr='chi2')
#> at = ArchTest(residuals(fm), lags=12)
#> mkhtest(at, 'archtest_12', 'chi2')
archtest_12 = dict(statistic=8.648320999014171,
pvalue=0.732638635007718, parameters=(12,),
distr='chi2')
at4 = smsdia.het_arch(self.res.resid, maxlag=4)
at12 = smsdia.het_arch(self.res.resid, maxlag=12)
compare_t_est(at4[:2], archtest_4, decimal=(12, 13))
compare_t_est(at12[:2], archtest_12, decimal=(13, 14))
def test_acorr_breush_godfrey(self):
res = self.res
#bgf = bgtest(fm, order = 4, type="F")
breushgodfrey_f = dict(statistic=1.179280833676792,
pvalue=0.321197487261203,
parameters=(4,195,), distr='f')
#> bgc = bgtest(fm, order = 4, type="Chisq")
#> mkhtest(bgc, "breushpagan_c", "chi2")
breushgodfrey_c = dict(statistic=4.771042651230007,
pvalue=0.3116067133066697,
parameters=(4,), distr='chi2')
bg = smsdia.acorr_breush_godfrey(res, nlags=4)
bg_r = [breushgodfrey_c['statistic'], breushgodfrey_c['pvalue'],
breushgodfrey_f['statistic'], breushgodfrey_f['pvalue']]
assert_almost_equal(bg, bg_r, decimal=13)
def test_acorr_ljung_box(self):
res = self.res
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box")
#> mkhtest(bt, "ljung_box_4", "chi2")
ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713,
parameters=(4,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce")
#> mkhtest(bt, "ljung_box_bp_4", "chi2")
ljung_box_bp_4 = dict(statistic=5.12462932741681,
pvalue=0.2747471266820692,
parameters=(4,), distr='chi2')
#ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2)
#> mkhtest(bt, "ljung_box_4df2", "chi2")
ljung_box_4df2 = dict(statistic=5.23587172795227,
pvalue=0.0729532930400377,
parameters=(2,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2)
#> mkhtest(bt, "ljung_box_bp_4df2", "chi2")
ljung_box_bp_4df2 = dict(statistic=5.12462932741681,
pvalue=0.0771260128929921,
parameters=(2,), distr='chi2')
lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4,
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 14))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 14))
def test_harvey_collier(self):
#> hc = harvtest(fm, order.by = NULL, data = list())
#> mkhtest_f(hc, 'harvey_collier', 't')
harvey_collier = dict(statistic=0.494432160939874,
pvalue=0.6215491310408242,
parameters=(198), distr='t')
#> hc2 = harvtest(fm, order.by=ggdp , data = list())
#> mkhtest_f(hc2, 'harvey_collier_2', 't')
harvey_collier_2 = dict(statistic=1.42104628340473,
pvalue=0.1568762892441689,
parameters=(198), distr='t')
hc = smsdia.linear_harvey_collier(self.res)
compare_t_est(hc, harvey_collier, decimal=(12, 12))
def test_rainbow(self):
#rainbow test
#> rt = raintest(fm)
#> mkhtest_f(rt, 'raintest', 'f')
raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, center=0.4)
#> mkhtest_f(rt, 'raintest_center_04', 'f')
raintest_center_04 = dict(statistic=0.682635074191527,
pvalue=0.971040230422121,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, fraction=0.4)
#> mkhtest_f(rt, 'raintest_fraction_04', 'f')
raintest_fraction_04 = dict(statistic=0.565551237772662,
pvalue=0.997592305968473,
parameters=(122, 77), distr='f')
#> rt = raintest(fm, order.by=ggdp)
#Warning message:
#In if (order.by == "mahalanobis") { :
# the condition has length > 1 and only the first element will be used
#> mkhtest_f(rt, 'raintest_order_gdp', 'f')
raintest_order_gdp = dict(statistic=1.749346160513353,
pvalue=0.002896131042494884,
parameters=(101, 98), distr='f')
rb = smsdia.linear_rainbow(self.res)
compare_t_est(rb, raintest, decimal=(13, 14))
rb = smsdia.linear_rainbow(self.res, frac=0.4)
compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))
def test_compare_lr(self):
res = self.res
res3 = self.res3 #nested within res
#lrtest
#lrt = lrtest(fm, fm2)
#Model 1: ginv ~ ggdp + lint
#Model 2: ginv ~ ggdp
lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,
chi2value=4.66794408358942, pvalue=0.03073069384028677,
df=(4,3,1))
lrt = res.compare_lr_test(res3)
assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=14)
assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=14)
waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,
df=(199,200,1))
wt = res.compare_f_test(res3)
assert_almost_equal(wt[0], waldtest['fvalue'], decimal=13)
assert_almost_equal(wt[1], waldtest['pvalue'], decimal=14)
def test_compare_nonnested(self):
res = self.res
res2 = self.res2
#jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))
#Estimate Std. Error t value Pr(>|t|)
jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,
2.155182176352370, 0.032354572525314450, '*'),
('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,
2.715438978051544, 0.007203854534057954, '**')]
jt1 = smsdia.compare_j(res2, res)
assert_almost_equal(jt1, jtest[0][3:5], decimal=13)
jt2 = smsdia.compare_j(res, res2)
assert_almost_equal(jt2, jtest[1][3:5], decimal=14)
#Estimate Std. Error z value Pr(>|z|)
coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,
-1.304043770977755, 1.922186587840554e-01, ' '),
('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,
-5.727181590258883, 1.021128495098556e-08, '***')]
ct1 = smsdia.compare_cox(res, res2)
assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)
ct2 = smsdia.compare_cox(res2, res)
assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)
#TODO should be approx
# Res.Df Df F Pr(>F)
encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,
0.032354572525313666, '*'),
('M2 vs. ME', 198, -1, 7.373608843521585,
0.007203854534058054, '**')]
# Estimate Std. Error t value
petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,
44.5087822087058598, -5.15139, 6.201281252449979e-07),
('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,
0.0000462387010349, 13.72583, 1.319536115230356e-30)]
def test_cusum_ols(self):
#R library(strucchange)
#> sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM")
#> mkhtest(sc, 'cusum_ols', 'BB')
cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543,
parameters=(), distr='BB') #Brownian Bridge
k_vars=3
cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) #
compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))
def test_breaks_hansen(self):
#> sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen")
#> mkhtest(sc, 'breaks_nyblom_hansen', 'BB')
breaks_nyblom_hansen = dict(statistic=1.0300792740544484,
pvalue=0.1136087530212015,
parameters=(), distr='BB')
bh = smsdia.breaks_hansen(self.res)
assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],
decimal=14)
#TODO: breaks_hansen doesn't return pvalues
def test_recursive_residuals(self):
reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096,
-1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999,
-4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468,
-9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842,
-11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,
-9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511,
-3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688,
-3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032,
-4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824,
-4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507,
-3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 1.773, -0.830,
-0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957,
-0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728,
-0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,
-3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947,
0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624,
5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846,
8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395,
7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218,
7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 9.056, 10.365, 10.495,
10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522,
10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016,
6.636, 6.975])
rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95)
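# rr bundles the recursive-residual outputs; the indices used below (0=residuals, 1=params, 2=predictions, 5=standardized cusum, 6=confidence bounds) are inferred from these assertions.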
assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front
#assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize))
assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3)
assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)
#regression number, visually checked with graph from gretl
ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734,
13.91080121])
ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937,
39.98515325])
lb, ub = rr[6]
assert_almost_equal(ub[:5], ub0, decimal=7)
assert_almost_equal(lb[:5], -ub0, decimal=7)
assert_almost_equal(ub[-5:], ub1, decimal=7)
assert_almost_equal(lb[-5:], -ub1, decimal=7)
#test a few values with explicit OLS
endog = self.res.model.endog
exog = self.res.model.exog
params = []
ypred = []
for i in range(3,10):
resi = OLS(endog[:i], exog[:i]).fit()
ypred.append(resi.model.predict(resi.params, exog[i]))
params.append(resi.params)
assert_almost_equal(rr[2][3:10], ypred, decimal=12)
assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)
assert_almost_equal(rr[1][2:9], params, decimal=12)
def test_normality(self):
res = self.res
#> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test
#> lt = lillie.test(residuals(fm))
#> mkhtest(lt, "lillifors", "-")
lillifors1 = dict(statistic=0.0723390908786589,
pvalue=0.01204113540102896, parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)**2)
#> mkhtest(lt, "lillifors", "-")
lillifors2 = dict(statistic=0.301311621898024,
pvalue=1.004305736618051e-51,
parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)[1:20])
#> mkhtest(lt, "lillifors", "-")
lillifors3 = dict(statistic=0.1333956004203103,
pvalue=0.4618672180799566, parameters=(), distr='-')
lf1 = smsdia.lillifors(res.resid)
lf2 = smsdia.lillifors(res.resid**2)
lf3 = smsdia.lillifors(res.resid[:20])
compare_t_est(lf1, lillifors1, decimal=(15, 15))
compare_t_est(lf2, lillifors2, decimal=(15, 15)) #pvalue very small
assert_approx_equal(lf2[1], lillifors2['pvalue'], significant=10)
compare_t_est(lf3, lillifors3, decimal=(15, 1))
#R uses different approximation for pvalue in last case
#> ad = ad.test(residuals(fm))
#> mkhtest(ad, "ad3", "-")
adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,
parameters=(), distr='-')
#> ad = ad.test(residuals(fm)**2)
#> mkhtest(ad, "ad3", "-")
adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')
#> ad = ad.test(residuals(fm)[1:20])
#> mkhtest(ad, "ad3", "-")
adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,
parameters=(), distr='-')
ad1 = smsdia.normal_ad(res.resid)
compare_t_est(ad1, adr1, decimal=(14, 18))
ad2 = smsdia.normal_ad(res.resid**2)
assert_(np.isinf(ad2[0]))
ad3 = smsdia.normal_ad(res.resid[:20])
compare_t_est(ad3, adr3, decimal=(14, 14))
def test_influence(self):
res = self.res
#this test is slow
import json
fp = open(os.path.join(cur_dir,"results/influence_lsdiag_R.json"))
lsdiag = json.load(fp)
#basic
assert_almost_equal(lsdiag['cov.scaled'],
res.cov_params().ravel(), decimal=14)
assert_almost_equal(lsdiag['cov.unscaled'],
res.normalized_cov_params.ravel(), decimal=14)
infl = oi.Influence(res)
c0, c1 = infl.cooks_distance() #TODO: what's c1
assert_almost_equal(c0, lsdiag['cooks'], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], decimal=14)
#slow:
infl.get_all_obs() #slow, nobs estimation loop
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], decimal=14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], decimal=14)
import pandas
fn = os.path.join(cur_dir,"results/influence_measures_R.csv")
infl_r = pandas.read_csv(fn, index_col=0)
conv = lambda s: 1 if s=='TRUE' else 0
fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv")
#not used yet:
#infl_bool_r = pandas.read_csv(fn, index_col=0,
# converters=dict(zip(range(7),[conv]*7)))
infl_r2 = np.asarray(infl_r)
assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)
#duplicates
assert_almost_equal(dffits, infl_r2[:,3], decimal=14)
assert_almost_equal(c0, infl_r2[:,5], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, infl_r2[:,6], decimal=14)
#Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373
#TODO: finish and check thresholds and pvalues
'''
R has
>>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0]
array([ 6, 26, 63, 76, 90, 199])
>>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0]
array([ 4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108,
197, 198])
>>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0]
array([ 62, 76, 84, 90, 91, 92, 95, 108, 197, 199])
'''
def grangertest():
#> gt = grangertest(ginv, ggdp, order=4)
#> gt
#Granger causality test
#
#Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4)
#Model 2: ggdp ~ Lags(ggdp, 1:4)
grangertest = dict(fvalue=1.589672703015157, pvalue=0.178717196987075,
df=(198,193))
if __name__ == '__main__':
t = TestDiagnosticG()
t.test_basic()
t.test_hac()
t.test_acorr_breush_godfrey()
t.test_acorr_ljung_box()
t.test_het_goldfeldquandt()
t.test_het_breush_pagan()
t.test_het_white()
t.test_compare_lr()
t.test_compare_nonnested()
t.test_influence()
##################################################
'''
J test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error t value Pr(>|t|)
M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 *
M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
= lm(ginv ~ ggdp + tbilrate)
> ct = coxtest(fm, fm3)
> ct
Cox test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error z value Pr(>|z|)
fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222
fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> et = encomptest(fm, fm3)
> et
Encompassing test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Model E: ginv ~ ggdp + lint + tbilrate
Res.Df Df F Pr(>F)
M1 vs. ME 198 -1 4.64481 0.0323546 *
M2 vs. ME 198 -1 7.37361 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> fm4 = lm(realinv ~ realgdp + realint, data=d)
> fm5 = lm(log(realinv) ~ realgdp + realint, data=d)
> pet = petest(fm4, fm5)
> pet
PE test
Model 1: realinv ~ realgdp + realint
Model 2: log(realinv) ~ realgdp + realint
Estimate Std. Error t value
M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139
M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583
Pr(>|t|)
M1 + log(fit(M1))-fit(M2) 6.2013e-07 ***
M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
'''
|
[
"josef.pktd@gmail.com"
] |
josef.pktd@gmail.com
|
98e2d1500a5338b9d1789e2f09a9c8c3deeadece
|
d1d81b97f4f6c733841150b95d9f966fc97e3846
|
/dir()语句.py
|
3659532052568344541c9ab7d0d2887541f18cbd
|
[] |
no_license
|
A-lPha/-python-test
|
15ce38d473811d0a68a04d18e841543bdfa03688
|
3b8300f87e4be0145ed78f4a2ffe641adef6e25f
|
refs/heads/master
| 2021-01-13T14:48:31.681494
| 2016-12-15T15:01:21
| 2016-12-15T15:01:21
| 76,569,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
import os
print(dir(os))
|
[
"noreply@github.com"
] |
A-lPha.noreply@github.com
|
3c68c0ecfbf00348fbd1c2d22ad0713d11d9420b
|
a1e98002088582085f0733d7bbe3c6416a28f2ca
|
/Django/june5/manage.py
|
02adebf23b3ece45366357e172c531d64ae63bcf
|
[] |
no_license
|
flannerykj/modules-archive
|
ff7dbde76ccac873d0a6ed15ad8458d827b9008f
|
1c10d04794b497354d976d8bd9814db2bb2b48ad
|
refs/heads/master
| 2020-12-02T16:36:43.702798
| 2017-07-07T16:36:52
| 2017-07-07T16:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "june5.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"flannj@gmail.com"
] |
flannj@gmail.com
|
16f537a1b9f7d342b7d0390549a86589bdc67781
|
5a0dc727d87f2e46c56299a4084877e9c1633141
|
/src/pageobject/loginpage.py
|
1141f09be460e506337882775b0f0302e4c78b00
|
[] |
no_license
|
llllllinggggwei/auto_UI
|
f0bbe17cd6dd1f21768867dbfb6ff57a08519600
|
6108fba3dc94c12ea3620758f078df469747c621
|
refs/heads/master
| 2023-04-14T00:29:36.211286
| 2021-04-27T03:39:33
| 2021-04-27T03:39:33
| 360,745,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
from src.pageobject.basepage import Page
from selenium.webdriver.common.by import By
# Login page
class LoginPage(Page):
# Element locators
# Account input field
account_input = (By.NAME, "account")
# Password input field
password_input = (By.NAME, "password")
# Login button
login_button = (By.XPATH, "//span[text()='登 录']")
# Close-window button
close_button = (By.CLASS_NAME, "icon-close")
# Verification elements
# Log-out button
logout_button = (By.XPATH, "//span[text()='退出系统']")
def __init__(self, driver):
Page.__init__(self, driver)
# Enter the account
def input_account(self, account):
self.input_text(self.account_input, account)
# Enter the password
def input_password(self, password):
self.input_text(self.password_input, password)
# Click the login button
def click_login(self):
self.click(self.login_button)
|
[
"419056831@qq.com"
] |
419056831@qq.com
|
31fef5230a0f043658d1beccdb76fae0ca3d4085
|
2c7608ea752fee771f4e69fcaf6716ba2e82fce7
|
/bin/pasteurize
|
35b93faf5839293fd403c90c5b41e50c099808c7
|
[] |
no_license
|
YLZLY/ChatterBot_
|
5ea57eb68759e2676706672587c3f97457ca35be
|
a0333f5e703065bafb6a7e1b28c44bad1fa797c1
|
refs/heads/master
| 2020-09-14T10:43:46.292984
| 2018-05-03T19:32:53
| 2018-05-03T19:32:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
#!/Users/amarchisio/starterbot/starterbot/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from libpasteurize.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"noreply@github.com"
] |
YLZLY.noreply@github.com
|
|
882398f469ad3b3c3ed122b0a18d5b1cd52427ee
|
0451afefb78e8bff2804b3a0aa9e7420b84e3a19
|
/03-equality.py
|
7f0aae3fec436c4818e13280a6d33a0a159bfd01
|
[] |
no_license
|
jarednthomas/python-challenge-2021
|
f3ee348e1aadf37665b5735f2e0f3ad472bcebb5
|
0646c4374e50734504c94cd2d887a96e4bc15074
|
refs/heads/main
| 2023-07-02T07:19:39.788371
| 2021-08-02T06:57:27
| 2021-08-02T06:57:27
| 391,512,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
#!/usr/bin/env python3
import re, requests
from bs4 import BeautifulSoup, Comment
# Set url and headers
url = "http://www.pythonchallenge.com/pc/def/equality.html"
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Max-Age': '3600',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
# Parse html with bs4
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
# Extract comments from soup and save last
comments = soup.find_all(text=lambda text: isinstance(text, Comment))
last_comment = comments[-1]
# Find all matches of hint using a regular expression
regex = r"[a-z][A-Z]{3}[a-z]{1}[A-Z]{3}[a-z]"
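# Pattern: a single lowercase letter flanked by exactly three capitals on each side, with lowercase guards at both ends so longer capital runs are rejected.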
matches = re.findall(regex, last_comment)
# Return solution url
print(url.replace("equality", "".join( i[4] for i in matches )))
|
[
"jaredpyrothomas@gmail.com"
] |
jaredpyrothomas@gmail.com
|
8a6cad25cd8faee53fb079d4f2d6261d56c6bb5d
|
e6145805bc3a338fab6dbe8a2737f4047a654ef6
|
/project_site/sql-orm.py
|
717347861bc5e445d1a4d1e82876ec42de80102a
|
[] |
no_license
|
Danko99/site_django_first
|
654017e7fc8c923b30e249c419616653c636bc4a
|
63a4a45633c25cf8f073a3b78e9f3d2b2b83e7da
|
refs/heads/master
| 2023-08-07T19:20:14.135840
| 2021-09-23T16:40:11
| 2021-09-23T16:40:11
| 405,380,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
from news.models import News
news1 = News(title="Новость 2", content="Контент новости 2").save()
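# Note: Model.save() returns None, so news1 ends up as None; keep a reference to the News instance first if it is needed later.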
|
[
"58398839+Danko998@users.noreply.github.com"
] |
58398839+Danko998@users.noreply.github.com
|
39b633a1c1e7311cd3957b5b541a863b54ca8d09
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_rho0_information1.py
|
266e763637cc4db3096f702b0dc24ff4016f5056
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
from xcp2k.inputsection import InputSection
from _each249 import _each249
class _rho0_information1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.Unit = None
self.EACH = _each249()
self._name = "RHO0_INFORMATION"
self._keywords = {'Common_iteration_levels': 'COMMON_ITERATION_LEVELS', 'Log_print_key': 'LOG_PRINT_KEY', 'Add_last': 'ADD_LAST', 'Unit': 'UNIT', 'Filename': 'FILENAME'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dbf8bbb76fa72d0dc5d11a244358c3400d2a591c
|
b03fe7009626eb0001ab9e797e098ea248291910
|
/Contest/SymmetricTree.py
|
38c5c45288c627e9378cdef4d253b7a896ace901
|
[] |
no_license
|
Anirudh-thakur/LeetCodeProblems
|
84761bd006527fb485a7e6a24047fc9d8cbde7fc
|
1e59e40df06df85d8f3366c326cb4392d27b9e64
|
refs/heads/main
| 2023-07-01T13:34:45.166492
| 2021-08-07T02:09:10
| 2021-08-07T02:09:10
| 367,592,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
# https://leetcode.com/problems/symmetric-tree/
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root == None:
return True
if root.left == None and root.right == None:
return True
if root.left == None or root.right == None:
return False
if root.left.val != root.right.val:
return False
return self.findSymmetry(root.left,root.right)
def findSymmetry(self,p,q):
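# Two subtrees mirror each other when both roots match and p's left subtree mirrors q's right subtree (and vice versa).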
if p == None and q == None:
return True
if p == None or q == None:
return False
result = self.findSymmetry(p.left,q.right)
result = result and self.findSymmetry(p.right,q.left)
return p.val == q.val and result
if __name__ == '__main__':
objS = Solution()
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(2)
root.left.left = TreeNode(3)
root.left.right = TreeNode(4)
root.right.right = TreeNode(3)
root.right.left = TreeNode(4)
result = objS.isSymmetric(root)
print(result)
|
[
"anirudh.thakur94@gmail.com"
] |
anirudh.thakur94@gmail.com
|
d24ea9e87cc6ec4b639a144c8ebb515845c2ea48
|
439ab4a51a4fc1f0877a6bdc5092d6761ff46b96
|
/polls/urls.py
|
cfadddf3130ffaecdd0410152bc698a2c4a26e12
|
[] |
no_license
|
UsernameForGerman/askru
|
e58ccd13d6e0da9abe103e75688ab6f97698438b
|
c2b62c175cc38abf0d565d2e0e42f05e19bf878d
|
refs/heads/master
| 2022-12-11T09:20:50.071495
| 2020-09-16T19:23:12
| 2020-09-16T19:23:12
| 295,678,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.urls import include, path
from .routings import router
app_name = 'polls'
urlpatterns = [
path('', include(router.urls)),
]
|
[
"polyakgerman@gmail.com"
] |
polyakgerman@gmail.com
|
d2f8a4958bd91a5421dcd5d164e905b1d5656717
|
0d546bff5f7421c5b118ff9dce257b9c65291689
|
/services/cms_app.py
|
111edca6c0b1f00f6802f2029a0ca604d80cf403
|
[] |
no_license
|
ZloiGremlin/agentstvo
|
cace44c9298b2e5d2767094e5bacd6cd130eb451
|
1d39694caf89c66ac67489aa3add2482d7e139e1
|
refs/heads/master
| 2016-09-01T19:48:04.140064
| 2013-06-01T14:08:28
| 2013-06-01T14:08:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
# vim:fileencoding=utf-8
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class CakesApp(CMSApp):
name = u'Услуги: Торты'
urls = ["services.cake_urls"]
class DecoratesApp(CMSApp):
name = u'Услуги: Украшения'
urls = ["services.decorate_urls"]
class RequisitesApp(CMSApp):
name = u'Услуги: Реквизиты'
urls = ["services.req_urls"]
class ArtistApp(CMSApp):
name = u'Услуги: Артисты'
urls = ["services.artist_urls"]
class McApp(CMSApp):
name = u'Услуги: Ведущие'
urls = ["services.mc_urls"]
class KidsApp(CMSApp):
name = u'Услуги: Для детей'
urls = ["services.kids_urls"]
class MusicApp(CMSApp):
name = u'Услуги: Музыканты'
urls = ["services.music_urls"]
class CarApp(CMSApp):
name = u'Услуги: Автомобили'
urls = ["services.car_urls"]
apphook_pool.register(CakesApp)
apphook_pool.register(DecoratesApp)
apphook_pool.register(RequisitesApp)
apphook_pool.register(ArtistApp)
apphook_pool.register(McApp)
apphook_pool.register(KidsApp)
apphook_pool.register(MusicApp)
apphook_pool.register(CarApp)
|
[
"zloi.gremlin@gmail.com"
] |
zloi.gremlin@gmail.com
|
05ba170c288cc1402abe1c388ae50633613b71c6
|
e49d335d66e6ce28330cd9bb1731152d41dff8f8
|
/favNumber.py
|
b6695a2034523df6ae714dfa81d806816e74774e
|
[] |
no_license
|
sizif/ps-python-path-one
|
fe52b2b9e3328e752497f4e23cfbadc889e1a9f4
|
a4e4aa552a5028979ddc04546ef7fdefcc39c8c9
|
refs/heads/master
| 2020-06-18T17:56:23.760126
| 2016-10-14T19:56:28
| 2016-10-14T19:56:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
print("What's your favorite number?") # print to console
# git test
|
[
"ImsirovicAjdin@users.noreply.github.com"
] |
ImsirovicAjdin@users.noreply.github.com
|
c2505100ccc3c9c808011a301d677a8ff2f94b66
|
8e57713c7662fb4e851b4a60e1aa743143e94940
|
/To_From_dynamo.py
|
82b1799ddbfbe39ba3d568dc76e13f5bf84615c8
|
[] |
no_license
|
Prathyusha277/Cloud_Project
|
5ca4538bc102e30ef58d7a482cd54bdab34563a1
|
78b4f6b56e32f793f0ac8b8b412a3b085f85599c
|
refs/heads/master
| 2020-04-03T00:55:32.589432
| 2016-11-30T19:37:16
| 2016-11-30T19:37:16
| 60,643,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
import boto
import csv
import random
from boto import dynamodb2
from boto.dynamodb2.exceptions import ItemNotFound
from boto.dynamodb2.table import Table
from boto.dynamodb.condition import LE, EQ, GE, BETWEEN
def cc_prediction(From_user,TO_USER):
    dynamodb_conn = boto.dynamodb2.connect_to_region('us-west-2')
    dynamodb_table = Table('Enron_Data',connection=dynamodb_conn)
    print From_user
    print TO_USER
    to_user_list = dynamodb_table.query_2(From__eq=From_user, To_List__eq=TO_USER)
    no_of_cc = 2
    cc_list = []
    # note: the original printed to_user_list.__sizeof__(), which reports the
    # object's size in bytes, not the number of matches, so it is dropped here
    for user in to_user_list:
        print "In"
        cc_list = user['CC']  # keeps the CC list of the last matching item
    predicted_list = []
    if len(cc_list) >= no_of_cc:
        # random.sample raises ValueError when the population is smaller than
        # the sample size, so require at least no_of_cc candidates
        predicted_list = random.sample(cc_list, no_of_cc)
    return predicted_list
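if __name__ == '__main__':
    # Example invocation (a sketch: assumes the 'Enron_Data' table exists and
    # AWS credentials are configured; the two addresses are hypothetical)
    print cc_prediction('alice@enron.com', 'bob@enron.com')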
|
[
"prathyu@uw.edu"
] |
prathyu@uw.edu
|
2d2e271336619fd6ce911645db998560f6dc91c4
|
18c10aa1261bea4ae02fa79598446df714519c6f
|
/80_pythonProject/code07-01.py
|
09a2e9b1428a585abf14afa2b2ae0c0adc62af93
|
[] |
no_license
|
giveseul-23/give_Today_I_Learn
|
3077efbcb11ae4632f68dfa3f9285d2c2ad27359
|
f5599f0573fbf0ffdfbcc9c79b468e3c76303dd4
|
refs/heads/master
| 2023-05-06T08:13:49.845436
| 2021-05-25T04:33:20
| 2021-05-25T04:33:20
| 330,189,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# a = 10
# b = 20
# c = 30
# hap = a + b + c
a = [10, 20, 30]
a = [0, 0, 0]
a[0] = 10
a[1] = 20
a[2] = 30
#a[3] = 40
hap = a[0] + a[1] + a[2]
print(hap)
print(len(a)) # total number of elements in the list
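# The same total without manual indexing (equivalent result):
print(sum(a)) # 60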
|
[
"joodasel@icloud.com"
] |
joodasel@icloud.com
|
00b1556f93908ce7243de4a76431a0482c4fb549
|
58338d473f34e7fdcb80e6a281a65e8cbb6826e8
|
/showing_digit.py
|
06134f8291c74ae70effecef62f6668c0658825b
|
[] |
no_license
|
sandeep9889/neural_network
|
5b0bddec899418f8434c5b25b7198338d79190b6
|
634beb5a161a2ceead3fdd924bb4834d7169273d
|
refs/heads/main
| 2023-08-27T18:42:01.810711
| 2021-11-15T06:49:46
| 2021-11-15T06:49:46
| 427,841,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import numpy
import matplotlib.pyplot
# %matplotlib inline  (IPython/Jupyter magic; not valid in a plain .py script)
data_file = open("mnist_dataset/mnist_train_100.csv","r")
data_list = data_file.readlines()
data_file.close()  # bare `data_file.close` only referenced the method without calling it
all_values = data_list[0].split(',')
image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
matplotlib.pyplot.imshow(image_array,cmap='Greys', interpolation='none')
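# A typical next step (not part of the original file): rescale the raw 0-255
# pixel values into the 0.01-1.0 range commonly used as neural-network input.
scaled_input = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
print(scaled_input.min(), scaled_input.max())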
|
[
"sandeepchauhan9630228313@gmail.com"
] |
sandeepchauhan9630228313@gmail.com
|
af63607ea6184a20fe8159adf2d867627c3ab91f
|
53bd1888e29dd76a2aed467eb5122dcc4802013b
|
/src/test/s3.py
|
27015fbf30e579d9ae3efbd53e1c558df9434937
|
[] |
no_license
|
hongyunnchen/TWCC-CLI
|
254c45830c39047d6e08e798de85bec6c48b0359
|
7b80dd5df86199a6c0544d42a0648fbcf44fc9db
|
refs/heads/master
| 2023-01-07T14:15:20.826838
| 2020-06-17T01:06:10
| 2020-06-17T01:06:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,867
|
py
|
from __future__ import print_function
import sys, os
TWCC_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path[1]=TWCC_PATH
from termcolor import colored
def TWCC_LOGO():
print (
colored(">"*10+" Welcome to ", 'yellow'),
colored('TWCC.ai', 'white', attrs=['reverse', 'blink']),
colored(" "+"<"*10, 'yellow')
)
TWCC_LOGO() ## here is logo
import re
from twcc.services.s3_tools import S3
import click,time
@click.group()
def cli():
pass
# Bucket functions
@click.command()
@click.option('-n','--name','bucket_name',required=True,type=str,help='Name of the Bucket')
def create_bucket(bucket_name):
''' Create new s3 bucket.
'''
s3 = S3()
s3.create_bucket(bucket_name)
@click.command()
#@click.option('-lb','list4buckets',is_flag = False,type=bool, help = 'Show all buckets in this project')
def list_buckets():
''' List all the exist s3 buckets in the project.
'''
s3 = S3()
#if not list4buckets:
buckets = s3.list_bucket()
s3.test_table(buckets)
@click.command()
@click.option('-n','--name','bucket_name',required=True, help = 'Name of the Bucket.')
@click.option('-df','df',is_flag = True,help = 'Delete all the files inside the bucket before deleting the bucket.')
def del_bucket(bucket_name,df):
''' Delete s3 bucket
'''
s3 = S3()
s3.del_bucket(bucket_name,df)
# File functions
@click.command()
@click.option('-n','--name','bucket_name',required=True, help = 'Name of the Bucket.')
def list_files(bucket_name):
''' List all the exist files inside the s3 bucket.
'''
s3 = S3()
files = s3.list_object(bucket_name)
s3.test_table(files)
@click.command()
@click.option('-n','--name','bucket_name',required=True, help = 'Name of the Bucket.')
@click.option('-f','--file_name','file_name',required=True, help = 'Name of the File.')
def del_file(bucket_name,file_name):
''' Delete file from s3 bucket
'''
s3 = S3()
s3.del_object(bucket_name,file_name)
@click.command()
@click.option('-s','--source','source',required=True, help = 'Name of the File.')
@click.option('-d','--directory','directory',required=True, help = 'Name of the Bucket.')
@click.option('-k','--key','key',help ='The name of the key to upload to.')
@click.option('-r','r',is_flag = True,help = 'Recursively copy entire directories.' )
def upload(source,directory,key,r):
''' Upload to s3 bucket
'''
s3 = S3()
# Check for source type
    if os.path.isdir(source):
        if not r:
            raise Exception("{} is path, need to set recursive to True".format(source))
        s3.upload_bucket(path=source, bucket_name=directory, r=r)
    else:
        if key is None:
            key = source.split('/')[-1]
        s3.upload_bucket(file_name=source, bucket_name=directory, key=key)
#download_bucket(self,bucket_name=None,key=None,file_name=None,path=None,r=False)
@click.command()
@click.option('-s','--source','source',required=True, help = 'Name of the Bucket.')
@click.option('-d','--directory','directory',required=True, help = 'Name of the path.')
@click.option('-k','--key','key',help ='The name of the key to download.')
@click.option('-r','r',is_flag = True,help = 'Recursively copy entire directories.' )
def download(source,directory,key,r):
''' Download from s3 bucket
'''
s3 = S3()
# Check for source type
if not s3.check_4_bucket(source):
raise Exception("No such bucket name {} exists".format(source))
# Check if the directory exists
    # Download everything inside the bucket
    if os.path.isdir(directory) and key is None:
        if not r:
            raise Exception("{} is path, need to set recursive to True".format(directory))
        s3.download_bucket(bucket_name=source, path=directory, r=r)
    else:
        # Download everything from a folder (the original also issued an
        # unused s3.list_object(source) call here; it is dropped)
        if key.endswith('*'):
            prefix_folder = '/'.join(key.split('/')[:-1])
            desire_files = s3.list_files_v2(bucket_name=source, delimiter='', prefix=prefix_folder)
            for desire_file in desire_files:
                if not desire_file.endswith('/'):
                    print(desire_file)
                    new_directory = directory + desire_file
                    s3.download_bucket(file_name=new_directory, bucket_name=source, key=desire_file)
        else:
            # Download a single file from a folder or bucket
            if directory.endswith('/'):
                directory = directory + key
            s3.download_bucket(file_name=directory, bucket_name=source, key=key)
cli.add_command(create_bucket)
cli.add_command(list_buckets)
cli.add_command(del_bucket)
cli.add_command(list_files)
cli.add_command(del_file)
cli.add_command(upload)
cli.add_command(download)
if __name__ == '__main__':
cli()
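# Example invocations (bucket and path names are hypothetical; depending on
# the installed click version, command names keep underscores or have them
# converted to dashes):
#   python s3.py create_bucket -n mybucket
#   python s3.py upload -s ./data -d mybucket -r
#   python s3.py download -s mybucket -d ./restore/ -r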
|
[
"taiwanpride888@gmail.com"
] |
taiwanpride888@gmail.com
|
bd58898ff5e639d5aed47288e437c57b6290db16
|
f19012ef68807173eea827c0315f672ef56290ce
|
/python/problem10/sol.py
|
e7b438949e76985aab91857547173180efafd201
|
[] |
no_license
|
dkuldeep11/project-euler
|
33b49202c401d649b2dd61b1243f7aa0d3a7dbd9
|
2e9e33b9e9e1207e73a119a68ff0eacbfc682d82
|
refs/heads/master
| 2020-08-06T20:03:14.035077
| 2016-10-22T08:01:41
| 2016-10-22T08:01:41
| 21,801,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# Sieve-style sum of all primes below one million (Project Euler #10 variant).
marked = [0] * 1000000
value = 3
s = 2  # start from 2, the only even prime
while value < 1000000:
    if marked[value] == 0:  # never marked, so value is prime
        s += value
        i = value
        while i < 1000000:
            marked[i] = 1  # mark every multiple of this prime as composite
            i += value
    value += 2  # even numbers > 2 are never prime, so step by 2
print s
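# Sanity check of the same sieve idea at a small limit: primes below 10 are
# 2, 3, 5 and 7, so the sum should be 17.
def prime_sum(limit):
    flags = [0] * limit
    total = 2
    v = 3
    while v < limit:
        if flags[v] == 0:
            total += v
            i = v
            while i < limit:
                flags[i] = 1
                i += v
        v += 2
    return total
assert prime_sum(10) == 17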
|
[
"dkuldeep11@gmail.com"
] |
dkuldeep11@gmail.com
|
07af8e9ef55e0b2579cbc523d01dae3e7ecc544f
|
e46c72e21f3eb65f79d6100a50ae008d60e34946
|
/old/language/python/udemy/ds/135/135.py
|
9d5df69b4678a9f72bdcd3bd70f4d26f8bb1b94f
|
[] |
no_license
|
jsmack/learn
|
cacacdad07c56d73c32797f6393c89185e549bc5
|
2bc31eb32100eaff7409d6932eb67fb18be37cd8
|
refs/heads/master
| 2023-04-15T17:45:14.402233
| 2023-03-20T06:20:57
| 2023-03-20T06:20:57
| 126,584,082
| 0
| 1
| null | 2023-03-09T00:44:18
| 2018-03-24T09:24:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 766
|
py
|
import sqlite3
#conn = sqlite3.connect('test_sqlite.db')
## in memory
conn = sqlite3.connect(':memory:')
curs = conn.cursor()
curs.execute(
'create table persons(id INTEGER PRIMARY KEY AUTOINCREMENT, name STRING)'
)
conn.commit()
curs.execute(
'INSERT INTO persons(name) values("Mike")'
)
conn.commit()
curs.execute('select * from persons')
print(curs.fetchall())
curs.execute(
'INSERT INTO persons(name) values("Nancy")'
)
curs.execute(
'INSERT INTO persons(name) values("Jun")'
)
conn.commit()
curs.execute('UPDATE persons set name = "Michel" where name = "Mike"')
conn.commit()
curs.execute('DELETE FROM persons where name ="Michel"')
conn.commit()
curs.execute('select * from persons')
print(curs.fetchall())
curs.close()
conn.close()
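# A safer variant (sketch, not in the original): parameterized queries let
# sqlite3 handle quoting and protect against SQL injection.
conn = sqlite3.connect(':memory:')
curs = conn.cursor()
curs.execute('create table persons(id INTEGER PRIMARY KEY AUTOINCREMENT, name STRING)')
curs.execute('INSERT INTO persons(name) values(?)', ('Mike',))
conn.commit()
curs.execute('SELECT * FROM persons WHERE name = ?', ('Mike',))
print(curs.fetchall())
curs.close()
conn.close()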
|
[
"noreply@github.com"
] |
jsmack.noreply@github.com
|
7bcdde9d69bbfdf507ab5deb5fc46e32a41bc479
|
5af6c600306d0bb2ad9ff9b7ac660c4f0b250a54
|
/analyses/scripts/plots/create_audio_filter_plots.py
|
df4f9a6deb5ed23d8608ad824c4a417a01791a1c
|
[
"MIT"
] |
permissive
|
jean-andre-gauthier/findsong
|
2c1c47fa4313bae3da6b34c893465c070b20ffe3
|
7dbac881d4ac8aeb0826c5999e1a5bf9ca68ff2f
|
refs/heads/master
| 2021-06-07T07:14:04.182444
| 2020-07-01T07:32:32
| 2020-07-01T07:32:32
| 125,167,600
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,768
|
py
|
"""
Generates a plot for audio filter analyses
WARNING: contains hardcoded values (taken from analyses/data/audio_filters/recognition_rate_for_matches)
"""
from argparse import ArgumentParser
import matplotlib
matplotlib.use("Agg")
from itertools import groupby
import matplotlib.pyplot as plt
import numpy as np
from os import path
def main():
parser = ArgumentParser()
parser.add_argument(
"--audiofilterplotpath",
help="path to the audio filter output plot (path)",
required=True,
type=str)
parser.add_argument(
"--pitchplotpath",
help="path to the pitch output plot (path)",
required=True,
type=str)
parser.add_argument(
"--tempoplotpath",
help="path to the tempo output plot (path)",
required=True,
type=str)
args = parser.parse_args()
if path.exists(args.audiofilterplotpath):
print(f"Error: {args.audiofilterplotpath} already exists")
exit(1)
if path.exists(args.pitchplotpath):
print(f"Error: {args.pitchplotpath} already exists")
exit(1)
if path.exists(args.tempoplotpath):
print(f"Error: {args.tempoplotpath} already exists")
exit(1)
create_audio_filter_plot(args.audiofilterplotpath)
create_pitch_plot(args.pitchplotpath)
create_tempo_plot(args.tempoplotpath)
def create_audio_filter_plot(audio_filter_plot_path):
plt.figure(0, figsize=(5, 7.5))
axes = [plt.subplot2grid((2, 1), (0, 0)), plt.subplot2grid((2, 1), (1, 0))]
    plt.suptitle(
        "Matcher performance with distorted audio", fontsize=12, y=0.05)
plt.tight_layout(pad=4.0, w_pad=4.0, h_pad=4.0)
indices = np.arange(1, 8)
labels = np.array([
"aecho", "aphaser", "chorus", "clean", "flanger", "highpass", "lowpass"
])
values = np.array([97.55, 97.91, 98.05, 99.36, 97.81, 97.88, 99.21])
aecho, aphaser, chorus, clean, flanger, highpass, lowpass = axes[0].bar(
indices, values)
axes[0].set_xticks(indices)
axes[0].set_xticklabels(labels, rotation=45)
axes[0].set_ylim([95, 100])
axes[0].set_ylabel("Recognition rate in %")
cell_text = np.array([["aecho", "0.8:0.8:1000:0.8"], [
"aphaser", "delay=5.0:speed=2.0"
], ["chorus", "0.7:0.9:55:0.4:0.25:2"], ["clean", "-"],
["flanger", "delay=20:depth=5:regen=10:speed=2"],
["highpass", "f=440"], ["lowpass", "f=440"]])
col_labels = np.array(["filter name", "filter parameter"])
axes[1].xaxis.set_visible(False)
axes[1].yaxis.set_visible(False)
table = axes[1].table(
cellText=cell_text,
colLabels=col_labels,
alpha=0.0,
bbox=None,
colLoc="center",
cellLoc="center",
loc="center",
rasterized=False,
rowLoc="center")
table.auto_set_font_size(False)
table.set_fontsize(6)
table.scale(1, 1.75)
for (line, col), cell in table.get_celld().items():
if line == 0:
cell._text.set_weight("bold")
cell.set_linewidth(0)
cell.set_fill(False)
plt.savefig(audio_filter_plot_path, transparent=True)
def create_pitch_plot(pitch_plot_path):
xs = np.arange(1, 7)
ys1 = np.array([41, 12, 5, 2, 10, 1])
ys2 = np.array([38.29, 24.33, 20.4, 15, 16.3, 13])
create_plot(xs, "Pitch shift (halftones)", ys1, "Recognition rate in %",
ys2, "Average match score",
"Matcher performance with pitch shift", pitch_plot_path)
def create_tempo_plot(tempo_plot_path):
xs = np.array([2.5, 5, 7.5, 10, 12.5, 15])
ys1 = np.array([97, 95, 73, 54, 49, 36])
ys2 = np.array([76.26, 39.14, 26.93, 23.74, 21.24, 20.28])
create_plot(xs, "Tempo increase (percent)", ys1, "Recognition rate in %",
ys2, "Average match score",
"Matcher performance with tempo increase", tempo_plot_path)
def create_plot(xs, xs_label, ys1, ys_label1, ys2, ys_label2, title,
file_name):
figure, axis1 = plt.subplots()
axis1.set_xlabel(xs_label)
axis1.set_ylabel(ys_label1, color="red")
axis1.tick_params(axis='y', labelcolor="red")
handle1, = plt.plot(xs, ys1, "r--", label=ys_label1)
ticks = [tick for tick in plt.gca().get_yticks() if tick >= 0]
plt.gca().set_yticks(ticks)
axis2 = axis1.twinx()
axis2.set_ylabel(ys_label2, color="blue")
axis2.tick_params(axis='y', labelcolor="blue")
handle2, = plt.plot(xs, ys2, "b--", label=ys_label2)
figure.tight_layout(pad=3.0, w_pad=3.0, h_pad=3.0)
figure.suptitle(title, fontsize=12, y=0.05)
plt.legend(handles=[handle1, handle2], loc=1)
plt.savefig(file_name, transparent=True)
if __name__ == "__main__":
main()
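# Example invocation (output paths are hypothetical; the script refuses to
# overwrite files that already exist):
#   python create_audio_filter_plots.py \
#       --audiofilterplotpath filters.png \
#       --pitchplotpath pitch.png \
#       --tempoplotpath tempo.png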
|
[
"jean.andre.gauthier@gmail.com"
] |
jean.andre.gauthier@gmail.com
|
321011e09a0f1060c3d37f2603fac8f748d076a1
|
0c165b875e9c0189a01fdd77b6a5c22a371be1f5
|
/bp_locations/deprecated/get_TopBPs.py
|
60328b4a9fd5431f7f8c102588aa7a2ed60a87ab
|
[
"MIT"
] |
permissive
|
gunnarpope/eosblocklife
|
ed904c9257860660a98fb8719cd8398e88cf9455
|
1028788f73568de68f7df51f5d960e3e38d9bc4e
|
refs/heads/master
| 2020-04-07T06:12:36.129427
| 2019-07-19T18:35:10
| 2019-07-19T18:35:10
| 158,126,213
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
import os
import requests
import json
from pprint import pprint
from get_api import *
# output = str(os.system('cleos -u https://api.eossweden.org system listproducers -l 100 > bp_list.txt'))
with open('bp_list.txt','r') as f:
data = f.readlines()
# create a row entry for each BP
bps = [ row.strip().split() for row in data]
header = bps[0]
bps = bps[1:] # strip the header
# print the top 21 BPs
for bp in bps[:21]:
print(bp)
rank = 1
for i in range(len(bps)):
# bp[i][3] = float(bp[i][3])
bps[i].append(rank)
rank += 1
# top21bp = bps[:21]
top21bp = bps[:2] # REMOVE LATER, FOR TESTING ONLY
print(len(top21bp))
print(top21bp[:5])
bot21bp = bps[21:]
print(len(bot21bp))
print(bot21bp[:5])
# get the url for each bp
urls = [ [x[0], x[2]] for x in top21bp]
print(urls)
# get the gps coordinates for each bp
bp_url = top21bp[0][2]
print(bp_url)
bp_list = []
bp_error= []
bpjson = {}
for bp in top21bp:
bpname = bp[0]
rank = bp[4]
lat, lon, country, city = get_location(bp[2])
if city != 'NULL':
print(bpname, rank, lat, lon)
bp_list.append((bpname, rank, lat, lon))
bpjson[bpname] = {'rank': rank, 'lat': lat, 'lon': lon}
else:
print(bpname, rank, 'NULL','NULL')
bp_error.append(bpname)
bpjson[bpname] = {'rank': rank, 'lat':'NULL', 'lon':'NULL'}
with open('bp_rank_location.json','w') as f:
f.write(json.dumps(bpjson))
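# Note: get_location (pulled in by the star import from get_api above) is
# assumed to take a BP's URL and return a (lat, lon, country, city) tuple,
# using the string 'NULL' for fields it cannot resolve, matching its use here.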
|
[
"gunnar@gmail.com"
] |
gunnar@gmail.com
|
d01fc85129d2621718d9fb4e85e5e59853382e4e
|
85291887cc4550a45acc077d0ef007efca39460c
|
/fastreid/solver/optim/__init__.py
|
35166055399065859dc667f41a27d92176acc14e
|
[
"Apache-2.0"
] |
permissive
|
lingxiao-he/fast-reid
|
309915e98b679264ae6d57b3573cf00502e8576a
|
29f318c609a6c94b4ae8ab2d88ca37f689e6c109
|
refs/heads/master
| 2022-09-12T04:18:21.906079
| 2020-06-01T02:42:33
| 2020-06-01T02:42:33
| 268,408,241
| 0
| 1
|
Apache-2.0
| 2020-06-01T02:38:57
| 2020-06-01T02:38:56
| null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from .lamb import Lamb
from .lookahead import Lookahead, LookaheadAdam
from .novograd import Novograd
from .over9000 import Over9000, RangerLars
from .radam import RAdam, PlainRAdam, AdamW
from .ralamb import Ralamb
from .ranger import Ranger
from .swa import SWA
from torch.optim import *
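# Example usage (a sketch: assumes Ranger follows the standard torch.optim
# constructor convention of (params, lr=...)):
#
# import torch.nn as nn
# model = nn.Linear(10, 2)
# optimizer = Ranger(model.parameters(), lr=1e-3)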
|
[
"sherlockliao01@gmail.com"
] |
sherlockliao01@gmail.com
|
09312b413626034603d22e8710c8a59d6faa95ce
|
3199589c741e1be8bf226bfdb557978cf36b10f7
|
/smartrez/migrations/0005_auto_20170602_0531.py
|
f6b0b866fe027243eeb270ce98ca028249e0e61e
|
[] |
no_license
|
yadav1aryan/smartrez
|
566f0555850bc8143edd53222018025706529864
|
89caa41005fd9d2a26bfd9ef05522e05a255f885
|
refs/heads/master
| 2021-01-23T10:21:25.226003
| 2017-06-06T06:05:54
| 2017-06-06T06:05:54
| 93,055,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-02 05:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('smartrez', '0004_img_thumb_url'),
]
operations = [
migrations.RemoveField(
model_name='selected_imgs',
name='search_query',
),
migrations.DeleteModel(
name='Selected_imgs',
),
]
|
[
"yadav1aryan@gmail.com"
] |
yadav1aryan@gmail.com
|
72d6712831449572e9936eb833178603ded478e6
|
5c7da7dabdc076ad7113ccd20561a8bbf5f9a70e
|
/investments/api/urls.py
|
324a28aca34350ecc4cebfd00b973c17458b954b
|
[] |
no_license
|
aqcloudacio/cloudaciofeez
|
2499fb5fc5334fa871daab2abea6c34bfa8c7667
|
8399560ece9aa10a6d6801f42c027dca26a65936
|
refs/heads/master
| 2023-02-27T22:36:20.501159
| 2021-02-11T00:03:46
| 2021-02-11T00:03:46
| 337,887,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
from django.urls import include, path
from rest_framework.routers import SimpleRouter
from rest_framework_nested import routers
from investments.api.views import (InvestmentViewSet, InvestmentClassViewSet,
AssetAllocationViewSet,
AssetAllocationNameViewSet,
InvestmentNameViewSet,
NABInvestmentViewSet,
InvestmentTemplateViewSet,
AssetAllocationTemplateViewSet,
UnlinkedAssetAllocationNameViewSet,
UnlinkedInvestmentNameViewSet,
InvestmentAASummaryViewset)
from portfolios.api.urls import platform_router
app_name = "investments"
#########
# Full length routes
#########
portfolio_router = routers.NestedSimpleRouter(platform_router,
r'portfolios',
lookup='portfolio')
portfolio_router.register(r'investments',
InvestmentViewSet,
'investments')
investments_router = routers.NestedSimpleRouter(portfolio_router,
r'investments',
lookup='investment')
investments_router.register(r'aa',
AssetAllocationViewSet,
'aa')
#########
# Template routes
#########
templaterouter = SimpleRouter()
templaterouter.register(r"investmenttemplate", InvestmentTemplateViewSet)
investment_template_router = routers.NestedSimpleRouter(templaterouter,
r'investmenttemplate',
lookup='investmenttemplate')
investment_template_router.register(r'aa',
AssetAllocationTemplateViewSet,
'aa')
#########
# Basename routes
#########
rootrouter = SimpleRouter()
rootrouter.register(r"investmentclass", InvestmentClassViewSet)
rootrouter.register(r"aaname", AssetAllocationNameViewSet)
rootrouter.register(r"unlinkedaaname", UnlinkedAssetAllocationNameViewSet)
# All investment names
rootrouter.register(r"investmentname", InvestmentNameViewSet)
# Only investment names that are not linked to a platform (non-specific invs)
rootrouter.register(r"unlinkedinvestmentname", UnlinkedInvestmentNameViewSet)
rootrouter.register(r"NABinvestment", NABInvestmentViewSet)
urlpatterns = [
path("", include(portfolio_router.urls)),
path("", include(investments_router.urls)),
path("", include(templaterouter.urls)),
path("", include(investment_template_router.urls)),
path("", include(rootrouter.urls))
]
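# With these nested routers the resolved routes have shapes like the
# following (any prefix contributed by platform_router is assumed):
#   .../portfolios/{portfolio_pk}/investments/
#   .../portfolios/{portfolio_pk}/investments/{investment_pk}/aa/
#   investmenttemplate/{investmenttemplate_pk}/aa/
#   investmentclass/, aaname/, investmentname/, NABinvestment/, ...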
|
[
"alejandro.quintero@clouxter.com"
] |
alejandro.quintero@clouxter.com
|
87b391c44ecdd79cf4d4aa98c49de5f95409783b
|
ac562c0d008a282bef5ea4705b4fc8c2b8897964
|
/Ch07_exceptions/TRY_EXTENSIONS/ch07_09_assertion.py
|
2376c7c5769be7bd3562d34a00bcfa0da2a2e6a0
|
[] |
no_license
|
jimmus69/python-projects
|
ed78a50250356e9366c9d10303facf037a310596
|
9a91fe7f979e6e09dedec7b60595e91ae2d0d321
|
refs/heads/master
| 2021-01-21T10:52:08.486557
| 2017-05-18T18:51:54
| 2017-05-18T18:51:54
| 91,708,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
#! /usr/bin/python -O
# note: the -O flag disables assert statements, so with this shebang the
# AssertionError branch below never fires; run without -O to exercise it
"""
  Program:  ch07_09_assertion.py
  Function: An exploration of assertion
"""
import sys
def get_number():
number = int(raw_input("Enter a number (10 - 99): "))
assert number > 9 and number < 100, "Number must be between 10 and 99"
return number
while True:
try:
number = get_number()
result = 100 / number
except AssertionError, error_string:
print error_string
except (KeyboardInterrupt, EOFError):
break
except:
print "specific exception =", str(sys.exc_info()[0]).split('.')[-1][:-2]
print "error string =", str(sys.exc_info()[1])
else:
print "The value is ", result
print "Good bye!"
exit(0)
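# A sample session (illustrative input after each prompt):
#   Enter a number (10 - 99): 5
#   Number must be between 10 and 99
#   Enter a number (10 - 99): 50
#   The value is  2
#   <Ctrl-C or Ctrl-D>
#   Good bye!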
|
[
"noreply@github.com"
] |
jimmus69.noreply@github.com
|
99d5bdfc4d5e27844954b4dc1ceb95a16c54bf99
|
0322f3ea9e66a303d46e229ddf2cbd46e794f46e
|
/model/label.py
|
3506eecf086118b43ecaa9df7026b9755a24bbea
|
[] |
no_license
|
WangYX-TKZ/AdvancedEAST-caffe
|
3df0c7cff265439bf2e1b888be0b9e3d9920dd95
|
0e56626165fd679f2daa302286d5025078b131ef
|
refs/heads/master
| 2022-12-04T13:01:46.971973
| 2020-08-27T06:44:00
| 2020-08-27T06:44:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,787
|
py
|
import numpy as np
import os
import cv2
from PIL import Image, ImageDraw
from tqdm import tqdm
import cfg
def point_inside_of_quad(px, py, quad_xy_list, p_min, p_max):
if (p_min[0] <= px <= p_max[0]) and (p_min[1] <= py <= p_max[1]):
xy_list = np.zeros((4, 2))
xy_list[:3, :] = quad_xy_list[1:4, :] - quad_xy_list[:3, :]
xy_list[3] = quad_xy_list[0, :] - quad_xy_list[3, :]
yx_list = np.zeros((4, 2))
yx_list[:, :] = quad_xy_list[:, -1:-3:-1]
a = xy_list * ([py, px] - yx_list)
b = a[:, 0] - a[:, 1]
if np.amin(b) >= 0 or np.amax(b) <= 0:
return True
else:
return False
else:
return False
def point_inside_of_nth_quad(px, py, xy_list, shrink_1, long_edge):
nth = -1
vs = [[[0, 0, 3, 3, 0], [1, 1, 2, 2, 1]],
[[0, 0, 1, 1, 0], [2, 2, 3, 3, 2]]]
for ith in range(2):
quad_xy_list = np.concatenate((
np.reshape(xy_list[vs[long_edge][ith][0]], (1, 2)),
np.reshape(shrink_1[vs[long_edge][ith][1]], (1, 2)),
np.reshape(shrink_1[vs[long_edge][ith][2]], (1, 2)),
np.reshape(xy_list[vs[long_edge][ith][3]], (1, 2))), axis=0)
p_min = np.amin(quad_xy_list, axis=0)
p_max = np.amax(quad_xy_list, axis=0)
if point_inside_of_quad(px, py, quad_xy_list, p_min, p_max):
if nth == -1:
nth = ith
else:
nth = -1
break
return nth
def shrink(xy_list, ratio=cfg.shrink_ratio):
    if ratio == 0.0:
        # keep the 3-tuple shape callers unpack; long_edge defaults to 0
        return xy_list, xy_list, 0
diff_1to3 = xy_list[:3, :] - xy_list[1:4, :]
diff_4 = xy_list[3:4, :] - xy_list[0:1, :]
diff = np.concatenate((diff_1to3, diff_4), axis=0)
dis = np.sqrt(np.sum(np.square(diff), axis=-1))
# determine which are long or short edges
long_edge = int(np.argmax(np.sum(np.reshape(dis, (2, 2)), axis=0)))
short_edge = 1 - long_edge
# cal r length array
r = [np.minimum(dis[i], dis[(i + 1) % 4]) for i in range(4)]
# cal theta array
diff_abs = np.abs(diff)
diff_abs[:, 0] += cfg.epsilon
theta = np.arctan(diff_abs[:, 1] / diff_abs[:, 0])
# shrink two long edges
temp_new_xy_list = np.copy(xy_list)
shrink_edge(xy_list, temp_new_xy_list, long_edge, r, theta, ratio)
shrink_edge(xy_list, temp_new_xy_list, long_edge + 2, r, theta, ratio)
# shrink two short edges
new_xy_list = np.copy(temp_new_xy_list)
shrink_edge(temp_new_xy_list, new_xy_list, short_edge, r, theta, ratio)
shrink_edge(temp_new_xy_list, new_xy_list, short_edge + 2, r, theta, ratio)
return temp_new_xy_list, new_xy_list, long_edge
def shrink_edge(xy_list, new_xy_list, edge, r, theta, ratio=cfg.shrink_ratio):
if ratio == 0.0:
return
start_point = edge
end_point = (edge + 1) % 4
long_start_sign_x = np.sign(
xy_list[end_point, 0] - xy_list[start_point, 0])
new_xy_list[start_point, 0] = \
xy_list[start_point, 0] + \
long_start_sign_x * ratio * r[start_point] * np.cos(theta[start_point])
long_start_sign_y = np.sign(
xy_list[end_point, 1] - xy_list[start_point, 1])
new_xy_list[start_point, 1] = \
xy_list[start_point, 1] + \
long_start_sign_y * ratio * r[start_point] * np.sin(theta[start_point])
# long edge one, end point
long_end_sign_x = -1 * long_start_sign_x
new_xy_list[end_point, 0] = \
xy_list[end_point, 0] + \
long_end_sign_x * ratio * r[end_point] * np.cos(theta[start_point])
long_end_sign_y = -1 * long_start_sign_y
new_xy_list[end_point, 1] = \
xy_list[end_point, 1] + \
long_end_sign_y * ratio * r[end_point] * np.sin(theta[start_point])
def process_label(data_dir=cfg.data_dir):
with open(os.path.join(data_dir, cfg.val_fname), 'r') as f_val:
f_list = f_val.readlines()
with open(os.path.join(data_dir, cfg.train_fname), 'r') as f_train:
f_list.extend(f_train.readlines())
for line, _ in zip(f_list, tqdm(range(len(f_list)))):
line_cols = str(line).strip().split(',')
img_name, width, height = \
line_cols[0].strip(), int(line_cols[1].strip()), \
int(line_cols[2].strip())
        gt = np.zeros((7, height // cfg.pixel_size, width // cfg.pixel_size))
        # channels-first layout, e.g. (7, 128, 128); the channels-last
        # alternative was: np.zeros((height // cfg.pixel_size, width // cfg.pixel_size, 7))
train_label_dir = os.path.join(data_dir, cfg.train_label_dir_name)
xy_list_array = np.load(os.path.join(train_label_dir,
img_name[:-4] + '.npy'))
train_image_dir = os.path.join(data_dir, cfg.train_image_dir_name)
img_path = os.path.join(train_image_dir, img_name)
        im = cv2.imread(img_path)
        if im is None:  # `im == None` on a numpy array is elementwise and raises on truth-testing
            print(img_path)
            continue
# with Image.open(os.path.join(train_image_dir, img_name)) as im:
# draw = ImageDraw.Draw(im)
for xy_list in xy_list_array:
_, shrink_xy_list, _ = shrink(xy_list, cfg.shrink_ratio)
shrink_1, _, long_edge = shrink(xy_list, cfg.shrink_side_ratio)
p_min = np.amin(shrink_xy_list, axis=0)
p_max = np.amax(shrink_xy_list, axis=0)
# floor of the float
ji_min = (p_min / cfg.pixel_size - 0.5).astype(int) - 1
# +1 for ceil of the float and +1 for include the end
ji_max = (p_max / cfg.pixel_size - 0.5).astype(int) + 3
imin = np.maximum(0, ji_min[1])
imax = np.minimum(height // cfg.pixel_size, ji_max[1])
jmin = np.maximum(0, ji_min[0])
jmax = np.minimum(width // cfg.pixel_size, ji_max[0])
for i in range(imin, imax):
for j in range(jmin, jmax):
px = (j + 0.5) * cfg.pixel_size
py = (i + 0.5) * cfg.pixel_size
if point_inside_of_quad(px, py,
shrink_xy_list, p_min, p_max):
gt[0,i, j] = 1
line_width, line_color = 1, (0,0,255)
ith = point_inside_of_nth_quad(px, py,
xy_list,
shrink_1,
long_edge)
vs = [[[3, 0], [1, 2]], [[0, 1], [2, 3]]]
if ith in range(2):
gt[1,i, j] = 1
if ith == 0:
line_width, line_color = 2, (0,255,255)
else:
line_width, line_color = 2, (0,255,0)
gt[2:3,i, j] = ith
gt[3:5,i, j]=xy_list[vs[long_edge][ith][0]] - [px, py]
gt[5:,i, j]=xy_list[vs[long_edge][ith][1]] - [px, py]
cv2.line(im, (int(px - 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)),
(int(px + 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)), line_color,
line_width)
cv2.line(im, (int(px + 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)),
(int(px + 0.5 * cfg.pixel_size), int(py + 0.5 * cfg.pixel_size)), line_color,
line_width)
cv2.line(im, (int(px + 0.5 * cfg.pixel_size), int(py + 0.5 * cfg.pixel_size)),
(int(px - 0.5 * cfg.pixel_size), int(py + 0.5 * cfg.pixel_size)), line_color,
line_width)
cv2.line(im, (int(px - 0.5 * cfg.pixel_size), int(py + 0.5 * cfg.pixel_size)),
(int(px - 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)), line_color,
line_width)
cv2.line(im, (int(px - 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)),
(int(px + 0.5 * cfg.pixel_size), int(py - 0.5 * cfg.pixel_size)), line_color,
line_width)
act_image_dir = os.path.join(cfg.data_dir,
cfg.show_act_image_dir_name)
if cfg.draw_act_quad:
# im.save(os.path.join(act_image_dir, img_name))
cv2.imwrite(os.path.join(act_image_dir, img_name),im)
train_label_dir = os.path.join(data_dir, cfg.train_label_dir_name)
np.save(os.path.join(train_label_dir,
img_name[:-4] + '_gt.npy'), gt)
if __name__ == '__main__':
process_label()
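    # Tiny illustration of shrink() on its own (a sketch): shrinking a unit
    # square pulls each vertex toward the interior by cfg.shrink_ratio.
    # square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    # print(shrink(square)[1])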
|
[
"395934383@qq.com"
] |
395934383@qq.com
|
979fda9d64ffb33988e33d552691c2db5cf5b8c9
|
cc3b5dca5e969b3890ccd91d41d04b068e21b13a
|
/graph.py
|
25c4859fc4c756d4a071916d28163ecb5d7f5bfa
|
[] |
no_license
|
saransappa/Graph-Algorithms-in-Python
|
846cfe53ed4dc5ac3ab093f20b4db7580c85a502
|
2d8530ce8037e093ce5aa0dd8129b10dee308cfa
|
refs/heads/master
| 2022-11-26T10:33:50.629733
| 2020-07-27T05:41:59
| 2020-07-27T05:41:59
| 281,698,931
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,403
|
py
|
# @author: Saran Sappa
class graph_node:
    label = None
    adjlist = None  # adjacency list of the node
    visited = None  # -1 = not visited, 0 = discovered but still being processed, 1 = fully processed
    component = None  # used for assigning a connected component number
def __init__(self, l):
self.label = l
self.adjlist = []
self.visited = -1
self.component = -1
def add_neighbour(self, k):
self.adjlist.append(k)
def print(self,adj = True):
print(self.label,end = " ")
if adj:
print("->",end=" ")
for i in self.adjlist:
print(i.label, end=" -> ")
print("\n")
def dfs(self,output=True, count=-1): #count defines the connected component number while finding the connected components
if output:
print(self.label,end=" -> ")
self.visited = 0
if count>0:
self.component = count
for i in self.adjlist:
if i.visited == -1:
i.dfs(output=output,count=count)
self.visited = 1
def SCC_dfs(self,arr):
self.visited = 0
k = arr
k.append(self.label)
for i in self.adjlist:
if i.visited == -1:
k = i.SCC_dfs(k)
self.visited = 1
return k
    def isCyclic(self):
        self.visited = 0
        k = False
        for i in self.adjlist:
            if i.visited == -1:
                # accumulate with `or` so an already-found cycle is not lost
                k = k or i.isCyclic()
            if i.visited == 0:  # back edge to a node still in process
                return True
        self.visited = 1
        return k
class graph:
size = None
directed = None
nodes = None
cyclic = None
def __init__(self, s, directed=True):
self.size =s
self.cyclic = False
self.directed = directed
self.nodes = []
for i in range(s):
g= graph_node(i)
self.nodes.append(g)
def print(self):
print('-'*10+ " Adjacency lists of all vertices "+'-'*10)
for i in self.nodes:
i.print()
def add_edge(self,k,l):
p = None #Temporary variable
q = None #Temporary variable
for i in self.nodes:
if i.label == k:
p = i
break
for i in self.nodes:
if i.label == l:
q = i
if self.directed:
p.add_neighbour(q)
else:
p.add_neighbour(q)
q.add_neighbour(p)
def dfs(self, start = -1,output=True,mod_visited = True): # start denotes the label of starting node for DFS.
if output: #output is True if we want to print DFS output else it is False.
print('-'*10 + " Depth First Search "+'-'*10)
if start==-1: # start becomes -1 if label if start node is not provided
for i in self.nodes:
if i.visited == -1:
i.dfs(output=output)
else:
self.nodes[start].dfs(output=output)
if mod_visited: # mod_visited if True if we want to clear visited attribute of all vertices else it is False.
for i in self.nodes: # Marking all nodes as unvisited after completion of DFS
i.visited = -1
print("\n")
def bfs(self, start = -1): # start denotes the label of starting node for BFS
print('-'*10 + " Breadth First Search "+'-'*10)
initial = None
if start==-1: # start becomes -1 if label if start node is not provided
initial = 0
else:
initial = start
queue = []
queue.append(self.nodes[initial])
#self.nodes[initial].visited = 1
while len(queue)!=0:
p = queue.pop(0)
p.visited = 1
print(p.label,end=" -> ")
for i in p.adjlist:
if i.visited == -1:
queue.append(i)
i.visited = 0
for i in self.nodes: # Marking all nodes as unvisited after completion of BFS
i.visited = -1
print("\n")
    def isConnected(self):
        self.dfs(start=0, output=False, mod_visited=False)
        connected = all(i.visited != -1 for i in self.nodes)
        for i in self.nodes:  # reset visited flags so later traversals start clean
            i.visited = -1
        if connected:
            print("The graph is connected.")
        else:
            print("The graph is disconnected.")
    def noOfConnectedComponents(self):
        _count = 1
        for i in self.nodes:
            if i.visited == -1:
                i.dfs(output=False, count=_count)
                _count += 1
        for i in self.nodes:  # Marking all nodes as unvisited after the component sweep
            i.visited = -1
        print("\n")
        return _count
def connectedComponents(self):
count = self.noOfConnectedComponents()
for i in range(1,count):
print("Connected Component "+str(i))
for j in self.nodes:
if j.component == i:
print(j.label,end=" -> ")
print()
def SCC_dfs(self):
arr = []
for i in self.nodes:
if i.visited == -1:
arr = i.SCC_dfs(arr)
for i in self.nodes: # Marking all nodes as unvisited after completion of BFS
i.visited = -1
return arr
def SCCUtil(self,count_):
count = count_
for i in range(1,count):
print("Strongly Connected Component "+str(i))
for j in self.nodes:
if j.component == i:
print(j.label,end=" -> ")
print()
def SCC(self,g_): # Call this method to find strongly connected components g= given graph, g_ = reverse graph
k = g_.SCC_dfs()
k.reverse()
count = 1
for i in k:
if self.nodes[i].visited == -1:
self.nodes[i].dfs(output=False,count = count)
count+=1
self.SCCUtil(count_ = count)
    def isCyclic(self):  # Call this method to check whether the graph has a cycle
        for i in self.nodes:
            if i.visited == -1:
                # accumulate with `or`: a cycle found in an earlier component
                # must not be overwritten by a later acyclic one
                self.cyclic = self.cyclic or i.isCyclic()
            if i.visited == 0:
                self.cyclic = True
        print(self.cyclic)
        for i in self.nodes:  # Marking all nodes as unvisited after the check
            i.visited = -1
if __name__ == "__main__":
s = int(input("Please enter the size of the graph : "))
k = int(input("Please enter 1 for directed graph or 0 for undirected graph : "))
z = None
if k==1:
z = True
else:
z = False
g = graph(s, directed=z)
t = int(input("Please enter the no.of edges : "))
print("Please enter the edges in the format \"K l\" (without quotes) for an edge(k,l)")
if k == 1:
g_ = graph(s,directed=True)
for i in range(t):
k = input().split()
g.add_edge(int(k[0]),int(k[1]))
g_.add_edge(int(k[1]),int(k[0]))
"""
Note: Use SCC for directed graphs and connectedComponents for undirected graphs respectively.
"""
#g.print()
#g_.print()
#g.dfs()
#g_.dfs()
#g.SCC(g_)
#g.isCyclic()
#g.dfs(start=1)
#g.bfs()
#g.bfs(start =2)
#g.isConnected()
#print("No. of connected components in the graph = "+str(g.noOfConnectedComponents()))
#g.connectedComponents()
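# Example session for the directed graph 0->1, 1->2, 2->0:
#   Please enter the size of the graph : 3
#   Please enter 1 for directed graph or 0 for undirected graph : 1
#   Please enter the no.of edges : 3
#   0 1
#   1 2
#   2 0
# Uncommenting g.isCyclic() would then print True, and g.SCC(g_) would report
# one strongly connected component containing 0, 1 and 2.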
|
[
"saran.sappa@gmail.com"
] |
saran.sappa@gmail.com
|
42c706c83bf0ed544bdb726a8bf29c823388270e
|
56a4b179029d1808151bd8435b7b357f6247d8c0
|
/idealab/makeGallery.py
|
2202f2a336ca4feb01898faf62482d220e07f0c7
|
[] |
no_license
|
jtmorgan/grantsbot
|
8bfa1adec259054e75b6391e260c05cf48377a35
|
0d334a962e08e0e0c6eb5970d70a830e596913fa
|
refs/heads/master
| 2020-03-30T23:51:54.143130
| 2019-02-25T23:01:12
| 2019-02-25T23:01:12
| 8,145,168
| 2
| 3
| null | 2015-06-22T18:07:50
| 2013-02-11T19:50:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,244
|
py
|
#! /usr/bin/python2.7
# Copyright 2013 Jtmorgan
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import grantsbot_settings
import output_settings
import profiles
from random import shuffle
import sys
import templates
###FUNCTIONS
def makeGallery():
"""
Makes featured profiles for IdeaLab galleries.
"""
if params['subtype'] in ['intro', 'new_idea', 'ieg_draft', 'participants_wanted']:
featured_list = getFeaturedProfiles()
else:
sys.exit("unrecognized featured content type " + params['subtype'])
prepOutput(featured_list)
def getFeaturedProfiles():
"""
Gets info about the top-billed profiles in a guide.
"""
featured_list = []
profile_page = profiles.Profiles(params[params['subtype']]['input page path'], params[params['subtype']]['input page id'], params)
profile_list = profile_page.getPageSectionData(level = params[params['subtype']]['profile toclevel'])
for profile in profile_list:
# print profile
text = profile_page.getPageText(profile['index'])
profile = profile_page.scrapeInfobox(profile, text)
if len(profile['summary']) > 1 and len(profile['image']) > 1:
profile['action'] = params[params['subtype']]['action']
profile['summary'] = tools.formatSummaries(profile['summary'])
featured_list.append(profile)
shuffle(featured_list)
featured_list = featured_list[:params[params['subtype']]['number featured']]
return featured_list
def prepOutput(featured_list):
first_subpage = params[params['subtype']]['first subpage']
number_featured = params[params['subtype']]['number featured']
featured_list = tools.addDefaults(featured_list)
    output = profiles.Profiles(params[params['subtype']]['output path'], settings = params) #stupid to create a new profile object here. and stupid to re-specify the path below
i = first_subpage
for f in featured_list:
if i <= first_subpage + (number_featured - 1):
f['profile'] = output.formatProfile(f)
f['profile'] = params['header template'] + '\n' + f['profile']
edit_summ = params['edit summary'] % (params['subtype'] + " " + params['type'])
output.publishProfile(f['profile'], params[params['subtype']]['output path'], edit_summ, sb_page = i)
i += 1
else:
break
if __name__ == "__main__":
param = output_settings.Params()
params = param.getParams(sys.argv[1])
params['type'] = sys.argv[1]
params['subtype'] = sys.argv[2]
tools = profiles.Toolkit()
makeGallery()
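# Invoked with the content type and subtype as positional arguments, e.g.:
#   python makeGallery.py <type> intro
# where <type> must be a key known to output_settings.Params and the subtype
# one of: intro, new_idea, ieg_draft, participants_wanted.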
|
[
"jonnymorgan.esq@gmail.com"
] |
jonnymorgan.esq@gmail.com
|
688d7c6bcc6a6697f9f6c1936e4fbe8249fa496d
|
d43f7f98ebadc574fe0c1195c98da3a59803b060
|
/api/migrations/0001_initial.py
|
41d837d580acf874a71dbf5bca1ff0f05cb130f2
|
[] |
no_license
|
rauloojs/tata_heroku
|
1400b74f5a24d1b99657debee641fcebf9d2812c
|
3c5cd6c08c32d53b10915bc07729f331755971fd
|
refs/heads/master
| 2020-05-18T18:59:02.560111
| 2015-09-22T05:32:37
| 2015-09-22T05:32:37
| 42,086,021
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('doc_name', models.CharField(max_length=100)),
('doc_esp', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pat_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tag_name', models.CharField(max_length=50)),
],
),
]
|
[
"rauloojs@gmail.com"
] |
rauloojs@gmail.com
|
cb7409896fed2288aa583a0de9b5fa86614e9641
|
07c8f75a5b061f3a8070bf053311692e4c9a3dce
|
/wage-calculator.py
|
b749dc7cba0b00b3cd5029aa374f0f72c4824387
|
[] |
no_license
|
kuzmicheff/wage-calculator
|
3d4981b84582250eac3be6f2cdc05e60080f12a7
|
d7568684016c1becd1c74aca856cf268e05123b1
|
refs/heads/master
| 2020-12-24T21:01:42.549374
| 2016-05-14T23:00:38
| 2016-05-14T23:00:38
| 58,769,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
print ("Wage Calculator")
weeklyHours=input("Enter work hours: ")
hourlyWage=input("Enter pay rate: ")
weeklyPay=weeklyHours*hourlyWage
if weeklyHours>40:
overtimeWage=hourlyWage*1.5
overtimeAmount=(weeklyHours-40)*overtimeWage
weeklyHours=40
weeklyPay=weeklyHours*hourlyWage+overtimeAmount
weeklyPay=str(weeklyPay)
message="The weekly pay is $"
print (message+weeklyPay)
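# Sample run (with the float() conversions above):
#   Enter work hours: 45
#   Enter pay rate: 10
#   The weekly pay is $475.0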
|
[
"kuzmicheff@gmail.com"
] |
kuzmicheff@gmail.com
|
a9838870e87d81f80191e3ce0af0627564cd2c98
|
30f1efe7d81334daff4175e32f347798ddfef6e5
|
/sqlmaparch/lib/request/connect.py
|
7a964d84d9c6f8a0ff40da18b752e95c1880cd5c
|
[] |
no_license
|
HyperionGray/mr-injector
|
056f3479a3debee0ee78570a0225f14604da0038
|
e3e7f007bfbbb2746493a5ea0e28bd56aab3cd6b
|
refs/heads/master
| 2021-01-19T11:46:19.070871
| 2013-09-25T15:56:57
| 2013-09-25T15:56:57
| 32,473,527
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40,430
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import httplib
import json
import logging
import re
import socket
import string
import time
import urllib2
import urlparse
import traceback
from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.common import asciifyUrl
from lib.core.common import calculateDeltaSeconds
from lib.core.common import clearConsoleLine
from lib.core.common import cpuThrottle
from lib.core.common import evaluateCode
from lib.core.common import extractRegexResult
from lib.core.common import findMultipartPostBoundary
from lib.core.common import getCurrentThreadData
from lib.core.common import getHostHeader
from lib.core.common import getRequestHeader
from lib.core.common import getUnicode
from lib.core.common import logHTTPTraffic
from lib.core.common import pushValue
from lib.core.common import popValue
from lib.core.common import randomizeParameterValue
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import removeReflectiveValues
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import stdev
from lib.core.common import wasLastResponseDelayed
from lib.core.common import unicodeencode
from lib.core.common import urldecode
from lib.core.common import urlencode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import POST_HINT_CONTENT_TYPES
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import NULLCONNECTION
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.enums import REDIRECTION
from lib.core.enums import WEB_API
from lib.core.exception import SqlmapCompressionException
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapValueException
from lib.core.settings import ASTERISK_MARKER
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DEFAULT_CONTENT_TYPE
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import HTTP_ACCEPT_HEADER_VALUE
from lib.core.settings import HTTP_ACCEPT_ENCODING_HEADER_VALUE
from lib.core.settings import MAX_CONNECTION_CHUNK_SIZE
from lib.core.settings import MAX_CONNECTIONS_REGEX
from lib.core.settings import MAX_CONNECTION_TOTAL_SIZE
from lib.core.settings import META_REFRESH_REGEX
from lib.core.settings import MIN_TIME_RESPONSES
from lib.core.settings import IS_WIN
from lib.core.settings import LARGE_CHUNK_TRIM_MARKER
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import PERMISSION_DENIED_REGEX
from lib.core.settings import PLAIN_TEXT_CONTENT_TYPE
from lib.core.settings import UNENCODED_ORIGINAL_VALUE
from lib.core.settings import URI_HTTP_HEADER
from lib.core.settings import WARN_TIME_STDEV
from lib.request.basic import decodePage
from lib.request.basic import forgeHeaders
from lib.request.basic import processResponse
from lib.request.direct import direct
from lib.request.comparison import comparison
from lib.request.methodrequest import MethodRequest
from thirdparty.socks.socks import ProxyError
from thirdparty.multipart import multipartpost
class Connect(object):
"""
This class defines methods used to perform HTTP requests
"""
@staticmethod
def _getPageProxy(**kwargs):
return Connect.getPage(**kwargs)
@staticmethod
def _retryProxy(**kwargs):
threadData = getCurrentThreadData()
threadData.retriesCount += 1
if kb.testMode and kb.previousMethod == PAYLOAD.METHOD.TIME:
            # time-based payloads can cause web server unresponsiveness
            # if the injectable piece of code is some kind of JOIN-like query
            warnMsg = "most probably the web server instance hasn't recovered yet "
            warnMsg += "from the previous time-based payload. If the problem "
            warnMsg += "persists please wait for a few minutes and rerun "
            warnMsg += "without flag T in option '--technique' "
            warnMsg += "(e.g. '--flush-session --technique=BEUS') or try to "
            warnMsg += "lower the value of option '--time-sec' (e.g. '--time-sec=2')"
singleTimeWarnMessage(warnMsg)
elif kb.originalPage is None:
if conf.tor:
warnMsg = "please make sure that you have "
warnMsg += "Tor installed and running so "
warnMsg += "you could successfully use "
warnMsg += "switch '--tor' "
if IS_WIN:
warnMsg += "(e.g. 'https://www.torproject.org/download/download.html.en')"
else:
warnMsg += "(e.g. 'https://help.ubuntu.com/community/Tor')"
else:
warnMsg = "if the problem persists please check that the provided "
warnMsg += "target URL is valid. In case that it is, you can try to rerun "
warnMsg += "with the switch '--random-agent' turned on "
warnMsg += "and/or proxy switches ('--ignore-proxy', '--proxy',...)"
singleTimeWarnMessage(warnMsg)
elif conf.threads > 1:
warnMsg = "if the problem persists please try to lower "
warnMsg += "the number of used threads (option '--threads')"
singleTimeWarnMessage(warnMsg)
time.sleep(1)
kwargs['retrying'] = True
return Connect._getPageProxy(**kwargs)
@staticmethod
def _connReadProxy(conn):
retVal = ""
if not kb.dnsMode and conn:
headers = conn.info()
if headers and (headers.getheader(HTTP_HEADER.CONTENT_ENCODING, "").lower() in ("gzip", "deflate")\
or "text" not in headers.getheader(HTTP_HEADER.CONTENT_TYPE, "").lower()):
retVal = conn.read(MAX_CONNECTION_TOTAL_SIZE)
if len(retVal) == MAX_CONNECTION_TOTAL_SIZE:
warnMsg = "large compressed response detected. Disabling compression"
singleTimeWarnMessage(warnMsg)
kb.pageCompress = False
else:
while True:
_ = conn.read(MAX_CONNECTION_CHUNK_SIZE)
if len(_) == MAX_CONNECTION_CHUNK_SIZE:
warnMsg = "large response detected. This could take a while"
singleTimeWarnMessage(warnMsg)
_ = re.sub(r"(?si)%s.+?%s" % (kb.chars.stop, kb.chars.start), "%s%s%s" % (kb.chars.stop, LARGE_CHUNK_TRIM_MARKER, kb.chars.start), _)
retVal += _
else:
retVal += _
break
if len(retVal) > MAX_CONNECTION_TOTAL_SIZE:
warnMsg = "too large response detected. Automatically trimming it"
singleTimeWarnMessage(warnMsg)
break
return retVal
@staticmethod
def getPage(**kwargs):
"""
This method connects to the target URL or proxy and returns
the target URL page content
"""
if conf.delay is not None and isinstance(conf.delay, (int, float)) and conf.delay > 0:
time.sleep(conf.delay)
elif conf.cpuThrottle:
cpuThrottle(conf.cpuThrottle)
if conf.dummy:
return randomStr(int(randomInt()), alphabet=[chr(_) for _ in xrange(256)]), {}, int(randomInt())
threadData = getCurrentThreadData()
with kb.locks.request:
kb.requestCounter += 1
threadData.lastRequestUID = kb.requestCounter
url = kwargs.get("url", None) or conf.url
get = kwargs.get("get", None)
post = kwargs.get("post", None)
method = kwargs.get("method", None)
cookie = kwargs.get("cookie", None)
ua = kwargs.get("ua", None) or conf.agent
referer = kwargs.get("referer", None) or conf.referer
host = kwargs.get("host", None) or conf.host
direct_ = kwargs.get("direct", False)
multipart = kwargs.get("multipart", False)
silent = kwargs.get("silent", False)
raise404 = kwargs.get("raise404", True)
timeout = kwargs.get("timeout", None) or conf.timeout
auxHeaders = kwargs.get("auxHeaders", None)
response = kwargs.get("response", False)
ignoreTimeout = kwargs.get("ignoreTimeout", False) or kb.ignoreTimeout
refreshing = kwargs.get("refreshing", False)
retrying = kwargs.get("retrying", False)
crawling = kwargs.get("crawling", False)
skipRead = kwargs.get("skipRead", False)
if not urlparse.urlsplit(url).netloc:
url = urlparse.urljoin(conf.url, url)
# flag to know if we are dealing with the same target host
target = reduce(lambda x, y: x == y, map(lambda x: urlparse.urlparse(x).netloc.split(':')[0], [url, conf.url or ""]))
if not retrying:
# Reset the number of connection retries
threadData.retriesCount = 0
        # fix for known issue when urllib2 just skips the other part of provided
        # url split with space char while urlencoding it in the later phase
url = url.replace(" ", "%20")
conn = None
code = None
page = None
_ = urlparse.urlsplit(url)
requestMsg = u"HTTP request [#%d]:\n%s " % (threadData.lastRequestUID, method or (HTTPMETHOD.POST if post is not None else HTTPMETHOD.GET))
requestMsg += ("%s%s" % (_.path or "/", ("?%s" % _.query) if _.query else "")) if not any((refreshing, crawling)) else url
responseMsg = u"HTTP response "
requestHeaders = u""
responseHeaders = None
logHeaders = u""
skipLogTraffic = False
raise404 = raise404 and not kb.ignoreNotFound
# support for non-latin (e.g. cyrillic) URLs as urllib/urllib2 doesn't
# support those by default
url = asciifyUrl(url)
# fix for known issues when using url in unicode format
# (e.g. UnicodeDecodeError: "url = url + '?' + query" in redirect case)
url = unicodeencode(url)
try:
socket.setdefaulttimeout(timeout)
if direct_:
if '?' in url:
url, params = url.split('?', 1)
params = urlencode(params)
url = "%s?%s" % (url, params)
requestMsg += "?%s" % params
elif multipart:
                # Needed in this form because of potential circular dependency
                # problem (option -> update -> connect -> option)
from lib.core.option import proxyHandler
multipartOpener = urllib2.build_opener(proxyHandler, multipartpost.MultipartPostHandler)
conn = multipartOpener.open(unicodeencode(url), multipart)
page = Connect._connReadProxy(conn) if not skipRead else None
responseHeaders = conn.info()
responseHeaders[URI_HTTP_HEADER] = conn.geturl()
page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
return page
elif any((refreshing, crawling)):
pass
elif target:
                if conf.forceSSL and urlparse.urlparse(url).scheme != "https":
                    # note: re.sub's 4th positional argument is `count`, not
                    # `flags`, so use an inline (?i) flag instead of re.I
                    url = re.sub(r"(?i)\Ahttp:", "https:", url)
                    url = re.sub(r"(?i):80/", ":443/", url)
if PLACE.GET in conf.parameters and not get:
get = conf.parameters[PLACE.GET]
if not conf.skipUrlEncode:
get = urlencode(get, limit=True)
if get:
url = "%s?%s" % (url, get)
requestMsg += "?%s" % get
if PLACE.POST in conf.parameters and not post and method in (None, HTTPMETHOD.POST):
post = conf.parameters[PLACE.POST]
elif get:
url = "%s?%s" % (url, get)
requestMsg += "?%s" % get
requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str
# Prepare HTTP headers
headers = forgeHeaders({HTTP_HEADER.COOKIE: cookie, HTTP_HEADER.USER_AGENT: ua, HTTP_HEADER.REFERER: referer})
if kb.authHeader:
headers[HTTP_HEADER.AUTHORIZATION] = kb.authHeader
if kb.proxyAuthHeader:
headers[HTTP_HEADER.PROXY_AUTHORIZATION] = kb.proxyAuthHeader
headers[HTTP_HEADER.ACCEPT] = HTTP_ACCEPT_HEADER_VALUE
headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE if kb.pageCompress else "identity"
headers[HTTP_HEADER.HOST] = host or getHostHeader(url)
if post is not None and HTTP_HEADER.CONTENT_TYPE not in headers:
headers[HTTP_HEADER.CONTENT_TYPE] = POST_HINT_CONTENT_TYPES.get(kb.postHint, DEFAULT_CONTENT_TYPE)
if headers.get(HTTP_HEADER.CONTENT_TYPE) == POST_HINT_CONTENT_TYPES[POST_HINT.MULTIPART]:
warnMsg = "missing 'boundary parameter' in '%s' header. " % HTTP_HEADER.CONTENT_TYPE
warnMsg += "Will try to reconstruct"
singleTimeWarnMessage(warnMsg)
boundary = findMultipartPostBoundary(conf.data)
if boundary:
headers[HTTP_HEADER.CONTENT_TYPE] = "%s; boundary=%s" % (headers[HTTP_HEADER.CONTENT_TYPE], boundary)
if auxHeaders:
for key, item in auxHeaders.items():
headers[key] = item
for key, item in headers.items():
del headers[key]
headers[unicodeencode(key, kb.pageEncoding)] = unicodeencode(item, kb.pageEncoding)
post = unicodeencode(post, kb.pageEncoding)
if method:
req = MethodRequest(url, post, headers)
req.set_method(method)
else:
req = urllib2.Request(url, post, headers)
requestHeaders += "\n".join("%s: %s" % (key.capitalize() if isinstance(key, basestring) else key, getUnicode(value)) for (key, value) in req.header_items())
if not getRequestHeader(req, HTTP_HEADER.COOKIE) and conf.cj:
conf.cj._policy._now = conf.cj._now = int(time.time())
cookies = conf.cj._cookies_for_request(req)
requestHeaders += "\n%s" % ("Cookie: %s" % ";".join("%s=%s" % (getUnicode(cookie.name), getUnicode(cookie.value)) for cookie in cookies))
if post is not None:
if not getRequestHeader(req, HTTP_HEADER.CONTENT_LENGTH):
requestHeaders += "\n%s: %d" % (string.capwords(HTTP_HEADER.CONTENT_LENGTH), len(post))
if not getRequestHeader(req, HTTP_HEADER.CONNECTION):
requestHeaders += "\n%s: close" % HTTP_HEADER.CONNECTION
requestMsg += "\n%s" % requestHeaders
if post is not None:
requestMsg += "\n\n%s" % getUnicode(post)
requestMsg += "\n"
threadData.lastRequestMsg = requestMsg
logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)
conn = urllib2.urlopen(req)
if not kb.authHeader and getRequestHeader(req, HTTP_HEADER.AUTHORIZATION) and conf.aType == AUTH_TYPE.BASIC:
kb.authHeader = getRequestHeader(req, HTTP_HEADER.AUTHORIZATION)
if not kb.proxyAuthHeader and getRequestHeader(req, HTTP_HEADER.PROXY_AUTHORIZATION):
kb.proxyAuthHeader = getRequestHeader(req, HTTP_HEADER.PROXY_AUTHORIZATION)
# Return response object
if response:
return conn, None, None
# Get HTTP response
if hasattr(conn, 'redurl'):
page = (threadData.lastRedirectMsg[1] if kb.redirectChoice == REDIRECTION.NO\
else Connect._connReadProxy(conn)) if not skipRead else None
skipLogTraffic = kb.redirectChoice == REDIRECTION.NO
code = conn.redcode
else:
page = Connect._connReadProxy(conn) if not skipRead else None
code = code or conn.code
responseHeaders = conn.info()
responseHeaders[URI_HTTP_HEADER] = conn.geturl()
page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
status = getUnicode(conn.msg)
if extractRegexResult(META_REFRESH_REGEX, page) and not refreshing:
url = extractRegexResult(META_REFRESH_REGEX, page)
debugMsg = "got HTML meta refresh header"
logger.debug(debugMsg)
if kb.alwaysRefresh is None:
msg = "sqlmap got a refresh request "
msg += "(redirect like response common to login pages). "
msg += "Do you want to apply the refresh "
msg += "from now on (or stay on the original page)? [Y/n]"
choice = readInput(msg, default="Y")
kb.alwaysRefresh = choice not in ("n", "N")
if kb.alwaysRefresh:
if url.lower().startswith('http://'):
kwargs['url'] = url
else:
kwargs['url'] = conf.url[:conf.url.rfind('/') + 1] + url
threadData.lastRedirectMsg = (threadData.lastRequestUID, page)
kwargs['refreshing'] = True
kwargs['get'] = None
kwargs['post'] = None
try:
return Connect._getPageProxy(**kwargs)
except SqlmapSyntaxException:
pass
# Explicit closing of connection object
if not conf.keepAlive:
try:
if hasattr(conn.fp, '_sock'):
conn.fp._sock.close()
conn.close()
except Exception, msg:
warnMsg = "problem occured during connection closing ('%s')" % msg
logger.warn(warnMsg)
except urllib2.HTTPError, e:
page = None
responseHeaders = None
try:
page = e.read() if not skipRead else None
responseHeaders = e.info()
responseHeaders[URI_HTTP_HEADER] = e.geturl()
page = decodePage(page, responseHeaders.get(HTTP_HEADER.CONTENT_ENCODING), responseHeaders.get(HTTP_HEADER.CONTENT_TYPE))
except socket.timeout:
warnMsg = "connection timed out while trying "
warnMsg += "to get error page information (%d)" % e.code
logger.warn(warnMsg)
return None, None, None
except KeyboardInterrupt:
raise
except:
pass
finally:
page = page if isinstance(page, unicode) else getUnicode(page)
code = e.code
threadData.lastHTTPError = (threadData.lastRequestUID, code)
kb.httpErrorCodes[code] = kb.httpErrorCodes.get(code, 0) + 1
status = getUnicode(e.msg)
responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, code, status)
if responseHeaders:
logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize() if isinstance(key, basestring) else key), getUnicode(value)) for (key, value) in responseHeaders.items())
logHTTPTraffic(requestMsg, "%s%s\n\n%s" % (responseMsg, logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE]))
skipLogTraffic = True
if conf.verbose <= 5:
responseMsg += getUnicode(logHeaders)
elif conf.verbose > 5:
responseMsg += "%s\n\n%s" % (logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE])
logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
if e.code == httplib.UNAUTHORIZED:
errMsg = "not authorized, try to provide right HTTP "
errMsg += "authentication type and valid credentials (%d)" % code
raise SqlmapConnectionException(errMsg)
elif e.code == httplib.NOT_FOUND:
if raise404:
errMsg = "page not found (%d)" % code
raise SqlmapConnectionException(errMsg)
else:
debugMsg = "page not found (%d)" % code
singleTimeLogMessage(debugMsg, logging.DEBUG)
processResponse(page, responseHeaders)
elif e.code == httplib.GATEWAY_TIMEOUT:
if ignoreTimeout:
return None, None, None
else:
warnMsg = "unable to connect to the target URL (%d - %s)" % (e.code, httplib.responses[e.code])
if threadData.retriesCount < conf.retries and not kb.threadException:
warnMsg += ". sqlmap is going to retry the request"
logger.critical(warnMsg)
return Connect._retryProxy(**kwargs)
elif kb.testMode:
logger.critical(warnMsg)
return None, None, None
else:
raise SqlmapConnectionException(warnMsg)
else:
debugMsg = "got HTTP error code: %d (%s)" % (code, status)
logger.debug(debugMsg)
except (urllib2.URLError, socket.error, socket.timeout, httplib.BadStatusLine, httplib.IncompleteRead, ProxyError, SqlmapCompressionException), e:
tbMsg = traceback.format_exc()
if "no host given" in tbMsg:
warnMsg = "invalid URL address used (%s)" % repr(url)
raise SqlmapSyntaxException(warnMsg)
elif "forcibly closed" in tbMsg:
warnMsg = "connection was forcibly closed by the target URL"
elif "timed out" in tbMsg:
warnMsg = "connection timed out to the target URL"
elif "URLError" in tbMsg or "error" in tbMsg:
warnMsg = "unable to connect to the target URL"
elif "BadStatusLine" in tbMsg:
warnMsg = "connection dropped or unknown HTTP "
warnMsg += "status code received. Try to force the HTTP User-Agent "
warnMsg += "header with option '--user-agent' or switch '--random-agent'"
elif "IncompleteRead" in tbMsg:
warnMsg = "there was an incomplete read error while retrieving data "
warnMsg += "from the target URL"
else:
warnMsg = "unable to connect to the target URL"
if "BadStatusLine" not in tbMsg:
warnMsg += " or proxy"
if silent:
return None, None, None
elif "forcibly closed" in tbMsg:
logger.critical(warnMsg)
return None, None, None
elif ignoreTimeout and any(_ in tbMsg for _ in ("timed out", "IncompleteRead")):
return None, None, None
elif threadData.retriesCount < conf.retries and not kb.threadException:
warnMsg += ". sqlmap is going to retry the request"
logger.critical(warnMsg)
return Connect._retryProxy(**kwargs)
elif kb.testMode:
logger.critical(warnMsg)
return None, None, None
else:
raise SqlmapConnectionException(warnMsg)
finally:
page = page if isinstance(page, unicode) else getUnicode(page)
socket.setdefaulttimeout(conf.timeout)
processResponse(page, responseHeaders)
if conn and getattr(conn, "redurl", None):
_ = urlparse.urlsplit(conn.redurl)
_ = ("%s%s" % (_.path or "/", ("?%s" % _.query) if _.query else ""))
requestMsg = re.sub("(\n[A-Z]+ ).+?( HTTP/\d)", "\g<1>%s\g<2>" % getUnicode(_), requestMsg, 1)
responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, conn.code, status)
else:
responseMsg += "[#%d] (%d %s):\n" % (threadData.lastRequestUID, code, status)
if responseHeaders:
logHeaders = "\n".join("%s: %s" % (getUnicode(key.capitalize() if isinstance(key, basestring) else key), getUnicode(value)) for (key, value) in responseHeaders.items())
if not skipLogTraffic:
logHTTPTraffic(requestMsg, "%s%s\n\n%s" % (responseMsg, logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE]))
if conf.verbose <= 5:
responseMsg += getUnicode(logHeaders)
elif conf.verbose > 5:
responseMsg += "%s\n\n%s" % (logHeaders, (page or "")[:MAX_CONNECTION_CHUNK_SIZE])
logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
return page, responseHeaders, code
@staticmethod
def queryPage(value=None, place=None, content=False, getRatioValue=False, silent=False, method=None, timeBasedCompare=False, noteResponseTime=True, auxHeaders=None, response=False, raise404=None, removeReflection=True):
"""
This method calls a function to get the target URL page content
and returns its page MD5 hash or a boolean value in case of
string match check ('--string' command line parameter)
"""
if conf.direct:
return direct(value, content)
get = None
post = None
cookie = None
ua = None
referer = None
host = None
page = None
pageLength = None
uri = None
code = None
urlEncodePost = None
if not place:
place = kb.injection.place or PLACE.GET
raise404 = place != PLACE.URI if raise404 is None else raise404
value = agent.adjustLateValues(value)
payload = agent.extractPayload(value)
threadData = getCurrentThreadData()
if conf.httpHeaders:
headers = dict(conf.httpHeaders)
contentType = max(headers[_] if _.upper() == HTTP_HEADER.CONTENT_TYPE.upper() else None for _ in headers.keys())
urlEncodePost = contentType and "urlencoded" in contentType or contentType is None
if (kb.postHint or conf.skipUrlEncode) and urlEncodePost:
urlEncodePost = False
conf.httpHeaders = [_ for _ in conf.httpHeaders if _[1] != contentType]
contentType = POST_HINT_CONTENT_TYPES.get(kb.postHint, PLAIN_TEXT_CONTENT_TYPE)
conf.httpHeaders.append((HTTP_HEADER.CONTENT_TYPE, contentType))
if payload:
if kb.tamperFunctions:
for function in kb.tamperFunctions:
payload = function(payload=payload, headers=auxHeaders)
if not isinstance(payload, basestring):
errMsg = "tamper function '%s' returns " % function.func_name
errMsg += "invalid payload type ('%s')" % type(payload)
raise SqlmapValueException(errMsg)
value = agent.replacePayload(value, payload)
logger.log(CUSTOM_LOGGING.PAYLOAD, safecharencode(payload))
if place == PLACE.CUSTOM_POST:
if kb.postHint in (POST_HINT.SOAP, POST_HINT.XML):
# payloads in SOAP/XML should have chars > and < replaced
# with their HTML encoded counterparts
payload = payload.replace('>', ">").replace('<', "<")
elif kb.postHint == POST_HINT.JSON:
if payload.startswith('"') and payload.endswith('"'):
payload = json.dumps(payload[1:-1])
else:
payload = json.dumps(payload)[1:-1]
value = agent.replacePayload(value, payload)
else:
# GET, POST, URI and Cookie payloads need to be thoroughly URL encoded
if place in (PLACE.GET, PLACE.URI, PLACE.COOKIE) and not conf.skipUrlEncode or place in (PLACE.POST,) and urlEncodePost:
payload = urlencode(payload, '%', False, place != PLACE.URI)
value = agent.replacePayload(value, payload)
if conf.hpp:
if not any(conf.url.lower().endswith(_.lower()) for _ in (WEB_API.ASP, WEB_API.ASPX)):
warnMsg = "HTTP parameter pollution should work only against "
warnMsg += "ASP(.NET) targets"
singleTimeWarnMessage(warnMsg)
if place in (PLACE.GET, PLACE.POST):
_ = re.escape(PAYLOAD_DELIMITER)
match = re.search("(?P<name>\w+)=%s(?P<value>.+?)%s" % (_, _), value)
if match:
payload = match.group("value")
for splitter in (urlencode(' '), ' '):
if splitter in payload:
prefix, suffix = ("*/", "/*") if splitter == ' ' else (urlencode(_) for _ in ("*/", "/*"))
parts = payload.split(splitter)
parts[0] = "%s%s" % (parts[0], suffix)
parts[-1] = "%s%s=%s%s" % (DEFAULT_GET_POST_DELIMITER, match.group("name"), prefix, parts[-1])
for i in xrange(1, len(parts) - 1):
parts[i] = "%s%s=%s%s%s" % (DEFAULT_GET_POST_DELIMITER, match.group("name"), prefix, parts[i], suffix)
payload = "".join(parts)
for splitter in (urlencode(','), ','):
payload = payload.replace(splitter, "%s%s=" % (DEFAULT_GET_POST_DELIMITER, match.group("name")))
value = agent.replacePayload(value, payload)
else:
warnMsg = "HTTP parameter pollution works only with regular "
warnMsg += "GET and POST parameters"
singleTimeWarnMessage(warnMsg)
if place:
value = agent.removePayloadDelimiters(value)
if PLACE.GET in conf.parameters:
get = conf.parameters[PLACE.GET] if place != PLACE.GET or not value else value
if PLACE.POST in conf.parameters:
post = conf.parameters[PLACE.POST] if place != PLACE.POST or not value else value
if PLACE.CUSTOM_POST in conf.parameters:
post = conf.parameters[PLACE.CUSTOM_POST].replace(CUSTOM_INJECTION_MARK_CHAR, "") if place != PLACE.CUSTOM_POST or not value else value
post = post.replace(ASTERISK_MARKER, '*') if post else post
if PLACE.COOKIE in conf.parameters:
cookie = conf.parameters[PLACE.COOKIE] if place != PLACE.COOKIE or not value else value
if PLACE.USER_AGENT in conf.parameters:
ua = conf.parameters[PLACE.USER_AGENT] if place != PLACE.USER_AGENT or not value else value
if PLACE.REFERER in conf.parameters:
referer = conf.parameters[PLACE.REFERER] if place != PLACE.REFERER or not value else value
if PLACE.HOST in conf.parameters:
host = conf.parameters[PLACE.HOST] if place != PLACE.HOST or not value else value
if PLACE.URI in conf.parameters:
uri = conf.url if place != PLACE.URI or not value else value
else:
uri = conf.url
if value and place == PLACE.CUSTOM_HEADER:
if not auxHeaders:
auxHeaders = {}
auxHeaders[value.split(',')[0]] = value.split(',', 1)[1]
if conf.rParam:
def _randomizeParameter(paramString, randomParameter):
retVal = paramString
match = re.search("%s=(?P<value>[^&;]+)" % randomParameter, paramString)
if match:
origValue = match.group("value")
retVal = re.sub("%s=[^&;]+" % randomParameter, "%s=%s" % (randomParameter, randomizeParameterValue(origValue)), paramString)
return retVal
for randomParameter in conf.rParam:
for item in (PLACE.GET, PLACE.POST, PLACE.COOKIE):
if item in conf.parameters:
if item == PLACE.GET and get:
get = _randomizeParameter(get, randomParameter)
elif item == PLACE.POST and post:
post = _randomizeParameter(post, randomParameter)
elif item == PLACE.COOKIE and cookie:
cookie = _randomizeParameter(cookie, randomParameter)
if conf.evalCode:
delimiter = conf.pDel or DEFAULT_GET_POST_DELIMITER
variables = {}
originals = {}
for item in filter(None, (get, post)):
for part in item.split(delimiter):
if '=' in part:
name, value = part.split('=', 1)
value = urldecode(value, convall=True, plusspace=(item==post and kb.postSpaceToPlus))
evaluateCode("%s=%s" % (name, repr(value)), variables)
originals.update(variables)
evaluateCode(conf.evalCode, variables)
for name, value in variables.items():
if name != "__builtins__" and originals.get(name, "") != value:
if isinstance(value, (basestring, int)):
value = unicode(value)
if '%s=' % name in (get or ""):
get = re.sub("((\A|\W)%s=)([^%s]+)" % (name, delimiter), "\g<1>%s" % value, get)
elif '%s=' % name in (post or ""):
post = re.sub("((\A|\W)%s=)([^%s]+)" % (name, delimiter), "\g<1>%s" % value, post)
elif post is not None:
post += "%s%s=%s" % (delimiter, name, value)
else:
get += "%s%s=%s" % (delimiter, name, value)
if not conf.skipUrlEncode:
get = urlencode(get, limit=True)
if post is not None:
if place not in (PLACE.POST, PLACE.CUSTOM_POST) and hasattr(post, UNENCODED_ORIGINAL_VALUE):
post = getattr(post, UNENCODED_ORIGINAL_VALUE)
elif urlEncodePost:
post = urlencode(post, spaceplus=kb.postSpaceToPlus)
if timeBasedCompare:
if len(kb.responseTimes) < MIN_TIME_RESPONSES:
clearConsoleLine()
if conf.tor:
warnMsg = "it's highly recommended to avoid usage of switch '--tor' for "
warnMsg += "time-based injections because of its high latency time"
singleTimeWarnMessage(warnMsg)
warnMsg = "time-based comparison needs larger statistical "
warnMsg += "model. Making a few dummy requests, please wait.."
singleTimeWarnMessage(warnMsg)
while len(kb.responseTimes) < MIN_TIME_RESPONSES:
Connect.queryPage(content=True)
elif not kb.testMode:
warnMsg = "it is very important not to stress the network adapter's "
warnMsg += "bandwidth during usage of time-based payloads"
singleTimeWarnMessage(warnMsg)
if not kb.laggingChecked:
kb.laggingChecked = True
deviation = stdev(kb.responseTimes)
if deviation > WARN_TIME_STDEV:
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE
warnMsg = "there is considerable lagging "
warnMsg += "in connection response(s). Please use as high "
warnMsg += "value for option '--time-sec' as possible (e.g. "
warnMsg += "10 or more)"
logger.critical(warnMsg)
if conf.safUrl and conf.saFreq > 0:
kb.queryCounter += 1
if kb.queryCounter % conf.saFreq == 0:
Connect.getPage(url=conf.safUrl, cookie=cookie, direct=True, silent=True, ua=ua, referer=referer, host=host)
start = time.time()
if kb.nullConnection and not content and not response and not timeBasedCompare:
noteResponseTime = False
pushValue(kb.pageCompress)
kb.pageCompress = False
if kb.nullConnection == NULLCONNECTION.HEAD:
method = HTTPMETHOD.HEAD
elif kb.nullConnection == NULLCONNECTION.RANGE:
if not auxHeaders:
auxHeaders = {}
auxHeaders[HTTP_HEADER.RANGE] = "bytes=-1"
_, headers, code = Connect.getPage(url=uri, get=get, post=post, cookie=cookie, ua=ua, referer=referer, host=host, silent=silent, method=method, auxHeaders=auxHeaders, raise404=raise404, skipRead=(kb.nullConnection == NULLCONNECTION.SKIP_READ))
if headers:
if kb.nullConnection in (NULLCONNECTION.HEAD, NULLCONNECTION.SKIP_READ) and HTTP_HEADER.CONTENT_LENGTH in headers:
pageLength = int(headers[HTTP_HEADER.CONTENT_LENGTH])
elif kb.nullConnection == NULLCONNECTION.RANGE and HTTP_HEADER.CONTENT_RANGE in headers:
pageLength = int(headers[HTTP_HEADER.CONTENT_RANGE][headers[HTTP_HEADER.CONTENT_RANGE].find('/') + 1:])
kb.pageCompress = popValue()
if not pageLength:
try:
page, headers, code = Connect.getPage(url=uri, get=get, post=post, cookie=cookie, ua=ua, referer=referer, host=host, silent=silent, method=method, auxHeaders=auxHeaders, response=response, raise404=raise404, ignoreTimeout=timeBasedCompare)
except MemoryError:
page, headers, code = None, None, None
warnMsg = "site returned insanely large response"
if kb.testMode:
warnMsg += " in testing phase. This is a common "
warnMsg += "behavior in custom WAF/IDS/IPS solutions"
singleTimeWarnMessage(warnMsg)
if conf.secondOrder:
page, headers, code = Connect.getPage(url=conf.secondOrder, cookie=cookie, ua=ua, silent=silent, auxHeaders=auxHeaders, response=response, raise404=False, ignoreTimeout=timeBasedCompare, refreshing=True)
threadData.lastQueryDuration = calculateDeltaSeconds(start)
kb.originalCode = kb.originalCode or code
if kb.testMode:
kb.testQueryCount += 1
if timeBasedCompare:
return wasLastResponseDelayed()
elif noteResponseTime:
kb.responseTimes.append(threadData.lastQueryDuration)
if not response and removeReflection:
page = removeReflectiveValues(page, payload)
kb.maxConnectionsFlag = re.search(MAX_CONNECTIONS_REGEX, page or "", re.I) is not None
kb.permissionFlag = re.search(PERMISSION_DENIED_REGEX, page or "", re.I) is not None
if content or response:
return page, headers
if getRatioValue:
return comparison(page, headers, code, getRatioValue=False, pageLength=pageLength), comparison(page, headers, code, getRatioValue=True, pageLength=pageLength)
else:
return comparison(page, headers, code, getRatioValue, pageLength)
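# Standalone sketch (not sqlmap code) of the NULLCONNECTION idea used above:
# obtain the page length without downloading the body, via either a HEAD
# request or a one-byte Range request. The `requests` library and the URL
# handling here are assumptions for illustration only.
#
#     import requests
#
#     def page_length(url):
#         headers = requests.head(url).headers
#         if "Content-Length" in headers:
#             return int(headers["Content-Length"])
#         # total size is the figure after '/' in "Content-Range: bytes 0-0/12345"
#         headers = requests.get(url, headers={"Range": "bytes=-1"}, stream=True).headers
#         if "Content-Range" in headers:
#             return int(headers["Content-Range"].rsplit("/", 1)[1])
#         return None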
|
[
"punk@localhost.localdomain"
] |
punk@localhost.localdomain
|
b5518ff31854762bc8a611482d0b0bf0adbeae35
|
c7a9727f0fd2eaf28a1dcb8ba121634e0620d24d
|
/WassersteinGAN_template/FrameSenderReciver_A.py
|
469634bb7f0484665a332cc3887ea70f6caf8056
|
[] |
no_license
|
lvwanyou/SAGAN_MQTT
|
649c52826cb4c48cbd5c79302242737b9e8dc17e
|
c91c7e998fb81b3fbe2f6eea0d75cf2a715ed40a
|
refs/heads/master
| 2022-07-15T14:00:08.532699
| 2020-05-19T09:57:00
| 2020-05-19T09:57:00
| 228,141,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
import socket
from handle_data_util import dataSwitch
import time
import os
import sys
if __name__ == '__main__':
TCP_IP = '127.0.0.1'
TCP_PORT = 502
BUFFER_SIZE = 10000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(0)
s.settimeout(2)  # supersedes setblocking(0): calls now block for up to 2 seconds
s.connect((TCP_IP, TCP_PORT))
logs = []
if len(sys.argv) > 1:
count = int(sys.argv[1])  # resume from the frame index handed over on the command line
else:
count = 0
#with open('GeneratedDataModbus/generated_data_write_single_register_16.txt', 'r') as f:
#with open('dataseven1.txt', 'r') as f:
with open('modbus_write_single_register.txt', 'r') as f:
try:
content = f.readlines()
for i, val in enumerate(content):
if i >= count:
if val is not None and val != '\n':
val = val.strip()
count = count + 1
string = dataSwitch(val.strip('\n'))  # convert the hex frame to raw bytes before sending it to the simulator
s.send(string)
logs.append(str(i) + ' TX ' + val)
time.sleep(0.1)
data = s.recv(BUFFER_SIZE)
# result = data.encode('hex')  # Python 2 spelling of the line below
result = data.hex()
logs.append(str(i) + ' RX ' + result + '\n')
except IOError as e:
s.close()
f.close()
os.system("python FrameSenderReciver_B.py " + str(count))
print('can not read the file!')
finally:
with open("LogDataCommunications/logfirst33.txt", "a") as f:
f.write(" ".join(logs))
# Log line legend -- TX: sending data, RX: receiving data
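# `dataSwitch` (imported from handle_data_util above) is assumed to turn one
# hex frame string into the raw bytes put on the wire; a minimal sketch of
# that assumed behaviour:
def data_switch_sketch(hex_frame):
    # e.g. "000100000006010600010001" -> b'\x00\x01\x00\x00\x00\x06\x01\x06\x00\x01\x00\x01'
    return bytes.fromhex(hex_frame.strip())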
|
[
"lvwanyou@163.com"
] |
lvwanyou@163.com
|
c56a30d7e9a2a3015e39f40d95876a92ff6ac4d6
|
2c88a421e5fcb9fe62a96c83522a464a41316962
|
/resources/user.py
|
d035db209c84fcbaa1aee420ca5fa7a1cfb69fad
|
[] |
no_license
|
kurtispinkney/UdemyAPICourse
|
06409b97c0507ee46389d177ec923297944461f9
|
4ac83915bac1deacf08b08de6fef45db6cbeb694
|
refs/heads/master
| 2020-04-25T19:32:11.369004
| 2019-03-02T20:34:46
| 2019-03-02T20:34:46
| 173,023,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
parser = reqparse.RequestParser()
parser.add_argument("username",
type=str,
required=True,
help="This field cannot be left blank")
parser.add_argument("password",
type=str,
required=True,
help="This field cannot be left blank")
def post(self):
data = UserRegister.parser.parse_args()
if UserModel.find_by_username(data['username']):
return {"message": "A user with that username already exists."}, 400
user = UserModel(**data)
user.save_to_db()
return {"message": "User created successfully."}, 201
|
[
"kurtis.pinkney@gmail.com"
] |
kurtis.pinkney@gmail.com
|
a852056766b2efab5ac506625b024a0666dd43fd
|
3ed82278dc32f90996484552ef25adc2086830a9
|
/web/open_webpage.py
|
cdd38c1ad07347fef58604a97a6ce05ce35744eb
|
[] |
no_license
|
arunkuttiyara/python
|
f9f3bb25eebba08855a9398475a67147863fe0da
|
5dbabd1b2f9e1dcc7c8ef092ffe2ce6644547c26
|
refs/heads/master
| 2021-01-21T13:57:55.652389
| 2016-04-13T01:11:09
| 2016-04-13T01:11:09
| 39,611,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
#!/usr/bin/python
import webbrowser
webbrowser.open('https://www.google.ca/?gfe_rd=cr&ei=BdWdVbj_DOaM8Qf2tLjYAQ&gws_rd=ssl')
webbrowser.open('https://drive.google.com/drive/my-drive?ltmpl=drive')
|
[
"arunkuttiyara@yahoo.com"
] |
arunkuttiyara@yahoo.com
|
eba0be807bb462569eca6e28182a3f6c2562cedc
|
1a2422ffcbd5edd61d1f22951b615a59bcdae782
|
/svhn/fitting_eae_svhn.py
|
8b4d3bc2350e7a4c1e99b304bc6d74f5b6a232fa
|
[] |
no_license
|
laoyangui/autoencoder_based_image_compression
|
1e5180af0cfb03128c7f2599c17b43e5e3e5687b
|
5c65bd56299c2c9bb98c54e968420009827053f0
|
refs/heads/master
| 2020-04-19T16:08:19.115634
| 2019-01-06T14:34:29
| 2019-01-06T14:34:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,564
|
py
|
"""A script to fit a Laplace density to the normed histogram of the latent variables in a trained entropy autoencoder.
250 digits from the SVHN test set are used
for the fitting.
"""
import argparse
import matplotlib
try:
import PyQt5
matplotlib.use('Qt5Agg')
except ImportError:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import os
import pickle
import scipy.stats
import parsing.parsing
import svhn.svhn
import tools.tools as tls
def fitting_eae_svhn(reference_float64, entropy_ae, title, path):
"""Fits a Laplace density to the normed histogram of the latent variables in the trained entropy autoencoder.
Parameters
----------
reference_float64 : numpy.ndarray
2D array with data-type `numpy.float64`.
RGB digits after the preprocessing.
`reference_float64[i, :]` contains the
ith RGB digit after the preprocessing.
entropy_ae : EntropyAutoencoder
Entropy autoencoder trained with a
specific scaling coefficient.
title : str
Title of the saved normed histogram.
path : str
Path to the saved normed histogram. The
path must end with ".png".
"""
y = entropy_ae.encoder(reference_float64)[1]
max_abs_y = numpy.ceil(numpy.amax(numpy.absolute(y))).item()
# The grid below contains 20 points
# per unit interval.
grid = numpy.linspace(-max_abs_y,
max_abs_y,
num=40*int(max_abs_y) + 1)
# Let's assume that `y` contains i.i.d samples from
# an unknown probability density function. The two
# estimates below come from maximizing the Laplace
# likelihood (equivalently, minimizing the Kullback-Leibler
# divergence of the unknown density from our statistical
# model, a Laplace density of location `laplace_location`
# and scale `laplace_scale`). Strictly, the maximum
# likelihood location is the sample median; the sample
# mean used below is a common, close approximation, and
# the scale is the mean absolute deviation about it.
# To dive into the details, see:
# "Estimating distributions and densities". 36-402,
# advanced data analysis, CMU, 27 January 2011.
laplace_location = numpy.mean(y).item()
laplace_scale = numpy.mean(numpy.absolute(y - laplace_location)).item()
laplace_pdf = scipy.stats.laplace.pdf(grid,
loc=laplace_location,
scale=laplace_scale)
handle = [plt.plot(grid, laplace_pdf, color='red')[0]]
hist, bin_edges = numpy.histogram(y,
bins=60,
density=True)
plt.bar(bin_edges[0:60],
hist,
width=bin_edges[1] - bin_edges[0],
align='edge',
color='blue')
plt.title(title)
plt.legend(handle,
[r'$f( . ; {0}, {1})$'.format(str(round(laplace_location, 2)), str(round(laplace_scale, 2)))],
prop={'size': 30},
loc=9)
plt.savefig(path)
plt.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Fits a Laplace density to the normed histogram of the latent variables in a trained entropy autoencoder.')
parser.add_argument('bin_width_init',
help='value of the quantization bin width at the beginning of the training',
type=parsing.parsing.float_strictly_positive)
parser.add_argument('gamma',
help='scaling coefficient',
type=parsing.parsing.float_strictly_positive)
parser.add_argument('--learn_bin_width',
help='if given, at training time, the quantization bin width was learned',
action='store_true',
default=False)
args = parser.parse_args()
path_to_test = 'svhn/results/test_data.npy'
path_to_mean_training = 'svhn/results/mean_training.npy'
path_to_std_training = 'svhn/results/std_training.npy'
if args.learn_bin_width:
suffix = 'learning_bw_{0}_{1}'.format(tls.float_to_str(args.bin_width_init),
tls.float_to_str(args.gamma))
else:
suffix = '{0}_{1}'.format(tls.float_to_str(args.bin_width_init),
tls.float_to_str(args.gamma))
path_to_checking_f = os.path.join('eae/visualization/test/checking_fitting/',
suffix)
if not os.path.exists(path_to_checking_f):
os.makedirs(path_to_checking_f)
path_to_model = 'eae/results/eae_svhn_{}.pkl'.format(suffix)
# `reference_uint8.dtype` is equal to `numpy.uint8`.
reference_uint8 = numpy.load(path_to_test)[0:250, :]
# `mean_training.dtype` and `std_training.dtype`
# are equal to `numpy.float64`.
mean_training = numpy.load(path_to_mean_training)
std_training = numpy.load(path_to_std_training)
# The function `svhn.svhn.preprocess_svhn` checks
# that `reference_uint8.dtype` is equal to `numpy.uint8`
# and `reference_uint8.ndim` is equal to 2.
reference_float64 = svhn.svhn.preprocess_svhn(reference_uint8,
mean_training,
std_training)
with open(path_to_model, 'rb') as file:
entropy_ae = pickle.load(file)
fitting_eae_svhn(reference_float64,
entropy_ae,
'Latent variables',
os.path.join(path_to_checking_f, 'fitting_laplace.png'))
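# Sanity-check sketch (not part of the original script): the exact maximum
# likelihood estimates for a Laplace density are the sample median (location)
# and the mean absolute deviation about it (scale).
def _laplace_mle_sketch():
    rng = numpy.random.default_rng(0)
    samples = rng.laplace(loc=0.5, scale=2.0, size=10000)
    location = numpy.median(samples)
    scale = numpy.mean(numpy.absolute(samples - location))
    print('median/MAD estimate:', location, scale)
    print('scipy fit:', scipy.stats.laplace.fit(samples))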
|
[
"tdumas@ad.inria.fr"
] |
tdumas@ad.inria.fr
|
e97ca3fe809c877888d219f53ac676b825839591
|
948d3b8c03e2fecc4f852cd8b4120e1b3378bfaf
|
/API/PYTHON/20181127/5.py
|
826ec3e8b0c07b4cd384e744b2d3522295f7a33a
|
[] |
no_license
|
ezhuo/ezhuo.github.io
|
e370abb4bfbbfcc5750a5f9fafa2b995bb1d7d48
|
977f3ecdd5dee4eb0f10a42572aaecb335145313
|
refs/heads/master
| 2021-05-05T20:13:35.446537
| 2019-01-26T08:39:26
| 2019-01-26T08:39:26
| 115,300,126
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
from collections import Counter
from collections import OrderedDict
from collections import defaultdict
d = defaultdict(list)
d['a'].append(1)
d['a'].append(2)
d['b'].append(4)
print(d)
d = defaultdict(set)
d['a'].add(1)
d['a'].add(2)
d['b'].add(4)
print(d)
d = OrderedDict()
d['foo'] = 1
d['bar'] = 2
d['spam'] = 3
d['grok'] = 4
d['abc'] = 1
# Outputs "foo 1", "bar 2", "spam 3", "grok 4"
for key in d:
print(key, d[key])
print(d)
words = [
'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',
'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',
'eyes', "don't", 'look', 'around', 'the', 'eyes', 'look', 'into',
'my', 'eyes', "you're", 'under'
]
word_counts = Counter(words)
# the three most frequent words
top_three = word_counts.most_common(3)
print(sorted(top_three))
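# For reference: the three most frequent words above are eyes (8), the (5)
# and look (4), so the final line prints
# [('eyes', 8), ('look', 4), ('the', 5)] -- sorted() orders the tuples alphabetically.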
|
[
"hi371@qq.com"
] |
hi371@qq.com
|
0a16b1f1e718525cf07bd396651a1c7750109c8e
|
2e358129de246b0894e4f0682353016c5d847926
|
/streams_err.py
|
5eeed01adfc4b2a77e660db80d5293dd2e11ee4e
|
[] |
no_license
|
davidtaxer/scripts
|
98095f6ddc9b590228ebcf9e5ad1c6a3172df627
|
977b2ed2efcb05cefc4dfedc74c3b3e7297b97a7
|
refs/heads/main
| 2023-06-09T15:51:54.950345
| 2021-03-06T21:08:50
| 2021-03-06T21:08:50
| 345,185,801
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
#!/usr/bin/env python3
data = input("This will come from STDIN: ")
print("Now we write it to STDOUT: " + data)
raise ValueError("Now we generate an error to STDERR")
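# The uncaught ValueError above makes the interpreter write a traceback to
# STDERR and exit non-zero, which is exactly what this demo is meant to show.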
|
[
"noreply@github.com"
] |
davidtaxer.noreply@github.com
|
33be4cfc896102f5652fad7640e441ab9af55c97
|
c66955c6fc178955c2024e0318ec7a91a8386c2d
|
/testframework/excise/testcases/utdemo/bubble_parametrize.py
|
25b4b472273f4c30378806ba74ca347527b84f05
|
[] |
no_license
|
duheng18/python-study
|
a98642d6ee1b0043837c3e7c5b91bf1e28dfa588
|
13c0571ac5d1690bb9e615340482bdb2134ecf0e
|
refs/heads/master
| 2022-11-30T17:36:57.060130
| 2019-11-18T07:31:40
| 2019-11-18T07:31:40
| 147,268,053
| 1
| 0
| null | 2022-11-22T03:36:51
| 2018-09-04T00:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
'''
Automate the tests for the method below with pytest's rerun and parametrize features:
def bubble_sort(nums):
for i in range(len(nums) - 1):
for j in range(len(nums) - i - 1):
if nums[j] > nums[j + 1]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
return random.choice([nums, None, 10])
'''
import pytest
import random
data = [([1, 2, 3, 4], [1, 2, 3, 4]), ([4, 5, 6, 7], [4, 7, 6, 5])]
def bubble_sort(nums):
for i in range(len(nums) - 1):
for j in range(len(nums) - i - 1):
if nums[j] > nums[j + 1]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
return random.choice([nums, None, 10])
def bubble_sort_new(nums):
for i in range(len(nums) - 1):
for j in range(len(nums) - i - 1):
if nums[j] > nums[j + 1]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
return nums
@pytest.mark.flaky(reruns=3,reruns_delay=2)
@pytest.mark.parametrize("nums", data)
def test_bubble_sort(nums):
print(bubble_sort(nums))
print(bubble_sort_new(nums))
assert bubble_sort(nums) == bubble_sort_new(nums)
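# A sketch of the more conventional parametrization, unpacking each
# (input, expected) pair instead of passing the whole tuple into the test;
# the expected values below are illustrative assumptions:
@pytest.mark.parametrize("nums, expected", [([1, 2, 3, 4], [1, 2, 3, 4]),
                                            ([4, 7, 6, 5], [4, 5, 6, 7])])
def test_bubble_sort_unpacked(nums, expected):
    assert bubble_sort_new(nums) == expected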
if __name__ == '__main__':
pytest.main()
|
[
"emaildh@163.com"
] |
emaildh@163.com
|
51426d948c70181ff18426bfe1fcfb5e2adf5b0a
|
01a1643de3348991dfdf47f34002095d5e57e43d
|
/bot/handlers/GetMarkup.py
|
34c6c5f083c2e3d76410982f55431ff70eed4baa
|
[] |
no_license
|
Sortia/alexworld
|
e5344734a3d702240b028471ddb614c161de2dbb
|
42654ce7f761a450150ca074f92b7f8019795897
|
refs/heads/master
| 2022-11-25T23:56:21.396363
| 2020-07-16T16:06:21
| 2020-07-16T16:06:21
| 279,020,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from bot.handlers.Markup import Markup
class GetMarkupHandler:
@staticmethod
def handle(message, bot):
bot.send_message(
message.chat.id,
"Держи",
reply_markup=Markup.default()
)
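# `Markup.default()` (imported above) is assumed to build a reply keyboard; a
# plausible sketch with pyTelegramBotAPI, where every name below is a guess:
#
#     from telebot import types
#
#     class Markup:
#         @staticmethod
#         def default():
#             markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
#             markup.add(types.KeyboardButton('Menu'))
#             return markup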
|
[
"alexkiyan.lug@gmail.com"
] |
alexkiyan.lug@gmail.com
|
73149def26bab2a8b1756d768a1175d96d9c84e1
|
9059fbaf6e3686a46612a88e9a9cdc6e664e8aa8
|
/genplur.py
|
53f9d049b4ae9c796d207e196d88b9e0ae05671a
|
[] |
no_license
|
religofsil/progs
|
4422d1918c50eaa25d0a689febcb1bd2216c88ba
|
cf7ebb7a1fdd0c0beeb73d96b6a59c1d92a6605f
|
refs/heads/master
| 2020-04-16T00:22:31.853907
| 2018-02-02T11:20:49
| 2018-02-02T11:20:49
| 33,817,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# -*- coding: utf-8 -*-
import codecs
import re
count = 0
f = codecs.open("fiction3.xml", "r", "utf8")
for line in f:
if re.search("pl.*gen", line):
count += 1
a = re.search(u"</ana>[а-яА-ЯЁё]*`[а-яё]*", line)
if a is not None:
l=a.group()
l=l.replace("</ana>", "")
print l
f.close()
print count
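# A Python 3 sketch of the same extraction (file name and patterns are taken
# from above; everything else is a rewrite, not the original script):
#
#     import re
#
#     count = 0
#     with open("fiction3.xml", encoding="utf8") as f:
#         for line in f:
#             if re.search("pl.*gen", line):
#                 count += 1
#                 match = re.search("</ana>[а-яА-ЯЁё]*`[а-яё]*", line)
#                 if match:
#                     print(match.group().replace("</ana>", ""))
#     print(count)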
|
[
"religionofsilence@gmail.com"
] |
religionofsilence@gmail.com
|
dd6014899756f2ffdd627d017aaea8d03d985aee
|
c7fa04eac79c3be523bebdec84cf31f500225de9
|
/direction/interfaces/protocol.py
|
c0ef5a1b0c9bbe2dc6c4bf9737a040ae79980a08
|
[] |
no_license
|
iancmcc/direction
|
aa56231aa3faf003807fd1afcac63cde93e7978e
|
b3685a3ff8f11fe80b4f400c2079d31c3d1b2680
|
refs/heads/master
| 2021-01-22T11:47:57.738178
| 2010-10-10T04:51:22
| 2010-10-10T04:51:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,205
|
py
|
from abc import ABCMeta, abstractmethod, abstractproperty
__metaclass__ = type
__all__ = ['Transaction', 'Request', 'SuccessResult', 'FailureResult',
'Response']
class Transaction:
"""
A transaction representing a single remote method call.
"""
__metaclass__ = ABCMeta
@abstractproperty
def action(self):
"""
Action.
"""
@abstractproperty
def method(self):
"""
Method.
"""
@abstractproperty
def data(self):
"""
Data.
"""
@abstractproperty
def type(self):
"""
Type.
"""
@abstractproperty
def tid(self):
"""
Transaction ID.
"""
class Request:
"""
A set of transactions representing a single request to the service.
"""
__metaclass__ = ABCMeta
@abstractproperty
def transactions(self):
"""
The set of Transactions that came with this request.
"""
class Result:
"""
The result of a single transaction call.
"""
__metaclass__ = ABCMeta
@abstractproperty
def type(self):
"""
Type of this result.
"""
class SuccessResult(Result):
"""
A successful result.
"""
@abstractproperty
def action(self):
"""
Action that produced this result.
"""
@abstractproperty
def method(self):
"""
Method that produced this result.
"""
@abstractproperty
def result(self):
"""
Result of the method call.
"""
@abstractproperty
def tid(self):
"""
Transaction ID for this result.
"""
class FailureResult(Result):
"""
A failed result.
"""
@abstractproperty
def message(self):
"""
Message indicating what went wrong.
"""
@abstractproperty
def where(self):
"""
Information about where the error occurred.
"""
class Response:
"""
A response containing multiple serialized TransactionResults.
"""
__metaclass__ = ABCMeta
@abstractproperty
def results(self):
"""
All the results for this response.
"""
|
[
"ian.mccracken@gmail.com"
] |
ian.mccracken@gmail.com
|
c67d5704121265e4071cf2fac6c05bea4386e156
|
2c64663773bb08d3f16b2f5a16ade67436fe18a5
|
/3. django/1026/07_django_rest_framework/articles/views.py
|
a915beaba4787c2718c2183291fda7819d2790bd
|
[] |
no_license
|
teqn99/TIL
|
2cf5bb9c4e8c8e84fcb6108d4e1b75f3777dbbe9
|
fa6517aa0e6af737dd8d2e62bc23facc71470d13
|
refs/heads/master
| 2023-08-28T20:48:24.374417
| 2021-11-15T14:56:02
| 2021-11-15T14:56:02
| 333,354,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,537
|
py
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from django.shortcuts import get_list_or_404, get_object_or_404
from .models import Article, Comment
from .serializers import ArticleListSerializer, ArticleSerializer, CommentSerializer
# article_list uses @api_view because
# without @api_view a 404 is rendered as an HTML page,
# while with @api_view it is returned as JSON
@api_view(['GET', 'POST'])  # in DRF a view without this decorator cannot respond -> required
def article_list(request):
# retrieve all articles
if request.method == 'GET':
articles = get_list_or_404(Article)
serializers = ArticleListSerializer(articles, many=True)
return Response(serializers.data)
# create an article
elif request.method == 'POST':
serializers = ArticleSerializer(data=request.data)
if serializers.is_valid(raise_exception=True):  # raising on failure makes the commented-out response at the bottom unnecessary
# raise_exception=True responds with HTTP 400 by default when the data is invalid
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)  # report the created article with 201 on success
# return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET', 'PUT', 'DELETE'])
def article_detail(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.method == 'GET':
serializer = ArticleSerializer(article)
return Response(serializer.data)
elif request.method == 'PUT':
# serializer = ArticleSerializer(instance=article, data=request.data)
serializer = ArticleSerializer(article, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data)
elif request.method == 'DELETE':
article.delete()
data = {
'delete': f'Article {article_pk} was deleted.'
}
return Response(data, status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def comment_list(request):
comments = get_list_or_404(Comment)
serializer = CommentSerializer(comments, many=True)
return Response(serializer.data)
@api_view(['GET', 'PUT', 'DELETE'])
def comment_detail(request, comment_pk):
comment = get_object_or_404(Comment, pk=comment_pk)
if request.method == 'GET':
serializer = CommentSerializer(comment)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = CommentSerializer(comment, data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data)
elif request.method == 'DELETE':
comment.delete()
data = {
'delete': f'Comment {comment_pk} was deleted.'
}
return Response(data, status=status.HTTP_204_NO_CONTENT)
@api_view(['POST'])
def comment_create(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
serializer = CommentSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save(article=article)
return Response(serializer.data, status=status.HTTP_201_CREATED)
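# Sketch of the URL configuration these views assume; module path and route
# names are guesses, not taken from this file:
#
#     # articles/urls.py
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('articles/', views.article_list),
#         path('articles/<int:article_pk>/', views.article_detail),
#         path('articles/<int:article_pk>/comments/', views.comment_create),
#         path('comments/', views.comment_list),
#         path('comments/<int:comment_pk>/', views.comment_detail),
#     ]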
|
[
"teqn99@gmail.com"
] |
teqn99@gmail.com
|