| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 2.91k-686M, nullable ⌀) | star_events_count (int64 0-209k) | fork_events_count (int64 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a27d5cf919c43df37bcb664422415fc60bd850b
|
56f968ff8f215f0a64eb822916acf9e6039a036e
|
/env/bin/rst2html5.py
|
a506af6914bc457993707d54eb799cc534b808ef
|
[] |
no_license
|
AntObr/credit-to-customer
|
754d9d144d1f9f49ce9aefd6e0500d76e1cde1e1
|
36b2a100dc65b109b72527a36556a37418b1fcda
|
refs/heads/master
| 2022-01-25T17:34:10.820544
| 2018-04-17T16:48:06
| 2018-04-17T16:48:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
#!/Users/aobrie/Documents/Projects/credit-to-customer/env/bin/python
# -*- coding: utf8 -*-
# :Copyright: © 2015 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
#
# Revision: $Revision: 7847 $
# Date: $Date: 2015-03-17 18:30:47 +0100 (Di, 17 Mär 2015) $
"""
A minimal front end to the Docutils Publisher, producing HTML 5 documents.
The output also conforms to XHTML 1.0 transitional
(except for the doctype declaration).
"""
try:
import locale # module missing in Jython
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
from docutils.core import publish_cmdline, default_description
description = (u'Generates HTML 5 documents from standalone '
u'reStructuredText sources '
+ default_description)
publish_cmdline(writer_name='html5', description=description)
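# A usage note (assumed invocation, not part of the original script): like the
# other Docutils front ends, this reads reStructuredText from a source file
# (or stdin) and writes HTML to a destination file (or stdout), e.g.
#     python rst2html5.py README.rst README.html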
|
[
"obrien.r.anthony@gmail.com"
] |
obrien.r.anthony@gmail.com
|
f699cd13d0a44f84e958ae8ed9a94e8acfffe352
|
2e2a8df64f0d07fcea33cadb1cb9731a68fec2e3
|
/HyHelper/ONI_webscript.py
|
5be70d00624ce3937c5fd8ba6c8fa41f289b6ea6
|
[
"MIT"
] |
permissive
|
aherrera1721/hyhelper
|
e4f7a73facd33d067766289a8fc6c2809954848e
|
73fccda145bcf1a979e646156343f56c40df0b61
|
refs/heads/master
| 2020-07-26T16:27:10.198397
| 2019-12-13T06:24:24
| 2019-12-13T06:24:24
| 208,703,410
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
from mechanize import Browser
from bs4 import BeautifulSoup
def get_data():
"""
Gets the Running 3-Month Mean ONI values table from: https://ggweather.com/enso/oni.htm
"""
br = Browser()
url = 'https://ggweather.com/enso/oni.htm'
webpage = br.open(url)
html = webpage.read()
bs = BeautifulSoup(html, features="html5lib")
table = bs.find(lambda tag: tag.name=='table' and tag.has_attr('width') and tag['width']=='930')
rows = table.findAll('tr')
data_table = []
for row in rows:
cols = row.findAll('td')
cols = [ele.text.strip() for ele in cols]
        data_table.append(cols)
return data_table
class ONI_Season():
"""
Class to represent a season of 3-month mean ONI values. An ONI season starts in July and ends in June.
"""
def __init__(self, data):
"""
Initializes a new instance of `ONI_Season` to have the following key attributes:
* `enso_type`
* `season`
* `oni_vals`
Parameters:
* data (list): a row from the Running 3-Month Mean ONI values table from: https://ggweather.com/enso/oni.htm
"""
self.enso_type = data[0] if data[0] else "N"
self.season = (float(data[1]), float(data[3]))
self.oni_vals = {
"JJA": float(data[4]) if data[4] else None,
"JAS": float(data[5]) if data[5] else None,
"ASO": float(data[6]) if data[6] else None,
"SON": float(data[7]) if data[7] else None,
"OND": float(data[8]) if data[8] else None,
"NDJ": float(data[9]) if data[9] else None,
"DJF": float(data[10]) if data[10] else None,
"JFM": float(data[11]) if data[11] else None,
"FMA": float(data[12]) if data[12] else None,
"MAM": float(data[13]) if data[13] else None,
"AMJ": float(data[14]) if data[14] else None,
"MJJ": float(data[15]) if data[15] else None,
}
def get_oni_seasons():
"""
Gets the Running 3-Month Mean ONI values from: https://ggweather.com/enso/oni.htm as instances of ONI_Season.
Returns a dictionary mapping season years (e.g., (1950, 1951)) to its respective ONI_Season instance.
"""
data_table = get_data()
oni_seasons = dict()
for data in data_table[2::]:
oni_season = ONI_Season(data)
oni_seasons[oni_season.season] = oni_season
return oni_seasons
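# A minimal usage sketch (hypothetical, not part of the original module):
# build the season dictionary and read one 3-month mean ONI value. Assumes
# network access to ggweather.com and that the 2015-2016 season is present.
if __name__ == '__main__':
    seasons = get_oni_seasons()
    season = seasons.get((2015.0, 2016.0))  # season keys are (float, float) year pairs
    if season is not None:
        print(season.enso_type, season.oni_vals["DJF"])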
|
[
"noreply@github.com"
] |
aherrera1721.noreply@github.com
|
15c3df7f9087494205581ad7a8a4f49a4af05841
|
1e56141b7d5a738a74966c5e447249b895d6bfa9
|
/feeds/views.py
|
123efd22be6571c2127cd568f41f30bd9b73f3f1
|
[] |
no_license
|
Dexter009/BloggIt
|
685737e546739faa4450a294e2f6ff44f7ee8ddd
|
56a5515128d6ab34a55de248c96de0b344d3c160
|
refs/heads/master
| 2021-04-30T04:15:39.936828
| 2018-02-14T16:17:34
| 2018-02-14T16:17:34
| 121,531,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,529
|
py
|
import os
from PIL import Image
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Count
from django.http import Http404
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User
from feeds.forms import AuthenticateForm, UserCreateForm, FeedsForm,UserEditForm, \
ProfileEditForm
from feeds.models import Feeds, UserProfile
def index(request, auth_form=None, user_form=None):
# User is logged in
if request.user.is_authenticated():
feeds_form = FeedsForm()
user = request.user
feedss_self = Feeds.objects.filter(user=user.id)
        feedss_buddies = Feeds.objects.filter(user__userprofile__in=user.profile.follows.all())
feedss = feedss_self | feedss_buddies
return render(request,
'buddies.html',
{'feeds_form': feeds_form, 'user': user,
'feedss': feedss,
'next_url': '/', })
else:
# User is not logged in
auth_form = auth_form or AuthenticateForm()
user_form = user_form or UserCreateForm()
return render(request,
'home.html',
{'auth_form': auth_form, 'user_form': user_form, })
def login_view(request):
if request.method == 'POST':
form = AuthenticateForm(data=request.POST)
if form.is_valid():
login(request, form.get_user())
# Success
return redirect('/')
else:
# Failure
return index(request, auth_form=form)
return redirect('/')
def logout_view(request):
logout(request)
return redirect('/')
def signup(request):
user_form = UserCreateForm(data=request.POST)
if request.method == 'POST':
if user_form.is_valid():
username = user_form.cleaned_data.get('username')
password = user_form.cleaned_data.get('password2')
            new_user = user_form.save()
            profile = UserProfile.objects.create(user=new_user)
user = authenticate(username=username, password=password)
login(request, user)
return redirect('/')
else:
return index(request, user_form=user_form)
return redirect('/edit.html')
@login_required
def submit(request):
if request.method == "POST":
feeds_form = FeedsForm(data=request.POST)
next_url = request.POST.get("next_url", "/")
if feeds_form.is_valid():
feeds = feeds_form.save(commit=False)
feeds.user = request.user
feeds.save()
return redirect(next_url)
else:
return public(request, feeds_form)
return redirect('/')
@login_required
def public(request, feeds_form=None):
feeds_form = feeds_form or FeedsForm()
feedss = Feeds.objects.reverse()[:10]
return render(request,
'public.html',
{'feeds_form': feeds_form, 'next_url': '/feedss',
'feedss': feedss, 'username': request.user.username})
def get_latest(user):
try:
return user.feeds_set.order_by('-id')[0]
except IndexError:
return ""
@login_required
def users(request, username="", feeds_form=None):
if username:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise Http404
feedss = Feeds.objects.filter(user=user.id)
if username == request.user.username or request.user.profile.follows.filter(user__username=username):
return render(request, 'user.html', {'user': user, 'feedss': feedss, })
return render(request, 'user.html', {'user': user, 'feedss': feedss, 'follow': True, })
users = User.objects.all().annotate(feeds_count=Count('feeds'))
feedss = map(get_latest, users)
obj = zip(users, feedss)
feeds_form = feeds_form or FeedsForm()
return render(request,
'profiles.html',
{'obj': obj, 'next_url': '/users/',
'users':users,
'feeds_form': feeds_form,
'username': request.user.username,
'fname': request.user.first_name})
@login_required
def follow(request):
if request.method == "POST":
follow_id = request.POST.get('follow', False)
if follow_id:
try:
user = User.objects.get(id=follow_id)
request.user.profile.follows.add(user.profile)
except ObjectDoesNotExist:
return redirect('/users/')
return redirect('/users/')
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
return render(request,'profiles.html')
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(
instance=request.user.profile)
return render(request,
'edit.html',
{'user_form': user_form,
'profile_form': profile_form})
|
[
"noreply@github.com"
] |
Dexter009.noreply@github.com
|
239cf91ef9a1aff1f0c3fc3aaef72f1bf10325d7
|
caf8ca1b140d84076d81a35b97e8995979830787
|
/selenium_demo/testcases/pytest/test_11.py
|
9d2bfc8b9e49c214fd6724c0f220d40f4c5fb07a
|
[] |
no_license
|
ganmanlin/pytest-selenium-demo
|
2f2dd79ba6ed9af64237dde8e60670d245b88709
|
cfced05f28db614dc4739e5d8e1c878bd56e5932
|
refs/heads/main
| 2023-05-31T13:33:20.646475
| 2021-06-27T14:55:36
| 2021-06-27T14:55:36
| 380,766,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import pytest
import allure
@pytest.fixture(scope="session")
def login():
print("用例先登陆")
@allure.step("步骤1:点xxx")
def step_1():
print("111")
@allure.step("步骤2:点xxx")
def step_2():
print("222")
@allure.feature("编辑页面")
class TestEditPage():
'''编辑页面'''
@allure.story('这是一个xxx的用例')
def test_1(self,login):
'''用力描述:先登录,再去执行xxx'''
step_1()
step_2()
print('test_1')
@allure.story('这是一个yyy的用力')
def test_2(self,login):
'''用力描述:先登录,再去执行yyy'''
print('test_2')
# if __name__ == '__main__':
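# A usage note (assumed invocation, not part of the original file): run with
# the allure-pytest plugin installed to collect report data, e.g.
#     pytest test_11.py --alluredir=./allure-results
#     allure serve ./allure-results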
|
[
"chenjing@chenjings-MacBook-Pro.local"
] |
chenjing@chenjings-MacBook-Pro.local
|
f737a98c385e73d1190a7e43abef05f282d07481
|
0ae54acc15ec017797594e0deefc695ba0ff36b9
|
/apps/pedido/migrations/0004_pedido_totalpagar.py
|
7baeaebb01e4a5d06a9a3cea2c46d720de58effa
|
[] |
no_license
|
yasmani0/soptec
|
32d0677281f5847d63ef057439caa46fa8d323e8
|
650ad2b3b5f5cffa6e098a9bfbc48e407a513c48
|
refs/heads/main
| 2023-06-15T18:44:51.499412
| 2021-07-16T20:08:26
| 2021-07-16T20:08:26
| 382,013,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Generated by Django 2.2.5 on 2021-06-05 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pedido', '0003_pedido_disponibilidad'),
]
operations = [
migrations.AddField(
model_name='pedido',
name='totalPagar',
field=models.FloatField(blank=True, max_length=10, null=True),
),
]
|
[
"74805175+yasmani0@users.noreply.github.com"
] |
74805175+yasmani0@users.noreply.github.com
|
3bb6cc883f0b5afe906b06fa38c40a07d3f89e99
|
b00881def4d59f20d06e56d3fa786547385fa725
|
/Project/weerstation.py
|
33fe0df42f5820429181e9e305c11dd2db65491e
|
[] |
no_license
|
BaertMatthias/Weerstation
|
1b913c2f4b1e16c05c25fa2a3f170b6b28f4d41c
|
50cbf6b9cace536d632de0d420e88355f2ff7c83
|
refs/heads/master
| 2021-06-19T00:40:33.456435
| 2017-06-06T09:10:46
| 2017-06-06T09:10:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from LCD import LCD
from DbClass import DbClass
import Adafruit_DHT
import RPi.GPIO as GPIO
import datetime
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, 4)
humidity = round(humidity, 2)
temperature = round(temperature, 2)
import spidev
import time
spi = spidev.SpiDev()
spi.open(0,0)
def readChannel(channel):
adc = spi.xfer2([1,(8+channel)<<4,0])
data = ((adc[1]&3) << 8 | adc[2])
return data
def berekenLichtsterkte():
data_licht = readChannel(0)
lichtsterkte = -(data_licht - 850)
lichtsterkte = lichtsterkte / (850 - 180) * 100
lichtsterkte = round(lichtsterkte,2)
return lichtsterkte
import Adafruit_BMP2.BMP280 as BMP280
sensor = BMP280.BMP280()
luchtdruk = sensor.read_pressure()
luchtdruk = round(luchtdruk,2)
lcd = LCD(26,19,12,16,20,21)
lcd.main()
lcd.init()
lcd.message('Aangesloten')
db = DbClass()
try:
while True:
        tijdstip = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # NOTE: the sensor values were read once at start-up, so this loop
        # rewrites the same readings; the set*ToDatabase helpers are assumed
        # to be provided elsewhere (e.g. by DbClass) - they are not defined here.
        setTempToDatabase(temperature,tijdstip)
        setLightToDatabase(berekenLichtsterkte(),tijdstip)
        setPressureToDatabase(luchtdruk,tijdstip)
        setHumidityToDatabase(humidity,tijdstip)
time.sleep(1)
except KeyboardInterrupt:
pass
GPIO.cleanup()
|
[
"matthias.baert@student.howest.be"
] |
matthias.baert@student.howest.be
|
9891a6cf044318b3237050b1335b181959c80ed8
|
0c9080f172d77032305cdeef26d32b332d8e7952
|
/comodit_client/control/exceptions.py
|
9dd6b28861aaec8c59489644df76fe77c23e300c
|
[
"MIT"
] |
permissive
|
comodit/comodit-client
|
73b498a16b9a794caeed2d424d677e624c69c2d4
|
dac2011d0d31e740445d5877d28d5cd12b4be730
|
refs/heads/master
| 2023-07-17T06:01:37.227155
| 2023-04-03T09:37:57
| 2023-04-03T09:37:57
| 1,128,114
| 0
| 26
|
NOASSERTION
| 2023-04-03T09:37:58
| 2010-12-01T08:22:08
|
Python
|
UTF-8
|
Python
| false
| false
| 600
|
py
|
# control.exceptions - Exceptions for the application controllers.
# coding: utf-8
#
# Copyright 2010 Guardis SPRL, Liège, Belgium.
# Authors: Laurent Eschenauer <laurent.eschenauer@guardis.com>
#
# This software cannot be used and/or distributed without prior
# authorization from Guardis.
class ControllerException(Exception):
def __init__(self, message):
self.msg = message
class ArgumentException(Exception):
def __init__(self, message):
self.msg = message
class NotFoundException(ArgumentException):
pass
class MissingException(ArgumentException):
pass
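# A minimal usage sketch (hypothetical, not part of the original module):
# controllers raise these with a message and callers read the .msg attribute.
if __name__ == '__main__':
    try:
        raise NotFoundException("no such entity")
    except ArgumentException as e:
        print(e.msg)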
|
[
"laurent@eschenauer.be"
] |
laurent@eschenauer.be
|
681915dcd5064146e70e08a4c14e6b9cf5ca866c
|
b9d594ebfd463e33c82f9e3180bdeb76a5618c1c
|
/node_sync_protect.py
|
add160be1e7ea928e7081cfc7814b95ba93439a0
|
[] |
no_license
|
wschxida/node_sync_protect
|
945abda073eb0a13d62bdc61ec0bf07f87b41f54
|
5930146ed33d320c2bc9bb1e06c30e2d9d59a348
|
refs/heads/master
| 2021-05-27T08:11:58.282435
| 2020-04-09T01:44:16
| 2020-04-09T01:44:16
| 254,243,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,262
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : node_sync_protect.py
# @Author: Cedar
# @Date : 2020/4/2
# @Desc :
import pymysql
def query_mysql(config_params, query_sql):
"""
    Execute a SQL statement.
    :param config_params: MySQL connection parameters
    :param query_sql: the SQL to execute
    :return: all fetched rows
"""
    # connect to MySQL
config = {
'host': config_params["host"],
'port': config_params["port"],
'user': config_params["user"],
'passwd': config_params["passwd"],
'db': config_params["db"],
'charset': 'utf8mb4',
'cursorclass': pymysql.cursors.DictCursor
}
conn = pymysql.connect(**config)
conn.autocommit(1)
    # get a cursor via the cursor() method
    cur = conn.cursor()
    cur.execute(query_sql)  # execute the SQL statement
    results = cur.fetchall()  # fetch all records returned by the query
    conn.close()  # close the connection
return results
def sync_heart_beat_from_extractor_to_center(config_extractor, config_center):
"""
    If an extractor node's heartbeat has not been synced to the central DB within 10 minutes, push the update.
"""
try:
        # query extractor-DB node heartbeats: nodes with a heartbeat within the last hour
sql_extractor = "select Node_ID,Last_Heart_Beat_Time from node " \
"where node_role='E' and Is_Enabled=1 and Sub_System_Name='KWM' " \
"and Last_Heart_Beat_Time>DATE_SUB(CURRENT_TIME(),INTERVAL 1 hour);"
query_result_extractor = query_mysql(config_extractor, sql_extractor)
        # reshape the query result into a Node_ID -> Last_Heart_Beat_Time dict for easier handling
node_heart_beat_extractor = {}
for item in query_result_extractor:
node_heart_beat_extractor[item["Node_ID"]] = item["Last_Heart_Beat_Time"]
        # query the central DB's node table heartbeats
sql_center = "select Node_ID,Last_Heart_Beat_Time from node " \
"where node_role='E' and Is_Enabled=1 and Sub_System_Name='KWM';"
query_result_center = query_mysql(config_center, sql_center)
        # reshape the query result into a Node_ID -> Last_Heart_Beat_Time dict for easier handling
node_heart_beat_center = {}
for item in query_result_center:
node_heart_beat_center[item["Node_ID"]] = item["Last_Heart_Beat_Time"]
        # update the extractor servers' heartbeats in the central DB
for i in node_heart_beat_extractor.keys():
time_diff = node_heart_beat_extractor[i] - node_heart_beat_center[i]
# print(node_heart_beat_extractor[i])
# print(node_heart_beat_center[i])
second_diff = time_diff.days*24*3600 + time_diff.seconds
# print(i, second_diff)
            # if the extractor DB heartbeat is more than 10 minutes ahead of the central DB, write it back
if second_diff > 600:
print("sync_heart_beat_from_extractor_to_center node_id:", i, " second_diff:", second_diff)
update_heart_beat_sql = f"update node set Last_Heart_Beat_Time='{node_heart_beat_extractor[i]}' where Node_ID={i}"
query_mysql(config_center, update_heart_beat_sql)
except Exception as e:
raise e
def sync_node_from_center_to_extractor(config_center, config_extractor):
"""
    If Is_Enabled and Is_Working in the node table differ between the central
    and extractor DBs, sync the central DB's values to the extractor DB.
"""
try:
sql = "select Node_ID,Is_Enabled,Is_Working from node;"
        # query the central DB
query_result_center = query_mysql(config_center, sql)
        # query the extractor DB
query_result_extractor = query_mysql(config_extractor, sql)
# print(query_result_center)
# print(query_result_extractor)
        # if the central and extractor DBs differ, update
if query_result_center != query_result_extractor:
print("sync_node_from_center_to_extractor")
for item in query_result_center:
sql_text = f"update node set Is_Enabled={item['Is_Enabled']},Is_Working={item['Is_Working']} where Node_ID={item['Node_ID']};"
sql_text = sql_text.replace('None', 'Null')
query_mysql(config_extractor, sql_text)
except Exception as e:
raise e
def sync_node_in_node_group_from_center_to_extractor(config_center, config_extractor):
"""
    If node_in_node_group differs between the central and extractor DBs,
    sync the central DB's node_in_node_group to the extractor DB.
"""
try:
sql = "select * from node_in_node_group;"
        # query the central DB's node_in_node_group
query_result_center = query_mysql(config_center, sql)
        # query the extractor DB's node_in_node_group
query_result_extractor = query_mysql(config_extractor, sql)
        # if the central and extractor DBs differ, update
if query_result_center != query_result_extractor:
print("sync_node_in_node_group_from_center_to_extractor")
for item in query_result_center:
sql_text = f"replace into node_in_node_group(Node_In_Node_Group_ID, Node_Group_Code, Node_ID, Part_No, Part_Amount) values({item['Node_In_Node_Group_ID']}, '{item['Node_Group_Code']}', {item['Node_ID']}, {item['Part_No']}, {item['Part_Amount']});"
sql_text = sql_text.replace('None', 'Null')
# print(sql_text)
query_mysql(config_extractor, sql_text)
except Exception as e:
raise e
if __name__ == '__main__':
# extractor = {'host': '192.168.1.166', 'port': 3306, 'user': 'root', 'passwd': 'poms@db', 'db': 'test_extractor'}
# center = {'host': '192.168.1.166', 'port': 3306, 'user': 'root', 'passwd': 'poms@db', 'db': 'test_center'}
center = {'host': '192.168.1.116', 'port': 3306, 'user': 'root', 'passwd': 'poms@db', 'db': 'mymonitor'}
extractor_117 = {'host': '192.168.1.117', 'port': 3306, 'user': 'root', 'passwd': 'poms@db', 'db': 'mymonitor'}
print("---117 start---")
    # if an extractor node's heartbeat hasn't synced to the central DB within 10 minutes, update: extractor -> center
    sync_heart_beat_from_extractor_to_center(extractor_117, center)
    # if Is_Enabled/Is_Working in the node tables differ between central and extractor DBs, sync: center -> extractor
    sync_node_from_center_to_extractor(center, extractor_117)
    # if node_in_node_group differs between central and extractor DBs, sync: center -> extractor
    sync_node_in_node_group_from_center_to_extractor(center, extractor_117)
print("---117 end---")
extractor_118 = {'host': '192.168.1.118', 'port': 3306, 'user': 'root', 'passwd': 'poms@db', 'db': 'mymonitor'}
print("---118 start---")
    # if an extractor node's heartbeat hasn't synced to the central DB within 10 minutes, update: extractor -> center
    sync_heart_beat_from_extractor_to_center(extractor_118, center)
    # if Is_Enabled/Is_Working in the node tables differ between central and extractor DBs, sync: center -> extractor
    sync_node_from_center_to_extractor(center, extractor_118)
    # if node_in_node_group differs between central and extractor DBs, sync: center -> extractor
    sync_node_in_node_group_from_center_to_extractor(center, extractor_118)
print("---118 end---")
|
[
"wschxida@gmail.com"
] |
wschxida@gmail.com
|
5b90c09f7c40e941ed2b77199bd4001e8310cd30
|
7f839902b841766cc46a53bfdeca5f34233d4168
|
/Ensemble_method_project/code.py
|
de120f906a881e8c8847fe2733953940c787f9f4
|
[
"MIT"
] |
permissive
|
Deepthi-AJ/Best-Projects-ga-learner-dsmp-repo
|
bb7fcc95296dee9b0585b09a6a27713f0800541f
|
b9d52a29eff734696ae269cafc8407d2121b40b0
|
refs/heads/master
| 2023-03-15T18:36:48.401630
| 2020-05-21T14:14:22
| 2020-05-21T14:14:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,186
|
py
|
# --------------
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
# Code starts here
df = pd.read_csv(path)  # `path` is supplied by the grading environment; it is not defined in this snippet
df.head(5)
X=df.drop('attr1089',1)
y=df.attr1089
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3 , random_state =4)
scaler=MinMaxScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
# --------------
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
lr=LogisticRegression()
lr.fit(X_train,y_train)
y_pred=lr.predict(X_test)
roc_score = roc_auc_score(y_test, y_pred)  # .score() returns accuracy, not ROC AUC
print("roc_score",roc_score)
# --------------
from sklearn.tree import DecisionTreeClassifier
dt=DecisionTreeClassifier(random_state=4)
dt.fit(X_train,y_train)
y_pred=dt.predict(X_test)
roc_score = roc_auc_score(y_test, y_pred)
print("roc_score",roc_score)
# --------------
from sklearn.ensemble import RandomForestClassifier
# Code strats here
rfc=RandomForestClassifier(random_state=4)
rfc.fit(X_train,y_train)
y_pred=rfc.predict(X_test)
roc_score = roc_auc_score(y_test, y_pred)
print("roc_score",roc_score)
# Code ends here
# --------------
# Import Bagging Classifier
from sklearn.ensemble import BaggingClassifier
# Code starts here
bagging_clf=BaggingClassifier(base_estimator= DecisionTreeClassifier(), n_estimators=100 , max_samples=100 , random_state=0)
bagging_clf.fit(X_train,y_train)
y_pred=bagging_clf.predict(X_test)
score_bagging=bagging_clf.score(X_test,y_test)
print("score_bagging",score_bagging)
# Code ends here
# --------------
# Import libraries
from sklearn.ensemble import VotingClassifier
# Various models
clf_1 = LogisticRegression()
clf_2 = DecisionTreeClassifier(random_state=4)
clf_3 = RandomForestClassifier(random_state=4)
model_list = [('lr',clf_1),('DT',clf_2),('RF',clf_3)]
# Code starts here
voting_clf_hard=VotingClassifier(estimators= model_list,voting='hard')
voting_clf_hard.fit(X_train,y_train)
hard_voting_score= voting_clf_hard.score(X_test,y_test)
print("hard_voting_score:",hard_voting_score)
# Code ends here
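# A hedged follow-up sketch (not part of the original assignment): the same
# ensemble with soft voting, which averages predict_proba outputs instead of
# counting hard class votes. All three estimators above expose predict_proba.
voting_clf_soft = VotingClassifier(estimators=model_list, voting='soft')
voting_clf_soft.fit(X_train, y_train)
soft_voting_score = voting_clf_soft.score(X_test, y_test)
print("soft_voting_score:", soft_voting_score)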
|
[
"rkkirpane@users.noreply.github.com"
] |
rkkirpane@users.noreply.github.com
|
7dc496901fd7ae4c97c641e93d8d18edcfaf26db
|
f660229f0c7e0bc2f5390334cc87dbba9be45b61
|
/affinity.py
|
93abd632cda5988a96bf46b4c0ebdd05a1843fb9
|
[] |
no_license
|
Francosinus/DBS
|
37af03358c302931e8d4724ff2dc36bf477d54bf
|
23ebcf069a0e68a54c3743587a685984e5fd54fc
|
refs/heads/master
| 2021-01-25T08:18:59.535456
| 2017-07-27T10:06:28
| 2017-07-27T10:06:28
| 93,748,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
import numpy as np
import sklearn.cluster
import distance
import csv
input_file = open("/home/franko/DBS Projekt/noduplicates.csv","r")
reader = csv.reader(input_file, delimiter=";")
output=open("/home/franko/DBS Projekt/cluster.csv", "w")
writer= csv.writer(output)
column= []
for row in reader:
column.append(row[0])
words = np.asarray(column)  # for indexing the list
lev = -1*np.array([[distance.levenshtein(w1,w2) for w1 in words] for w2
        in words])  # Levenshtein distance comparison against all words
ap = sklearn.cluster.AffinityPropagation(affinity="precomputed", damping=0.5)  # similar to k-means
ap.fit(lev)
for cluster_id in np.unique(ap.labels_):
exemplar = words[ap.cluster_centers_indices_[cluster_id]]
cluster = np.unique(words[np.nonzero(ap.labels_==cluster_id)])
cluster_str = ", ".join(cluster)
writer.writerow([" - *%s:* %s" % (exemplar, cluster_str)])
print(" - *%s:* %s" % (exemplar, cluster_str))
|
[
"noreply@github.com"
] |
Francosinus.noreply@github.com
|
7ef0094e3da7d99e42f732c70bd9ff9e23ee8a44
|
ab5c23fb0c9cb2520d7cd7929d38009ca7060442
|
/user/migrations/0004_remove_customuser_phone.py
|
53fc11838962d9aa0a9e3efcbb880b06f5f2485a
|
[] |
no_license
|
cliffaust/revolvemart-backend
|
5acc777e09cf05ff1222eea9a22c6d1376ef26fd
|
e0ad9dac511c5630913b062ba4e90a42e26d3855
|
refs/heads/master
| 2023-08-11T21:21:09.810339
| 2021-09-15T19:36:58
| 2021-09-15T19:36:58
| 406,183,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
# Generated by Django 3.2.3 on 2021-09-10 17:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0003_bookviews'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='phone',
),
]
|
[
"kingsleyclifford2016@gmail.com"
] |
kingsleyclifford2016@gmail.com
|
c6d2960e80a9008022afb81663105b0662db75b1
|
4ccb6c096ac6ba68fa91a5c257331b20ecabf16e
|
/testing/marionette/client/marionette/runner/mixins/browsermob-proxy-py/browsermobproxy/__init__.py
|
5c6d63004fbc1c652de41ad40e092b8bddc0efba
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
classilla/tenfourfox
|
ad7856e9e2309e8d0da2d3eed3090cadf493a413
|
17b23692fe309927badba0617b7679ed50d46acb
|
refs/heads/master
| 2023-07-08T23:59:21.137323
| 2023-04-20T05:34:54
| 2023-04-20T05:34:54
| 40,451,619
| 247
| 41
|
NOASSERTION
| 2023-06-25T08:42:34
| 2015-08-09T21:46:01
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
__version__ = '0.5.0'
from .server import Server
from .client import Client
__all__ = ['Server', 'Client', 'browsermobproxy']
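# A minimal usage sketch (assumed API and a hypothetical binary path, not part
# of the original file): start a local BrowserMob Proxy server and get a client.
#     server = Server('/path/to/bin/browsermob-proxy')
#     server.start()
#     proxy = server.create_proxy()
#     ...
#     server.stop()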
|
[
"classilla@floodgap.com"
] |
classilla@floodgap.com
|
090d80f2807c7f63dfe8141e4a1167570ff9c51e
|
cf973acca8a9f1bb6e015eeb60c9dfb620ba81f5
|
/management/urls.py
|
7f6058d83089685154623c0171f9b1c9a0295296
|
[] |
no_license
|
rgegriff/menomnom
|
db0c341a555c1a2e057edc310a6e44160ae15bd0
|
0390d4767bea4bda1a3a961bebd5de6eb640dccf
|
refs/heads/master
| 2021-01-18T01:42:33.735904
| 2013-04-03T13:18:42
| 2013-04-03T13:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
from django.conf.urls import patterns, url
import views
urlpatterns = patterns("",
url(r'^$', views.GeneralManagementPage.as_view()),
url(r"^hours/$", views.HoursManagementPage.as_view()),
url(r'^specials/$', views.SpecialsManagementPage.as_view()),
url(r'^highlights/$', views.HighlightsManagementPage.as_view()),
url(r'^bulletin/$', views.BulletinPost.as_view()),
)
|
[
"root@menomnom.(none)"
] |
root@menomnom.(none)
|
a238fa5894d968b9152395c95fae875d670d632c
|
b773f9c1ea92373d0d2976e08aa66a563d4fa3b7
|
/venv/bin/pip
|
ce24341335d59fc7217cbea5c4a0b101ffd35911
|
[] |
no_license
|
yDeepak1889/Electronic-Medical-Record-management-using-Blockchain
|
35276dafcd35e95d7230b3c1ff12a6bdbf97dddb
|
7d525afe5a328b3fca723a7c658a8e28ee92499d
|
refs/heads/master
| 2021-04-06T08:27:21.885255
| 2018-05-04T07:53:06
| 2018-05-04T07:53:06
| 124,678,427
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
#!/home/deepak/PycharmProjects/Blockchain/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
|
[
"iit2015124@iiita.ac.in"
] |
iit2015124@iiita.ac.in
|
|
5f7cb71869fd9e4fd8007b8011ddd41599050f29
|
a4f6244e5ed787d972135efc456af6b00084653d
|
/K-NN/UTM.py
|
c7a99c54493843433a794b64bd8d63ce64bb4f80
|
[] |
no_license
|
coco11563/machineLearning
|
18e349102332ad07b21f8c20e468de3017824e21
|
6fe35ec2f945d5d6ba601a05b3f6e0060dab890b
|
refs/heads/master
| 2021-01-19T06:39:44.017514
| 2016-08-19T15:27:08
| 2016-08-19T15:27:08
| 60,177,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
'''
Created on 2016-06-06
@author: coco1
'''
import math
def LL2UTM_USGS(a, f, lat, lon, lonOrigin, FN):
'''
a = 6378136.49m
b = 6356755.00m
lonOrigin = 114.17
FN = 0
** Input:(a, f, lat, lon, lonOrigin, FN)
    **  a          semi-major axis of the ellipsoid
    **  f          flattening of the ellipsoid, f=(a-b)/a, where b is the semi-minor axis
    **  lat        latitude before the UTM projection
    **  lon        longitude before the UTM projection
    **  lonOrigin  central meridian
    **  FN         false northing: 0 for the northern hemisphere, 10000000.0m for the southern
    ---------------------------------------------
    ** Output:(UTMNorthing, UTMEasting)
    **  UTMNorthing  the northing (latitude-direction) coordinate after the UTM projection
    **  UTMEasting   the easting (longitude-direction) coordinate after the UTM projection
    ---------------------------------------------
    ** Description: UTM projection
    ** Author: Ace Strong
    ** Affiliation: CCA NUAA
    ** Created: 2008-07-19
    ** Version: 1.0
    ** For the formulas implemented here, see
** "Coordinate Conversions and Transformations including Formulas" p35.
** & http://www.uwgb.edu/dutchs/UsefulData/UTMFormulas.htm
'''
    # e is the WGS84 first eccentricity; eSquare is e squared
eSquare = 2*f - f*f
k0 = 0.9996
    # make sure the longitude is within -180.00 .. 179.9
lonTemp = (lon+180)-int((lon+180)/360)*360-180
latRad = math.radians(lat)
lonRad = math.radians(lonTemp)
lonOriginRad = math.radians(lonOrigin)
e2Square = (eSquare)/(1-eSquare)
V = a/math.sqrt(1-eSquare*math.sin(latRad)**2)
T = math.tan(latRad)**2
C = e2Square*math.cos(latRad)**2
A = math.cos(latRad)*(lonRad-lonOriginRad)
M = a*((1-eSquare/4-3*eSquare**2/64-5*eSquare**3/256)*latRad
-(3*eSquare/8+3*eSquare**2/32+45*eSquare**3/1024)*math.sin(2*latRad)
+(15*eSquare**2/256+45*eSquare**3/1024)*math.sin(4*latRad)
-(35*eSquare**3/3072)*math.sin(6*latRad))
# x
UTMEasting = k0*V*(A+(1-T+C)*A**3/6
+ (5-18*T+T**2+72*C-58*e2Square)*A**5/120)+ 500000.0
# y
UTMNorthing = k0*(M+V*math.tan(latRad)*(A**2/2+(5-T+9*C+4*C**2)*A**4/24
+(61-58*T+T**2+600*C-330*e2Square)*A**6/720))
    # the southern hemisphere uses a false northing of 10000000.0m
UTMNorthing += FN
return (UTMEasting,UTMNorthing)
e , n = LL2UTM_USGS(6378136.49,6356755.00,30.45821,114.272369,114.17,0)
print(e , n)
|
[
"coco11563@yeah.net"
] |
coco11563@yeah.net
|
982871d8e7bf9e4dc1a68ec9e2c52cf445beee93
|
cec03b50138885a2a3082f4cca1aa2b6714861aa
|
/Internet Speed Test/log_speedtest.py
|
6510fcf824fd908aa09a393a0fb4d4015abf4955
|
[] |
no_license
|
dassowmd/UsefulTools
|
92fa4d86b7cdac5c3839f89ce0d435a5ab27f728
|
cb9875017c99d36ba4a4c1088cb2b3679335320c
|
refs/heads/master
| 2022-03-15T15:07:24.789451
| 2022-03-05T22:17:29
| 2022-03-05T22:17:29
| 79,920,969
| 1
| 0
| null | 2017-01-24T22:58:54
| 2017-01-24T14:52:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,246
|
py
|
# C:\Users\dasso\Google Drive\Python Library\Internet Speed Test
# #!/usr/bin/env python
import os
import subprocess
import logging
import sys
import re
import time
import datetime
SPEEDTEST_CMD = "C:\Python37\Lib\site-packages\speedtest.py"
sleepTime = 1500
if len(sys.argv) < 2:
logFolder = input(
"Please enter the folder that you would like to save the speedtest.log file in\n"
)
sleepTime = int(
input(
"How many seconds would you like the program to wait befor running again?\n"
)
)
else:
logFolder = sys.argv[1]
LOG_FILE = str(logFolder) + "\Log Files\speedtest.log"
if not os.path.exists(logFolder + "\Log Files"):
os.makedirs(logFolder + "\Log Files")
def main():
while True:
setup_logging()
computerName = os.environ["COMPUTERNAME"]
try:
ISP, ping, download, upload = get_speedtest_results()
except ValueError as err:
logging.info("%s %s", computerName, err)
            pass  # bare `next` was a no-op; fall through so the sleep below still runs
else:
logging.info("%s %s %s %s %s", ISP, computerName, ping, download, upload)
time.sleep(sleepTime)
def setup_logging():
logging.basicConfig(
filename=LOG_FILE,
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="%Y-%m-%d %H:%M",
)
def get_speedtest_results():
ISP = ping = download = upload = None
speedtest_output = subprocess.check_output("python " + SPEEDTEST_CMD)
# with subprocess.check_output('python ' + SPEEDTEST_CMD + ' --simple') as speedtest_output:
# print speedtest_output
# print len(speedtest_output)
speedtest_output_clean = speedtest_output.decode("utf-8").split("\n")
# print(speedtest_output_clean)
for line in speedtest_output_clean:
lineSplit = line.split()
# print lineSplit
try:
if "Hosted" in lineSplit:
for i in lineSplit[0:]:
                    if ping is None:
                        if find_Ping_Regex(i) is None:
                            continue  # bare `next` was a no-op; move to the next token
                        else:
                            ping = find_Ping_Regex(i)
elif "Download:" in lineSplit:
label, value, unit = line.split()
download = str(value)
elif "Upload:" in lineSplit:
label, value, unit = line.split()
upload = str(value)
elif "from" in lineSplit:
for i in lineSplit[0:]:
                    if ISP is None:
                        if find_IP_Regex(i) is None:
                            continue  # bare `next` was a no-op; move to the next token
                        else:
                            ISP = find_IP_Regex(i)
        except Exception:
            continue  # skip lines that fail to parse; bare `next` was a no-op
# print(ISP, ping, download, upload)
if all((ISP, ping, download, upload)): # if all values were parsed
print(str(datetime.datetime.now()), ISP, ping, download, upload)
return ISP, ping, download, upload
else:
raise ValueError("TEST FAILED")
def find_IP_Regex(txt):
re1 = "(\\d+)" # Integer Number 1
re2 = "(.)" # Any Single Character 1
re3 = "(\\d+)" # Integer Number 2
re4 = "(.)" # Any Single Character 2
re5 = "(\\d+)" # Integer Number 3
re6 = "(.)" # Any Single Character 3
re7 = "(\\d+)" # Integer Number 4
rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7, re.IGNORECASE | re.DOTALL)
m = rg.search(txt)
if m:
int1 = m.group(1)
c1 = m.group(2)
int2 = m.group(3)
c2 = m.group(4)
int3 = m.group(5)
c3 = m.group(6)
int4 = m.group(7)
return (
str(int1) + str(c1) + str(int2) + str(c2) + str(int3) + str(c3) + str(int4)
)
else:
return None
def find_Ping_Regex(txt):
re1 = "(\\d{2})" # Integer Number 1
re2 = "(.)" # Any Single Character 1
re3 = "(\\d{3})" # Integer Number 2
rg = re.compile(re1 + re2 + re3, re.IGNORECASE | re.DOTALL)
m = rg.search(txt)
if m:
int1 = m.group(1)
c1 = m.group(2)
int2 = m.group(3)
return str(int1) + str(c1) + str(int2)
else:
return None
if __name__ == "__main__":
try:
main()
    except Exception:
        pass  # bare `next` was a no-op here; exit quietly on any error
|
[
"dassowmd@gmail.com"
] |
dassowmd@gmail.com
|
574690fff47ed7a9e81713f6a703b4f571cfd74a
|
c047518e0bc0be1d1a46b734fbf53610cb8a407f
|
/Codeforces/1241/A.py
|
862b24fd35e729d307cd9822e35d720a824f4148
|
[] |
no_license
|
fernandozanutto/competitive_programming
|
c3e006544ddba1702a37eeb437cb015713e8c2d1
|
cf721a7bcce6c5d5fc9f739ad729079c939fc421
|
refs/heads/master
| 2020-06-19T06:57:32.288602
| 2020-04-04T14:58:45
| 2020-04-04T14:58:45
| 196,607,123
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
for _ in range(int(input())):
a = int(input())
if a == 2:
print(2)
elif a % 2 == 0:
print(0)
else:
print(1)
|
[
"ferzanutto1999@gmail.com"
] |
ferzanutto1999@gmail.com
|
6f3e85c584f6c0602334be4ce82542e984e36fb9
|
d161be36a888fe131425c99248793cad0ce73a3e
|
/data_loader.py
|
8933f72802e6cf74eb4d61f354d694578c359540
|
[
"Apache-2.0"
] |
permissive
|
nlp-kg/medknow
|
c7332f68571ce29d7e0fd6fc6c3c6d3d4db0ed1c
|
46c99d692b9099bc39ef88a5f47e4749584f7f9c
|
refs/heads/master
| 2023-03-28T00:09:24.946615
| 2020-01-23T12:58:17
| 2020-01-23T12:58:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,113
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Functions to extract knowledge from medical text. Everything related to
# reading and parsing.
import json
import py2neo
import pymongo
import langid
import pandas as pd
from config import settings
from utilities import time_log
from multiprocessing import cpu_count
import ijson.backends.yajl2_cffi as ijson2
def load_mongo(key):
"""
Parse collection from mongo
Input:
- key: str,
the type of input to read
Output:
- json_ : dic,
json-style dictionary with a field containing
documents
"""
# input mongo variables from settings.yaml
uri = settings['load']['mongo']['uri']
db_name = settings['load']['mongo']['db']
collection_name = settings['load']['mongo']['collection']
client = pymongo.MongoClient(uri)
db = client[db_name]
collection = db[collection_name]
# itemfield containing list of elements
out_outfield = settings['out']['json']['itemfield']
json_ = {out_outfield: []}
cur = collection.find({})
for item in cur:
del item['_id']
json_[out_outfield].append(item)
return json_
def load_mongo_batches(key, N_collection, ind_=0):
"""
Parse collection from mongo to be processed in streaming/parallel fashion.
Fetches step = (N X numb_cores) of documents starting from ind_ and
delivers it to the rest of the pipeline.
Input:
- key: str,
the type of input to read
- N_collection: int,
total collection length
- ind: int,
the starting point of the batch (or stream) to be read
Output:
- json_ : dic,
json-style dictionary with a field containing
items
"""
# input file path from settings.yaml
uri = settings['load']['mongo']['uri']
db_name = settings['load']['mongo']['db']
collection_name = settings['load']['mongo']['collection']
client = pymongo.MongoClient(uri)
db = client[db_name]
collection = db[collection_name]
# itemfield containing list of elements
out_outfield = settings['out']['json']['itemfield']
json_ = {out_outfield: []}
stream_flag = str(settings['pipeline']['in']['stream']) == 'True'
    # batch size in a streaming environment is just one
if stream_flag:
step = 1
# else N_THREADS*
else:
try:
N_THREADS = int(settings['num_cores'])
except:
N_THREADS = cpu_count()
try:
batch_per_core = int(settings['batch_per_core'])
except:
batch_per_core = 100
step = N_THREADS * batch_per_core
time_log("Will start from %d/%d and read %d items" % (ind_, N_collection, step))
    if step > N_collection:
        step = N_collection
    # fetch the batch in either case; the original only queried in an else
    # branch, leaving `cur` undefined whenever step was clamped
    cur = collection.find({}, skip=ind_, limit=step)
c = 0
for item in cur:
del item['_id']
c += 1
json_[out_outfield].append(item)
return json_, ind_ + step
def load_file(key):
"""
Parse file containing items.
Input:
- key: str,
the type of input to read
Output:
- json_ : dic,
json-style dictionary with items
"""
    # input file path from settings.yaml
if key == 'med_rec':
json_ = parse_medical_rec()
else:
inp_path = settings['load']['path']['file_path']
with open(inp_path, 'r') as f:
json_ = json.load(f, encoding='utf-8')
return json_
def load_file_batches(key, N_collection, ind_=0):
"""
Parse collection from file to be processed in streaming/parallel fashion.
Fetches step = (N X numb_cores) of documents starting from ind_ and
delivers it to the rest of the pipeline.
Input:
- key: str,
the type of input to read
- N_collection: int,
total collection length
- ind: int,
the starting point of the batch (or stream) to be read
Output:
- json_ : dic,
json-style dictionary with a field containing
items
"""
# Filepath to item collection
inp_path = settings['load']['path']['file_path']
# Document iterator field in the collection
infield = settings['load'][key]['itemfield']
# itemfield containing list of elements
out_outfield = settings['out']['json']['itemfield']
# The generated json_
json_ = {out_outfield: []}
# Check if streaming
stream_flag = str(settings['pipeline']['in']['stream']) == 'True'
    # batch size in a streaming environment is just one
if stream_flag:
step = 1
# else N_THREADS* Batches_per_core
else:
try:
N_THREADS = int(settings['num_cores'])
except:
N_THREADS = cpu_count()
try:
batch_per_core = int(settings['batch_per_core'])
except:
batch_per_core = 100
step = N_THREADS * batch_per_core
if step > N_collection:
step = N_collection
# Collection counter
col_counter = 0
#print infield
time_log("Will start from %d/%d and read %d items" % (ind_, N_collection, step))
with open(inp_path, 'r') as f:
docs = ijson2.items(f, '%s.item' % infield)
for c, item in enumerate(docs):
if c < ind_:
continue
json_[out_outfield].append(item)
#print json_
col_counter += 1
if col_counter >= step:
break
if col_counter == 0:
#print 'Col_counter'
#print col_counter
return None, None
else:
#print json_
return json_, ind_ + step
def parse_medical_rec():
"""
Parse file containing medical records.
Output:
- json_ : dic,
json-style dictionary with documents containing
a list of dicts, containing the medical record and the corresponding
attributes
"""
# path to file to read from
inp_path = settings['load']['path']['file_path']
    # csv separator from settings.yaml
sep = settings['load']['med_rec']['sep']
# textfield to read text from
textfield = settings['load']['med_rec']['textfield']
# idfield where id of document is stored
idfield = settings['load']['med_rec']['idfield']
with open(inp_path, 'r') as f:
diag = pd.DataFrame.from_csv(f, sep='\t')
# Get texts
texts = diag[textfield].values
# outerfield for the documents in json
itemfield = settings['out']['json']['itemfield']
# textfield to read text from
out_textfield = settings['out']['json']['json_text_field']
# labelfield where title of the document is stored
out_labelfield = settings['out']['json']['json_label_field']
diag[out_labelfield] = ['Medical Record' + str(i) for i in diag.index.values.tolist()]
if not('journal' in diag.columns.tolist()):
diag['journal'] = ['None' for i in diag.index.values.tolist()]
# Replace textfiled with out_textfield
diag[out_textfield] = diag[textfield]
del diag[textfield]
# Replace id with default out_idfield
diag['id'] = diag[idfield]
del diag[idfield]
json_ = {itemfield: diag.to_dict(orient='records')}
return json_
def parse_text(json_):
"""
Helper function to parse the loaded documents. Specifically,
we ignore documents with no assigned text field. We also provide
    an empty string for label if non-existent. Other than that, we normalize
    the id, text and label fields as indicated in the settings.
    Input:
        - json_: dic,
json-style dictionary with a field containing
items
Output:
- json_ : dic,
json-style dictionary with a field containing normalized and
cleaned items
"""
## Values to read from
# itemfield containing list of elements containing text
outfield = settings['load']['text']['itemfield']
# textfield to read text from
textfield = settings['load']['text']['textfield']
# idfield where id of document is stored
idfield = settings['load']['text']['idfield']
# labelfield where title of the document is stored
labelfield = settings['load']['text']['labelfield']
## Values to replace them with ##
# itemfield containing list of elements
out_outfield = settings['out']['json']['itemfield']
# textfield to read text from
out_textfield = settings['out']['json']['json_text_field']
# idfield where id of document is stored
out_idfield = settings['out']['json']['json_id_field']
# labelfield where title of the document is stored
out_labelfield = settings['out']['json']['json_label_field']
json_[outfield] = [art for art in json_[outfield] if textfield in art.keys()]
json_[outfield] = [art for art in json_[outfield] if langid.classify(art[textfield])[0] == 'en']
for article in json_[outfield]:
article[out_textfield] = article.pop(textfield)
article[out_idfield] = article.pop(idfield)
if labelfield != 'None':
article[out_labelfield] = article.pop(labelfield)
else:
article[out_labelfield] = ' '
if not('journal' in article.keys()):
article['journal'] = 'None'
json_[out_outfield] = json_.pop(outfield)
# N = len(json_[out_outfield])
# json_[out_outfield] = json_[out_outfield][(2*N/5):(3*N/5)]
json_[out_outfield] = json_[out_outfield][:]
return json_
def parse_remove_edges(key=None):
"""
Dummy function to conform with the pipeline when
we just want to delete edges instead of inserting
them.
Output:
- an empty dic to be passed around, as to
conform to the pipeline schema
"""
# Read neo4j essentials before
host = settings['neo4j']['host']
port = settings['neo4j']['port']
user = settings['neo4j']['user']
password = settings['neo4j']['password']
try:
graph = py2neo.Graph(host=host, port=port, user=user, password=password)
except Exception, e:
#time_log(e)
#time_log("Couldn't connect to db! Check settings!")
exit(2)
quer1 = """ MATCH ()-[r]->() WHERE r.resource = "%s" DELETE r;""" % (settings['neo4j']['resource'])
f = graph.run(quer1)
rem = f.stats()['relationships_deleted']
quer2 = """ MATCH ()-[r]->() WHERE "%s" in r.resource SET
r.resource = FILTER(x IN r.resource WHERE x <> "%s");""" % (settings['neo4j']['resource'], settings['neo4j']['resource'])
f = graph.run(quer2)
alt = f.stats()['properties_set']
time_log('Removed %d edges that were found only in %s' % (rem, settings['neo4j']['resource']))
time_log("Altered %s edges' resource attribute associated with %s" % (alt, settings['neo4j']['resource']))
exit(1)
return {}
def get_collection_count(source, type):
"""
Helper function to get total collection length.
Input:
- source: str, value denoting where we will read from (e.g 'mongo')
- type: str, value denoting what we will read (e.g. text, edges)
Output:
- N_collection: int,
number of items in the collection
"""
if source == 'file':
inp_path = settings['load']['path']['file_path']
# Document iterator field in the collection
infield = settings['load'][type]['itemfield']
with open(inp_path, 'r') as f:
docs = ijson2.items(f, '%s.item' % infield)
N_collection = 0
for item in docs:
N_collection += 1
elif source == 'mongo':
# input mongo variables from settings.yaml
uri = settings['load']['mongo']['uri']
db_name = settings['load']['mongo']['db']
collection_name = settings['load']['mongo']['collection']
client = pymongo.MongoClient(uri)
db = client[db_name]
collection = db[collection_name]
N_collection = collection.count()
else:
time_log("Can't calculate total collection count for source type %s" % settings['in']['source'])
raise NotImplementedError
return N_collection
|
[
"bogas.ko@gmail.com"
] |
bogas.ko@gmail.com
|
84ac418dd2d52f34fa1e8225a71e7e31f49eead7
|
ef243d91a1826b490e935fa3f3e6c29c3cc547d0
|
/typed_ast/_ast3/Await.py
|
607a263ba1aa6de55d353a12e584ae4d62546a47
|
[] |
no_license
|
VentiFang/Python_local_module
|
6b3d0b22399e817057dfd15d647a14bb1e41980e
|
c44f55379eca2818b29732c2815480ee755ae3fb
|
refs/heads/master
| 2020-11-29T11:24:54.932967
| 2019-12-25T12:57:14
| 2019-12-25T12:57:14
| 230,101,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
# encoding: utf-8
# module typed_ast._ast3
# from C:\Users\84788\AppData\Roaming\Python\Python36\site-packages\typed_ast\_ast3.cp36-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
from .expr import expr
class Await(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
|
[
"5149528+ventifang@user.noreply.gitee.com"
] |
5149528+ventifang@user.noreply.gitee.com
|
ba87365da898457af4031b0b36b298bc772ebe8b
|
5f55ab13627ca048b3e5877ea31ea3e020035c66
|
/fab_support/heroku_utils.py
|
f8c23849bad5f5d0f6e1853ed602de3efeabc44a
|
[
"MIT"
] |
permissive
|
drummonds/fab_support
|
6872e0f9c2523abe80bfce1c45def3f2755280cc
|
de2c9595e8cf499848f0cc5661e7f1d6465609a2
|
refs/heads/master
| 2023-08-17T03:01:26.241905
| 2019-12-08T18:21:12
| 2019-12-08T18:21:12
| 120,188,534
| 4
| 0
|
MIT
| 2023-09-11T15:15:24
| 2018-02-04T13:45:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
"""
Utilities that work with Heroku
"""
from fabric.api import local
import json
import re
import sys
from time import sleep
def list_databases(app=""):
"""
    List the Heroku Postgres databases attached to an app.
    :param app: name of the app to query; with the default '' the current app is used
    :return: list of [name, colour] pairs, one per database
"""
result = []
for i in range(5):
try:
if app:
fab_result = json.loads(
local(f"heroku info --json --app={app}", capture=True)
)
else:
fab_result = json.loads(local("heroku info --json", capture=True))
for addon in fab_result["addons"]:
if addon["addon_service"]["name"] == "heroku-postgresql":
name = addon["config_vars"][0]
if name == "DATABASE_URL":
colour = ""
else:
# Extract colour from name like 'HEROKU_POSTGRESQL_PURPLE_URL'
found = re.search("HEROKU_POSTGRESQL_(.*)_URL", name)
colour = found.group(1)
result.append([name, colour])
break
except IndexError: # Returned if database config_var are not yet setup
print(f"Trying to list databases attempt {i} as not yet setup")
print(f"Returned: {fab_result}")
sleep(15)
if result:
return result
else:
sys.exit("Failed to get database list")
def first_colour_database(app=""):
db_list = list_databases(app)
for name, colour in db_list:
if colour:
return [name, colour]
return ["", ""] # Not found
|
[
"hum3@drummond.info"
] |
hum3@drummond.info
|
3a159715b882957fef0a068235fd680ac5fe57bd
|
41e4e2ba911a3cd5d32890014ec564cd312202ec
|
/class.py
|
7a43ff255505abf47f143f585a3a7510b7465730
|
[] |
no_license
|
PythonStudy20161/ptyhon-study-2016-loooooooooong
|
1c6e4337d533172978dc17a113681de3070c9567
|
0df7ca586c1936f08f21f2549edbd35323d1d1b4
|
refs/heads/master
| 2021-01-10T16:10:35.944363
| 2016-02-15T09:27:23
| 2016-02-15T09:27:23
| 49,255,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
#_*_coding:utf-8_*_
# class definition
class people:
name = ''
age = 0
__weight = 0
def __init__(self,n,a,w):
self.name = n
self.age = a
self.__weight = w
def speak(self):
print("%s is speaking: I am %d years old:%d" %(self.name,self.age,self.__weight))
# engineer inherits from people
class engerneer(people):
C_language=''
def __init__(self,n,a,w,c):
people.__init__(self,n,a,w)
self.C_language=c
def speak(self):
print("%s is speaking: I am %d years old,and I am use C program language %d"%(self.name,self.age,self.C_language))
# manager role, does not inherit from people
class manager():
manager_year=''
def __init__(self,mey):
self.manager_year=mey
def speak(self):
print("%s is speaking: I am manager for %d years. "%(self.name,self.manager_year))
# role with multiple inheritance
class VP(manager,engerneer):
count=''
def __init__(self,n,a,w,c,mey,count):
engerneer.__init__(self,n,a,w,c)
manager.__init__(self,mey)
self.count=count
def speak(self):
print("%s is speaking: I am %d years old,use C %d year,manager %d people."%(self.name,self.age,self.C_language,self.manager_year))
p = people('tom',10,30)
p.speak()
print "\n**************\n"
e=engerneer('gerry',22,100,2)
e.speak()
print "\n**************\n"
v=VP('boss',30,80,10,100,5)
v.speak()
|
[
"wangcy1202@qq.com"
] |
wangcy1202@qq.com
|
77b72cfff8d656608f7071e4b9eab19128ccfa70
|
c5ba5f13a09eaa39d011aef6115eba82b6ea1a2d
|
/blockchain.py
|
384f36ddcbbf3042d16550930628e83b59683a95
|
[] |
no_license
|
Bram0202/SimpleBlockchain
|
637d4ad99caf24a26fa16e4cdcb7bca56dedfb00
|
c1c656574eef33b075502fafee40befd2e00c4f1
|
refs/heads/master
| 2021-10-07T23:47:54.449303
| 2018-12-05T19:58:25
| 2018-12-05T19:58:25
| 160,570,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
# A really simple blockchain
# Based on https://www.youtube.com/watch?v=b81Ib_oYbFk
# Coded by Bram Bakx
# Made on macOS and Python 3.7
import datetime
import hashlib
# Every block in the blockchain is an instance of Block.
class Block:
blockNumber = 0 # The number of the block
data = None # The data you want to store in the block
    next = None  # The pointer to the next block in the blockchain
hash = None
nonce = 0 # The number of hashes needed to mine the block
previousHash = 0x0 # The hash of the previous block in the blockchain
    timestamp = datetime.datetime.now()  # NOTE: evaluated once at class-definition time, so every block shares this timestamp
# Store the blocks data
def __init__(self, data):
self.data = data
# Create the block's hash. Merge all the data together in one big string and run that through sha256
    # It is very important to add previousHash to the new hash, because if previousHash changes all the hashes change
def hash(self):
h = hashlib.sha256()
h.update(
str(self.nonce).encode('utf-8') +
str(self.data).encode('utf-8') +
str(self.previousHash).encode('utf-8') +
str(self.timestamp).encode('utf-8') +
str(self.blockNumber).encode('utf-8')
)
return h.hexdigest()
# The block as printed to the console
def __str__(self):
return "Block Hash: " + str(self.hash()) + "\nBlockNo: " + str(self.blockNumber) + "\nBlock Data: " + str(self.data) + "\nHashes: " + str(self.nonce) + "\n--------------"
class Blockchain:
# determine the mining difficulty
diff = 20
maxNonce = 2**32
target = 2 ** (256-diff)
block = Block("Genesis")
dummy = head = block
def add(self, block):
block.previousHash = self.block.hash()
block.blockNumber = self.block.blockNumber + 1
self.block.next = block
self.block = self.block.next
# The hash has to be lower than the target to be accepted in the blockchain
def mine(self, block):
for n in range(self.maxNonce):
if int(block.hash(), 16) <= self.target:
self.add(block)
print(block)
break
else:
block.nonce += 1
blockchain = Blockchain()
# Generate 10 random blocks
for n in range(10):
blockchain.mine(Block("Block " + str(n+1)))
# Print each block to the blockchain
while blockchain.head != None:
print(blockchain.head)
blockchain.head = blockchain.head.next
|
[
"35409298+Bram0202@users.noreply.github.com"
] |
35409298+Bram0202@users.noreply.github.com
|
bb144c7ce9f5bd86b79ab81a75c8c30a0600ef9e
|
c1fe97208afe479b7ae1ee67d69866a6911564ca
|
/RESTapi/RESTapi/urls.py
|
2263238e921b2dab6b9cc25d7b275620ac985147
|
[] |
no_license
|
jaindhairyahere/Python_Django
|
a0a46c57b6ca60d0942ae181fe28ea56bb1ee948
|
f170a2e38b78df698a02821a454a3baea0c358a6
|
refs/heads/master
| 2020-06-18T09:17:56.364928
| 2019-11-02T18:34:12
| 2019-11-02T18:34:12
| 196,249,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
"""RESTapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
import quantify_capital_assignment
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('admin/', admin.site.urls),
path('api/movies/',include('quantify_capital_assignment.urls'),name='api-movies')
]
|
[
"jaindhairya2001@gmail.com"
] |
jaindhairya2001@gmail.com
|
14f35829f8f135139c0aa2e99b2d60b13539859b
|
7970f2d6680136e4632ee9a8dd12604ddd3da296
|
/app/auth/forms.py
|
413fbc0d73217fcd07e68ef37e2e60b30a98de39
|
[] |
no_license
|
wsl3/flask-dog
|
7f2c4db3bd29484cb4ce13ba644278b6c94d6a6e
|
f43da7a2ee927ddef3d3d7a271a4b56401e2d051
|
refs/heads/master
| 2020-03-29T11:15:36.673537
| 2018-09-22T04:06:03
| 2018-09-22T04:06:03
| 149,843,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
# encoding:utf8
from ..model import User
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(),
Length(1, 64), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField("keep me logged in")
submit = SubmitField('Log In')
class RegisterForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(),
Length(1, 64), Email()])
username = StringField('Username', validators=[DataRequired(),
Length(1, 64), Regexp('^[a-zA-Z][0-9a-zA-Z._]*$', 0,
"用户名只包含字母,数字,.和_,并且以字母开头!")])
password = PasswordField('Password', validators=[DataRequired(), EqualTo('password2', message="password must match")])
password2 = PasswordField('Confirm Password', validators=[DataRequired()])
submit = SubmitField('Register')
    # custom validation methods
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError("邮箱已经注册!")
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError("该用户名正在使用!")
|
[
"2350622075@qq.com"
] |
2350622075@qq.com
|
f44958f0b88a495a911e5fa382f84413c57818bf
|
7ec18f8ec146ea5a82394052066ea9986ce59d88
|
/AES_CTR-256key.py
|
9b8475843d1c0fa5de5c31cd5032db3dbcbefd21
|
[] |
no_license
|
snehamuppala/ComputerSecurity
|
b260f94f34e0ea42ddb6f6e0761413aea601b379
|
cf84a36c2fec03c23201d9c0f0c11ee1b1a61e44
|
refs/heads/master
| 2020-04-14T04:30:04.698368
| 2019-08-22T05:58:04
| 2019-08-22T05:58:04
| 163,637,461
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,223
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 15:15:58 2018
@author: snehamuppala
"""
# import the required libraries
import binascii
import time
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Util import Counter
print("AES in the CTR mode -256 bit key")
def int_of_string(s):
return int(binascii.hexlify(iv), 16)
def encrypt_message(key, plaintext):
#initialization Vector
iv = Random.get_random_bytes(16)
ctr = Counter.new(128, initial_value=int_of_string(iv))
aes = AES.new(key, AES.MODE_CTR, counter=ctr)
return iv + aes.encrypt(plaintext)
def decrypt_message(key, ciphertext):
iv = ciphertext[:16]
ctr = Counter.new(128, initial_value=int_of_string(iv))
aes = AES.new(key, AES.MODE_CTR, counter=ctr)
return aes.decrypt(ciphertext[16:])
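# Quick round-trip sanity check (an illustrative sketch, not part of the timed
# assignment runs below):
#
#     key = Random.get_random_bytes(32)
#     ct = encrypt_message(key, "hello")
#     assert decrypt_message(key, ct) == "hello"
#
# CTR mode applies the same keystream XOR in both directions, so decryption only
# needs to rebuild the counter from the 16-byte IV prepended by encrypt_message.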
#Function to check for correctness
def compare(plaintext, decrypt):
if(plaintext==decrypt):
print("Success:original data is equal to decrypted data ")
else:
print("Failure: original data is not equal to decrypted data ")
#Files of 1kb and 1mb
File_1kb="/Users/snehamuppala/Desktop/computer_security/hw3/1kb.txt"
File_1mb="/Users/snehamuppala/Desktop/computer_security/hw3/1mb.txt"
#Files of 1kb and 1mb- to store encrypted data
File_1kb_Encrypted="/Users/snehamuppala/Desktop/computer_security/hw3/1kb_Encrypted_ctr.txt"
File_1mb_Encrypted="/Users/snehamuppala/Desktop/computer_security/hw3/1mb_Encrypted_ctr.txt"
#Files of 1kb and 1mb- to store Decrypted data
File_1kb_Decrypted="/Users/snehamuppala/Desktop/computer_security/hw3/1kb_Decrypted_ctr.txt"
File_1mb_Decrypted="/Users/snehamuppala/Desktop/computer_security/hw3/1mb_Decrypted_ctr.txt"
print(" ")
#key generating 256 bit
start_time = time.time()
key = Random.get_random_bytes(32)
print "Time taken to generate key 256 bit: %s seconds" % (time.time() - start_time)
#reading files-plaintext
infile_1kb= open(File_1kb)
infile_1mb= open(File_1mb)
plaintext_1kb=infile_1kb.read()
plaintext_1mb=infile_1mb.read()
#encrypting and Decrypting 1kb and 1mb file
print(" ")
print("***********Encrption of 1kb File***********")
start_time = time.time()
ciphertext_1kb=encrypt_message(key,plaintext_1kb)
print "Time taken to Encrypt File 1KB= %s seconds " % (time.time() - start_time)
total_time=(time.time() - start_time)
bytes_speed=(total_time)/len(plaintext_1kb)
print ("Speed per byte to Encrypt File 1KB :"+str(bytes_speed))
print(" ")
print("***********Encrption of 1mb File***********")
start_time = time.time()
ciphertext_1mb=encrypt_message(key,plaintext_1mb)
print "Time taken to Encrypt File 1KB= %s seconds " % (time.time() - start_time)
total_time=(time.time() - start_time)
bytes_speed=(total_time)/len(plaintext_1mb)
print ("Speed per byte to Encrypt File 1KB :"+str(bytes_speed))
outfile_1kb = open(File_1kb_Encrypted, 'wb')
outfile_1mb = open(File_1mb_Encrypted, 'wb')
#writing into files
outfile_1kb.write(ciphertext_1kb)
outfile_1mb.write(ciphertext_1mb)
cipher_1kb= open(File_1kb_Encrypted)
cipher_1mb= open(File_1mb_Encrypted)
cipher_1KB=cipher_1kb.read()
cipher_1MB=cipher_1mb.read()
print(" ")
print("***********Decrption of 1kb File***********")
start_time = time.time()
Decrypted_1kb=decrypt_message(key,ciphertext_1kb)
print "Time taken to Decrypt File 1KB= %s seconds" % (time.time() - start_time)
total_time=(time.time() - start_time)
bytes_speed=(total_time)/len(cipher_1KB)
print ("Speed per byte to Decrypt File 1KB :"+str(bytes_speed))
print(" ")
print("***********Decrption of 1mb File***********")
start_time = time.time()
Decrypted_1mb=decrypt_message(key,ciphertext_1mb)
print "Time taken to Decrypt File 1MB= %s seconds " % (time.time() - start_time)
total_time=(time.time() - start_time)
bytes_speed=(total_time)/len(cipher_1MB)
print ("Speed per byte to Decrypt File 1MB :"+str(bytes_speed))
#writing into files
outfile_1kb_DEC = open(File_1kb_Decrypted, 'wb')
outfile_1mb_DEC = open(File_1mb_Decrypted, 'wb')
outfile_1kb_DEC.write(Decrypted_1kb)
outfile_1mb_DEC.write(Decrypted_1mb)
print(" ")
print("checking for correctness:")
print("File-1kb:")
compare(plaintext_1kb, Decrypted_1kb)
print("File-1mb:")
compare(plaintext_1mb, Decrypted_1mb)
|
[
"snehamuppala@snehas-MBP.lan1"
] |
snehamuppala@snehas-MBP.lan1
|
3782d6d2f7c29ca9075bf5626f3544a4b4345a54
|
96bd02743d3727c7e12824fcb472e4d4fe2a49bf
|
/globalvars.py
|
8c2e741803846893b7f327f0bad49e14dd5e7ce1
|
[] |
no_license
|
barthess/ECLGenerator
|
1f13d11f0d350c93fb4980be5495da4e23fe1ec0
|
c746e5c5929425037d5668dbc4ee7cfef2153e1d
|
refs/heads/master
| 2016-09-05T20:34:59.184202
| 2012-05-18T12:03:50
| 2012-05-18T12:03:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,297
|
py
|
#!/usr/bin/python
# -*- coding: utf8 -*-
# log file
logfile = open('error.log','w')
# Some constants
# width of the fake fields used to maintain column widths
column_strut = 'x' * 1024
# default list of columns
# this list contains the correct column names as the CAD generates them;
# they must appear in the order in which they should show up in the list
# Columns with these names will remain in the database after cleanup
column_names = ['Part', 'PartN', 'Part Num', 'Value', 'VID', 'Vendor Part Num', 'Mfg Name', 'Package', 'Country of Origin', 'Unit Price']
column_num = len(column_names)
# dictionary for lookups over the assembled array
#
# All elements known to us are stored here. If an unknown one is
# encountered, the script aborts and asks the user to add the
# unknown element here.
#
# How to fill it in:
# 'key' : ['singular', 'plural', 'offset', 'count']
#
# Offset and count default to -1; they are filled in by the
# script while analyzing the main table.
component_des = { 'C' : ['Конденсатор','Конденсаторы',-1,-1], \
'E' : ['Перемычка','Перемычки',-1,-1], \
'R' : ['Резистор','Резисторы',-1,-1], \
'D' : ['Микросхема','Микросхемы',-1,-1], \
'DA': ['Микросхема','Микросхемы',-1,-1], \
'DD': ['Микросхема','Микросхемы',-1,-1], \
'L' : ['Дроссель','Дроссели',-1,-1], \
'RK': ['Терморезистор','Терморезисторы',-1,-1], \
'RP': ['Резистор подстроечный','Резисторы подстроечные',-1,-1], \
'S' : ['Переключатель','Переключатели',-1,-1], \
'SB': ['Выключатель кнопочный','Выключатели кнопочные',-1,-1], \
'VD': ['Диод','Диоды',-1,-1], \
'VT': ['Транзистор','Транзисторы',-1,-1], \
'XP': ['Вилка','Вилки',-1,-1], \
'XS': ['Розетка','Розетки',-1,-1], \
'Z' : ['Фильтр радиочастотный','Фильтры радиочастотные',-1,-1], \
'ZQ': ['Резонатор кварцевый','Резонаторы кварцевые',-1,-1], \
}
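# Example of extending the dictionary with a new designator (a hypothetical
# entry, following the 'key' : [singular, plural, offset, count] scheme
# described above):
#
#     'F' : ['Предохранитель','Предохранители',-1,-1], \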
# Sort the keys separately, because a Python dict yields its
# elements in arbitrary order
pos_names = sorted(component_des.keys())
|
[
"barthess@yandex.ru"
] |
barthess@yandex.ru
|
fc9374a5c687930f7f70c24aa1fa404c87ed40b5
|
a2b598d8e89c1755f683d6b6fe35c3f1ef3e2cf6
|
/search/[boj]1600_말이되고픈원숭이_bfs.py
|
9adea0a931916597dac7a119b2bacf6cfbd9340c
|
[
"MIT"
] |
permissive
|
DongHyunByun/algorithm_practice
|
cbe82606eaa7f372d9c0b54679bdae863aab0099
|
dcd595e6962c86f90f29e1d68f3ccc9bc673d837
|
refs/heads/master
| 2022-09-24T22:47:01.556157
| 2022-09-11T07:36:42
| 2022-09-11T07:36:42
| 231,518,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
from collections import deque
K=int(input())
W,H=map(int,input().split())
L=[]
for i in range(H):
L.append(list(map(int,input().split())))
dR=[0,0,-1,1]
dC=[1,-1,0,0]
dhR=[-1,-2,-2,-1,1,2,2,1]
dhC=[-2,-1,1,2,2,1,-1,-2]
cL=[[[0 for k in range(K+1)] for j in range(W)] for i in range(H)]
for i in range(K+1):
cL[0][0][i]=1
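# cL[r][c][h] stores "moves used to reach (r, c) with h horse-jumps spent" + 1;
# the start cell is seeded with 1 so that 0 can mean "unvisited", which is why
# the answer printed at the bottom is ans - 1 (and -1 when the exit is unreachable).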
ans=0
q=deque([[0,0,0]])
while(q):
temp=q.popleft()
r=temp[0]
c=temp[1]
h=temp[2]
if r==H-1 and c==W-1:
ans=cL[r][c][h]
break
    # move to the four adjacent cells
for d in range(4):
tempR=r+dR[d]
tempC=c+dC[d]
if 0<=tempR<H and 0<=tempC<W and L[tempR][tempC]==0 and cL[tempR][tempC][h]==0:
cL[tempR][tempC][h]=cL[r][c][h]+1
q.append([tempR,tempC,h])
    # knight-style horse jump (uses up one of the K jumps)
if h!=K:
for k in range(8):
tempR=r+dhR[k]
tempC=c+dhC[k]
if 0<=tempR<H and 0<=tempC<W and L[tempR][tempC]==0 and cL[tempR][tempC][h+1]==0:
cL[tempR][tempC][h+1]=cL[r][c][h]+1
q.append([tempR,tempC,h+1])
'''
# check values (debug)
for i in range(H):
print(cL[i])
'''
print(ans-1)
|
[
"noreply@github.com"
] |
DongHyunByun.noreply@github.com
|
e32438c91eb86f5c0c18828a86ac1fe03c58447e
|
21049c34adbb283c7f5e74dcdd52ea20d82b6342
|
/tests/confprol/test_quantic_booleans.py
|
9296834839b7499d7abd8c502c0a01cc1ae05138
|
[] |
no_license
|
DanielBV/Confprol
|
79a2a03ea42bbd09a189f5c52632c20a0028360d
|
f9590bc2fa54ef729f47ba1c98854e6220c5c0ad
|
refs/heads/master
| 2020-06-02T04:54:41.419171
| 2019-09-08T08:53:04
| 2019-09-08T08:53:04
| 191,042,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,938
|
py
|
import unittest
from expressions.booleans.quantic_axis import QuanticAxis
from main import execute
from antlr4 import InputStream
from expressions.booleans.quantic_boolean import QuanticBoolean
from unittest.mock import patch
class TestQuanticBooleans(unittest.TestCase):
@patch('builtins.print')
def test_evaluate_quantic_boolean_in_condition(self,mocked_print):
program = """
if not xTrue{
run away with 6;
}"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_multiply_quantic_boolean(self, mocked_print):
program = """
run away with 3 * xFalse;
"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_sum_quantic_boolean(self, mocked_print):
program = """
run away with 3 + xFalse;
"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_divide_quantic_boolean(self, mocked_print):
program = """
run away with 3 / xFalse;
"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_minus_quantic_boolean(self, mocked_print):
program = """
run away with 3 - xFalse;
"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_equals_quantic_boolean(self, mocked_print):
program = """
run away with xTrue := xFalse;
"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_quantic_boolean_to_int(self, mocked_print):
program = """
int(yTrue);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
@patch('builtins.print')
def test_quantic_boolean_to_float(self, mocked_print):
program = """
float(yTrue);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"EvaluateQuanticBooleanException line 2: Quantic booleans can't be evaluated or operated as regular booleans. Use evalX() or evalY() to evaluate them first.")
def test_quantic_boolean_to_string(self):
program = """
run away with string(yFalse);"""
string = execute(InputStream(program), True)
self.assertIn("[Quantic Boolean",string)
@patch('builtins.print')
def test_evaluate_x_not_quantic_boolean(self,mocked_print):
program = """
evalX(True);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"ValueException line 2: Cannot evaluate a non quantic value.")
@patch('builtins.print')
def test_evaluate_x_none(self, mocked_print):
program = """
evalX(None);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"ValueException line 2: Cannot evaluate a non quantic value.")
@patch('builtins.print')
def test_evaluate_y(self,mocked_print):
program = """
evalY(True);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"ValueException line 2: Cannot evaluate a non quantic value.")
@patch('builtins.print')
def test_evaluate_y_none(self, mocked_print):
program = """
evalY(None);"""
execute(InputStream(program), False)
mocked_print.assert_called_once_with(
"ValueException line 2: Cannot evaluate a non quantic value.")
def test_evaluate_same_axis(self):
program = """
run away with [evalX(xTrue), evalX(xFalse), evalY(yTrue), evalY(yFalse)];
"""
result = execute(InputStream(program), False)
self.assertEqual([True,False,True,False],result)
@patch('random.randint')
def test_evaluate_x_different_axis_true(self, mocked_random):
mocked_random.return_value = 6
program = """
run away with evalX(yTrue);
"""
result = execute(InputStream(program), False)
mocked_random.assert_called_once()
self.assertTrue(result)
@patch('random.randint')
def test_evaluate_x_different_axis_false(self, mocked_random):
mocked_random.return_value = 1
program = """
run away with evalX(yTrue);
"""
result = execute(InputStream(program), False)
mocked_random.assert_called_once()
self.assertFalse(result)
@patch('random.randint')
def test_evaluate_y_different_axis_true(self, mocked_random):
mocked_random.return_value = 6
program = """
run away with evalY(xTrue);
"""
result = execute(InputStream(program), False)
mocked_random.assert_called_once()
self.assertEqual(True, result)
@patch('random.randint')
def test_evaluate_y_different_axis_false(self, mocked_random):
mocked_random.return_value = 1
program = """
run away with evalY(xTrue);
"""
result = execute(InputStream(program), False)
mocked_random.assert_called_once()
self.assertFalse(result)
def test_evaluate_in_other_axis_changes_the_axis_x_axis(self):
boolean = QuanticBoolean(QuanticAxis.X,True)
value = boolean.evaluate(QuanticAxis.Y).value
self.assertEqual(QuanticAxis.Y, boolean.object.axis)
self.assertEqual(value, boolean.evaluate(QuanticAxis.Y).value)
def test_evaluate_in_other_axis_changes_the_axis_y_axis(self):
boolean = QuanticBoolean(QuanticAxis.Y, True)
value = boolean.evaluate(QuanticAxis.X).value
self.assertEqual(QuanticAxis.X, boolean.object.axis)
self.assertEqual(value, boolean.evaluate(QuanticAxis.X).value)
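    # Note (inferred from the assertions above): evaluating a quantic boolean in
    # the other axis collapses it onto that axis, after which repeated
    # evaluations in the same axis return a stable value.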
def test_return_quantic_boolean(self):
program = """
run away with xTrue;
"""
result = execute(InputStream(program), False)
self.assertIsNotNone(result)
self.assertTrue(result.value)
def test_quantic_boolean_attributes_x_axis(self):
program = """
a == xTrue;
a.a == 3;
b == xTrue;
c == xFalse;
c.c == 6;
d == xFalse;
run away with [a.a,has_attribute(b,"a"),c.c,has_attribute(d,"c")];
"""
result = execute(InputStream(program), False)
self.assertEqual([3, False,6,False], result)
def test_quantic_boolean_attributes_y_axis(self):
program = """
a == yTrue;
a.a == 3;
b == yTrue;
c == yFalse;
c.c == 6;
d == yFalse;
run away with [a.a,has_attribute(b,"a"),c.c,has_attribute(d,"c")];
"""
result = execute(InputStream(program), False)
self.assertEqual([3, False, 6, False], result)
|
[
"23432294+DanielBV@users.noreply.github.com"
] |
23432294+DanielBV@users.noreply.github.com
|
55031643697f159cc378ffde4db5cf2f28880521
|
875757cad2e865dbffed813ed6c0e9e3a0c064d0
|
/kalmantracker.py
|
6b46e42a445bef05a1fadc1b6ed973ea5db29024
|
[] |
no_license
|
domagalasebastian/pose-estimation-and-skeleton-tracking
|
f0981b2e869e16c6dddd24069a0b2f8af59ae404
|
e33a08e60e527ae2f9458ab0e4663f242977e33f
|
refs/heads/master
| 2022-04-22T18:39:43.487781
| 2020-04-24T12:53:06
| 2020-04-24T12:53:06
| 258,511,580
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
from scipy.linalg import inv
import numpy as np
class Tracker:
def __init__(self, x, bounding_box, key_points, person_id):
"""
Init single tracker for detected person.
"""
self.person_id = person_id
self.key_points = key_points
self.bounding_box = bounding_box
self.matched_detection = False
self.unmatched_tracks = 0
# Kalman filter parameters
self.x = x
self.dT = 1
self.F = np.array([[1, 0, 0, 0, self.dT, 0, 0, 0],
[0, 1, 0, 0, 0, self.dT, 0, 0],
[0, 0, 1, 0, 0, 0, self.dT, 0],
[0, 0, 0, 1, 0, 0, 0, self.dT],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]])
self.G = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
self.H = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0]])
self.P = np.diag(10.0 * np.ones(8))
self.Q = np.diag(0.01 * np.ones(4))
self.R = np.identity(4)
self.R[2, 2] = 10.0
self.R[3, 3] = 10.0
def predict(self):
"""
Predict phase of Kalman filter.
"""
self.x = self.F.dot(self.x)
self.P = self.F.dot(self.P).dot(self.F.T) + self.G.dot(self.Q).dot(self.G.T)
def update(self, z):
"""
Update phase of Kalman filter.
:param z: measurement - bounding box from detection
"""
S = self.H.dot(self.P).dot(self.H.T) + self.R
K = self.P.dot(self.H.T).dot(inv(S))
e = z - self.H.dot(self.x)
self.x += K.dot(e)
self.P -= K.dot(self.H).dot(self.P)
|
[
"noreply@github.com"
] |
domagalasebastian.noreply@github.com
|
dec3a8f2aa7e5c7a87875dbd1e1a8c5e8f5f43ad
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/delta.py
|
17de2a8e8ab8283f9b968529f38b5cef16b161a6
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 131,990
|
py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.delta import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class delta(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(delta, self).describe(), {
'id': 'delta',
'name': 'Delta Exchange',
'countries': ['VC'], # Saint Vincent and the Grenadines
'rateLimit': 300,
'version': 'v2',
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': True,
'future': False,
'option': True,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDeposits': None,
'fetchFundingHistory': False,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchLeverage': True,
'fetchLeverageTiers': False, # An infinite number of tiers, see examples/js/delta-maintenance-margin-rate-max-leverage.js
'fetchMarginMode': False,
'fetchMarketLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMySettlementHistory': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterest': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchPosition': True,
'fetchPositionMode': False,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchSettlementHistory': True,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransfer': None,
'fetchTransfers': None,
'fetchVolatilityHistory': False,
'fetchWithdrawal': None,
'fetchWithdrawals': None,
'reduceMargin': True,
'setLeverage': True,
'setMargin': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': False,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'1d': '1d',
'7d': '7d',
'1w': '1w',
'2w': '2w',
'1M': '30d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/99450025-3be60a00-2931-11eb-9302-f4fd8d8589aa.jpg',
'test': {
'public': 'https://testnet-api.delta.exchange',
'private': 'https://testnet-api.delta.exchange',
},
'api': {
'public': 'https://api.delta.exchange',
'private': 'https://api.delta.exchange',
},
'www': 'https://www.delta.exchange',
'doc': [
'https://docs.delta.exchange',
],
'fees': 'https://www.delta.exchange/fees',
'referral': 'https://www.delta.exchange/app/signup/?code=IULYNB',
},
'api': {
'public': {
'get': [
'assets',
'indices',
'products',
'products/{symbol}',
'tickers',
'tickers/{symbol}',
'l2orderbook/{symbol}',
'trades/{symbol}',
'stats',
'history/candles',
'history/sparklines',
'settings',
],
},
'private': {
'get': [
'orders',
'products/{product_id}/orders/leverage',
'positions/margined',
'positions',
'orders/history',
'fills',
'fills/history/download/csv',
'wallet/balances',
'wallet/transactions',
'wallet/transactions/download',
'wallets/sub_accounts_transfer_history',
'users/trading_preferences',
'sub_accounts',
'profile',
'deposits/address',
'orders/leverage',
],
'post': [
'orders',
'orders/bracket',
'orders/batch',
'products/{product_id}/orders/leverage',
'positions/change_margin',
'positions/close_all',
'wallets/sub_account_balance_transfer',
'orders/cancel_after',
'orders/leverage',
],
'put': [
'orders',
'orders/bracket',
'orders/batch',
'positions/auto_topup',
'users/update_mmp',
'users/reset_mmp',
],
'delete': [
'orders',
'orders/all',
'orders/batch',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0015'),
'maker': self.parse_number('0.0010'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100'), self.parse_number('0.0013')],
[self.parse_number('250'), self.parse_number('0.0013')],
[self.parse_number('1000'), self.parse_number('0.001')],
[self.parse_number('5000'), self.parse_number('0.0009')],
[self.parse_number('10000'), self.parse_number('0.00075')],
[self.parse_number('20000'), self.parse_number('0.00065')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.001')],
[self.parse_number('100'), self.parse_number('0.001')],
[self.parse_number('250'), self.parse_number('0.0009')],
[self.parse_number('1000'), self.parse_number('0.00075')],
[self.parse_number('5000'), self.parse_number('0.0006')],
[self.parse_number('10000'), self.parse_number('0.0005')],
[self.parse_number('20000'), self.parse_number('0.0005')],
],
},
},
},
'options': {
'networks': {
'TRC20': 'TRC20(TRON)',
'BEP20': 'BEP20(BSC)',
},
},
'precisionMode': TICK_SIZE,
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
# Margin required to place order with selected leverage and quantity is insufficient.
'insufficient_margin': InsufficientFunds, # {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
                'order_size_exceed_available': InvalidOrder,  # The order book doesn't have sufficient liquidity, hence the order couldn't be filled, for example, ioc orders
                'risk_limits_breached': BadRequest,  # order couldn't be placed as it would breach the allowed risk limits.
'invalid_contract': BadSymbol, # The contract/product is either doesn't exist or has already expired.
'immediate_liquidation': InvalidOrder, # Order will cause immediate liquidation.
'out_of_bankruptcy': InvalidOrder, # Order prices are out of position bankruptcy limits.
'self_matching_disrupted_post_only': InvalidOrder, # Self matching is not allowed during auction.
                'immediate_execution_post_only': InvalidOrder,  # order couldn't be placed: includes post-only orders which would be immediately executed
'bad_schema': BadRequest, # {"error":{"code":"bad_schema","context":{"schema_errors":[{"code":"validation_error","message":"id is required","param":""}]}},"success":false}
'invalid_api_key': AuthenticationError, # {"success":false,"error":{"code":"invalid_api_key"}}
'invalid_signature': AuthenticationError, # {"success":false,"error":{"code":"invalid_signature"}}
'open_order_not_found': OrderNotFound, # {"error":{"code":"open_order_not_found"},"success":false}
'unavailable': ExchangeNotAvailable, # {"error":{"code":"unavailable"},"success":false}
},
'broad': {
},
},
})
def convert_expire_date(self, date):
# parse YYMMDD to timestamp
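        # e.g. (per the slicing below) '231228' -> '2023-12-28T00:00:00Z'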
year = date[0:2]
month = date[2:4]
day = date[4:6]
reconstructedDate = '20' + year + '-' + month + '-' + day + 'T00:00:00Z'
return reconstructedDate
def create_expired_option_market(self, symbol):
# support expired option contracts
quote = 'USDT'
optionParts = symbol.split('-')
symbolBase = symbol.split('/')
base = None
expiry = None
optionType = None
if symbol.find('/') > -1:
base = self.safe_string(symbolBase, 0)
expiry = self.safe_string(optionParts, 1)
optionType = self.safe_string(optionParts, 3)
else:
base = self.safe_string(optionParts, 1)
expiry = self.safe_string(optionParts, 3)
optionType = self.safe_string(optionParts, 0)
settle = quote
strike = self.safe_string(optionParts, 2)
datetime = self.convert_expire_date(expiry)
timestamp = self.parse8601(datetime)
return {
'id': optionType + '-' + base + '-' + strike + '-' + expiry,
'symbol': base + '/' + quote + ':' + settle + '-' + expiry + '-' + strike + '-' + optionType,
'base': base,
'quote': quote,
'settle': settle,
'baseId': base,
'quoteId': quote,
'settleId': settle,
'active': False,
'type': 'option',
'linear': None,
'inverse': None,
'spot': False,
'swap': False,
'future': False,
'option': True,
'margin': False,
'contract': True,
'contractSize': self.parse_number('1'),
'expiry': timestamp,
'expiryDatetime': datetime,
'optionType': 'call' if (optionType == 'C') else 'put',
'strike': self.parse_number(strike),
'precision': {
'amount': None,
'price': None,
},
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': None,
}
def market(self, symbol):
if self.markets is None:
raise ExchangeError(self.id + ' markets not loaded')
if isinstance(symbol, str):
if symbol in self.markets:
return self.markets[symbol]
elif symbol in self.markets_by_id:
markets = self.markets_by_id[symbol]
return markets[0]
elif (symbol.find('-C') > -1) or (symbol.find('-P') > -1) or (symbol.find('C')) or (symbol.find('P')):
return self.create_expired_option_market(symbol)
raise BadSymbol(self.id + ' does not have market symbol ' + symbol)
def safe_market(self, marketId=None, market=None, delimiter=None, marketType=None):
isOption = (marketId is not None) and ((marketId.find('-C') > -1) or (marketId.find('-P') > -1) or (marketId.find('C')) or (marketId.find('P')))
if isOption and not (marketId in self.markets_by_id):
# handle expired option contracts
return self.create_expired_option_market(marketId)
return super(delta, self).safe_market(marketId, market, delimiter, marketType)
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict [params]: extra parameters specific to the delta api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = self.publicGetSettings(params)
# full response sample under `fetchStatus`
result = self.safe_value(response, 'result', {})
return self.safe_integer_product(result, 'server_time', 0.001)
def fetch_status(self, params={}):
"""
the latest known information on the availability of the exchange API
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `status structure <https://github.com/ccxt/ccxt/wiki/Manual#exchange-status-structure>`
"""
response = self.publicGetSettings(params)
#
# {
# "result": {
# "deto_liquidity_mining_daily_reward": "40775",
# "deto_msp": "1.0",
# "deto_staking_daily_reward": "23764.08",
# "enabled_wallets": [
# "BTC",
# ...
# ],
# "portfolio_margin_params": {
# "enabled_portfolios": {
# ".DEAVAXUSDT": {
# "asset_id": 5,
# "futures_contingency_margin_percent": "1",
# "interest_rate": "0",
# "maintenance_margin_multiplier": "0.8",
# "max_price_shock": "20",
# "max_short_notional_limit": "2000",
# "options_contingency_margin_percent": "1",
# "options_discount_range": "10",
# "options_liq_band_range_percentage": "25",
# "settling_asset": "USDT",
# "sort_priority": 5,
# "underlying_asset": "AVAX",
# "volatility_down_shock": "30",
# "volatility_up_shock": "45"
# },
# ...
# },
# "portfolio_enabled_contracts": [
# "futures",
# "perpetual_futures",
# "call_options",
# "put_options"
# ]
# },
# "server_time": 1650640673500273,
# "trade_farming_daily_reward": "100000",
# "circulating_supply": "140000000",
# "circulating_supply_update_time": "1636752800",
# "deto_referral_mining_daily_reward": "0",
# "deto_total_reward_pool": "100000000",
# "deto_trade_mining_daily_reward": "0",
# "kyc_deposit_limit": "20",
# "kyc_withdrawal_limit": "10000",
# "maintenance_start_time": "1650387600000000",
# "msp_deto_commission_percent": "25",
# "under_maintenance": "false"
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
underMaintenance = self.safe_string(result, 'under_maintenance')
status = 'maintenance' if (underMaintenance == 'true') else 'ok'
updated = self.safe_integer_product(result, 'server_time', 0.001, self.milliseconds())
return {
'status': status,
'updated': updated,
'eta': None,
'url': None,
'info': response,
}
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
see https://docs.delta.exchange/#get-list-of-all-assets
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: an associative dictionary of currencies
"""
response = self.publicGetAssets(params)
#
# {
# "result":[
# {
# "base_withdrawal_fee":"0.0005",
# "deposit_status":"enabled",
# "id":2,
# "interest_credit":true,
# "interest_slabs":[
# {"limit":"0.1","rate":"0"},
# {"limit":"1","rate":"0.05"},
# {"limit":"5","rate":"0.075"},
# {"limit":"10","rate":"0.1"},
# {"limit":"9999999999999999","rate":"0"}
# ],
# "kyc_deposit_limit":"10",
# "kyc_withdrawal_limit":"2",
# "min_withdrawal_amount":"0.001",
# "minimum_precision":4,
# "name":"Bitcoin",
# "precision":8,
# "sort_priority":1,
# "symbol":"BTC",
# "variable_withdrawal_fee":"0",
# "withdrawal_status":"enabled"
# },
# ],
# "success":true
# }
#
currencies = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'id')
code = self.safe_currency_code(id)
depositStatus = self.safe_string(currency, 'deposit_status')
withdrawalStatus = self.safe_string(currency, 'withdrawal_status')
depositsEnabled = (depositStatus == 'enabled')
withdrawalsEnabled = (withdrawalStatus == 'enabled')
active = depositsEnabled and withdrawalsEnabled
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'name': self.safe_string(currency, 'name'),
'info': currency, # the original payload
'active': active,
'deposit': depositsEnabled,
'withdraw': withdrawalsEnabled,
'fee': self.safe_number(currency, 'base_withdrawal_fee'),
'precision': self.parse_number(self.parse_precision(self.safe_string(currency, 'precision'))),
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal_amount'),
'max': None,
},
},
'networks': {},
}
return result
def load_markets(self, reload=False, params={}):
markets = super(delta, self).load_markets(reload, params)
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
if (currenciesByNumericId is None) or reload:
self.options['currenciesByNumericId'] = self.index_by(self.currencies, 'numericId')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId')
if (marketsByNumericId is None) or reload:
self.options['marketsByNumericId'] = self.index_by(self.markets, 'numericId')
return markets
def fetch_markets(self, params={}):
"""
retrieves data on all markets for delta
see https://docs.delta.exchange/#get-list-of-products
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
response = self.publicGetProducts(params)
#
# {
# "meta":{"after":null, "before":null, "limit":100, "total_count":81},
# "result":[
# # the below response represents item from perpetual market
# {
# "annualized_funding":"5.475000000000000000",
# "is_quanto":false,
# "ui_config":{
# "default_trading_view_candle":"15",
# "leverage_slider_values":[1,3,5,10,25,50],
# "price_clubbing_values":[0.001,0.005,0.05,0.1,0.5,1,5],
# "show_bracket_orders":false,
# "sort_priority":29,
# "tags":[]
# },
# "basis_factor_max_limit":"0.15",
# "symbol":"P-LINK-D-151120",
# "id":1584,
# "default_leverage":"5.000000000000000000",
# "maker_commission_rate":"0.0005",
# "contract_unit_currency":"LINK",
# "strike_price":"12.507948",
# "settling_asset":{
# # asset structure
# },
# "auction_start_time":null,
# "auction_finish_time":null,
# "settlement_time":"2020-11-15T12:00:00Z",
# "launch_time":"2020-11-14T11:55:05Z",
# "spot_index":{
# # index structure
# },
# "trading_status":"operational",
# "tick_size":"0.001",
# "position_size_limit":100000,
# "notional_type":"vanilla", # vanilla, inverse
# "price_band":"0.4",
# "barrier_price":null,
# "description":"Daily LINK PUT options quoted in USDT and settled in USDT",
# "insurance_fund_margin_contribution":"1",
# "quoting_asset":{
# # asset structure
# },
# "liquidation_penalty_factor":"0.2",
# "product_specs":{"max_volatility":3,"min_volatility":0.3,"spot_price_band":"0.40"},
# "initial_margin_scaling_factor":"0.0001",
# "underlying_asset":{
# # asset structure
# },
# "state":"live",
# "contract_value":"1",
# "initial_margin":"2",
# "impact_size":5000,
# "settlement_price":null,
# "contract_type":"put_options", # put_options, call_options, move_options, perpetual_futures, interest_rate_swaps, futures, spreads
# "taker_commission_rate":"0.0005",
# "maintenance_margin":"1",
# "short_description":"LINK Daily PUT Options",
# "maintenance_margin_scaling_factor":"0.00005",
# "funding_method":"mark_price",
# "max_leverage_notional":"20000"
# },
# # the below response represents item from spot market
# {
# "position_size_limit": 10000000,
# "settlement_price": null,
# "funding_method": "mark_price",
# "settling_asset": null,
# "impact_size": 10,
# "id": 32258,
# "auction_finish_time": null,
# "description": "Solana tether spot market",
# "trading_status": "operational",
# "tick_size": "0.01",
# "liquidation_penalty_factor": "1",
# "spot_index": {
# "config": {"quoting_asset": "USDT", "service_id": 8, "underlying_asset": "SOL"},
# "constituent_exchanges": [
# {"exchange": "binance", "health_interval": 60, "health_priority": 1, "weight": 1},
# {"exchange": "huobi", "health_interval": 60, "health_priority": 2, "weight": 1}
# ],
# "constituent_indices": null,
# "description": "Solana index from binance and huobi",
# "health_interval": 300,
# "id": 105,
# "impact_size": "40.000000000000000000",
# "index_type": "spot_pair",
# "is_composite": False,
# "price_method": "ltp",
# "quoting_asset_id": 5,
# "symbol": ".DESOLUSDT",
# "tick_size": "0.000100000000000000",
# "underlying_asset_id": 66
# },
# "contract_type": "spot",
# "launch_time": "2022-02-03T10:18:11Z",
# "symbol": "SOL_USDT",
# "disruption_reason": null,
# "settlement_time": null,
# "insurance_fund_margin_contribution": "1",
# "is_quanto": False,
# "maintenance_margin": "5",
# "taker_commission_rate": "0.0005",
# "auction_start_time": null,
# "max_leverage_notional": "10000000",
# "state": "live",
# "annualized_funding": "0",
# "notional_type": "vanilla",
# "price_band": "100",
# "product_specs": {"kyc_required": False, "max_order_size": 2000, "min_order_size": 0.01, "quoting_precision": 4, "underlying_precision": 2},
# "default_leverage": "1.000000000000000000",
# "initial_margin": "10",
# "maintenance_margin_scaling_factor": "1",
# "ui_config": {
# "default_trading_view_candle": "1d",
# "leverage_slider_values": [],
# "price_clubbing_values": [0.01, 0.05, 0.1, 0.5, 1, 2.5, 5],
# "show_bracket_orders": False,
# "sort_priority": 2,
# "tags": []
# },
# "basis_factor_max_limit": "10000",
# "contract_unit_currency": "SOL",
# "strike_price": null,
# "quoting_asset": {
# "base_withdrawal_fee": "10.000000000000000000",
# "deposit_status": "enabled",
# "id": 5,
# "interest_credit": False,
# "interest_slabs": null,
# "kyc_deposit_limit": "100000.000000000000000000",
# "kyc_withdrawal_limit": "10000.000000000000000000",
# "min_withdrawal_amount": "30.000000000000000000",
# "minimum_precision": 2,
# "name": "Tether",
# "networks": [
# {"base_withdrawal_fee": "25", "deposit_status": "enabled", "memo_required": False, "network": "ERC20", "variable_withdrawal_fee": "0", "withdrawal_status": "enabled"},
# {"base_withdrawal_fee": "1", "deposit_status": "enabled", "memo_required": False, "network": "BEP20(BSC)", "variable_withdrawal_fee": "0", "withdrawal_status": "enabled"},
# {"base_withdrawal_fee": "1", "deposit_status": "disabled", "memo_required": False, "network": "TRC20(TRON)", "variable_withdrawal_fee": "0", "withdrawal_status": "disabled"}
# ],
# "precision": 8,
# "sort_priority": 1,
# "symbol": "USDT",
# "variable_withdrawal_fee": "0.000000000000000000",
# "withdrawal_status": "enabled"
# },
# "maker_commission_rate": "0.0005",
# "initial_margin_scaling_factor": "2",
# "underlying_asset": {
# "base_withdrawal_fee": "0.000000000000000000",
# "deposit_status": "enabled",
# "id": 66,
# "interest_credit": False,
# "interest_slabs": null,
# "kyc_deposit_limit": "0.000000000000000000",
# "kyc_withdrawal_limit": "0.000000000000000000",
# "min_withdrawal_amount": "0.020000000000000000",
# "minimum_precision": 4,
# "name": "Solana",
# "networks": [
# {"base_withdrawal_fee": "0.01", "deposit_status": "enabled", "memo_required": False, "network": "SOLANA", "variable_withdrawal_fee": "0", "withdrawal_status": "enabled"},
# {"base_withdrawal_fee": "0.01", "deposit_status": "enabled", "memo_required": False, "network": "BEP20(BSC)", "variable_withdrawal_fee": "0", "withdrawal_status": "enabled"}
# ],
# "precision": 8,
# "sort_priority": 7,
# "symbol": "SOL",
# "variable_withdrawal_fee": "0.000000000000000000",
# "withdrawal_status": "enabled"
# },
# "barrier_price": null,
# "contract_value": "1",
# "short_description": "SOL-USDT spot market"
# },
# ],
# "success":true
# }
#
markets = self.safe_value(response, 'result', [])
result = []
for i in range(0, len(markets)):
market = markets[i]
type = self.safe_string(market, 'contract_type')
if type == 'options_combos':
continue
# settlingAsset = self.safe_value(market, 'settling_asset', {})
quotingAsset = self.safe_value(market, 'quoting_asset', {})
underlyingAsset = self.safe_value(market, 'underlying_asset', {})
settlingAsset = self.safe_value(market, 'settling_asset')
productSpecs = self.safe_value(market, 'product_specs', {})
baseId = self.safe_string(underlyingAsset, 'symbol')
quoteId = self.safe_string(quotingAsset, 'symbol')
settleId = self.safe_string(settlingAsset, 'symbol')
id = self.safe_string(market, 'symbol')
numericId = self.safe_integer(market, 'id')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
callOptions = (type == 'call_options')
putOptions = (type == 'put_options')
moveOptions = (type == 'move_options')
spot = (type == 'spot')
swap = (type == 'perpetual_futures')
future = (type == 'futures')
option = (callOptions or putOptions or moveOptions)
strike = self.safe_string(market, 'strike_price')
expiryDatetime = self.safe_string(market, 'settlement_time')
expiry = self.parse8601(expiryDatetime)
contractSize = self.safe_number(market, 'contract_value')
amountPrecision = None
if spot:
amountPrecision = self.parse_number(self.parse_precision(self.safe_string(productSpecs, 'underlying_precision'))) # seems inverse of 'impact_size'
else:
# other markets(swap, futures, move, spread, irs) seem to use the step of '1' contract
amountPrecision = self.parse_number('1')
linear = (settle == base)
optionType = None
symbol = base + '/' + quote
if swap or future or option:
symbol = symbol + ':' + settle
if future or option:
symbol = symbol + '-' + self.yymmdd(expiry)
if option:
type = 'option'
letter = 'C'
optionType = 'call'
if putOptions:
letter = 'P'
optionType = 'put'
elif moveOptions:
letter = 'M'
optionType = 'move'
symbol = symbol + '-' + strike + '-' + letter
else:
type = 'future'
else:
type = 'swap'
state = self.safe_string(market, 'state')
result.append({
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': spot,
'margin': None if spot else False,
'swap': swap,
'future': future,
'option': option,
'active': (state == 'live'),
'contract': not spot,
'linear': None if spot else linear,
'inverse': None if spot else not linear,
'taker': self.safe_number(market, 'taker_commission_rate'),
'maker': self.safe_number(market, 'maker_commission_rate'),
'contractSize': contractSize,
'expiry': expiry,
'expiryDatetime': expiryDatetime,
'strike': self.parse_number(strike),
'optionType': optionType,
'precision': {
'amount': amountPrecision,
'price': self.safe_number(market, 'tick_size'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.parse_number('1'),
'max': self.safe_number(market, 'position_size_limit'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min_size'),
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# spot: fetchTicker, fetchTickers
#
# {
# "close": 30634.0,
# "contract_type": "spot",
# "greeks": null,
# "high": 30780.0,
# "low": 30340.5,
# "mark_price": "48000",
# "oi": "0.0000",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "0",
# "oi_value": "0.0000",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "0.0000",
# "open": 30464.0,
# "price_band": null,
# "product_id": 8320,
# "quotes": {},
# "size": 2.6816639999999996,
# "spot_price": "30637.91465121",
# "symbol": "BTC_USDT",
# "timestamp": 1689139767621299,
# "turnover": 2.6816639999999996,
# "turnover_symbol": "BTC",
# "turnover_usd": 81896.45613400004,
# "volume": 2.6816639999999996
# }
#
# swap: fetchTicker, fetchTickers
#
# {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# }
#
# option: fetchTicker, fetchTickers
#
# {
# "contract_type": "call_options",
# "greeks": {
# "delta": "0.60873994",
# "gamma": "0.00014854",
# "rho": "7.71808010",
# "spot": "30598.49040622",
# "theta": "-30.44743017",
# "vega": "24.83508248"
# },
# "mark_price": "1347.74819696",
# "mark_vol": "0.39966303",
# "oi": "2.7810",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "2781",
# "oi_value": "2.7810",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "85127.4337",
# "price_band": {
# "lower_limit": "91.27423497",
# "upper_limit": "7846.19454697"
# },
# "product_id": 107150,
# "quotes": {
# "ask_iv": "0.41023239",
# "ask_size": "2397",
# "best_ask": "1374",
# "best_bid": "1322",
# "bid_iv": "0.38929375",
# "bid_size": "3995",
# "impact_mid_price": null,
# "mark_iv": "0.39965618"
# },
# "spot_price": "30598.43379314",
# "strike_price": "30000",
# "symbol": "C-BTC-30000-280723",
# "timestamp": 1689136932893181,
# "turnover_symbol": "USDT"
# }
#
timestamp = self.safe_integer_product(ticker, 'timestamp', 0.001)
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_string(ticker, 'close')
quotes = self.safe_value(ticker, 'quotes', {})
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(quotes, 'best_bid'),
'bidVolume': self.safe_number(quotes, 'bid_size'),
'ask': self.safe_number(quotes, 'best_ask'),
'askVolume': self.safe_number(quotes, 'ask_size'),
'vwap': None,
'open': self.safe_string(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'volume'),
'quoteVolume': self.safe_number(ticker, 'turnover'),
'info': ticker,
}, market)
def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
see https://docs.delta.exchange/#get-ticker-for-a-product-by-symbol
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTickersSymbol(self.extend(request, params))
#
# spot
#
# {
# "result": {
# "close": 30634.0,
# "contract_type": "spot",
# "greeks": null,
# "high": 30780.0,
# "low": 30340.5,
# "mark_price": "48000",
# "oi": "0.0000",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "0",
# "oi_value": "0.0000",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "0.0000",
# "open": 30464.0,
# "price_band": null,
# "product_id": 8320,
# "quotes": {},
# "size": 2.6816639999999996,
# "spot_price": "30637.91465121",
# "symbol": "BTC_USDT",
# "timestamp": 1689139767621299,
# "turnover": 2.6816639999999996,
# "turnover_symbol": "BTC",
# "turnover_usd": 81896.45613400004,
# "volume": 2.6816639999999996
# },
# "success": True
# }
#
# swap
#
# {
# "result": {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# },
# "success": True
# }
#
# option
#
# {
# "result": {
# "contract_type": "call_options",
# "greeks": {
# "delta": "0.60873994",
# "gamma": "0.00014854",
# "rho": "7.71808010",
# "spot": "30598.49040622",
# "theta": "-30.44743017",
# "vega": "24.83508248"
# },
# "mark_price": "1347.74819696",
# "mark_vol": "0.39966303",
# "oi": "2.7810",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "2781",
# "oi_value": "2.7810",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "85127.4337",
# "price_band": {
# "lower_limit": "91.27423497",
# "upper_limit": "7846.19454697"
# },
# "product_id": 107150,
# "quotes": {
# "ask_iv": "0.41023239",
# "ask_size": "2397",
# "best_ask": "1374",
# "best_bid": "1322",
# "bid_iv": "0.38929375",
# "bid_size": "3995",
# "impact_mid_price": null,
# "mark_iv": "0.39965618"
# },
# "spot_price": "30598.43379314",
# "strike_price": "30000",
# "symbol": "C-BTC-30000-280723",
# "timestamp": 1689136932893181,
# "turnover_symbol": "USDT"
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ticker(result, market)
def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
see https://docs.delta.exchange/#get-tickers-for-products
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
symbols = self.market_symbols(symbols)
response = self.publicGetTickers(params)
#
# spot
#
# {
# "result": [
# {
# "close": 30634.0,
# "contract_type": "spot",
# "greeks": null,
# "high": 30780.0,
# "low": 30340.5,
# "mark_price": "48000",
# "oi": "0.0000",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "0",
# "oi_value": "0.0000",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "0.0000",
# "open": 30464.0,
# "price_band": null,
# "product_id": 8320,
# "quotes": {},
# "size": 2.6816639999999996,
# "spot_price": "30637.91465121",
# "symbol": "BTC_USDT",
# "timestamp": 1689139767621299,
# "turnover": 2.6816639999999996,
# "turnover_symbol": "BTC",
# "turnover_usd": 81896.45613400004,
# "volume": 2.6816639999999996
# },
# ],
# "success":true
# }
#
# swap
#
# {
# "result": [
# {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# },
# ],
# "success":true
# }
#
# option
#
# {
# "result": [
# {
# "contract_type": "call_options",
# "greeks": {
# "delta": "0.60873994",
# "gamma": "0.00014854",
# "rho": "7.71808010",
# "spot": "30598.49040622",
# "theta": "-30.44743017",
# "vega": "24.83508248"
# },
# "mark_price": "1347.74819696",
# "mark_vol": "0.39966303",
# "oi": "2.7810",
# "oi_change_usd_6h": "0.0000",
# "oi_contracts": "2781",
# "oi_value": "2.7810",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "85127.4337",
# "price_band": {
# "lower_limit": "91.27423497",
# "upper_limit": "7846.19454697"
# },
# "product_id": 107150,
# "quotes": {
# "ask_iv": "0.41023239",
# "ask_size": "2397",
# "best_ask": "1374",
# "best_bid": "1322",
# "bid_iv": "0.38929375",
# "bid_size": "3995",
# "impact_mid_price": null,
# "mark_iv": "0.39965618"
# },
# "spot_price": "30598.43379314",
# "strike_price": "30000",
# "symbol": "C-BTC-30000-280723",
# "timestamp": 1689136932893181,
# "turnover_symbol": "USDT"
# },
# ],
# "success":true
# }
#
tickers = self.safe_value(response, 'result', [])
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
see https://docs.delta.exchange/#get-l2-orderbook
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetL2orderbookSymbol(self.extend(request, params))
#
# {
# "result":{
# "buy":[
# {"price":"15814.0","size":912},
# {"price":"15813.5","size":1279},
# {"price":"15813.0","size":1634},
# ],
# "sell":[
# {"price":"15814.5","size":625},
# {"price":"15815.0","size":982},
# {"price":"15815.5","size":1328},
# ],
# "symbol":"BTCUSDT"
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_order_book(result, market['symbol'], None, 'buy', 'sell', 'price', 'size')
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "buyer_role":"maker",
# "price":"15896.5",
# "seller_role":"taker",
# "size":241,
# "symbol":"BTCUSDT",
# "timestamp":1605376684714595
# }
#
# private fetchMyTrades
#
# {
# "commission":"0.008335000000000000",
# "created_at":"2020-11-16T19:07:19Z",
# "fill_type":"normal",
# "id":"e7ff05c233a74245b72381f8dd91d1ce",
# "meta_data":{
# "effective_commission_rate":"0.0005",
# "order_price":"16249",
# "order_size":1,
# "order_type":"market_order",
# "order_unfilled_size":0,
# "trading_fee_credits_used":"0"
# },
# "order_id":"152999629",
# "price":"16669",
# "product":{
# "contract_type":"perpetual_futures",
# "contract_unit_currency":"BTC",
# "contract_value":"0.001",
# "id":139,
# "notional_type":"vanilla",
# "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "symbol":"BTCUSDT",
# "tick_size":"0.5",
# "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
# },
# "product_id":139,
# "role":"taker",
# "side":"sell",
# "size":1
# }
#
id = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'order_id')
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
timestamp = self.safe_integer_product(trade, 'timestamp', 0.001, timestamp)
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'size')
product = self.safe_value(trade, 'product', {})
marketId = self.safe_string(product, 'symbol')
symbol = self.safe_symbol(marketId, market)
sellerRole = self.safe_string(trade, 'seller_role')
side = self.safe_string(trade, 'side')
if side is None:
if sellerRole == 'taker':
side = 'sell'
elif sellerRole == 'maker':
side = 'buy'
takerOrMaker = self.safe_string(trade, 'role')
metaData = self.safe_value(trade, 'meta_data', {})
type = self.safe_string(metaData, 'order_type')
if type is not None:
type = type.replace('_order', '')
feeCostString = self.safe_string(trade, 'commission')
fee = None
if feeCostString is not None:
settlingAsset = self.safe_value(product, 'settling_asset', {})
feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
return self.safe_trade({
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}, market)
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
see https://docs.delta.exchange/#get-public-trades
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the delta api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTradesSymbol(self.extend(request, params))
#
# {
# "result":[
# {
# "buyer_role":"maker",
# "price":"15896.5",
# "seller_role":"taker",
# "size":241,
# "symbol":"BTCUSDT",
# "timestamp":1605376684714595
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_trades(result, market, since, limit)
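    # Usage sketch (public endpoint; values illustrative):
    #
    #     trades = exchange.fetch_trades('BTC/USDT:USDT')
    #     for trade in trades:
    #         print(trade['datetime'], trade['side'], trade['price'], trade['amount'])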
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time":1605393120,
# "open":15989,
# "high":15989,
# "low":15987.5,
# "close":15987.5,
# "volume":565
# }
#
return [
self.safe_timestamp(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
see https://docs.delta.exchange/#get-ohlc-candles
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int [since]: timestamp in ms of the earliest candle to fetch
:param int [limit]: the maximum amount of candles to fetch
:param dict [params]: extra parameters specific to the delta api endpoint
:returns int[][]: A list of candles ordered, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
request = {
'resolution': self.safe_string(self.timeframes, timeframe, timeframe),
}
duration = self.parse_timeframe(timeframe)
limit = limit if limit else 2000 # max 2000
if since is None:
end = self.seconds()
request['end'] = end
request['start'] = end - limit * duration
else:
start = self.parse_to_int(since / 1000)
request['start'] = start
request['end'] = self.sum(start, limit * duration)
price = self.safe_string(params, 'price')
if price == 'mark':
request['symbol'] = 'MARK:' + market['id']
elif price == 'index':
request['symbol'] = market['info']['spot_index']['symbol']
else:
request['symbol'] = market['id']
params = self.omit(params, 'price')
response = self.publicGetHistoryCandles(self.extend(request, params))
#
# {
# "success":true,
# "result":[
# {"time":1605393120,"open":15989,"high":15989,"low":15987.5,"close":15987.5,"volume":565},
# {"time":1605393180,"open":15966,"high":15966,"low":15959,"close":15959,"volume":24},
# {"time":1605393300,"open":15973,"high":15973,"low":15973,"close":15973,"volume":1288},
# ]
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_ohlcvs(result, market, timeframe, since, limit)
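    # Usage sketch (public endpoint; values illustrative). The optional
    # params['price'] switch selects mark-price or index-price candles instead
    # of last-traded-price candles:
    #
    #     candles = exchange.fetch_ohlcv('BTC/USDT:USDT', '1m', limit=100)
    #     mark_candles = exchange.fetch_ohlcv('BTC/USDT:USDT', '1m', params={'price': 'mark'})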
def parse_balance(self, response):
balances = self.safe_value(response, 'result', [])
result = {'info': response}
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId', {})
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset_id')
currency = self.safe_value(currenciesByNumericId, currencyId)
code = currencyId if (currency is None) else currency['code']
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['free'] = self.safe_string(balance, 'available_balance')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
see https://docs.delta.exchange/#get-wallet-balances
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
self.load_markets()
response = self.privateGetWalletBalances(params)
#
# {
# "result":[
# {
# "asset_id":1,
# "available_balance":"0",
# "balance":"0",
# "commission":"0",
# "id":154883,
# "interest_credit":"0",
# "order_margin":"0",
# "pending_referral_bonus":"0",
# "pending_trading_fee_credit":"0",
# "position_margin":"0",
# "trading_fee_credit":"0",
# "user_id":22142
# },
# ],
# "success":true
# }
#
return self.parse_balance(response)
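    # Usage sketch (authenticated; credentials are placeholders):
    #
    #     exchange = ccxt.delta({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    #     balance = exchange.fetch_balance()
    #     print(balance['USDT']['free'], balance['USDT']['total'])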
def fetch_position(self, symbol: str, params={}):
"""
fetch data on a single open contract trade position
see https://docs.delta.exchange/#get-position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
}
response = self.privateGetPositions(self.extend(request, params))
#
# {
# "result":{
# "entry_price":null,
# "size":0,
# "timestamp":1605454074268079
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_position(result, market)
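    # Usage sketch (authenticated; symbol illustrative):
    #
    #     position = exchange.fetch_position('BTC/USDT:USDT')
    #     print(position['side'], position['contracts'], position['entryPrice'])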
def fetch_positions(self, symbols: Optional[List[str]] = None, params={}):
"""
fetch all open positions
see https://docs.delta.exchange/#get-margined-positions
:param str[]|None symbols: list of unified market symbols
:param dict [params]: extra parameters specific to the delta api endpoint
        :returns dict[]: a list of `position structures <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
"""
self.load_markets()
response = self.privateGetPositionsMargined(params)
#
# {
# "success": True,
# "result": [
# {
# "user_id": 0,
# "size": 0,
# "entry_price": "string",
# "margin": "string",
# "liquidation_price": "string",
# "bankruptcy_price": "string",
# "adl_level": 0,
# "product_id": 0,
# "product_symbol": "string",
# "commission": "string",
# "realized_pnl": "string",
# "realized_funding": "string"
# }
# ]
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_positions(result, symbols)
def parse_position(self, position, market=None):
#
# fetchPosition
#
# {
# "entry_price":null,
# "size":0,
# "timestamp":1605454074268079
# }
#
#
# fetchPositions
#
# {
# "user_id": 0,
# "size": 0,
# "entry_price": "string",
# "margin": "string",
# "liquidation_price": "string",
# "bankruptcy_price": "string",
# "adl_level": 0,
# "product_id": 0,
# "product_symbol": "string",
# "commission": "string",
# "realized_pnl": "string",
# "realized_funding": "string"
# }
#
marketId = self.safe_string(position, 'product_symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
timestamp = self.safe_integer_product(position, 'timestamp', 0.001)
sizeString = self.safe_string(position, 'size')
side = None
if sizeString is not None:
if Precise.string_gt(sizeString, '0'):
side = 'buy'
elif Precise.string_lt(sizeString, '0'):
side = 'sell'
return {
'info': position,
'id': None,
'symbol': symbol,
'notional': None,
'marginMode': None,
'liquidationPrice': self.safe_number(position, 'liquidation_price'),
'entryPrice': self.safe_number(position, 'entry_price'),
'unrealizedPnl': None, # todo - realized_pnl ?
'percentage': None,
'contracts': self.parse_number(sizeString),
'contractSize': self.safe_number(market, 'contractSize'),
'markPrice': None,
'side': side,
'hedged': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'collateral': None,
'initialMargin': None,
'initialMarginPercentage': None,
'leverage': None,
'marginRatio': None,
'stopLossPrice': None,
'takeProfitPrice': None,
}
def parse_order_status(self, status):
statuses = {
'open': 'open',
'pending': 'open',
'closed': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, cancelOrder, editOrder, fetchOpenOrders, fetchClosedOrders
#
# {
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":null,
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"open",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# }
#
id = self.safe_string(order, 'id')
clientOrderId = self.safe_string(order, 'client_order_id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
marketId = self.safe_string(order, 'product_id')
marketsByNumericId = self.safe_value(self.options, 'marketsByNumericId', {})
market = self.safe_value(marketsByNumericId, marketId, market)
symbol = marketId if (market is None) else market['symbol']
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'order_type')
type = type.replace('_order', '')
price = self.safe_string(order, 'limit_price')
amount = self.safe_string(order, 'size')
remaining = self.safe_string(order, 'unfilled_size')
average = self.safe_string(order, 'average_fill_price')
fee = None
feeCostString = self.safe_string(order, 'paid_commission')
if feeCostString is not None:
feeCurrencyCode = None
if market is not None:
settlingAsset = self.safe_value(market['info'], 'settling_asset', {})
feeCurrencyId = self.safe_string(settlingAsset, 'symbol')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': average,
'filled': None,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
see https://docs.delta.exchange/#place-order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the delta api endpoint
        :param bool [params.reduceOnly]: *contract only* indicates if this order is to reduce the size of a position
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
orderType = type + '_order'
market = self.market(symbol)
request = {
'product_id': market['numericId'],
# 'limit_price': self.price_to_precision(market['symbol'], price),
'size': self.amount_to_precision(market['symbol'], amount),
'side': side,
'order_type': orderType,
# 'client_order_id': 'string',
# 'time_in_force': 'gtc', # gtc, ioc, fok
# 'post_only': 'false', # 'true',
# 'reduce_only': 'false', # 'true',
}
if type == 'limit':
request['limit_price'] = self.price_to_precision(market['symbol'], price)
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client_order_id')
params = self.omit(params, ['clientOrderId', 'client_order_id'])
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
reduceOnly = self.safe_value(params, 'reduceOnly')
if reduceOnly:
request['reduce_only'] = reduceOnly
params = self.omit(params, 'reduceOnly')
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "result":{
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":null,
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"open",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# },
# "success":true
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
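    # Usage sketch (authenticated; all values illustrative): a limit buy with a
    # client order id, and a reduce-only market sell to close part of a position.
    #
    #     order = exchange.create_order('BTC/USDT:USDT', 'limit', 'buy', 1, 10000,
    #                                   {'clientOrderId': 'my-order-1'})
    #     close = exchange.create_order('BTC/USDT:USDT', 'market', 'sell', 1, None,
    #                                   {'reduceOnly': True})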
def edit_order(self, id: str, symbol, type, side, amount=None, price=None, params={}):
"""
edit a trade order
see https://docs.delta.exchange/#edit-order
:param str id: order id
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of the currency you want to trade in units of the base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'id': int(id),
'product_id': market['numericId'],
# 'limit_price': self.price_to_precision(symbol, price),
# 'size': self.amount_to_precision(symbol, amount),
}
if amount is not None:
request['size'] = int(self.amount_to_precision(symbol, amount))
if price is not None:
request['limit_price'] = self.price_to_precision(symbol, price)
response = self.privatePutOrders(self.extend(request, params))
#
# {
# "success": True,
# "result": {
# "id": "ashb1212",
# "product_id": 27,
# "limit_price": "9200",
# "side": "buy",
# "size": 100,
# "unfilled_size": 50,
# "user_id": 1,
# "order_type": "limit_order",
# "state": "open",
# "created_at": "..."
# }
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
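    # Usage sketch (authenticated; values illustrative): reprice and resize an
    # open order by id.
    #
    #     updated = exchange.edit_order('152870626', 'BTC/USDT:USDT', 'limit', 'buy', 2, 9500)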
def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
see https://docs.delta.exchange/#cancel-order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.check_required_symbol('cancelOrder', symbol)
self.load_markets()
market = self.market(symbol)
request = {
'id': int(id),
'product_id': market['numericId'],
}
response = self.privateDeleteOrders(self.extend(request, params))
#
# {
# "result":{
# "average_fill_price":null,
# "bracket_order":null,
# "bracket_stop_loss_limit_price":null,
# "bracket_stop_loss_price":null,
# "bracket_take_profit_limit_price":null,
# "bracket_take_profit_price":null,
# "bracket_trail_amount":null,
# "cancellation_reason":"cancelled_by_user",
# "client_order_id":null,
# "close_on_trigger":"false",
# "commission":"0",
# "created_at":"2020-11-16T02:38:26Z",
# "id":152870626,
# "limit_price":"10000",
# "meta_data":{"source":"api"},
# "order_type":"limit_order",
# "paid_commission":"0",
# "product_id":139,
# "reduce_only":false,
# "side":"buy",
# "size":0,
# "state":"cancelled",
# "stop_order_type":null,
# "stop_price":null,
# "stop_trigger_method":"mark_price",
# "time_in_force":"gtc",
# "trail_amount":null,
# "unfilled_size":0,
# "user_id":22142
# },
# "success":true
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
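    # Usage sketch (authenticated; values illustrative). The symbol is required
    # because the request needs the market's numeric product_id:
    #
    #     canceled = exchange.cancel_order('152870626', 'BTC/USDT:USDT')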
def cancel_all_orders(self, symbol: Optional[str] = None, params={}):
"""
cancel all open orders in a market
see https://docs.delta.exchange/#cancel-all-open-orders
:param str symbol: unified market symbol of the market to cancel orders in
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.check_required_symbol('cancelAllOrders', symbol)
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
# 'cancel_limit_orders': 'true',
# 'cancel_stop_orders': 'true',
}
response = self.privateDeleteOrdersAll(self.extend(request, params))
#
# {
# "result":{},
# "success":true
# }
#
return response
def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
see https://docs.delta.exchange/#get-active-orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open order structures to retrieve
:param dict [params]: extra parameters specific to the delta api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
return self.fetch_orders_with_method('privateGetOrders', symbol, since, limit, params)
def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
see https://docs.delta.exchange/#get-order-history-cancelled-and-closed
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
:param int [limit]: the maximum number of order structures to retrieve
:param dict [params]: extra parameters specific to the delta api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
return self.fetch_orders_with_method('privateGetOrdersHistory', symbol, since, limit, params)
def fetch_orders_with_method(self, method, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
self.load_markets()
request = {
# 'product_ids': market['id'], # comma-separated
# 'contract_types': types, # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
# 'order_types': types, # comma-separated, market, limit, stop_market, stop_limit, all_stop
# 'start_time': since * 1000,
# 'end_time': self.microseconds(),
# 'after', # after cursor for pagination
# 'before', # before cursor for pagination
# 'page_size': limit, # number of records per page
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_ids'] = market['numericId'] # accepts a comma-separated list of ids
if since is not None:
request['start_time'] = str(since) + '000'
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# {
# "success": True,
# "result": [
# {
# "id": "ashb1212",
# "product_id": 27,
# "limit_price": "9200",
# "side": "buy",
# "size": 100,
# "unfilled_size": 50,
# "user_id": 1,
# "order_type": "limit_order",
# "state": "open",
# "created_at": "..."
# }
# ],
# "meta": {
# "after": "string",
# "before": "string"
# }
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market, since, limit)
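    # Usage sketch for the two wrappers above (authenticated; values illustrative):
    #
    #     open_orders = exchange.fetch_open_orders('BTC/USDT:USDT')
    #     closed_orders = exchange.fetch_closed_orders('BTC/USDT:USDT', limit=50)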
def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
see https://docs.delta.exchange/#get-user-fills-by-filters
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades structures to retrieve
:param dict [params]: extra parameters specific to the delta api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
self.load_markets()
request = {
# 'product_ids': market['id'], # comma-separated
# 'contract_types': types, # comma-separated, futures, perpetual_futures, call_options, put_options, interest_rate_swaps, move_options, spreads
# 'start_time': since * 1000,
# 'end_time': self.microseconds(),
# 'after', # after cursor for pagination
# 'before', # before cursor for pagination
# 'page_size': limit, # number of records per page
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_ids'] = market['numericId'] # accepts a comma-separated list of ids
if since is not None:
request['start_time'] = str(since) + '000'
if limit is not None:
request['page_size'] = limit
response = self.privateGetFills(self.extend(request, params))
#
# {
# "meta":{
# "after":null,
# "before":null,
# "limit":10,
# "total_count":2
# },
# "result":[
# {
# "commission":"0.008335000000000000",
# "created_at":"2020-11-16T19:07:19Z",
# "fill_type":"normal",
# "id":"e7ff05c233a74245b72381f8dd91d1ce",
# "meta_data":{
# "effective_commission_rate":"0.0005",
# "order_price":"16249",
# "order_size":1,
# "order_type":"market_order",
# "order_unfilled_size":0,
# "trading_fee_credits_used":"0"
# },
# "order_id":"152999629",
# "price":"16669",
# "product":{
# "contract_type":"perpetual_futures",
# "contract_unit_currency":"BTC",
# "contract_value":"0.001",
# "id":139,
# "notional_type":"vanilla",
# "quoting_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "settling_asset":{"minimum_precision":2,"precision":6,"symbol":"USDT"},
# "symbol":"BTCUSDT",
# "tick_size":"0.5",
# "underlying_asset":{"minimum_precision":4,"precision":8,"symbol":"BTC"}
# },
# "product_id":139,
# "role":"taker",
# "side":"sell",
# "size":1
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_trades(result, market, since, limit)
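    # Usage sketch (authenticated; values illustrative):
    #
    #     fills = exchange.fetch_my_trades('BTC/USDT:USDT', limit=10)
    #     for fill in fills:
    #         print(fill['datetime'], fill['takerOrMaker'], fill['fee'])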
def fetch_ledger(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
        fetch the history of changes, i.e. actions done by the user or operations that altered the user's balance
see https://docs.delta.exchange/#get-wallet-transactions
:param str code: unified currency code, default is None
:param int [since]: timestamp in ms of the earliest ledger entry, default is None
        :param int [limit]: max number of ledger entries to return, default is None
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `ledger structure <https://github.com/ccxt/ccxt/wiki/Manual#ledger-structure>`
"""
self.load_markets()
request = {
# 'asset_id': currency['numericId'],
# 'end_time': self.seconds(),
# 'after': 'string', # after cursor for pagination
# 'before': 'string', # before cursor for pagination
# 'page_size': limit,
}
currency = None
if code is not None:
currency = self.currency(code)
request['asset_id'] = currency['numericId']
if limit is not None:
request['page_size'] = limit
response = self.privateGetWalletTransactions(self.extend(request, params))
#
# {
# "meta":{"after":null,"before":null,"limit":10,"total_count":1},
# "result":[
# {
# "amount":"29.889184",
# "asset_id":5,
# "balance":"29.889184",
# "created_at":"2020-11-15T21:25:01Z",
# "meta_data":{
# "deposit_id":3884,
# "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
# },
# "transaction_type":"deposit",
# "user_id":22142,
# "uuid":"70bb5679da3c4637884e2dc63efaa846"
# }
# ],
# "success":true
# }
#
result = self.safe_value(response, 'result', [])
return self.parse_ledger(result, currency, since, limit)
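    # Usage sketch (authenticated; values illustrative):
    #
    #     ledger = exchange.fetch_ledger('USDT', limit=10)
    #     for entry in ledger:
    #         print(entry['datetime'], entry['type'], entry['direction'], entry['amount'])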
def parse_ledger_entry_type(self, type):
types = {
'pnl': 'pnl',
'deposit': 'transaction',
'withdrawal': 'transaction',
'commission': 'fee',
'conversion': 'trade',
# 'perpetual_futures_funding': 'perpetual_futures_funding',
# 'withdrawal_cancellation': 'withdrawal_cancellation',
'referral_bonus': 'referral',
'commission_rebate': 'rebate',
# 'promo_credit': 'promo_credit',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "amount":"29.889184",
# "asset_id":5,
# "balance":"29.889184",
# "created_at":"2020-11-15T21:25:01Z",
# "meta_data":{
# "deposit_id":3884,
# "transaction_id":"0x41a60174849828530abb5008e98fc63c9b598288743ec4ba9620bcce900a3b8d"
# },
# "transaction_type":"deposit",
# "user_id":22142,
# "uuid":"70bb5679da3c4637884e2dc63efaa846"
# }
#
id = self.safe_string(item, 'uuid')
direction = None
account = None
metaData = self.safe_value(item, 'meta_data', {})
referenceId = self.safe_string(metaData, 'transaction_id')
referenceAccount = None
type = self.safe_string(item, 'transaction_type')
if (type == 'deposit') or (type == 'commission_rebate') or (type == 'referral_bonus') or (type == 'pnl') or (type == 'withdrawal_cancellation') or (type == 'promo_credit'):
direction = 'in'
elif (type == 'withdrawal') or (type == 'commission') or (type == 'conversion') or (type == 'perpetual_futures_funding'):
direction = 'out'
type = self.parse_ledger_entry_type(type)
currencyId = self.safe_integer(item, 'asset_id')
currenciesByNumericId = self.safe_value(self.options, 'currenciesByNumericId')
currency = self.safe_value(currenciesByNumericId, currencyId, currency)
code = None if (currency is None) else currency['code']
amount = self.safe_string(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'created_at'))
after = self.safe_string(item, 'balance')
before = Precise.string_max('0', Precise.string_sub(after, amount))
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': self.parse_number(amount),
'before': self.parse_number(before),
'after': self.parse_number(after),
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_deposit_address(self, code: str, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict [params]: extra parameters specific to the delta api endpoint
:param str [params.network]: unified network code
:returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>`
"""
self.load_markets()
currency = self.currency(code)
request = {
'asset_symbol': currency['id'],
}
networkCode = self.safe_string_upper(params, 'network')
if networkCode is not None:
request['network'] = self.network_code_to_id(networkCode, code)
params = self.omit(params, 'network')
response = self.privateGetDepositsAddress(self.extend(request, params))
#
# {
# "success": True,
# "result": {
# "id": 1915615,
# "user_id": 27854758,
# "address": "TXYB4GdKsXKEWbeSNPsmGZu4ZVCkhVh1Zz",
# "memo": "",
# "status": "active",
# "updated_at": "2023-01-12T06:03:46.000Z",
# "created_at": "2023-01-12T06:03:46.000Z",
# "asset_symbol": "USDT",
# "network": "TRC20(TRON)",
# "custodian": "fireblocks"
# }
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_deposit_address(result, currency)
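    # Usage sketch (authenticated; the network code is illustrative):
    #
    #     address = exchange.fetch_deposit_address('USDT', {'network': 'TRC20'})
    #     print(address['address'], address['network'], address['tag'])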
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# "id": 1915615,
# "user_id": 27854758,
# "address": "TXYB4GdKsXKEWbeSNPsmGZu4ZVCkhVh1Zz",
# "memo": "",
# "status": "active",
# "updated_at": "2023-01-12T06:03:46.000Z",
# "created_at": "2023-01-12T06:03:46.000Z",
# "asset_symbol": "USDT",
# "network": "TRC20(TRON)",
# "custodian": "fireblocks"
# }
#
address = self.safe_string(depositAddress, 'address')
marketId = self.safe_string(depositAddress, 'asset_symbol')
networkId = self.safe_string(depositAddress, 'network')
self.check_address(address)
return {
'currency': self.safe_currency_code(marketId, currency),
'address': address,
'tag': self.safe_string(depositAddress, 'memo'),
'network': self.network_id_to_code(networkId),
'info': depositAddress,
}
def fetch_funding_rate(self, symbol: str, params={}):
"""
fetch the current funding rate
see https://docs.delta.exchange/#get-ticker-for-a-product-by-symbol
:param str symbol: unified market symbol
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `funding rate structure <https://github.com/ccxt/ccxt/wiki/Manual#funding-rate-structure>`
"""
self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadSymbol(self.id + ' fetchFundingRate() supports swap contracts only')
request = {
'symbol': market['id'],
}
response = self.publicGetTickersSymbol(self.extend(request, params))
#
# {
# "result": {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_funding_rate(result, market)
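    # Usage sketch (public endpoint; symbol illustrative). Note that the raw
    # "funding_rate" field is a percentage, so parse_funding_rate below divides
    # it by 100, e.g. "0.00602961" -> 0.0000602961:
    #
    #     rate = exchange.fetch_funding_rate('BTC/USDT:USDT')
    #     print(rate['fundingRate'], rate['markPrice'])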
def fetch_funding_rates(self, symbols: Optional[List[str]] = None, params={}):
"""
fetch the funding rate for multiple markets
see https://docs.delta.exchange/#get-tickers-for-products
:param str[]|None symbols: list of unified market symbols
:param dict [params]: extra parameters specific to the delta api endpoint
        :returns dict: a dictionary of `funding rate structures <https://github.com/ccxt/ccxt/wiki/Manual#funding-rates-structure>`, indexed by market symbols
"""
self.load_markets()
symbols = self.market_symbols(symbols)
request = {
'contract_types': 'perpetual_futures',
}
response = self.publicGetTickers(self.extend(request, params))
#
# {
# "result": [
# {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# },
# ],
# "success":true
# }
#
rates = self.safe_value(response, 'result', [])
result = self.parse_funding_rates(rates)
return self.filter_by_array(result, 'symbol', symbols)
def parse_funding_rate(self, contract, market=None):
#
# {
# "close": 30600.5,
# "contract_type": "perpetual_futures",
# "funding_rate": "0.00602961",
# "greeks": null,
# "high": 30803.0,
# "low": 30265.5,
# "mark_basis": "-0.45601594",
# "mark_price": "30600.10481568",
# "oi": "469.9190",
# "oi_change_usd_6h": "2226314.9900",
# "oi_contracts": "469919",
# "oi_value": "469.9190",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "14385640.6802",
# "open": 30458.5,
# "price_band": {
# "lower_limit": "29067.08312627",
# "upper_limit": "32126.77608693"
# },
# "product_id": 139,
# "quotes": {
# "ask_iv": null,
# "ask_size": "965",
# "best_ask": "30600.5",
# "best_bid": "30599.5",
# "bid_iv": null,
# "bid_size": "196",
# "impact_mid_price": null,
# "mark_iv": "-0.44931641"
# },
# "size": 1226303,
# "spot_price": "30612.85362773",
# "symbol": "BTCUSDT",
# "timestamp": 1689136597460456,
# "turnover": 37392218.45999999,
# "turnover_symbol": "USDT",
# "turnover_usd": 37392218.45999999,
# "volume": 1226.3029999999485
# }
#
timestamp = self.safe_integer_product(contract, 'timestamp', 0.001)
marketId = self.safe_string(contract, 'symbol')
fundingRateString = self.safe_string(contract, 'funding_rate')
fundingRate = Precise.string_div(fundingRateString, '100')
return {
'info': contract,
'symbol': self.safe_symbol(marketId, market),
'markPrice': self.safe_number(contract, 'mark_price'),
'indexPrice': self.safe_number(contract, 'spot_price'),
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fundingRate': self.parse_number(fundingRate),
'fundingTimestamp': None,
'fundingDatetime': None,
'nextFundingRate': None,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def add_margin(self, symbol: str, amount, params={}):
"""
add margin
see https://docs.delta.exchange/#add-remove-position-margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#add-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'add', params)
def reduce_margin(self, symbol: str, amount, params={}):
"""
remove margin from a position
see https://docs.delta.exchange/#add-remove-position-margin
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#reduce-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'reduce', params)
def modify_margin_helper(self, symbol: str, amount, type, params={}):
self.load_markets()
market = self.market(symbol)
amount = str(amount)
if type == 'reduce':
amount = Precise.string_mul(amount, '-1')
request = {
'product_id': market['numericId'],
'delta_margin': amount,
}
response = self.privatePostPositionsChangeMargin(self.extend(request, params))
#
# {
# "result": {
# "auto_topup": False,
# "bankruptcy_price": "24934.12",
# "commission": "0.01197072",
# "created_at": "2023-07-20T03:49:09.159401Z",
# "entry_price": "29926.8",
# "liquidation_price": "25083.754",
# "margin": "4.99268",
# "margin_mode": "isolated",
# "product_id": 84,
# "product_symbol": "BTCUSDT",
# "realized_cashflow": "0",
# "realized_funding": "0",
# "realized_pnl": "0",
# "size": 1,
# "updated_at": "2023-07-20T03:49:09.159401Z",
# "user_id": 30084879
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_margin_modification(result, market)
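    # Usage sketch (authenticated; amounts illustrative). reduce_margin()
    # negates the amount before hitting the same change-margin endpoint:
    #
    #     exchange.add_margin('BTC/USDT:USDT', 5)     # sends delta_margin = "5"
    #     exchange.reduce_margin('BTC/USDT:USDT', 2)  # sends delta_margin = "-2"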
def parse_margin_modification(self, data, market=None):
#
# {
# "auto_topup": False,
# "bankruptcy_price": "24934.12",
# "commission": "0.01197072",
# "created_at": "2023-07-20T03:49:09.159401Z",
# "entry_price": "29926.8",
# "liquidation_price": "25083.754",
# "margin": "4.99268",
# "margin_mode": "isolated",
# "product_id": 84,
# "product_symbol": "BTCUSDT",
# "realized_cashflow": "0",
# "realized_funding": "0",
# "realized_pnl": "0",
# "size": 1,
# "updated_at": "2023-07-20T03:49:09.159401Z",
# "user_id": 30084879
# }
#
marketId = self.safe_string(data, 'product_symbol')
market = self.safe_market(marketId, market)
return {
'info': data,
'type': None,
'amount': None,
'total': self.safe_number(data, 'margin'),
'code': None,
'symbol': market['symbol'],
'status': None,
}
def fetch_open_interest(self, symbol: str, params={}):
"""
retrieves the open interest of a derivative market
see https://docs.delta.exchange/#get-ticker-for-a-product-by-symbol
:param str symbol: unified market symbol
:param dict [params]: exchange specific parameters
        :returns dict: an `open interest structure <https://github.com/ccxt/ccxt/wiki/Manual#interest-history-structure>`
"""
self.load_markets()
market = self.market(symbol)
if not market['contract']:
raise BadRequest(self.id + ' fetchOpenInterest() supports contract markets only')
request = {
'symbol': market['id'],
}
response = self.publicGetTickersSymbol(self.extend(request, params))
#
# {
# "result": {
# "close": 894.0,
# "contract_type": "call_options",
# "greeks": {
# "delta": "0.67324861",
# "gamma": "0.00022178",
# "rho": "4.34638266",
# "spot": "30178.53195697",
# "theta": "-35.64972577",
# "vega": "16.34381277"
# },
# "high": 946.0,
# "low": 893.0,
# "mark_price": "1037.07582681",
# "mark_vol": "0.35899491",
# "oi": "0.0910",
# "oi_change_usd_6h": "-90.5500",
# "oi_contracts": "91",
# "oi_value": "0.0910",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "2746.3549",
# "open": 946.0,
# "price_band": {
# "lower_limit": "133.37794509",
# "upper_limit": "5663.66930164"
# },
# "product_id": 116171,
# "quotes": {
# "ask_iv": "0.36932389",
# "ask_size": "1321",
# "best_ask": "1054",
# "best_bid": "1020",
# "bid_iv": "0.34851914",
# "bid_size": "2202",
# "impact_mid_price": null,
# "mark_iv": "0.35896335"
# },
# "size": 152,
# "spot_price": "30178.53195697",
# "strike_price": "29500",
# "symbol": "C-BTC-29500-280723",
# "timestamp": 1689834695286094,
# "turnover": 4546.601744940001,
# "turnover_symbol": "USDT",
# "turnover_usd": 4546.601744940001,
# "volume": 0.15200000000000002
# },
# "success": True
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_open_interest(result, market)
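    # Usage sketch (public endpoint; contract markets only; symbol illustrative):
    #
    #     oi = exchange.fetch_open_interest('BTC/USDT:USDT')
    #     print(oi['openInterestAmount'], oi['openInterestValue'])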
def parse_open_interest(self, interest, market=None):
#
# {
# "close": 894.0,
# "contract_type": "call_options",
# "greeks": {
# "delta": "0.67324861",
# "gamma": "0.00022178",
# "rho": "4.34638266",
# "spot": "30178.53195697",
# "theta": "-35.64972577",
# "vega": "16.34381277"
# },
# "high": 946.0,
# "low": 893.0,
# "mark_price": "1037.07582681",
# "mark_vol": "0.35899491",
# "oi": "0.0910",
# "oi_change_usd_6h": "-90.5500",
# "oi_contracts": "91",
# "oi_value": "0.0910",
# "oi_value_symbol": "BTC",
# "oi_value_usd": "2746.3549",
# "open": 946.0,
# "price_band": {
# "lower_limit": "133.37794509",
# "upper_limit": "5663.66930164"
# },
# "product_id": 116171,
# "quotes": {
# "ask_iv": "0.36932389",
# "ask_size": "1321",
# "best_ask": "1054",
# "best_bid": "1020",
# "bid_iv": "0.34851914",
# "bid_size": "2202",
# "impact_mid_price": null,
# "mark_iv": "0.35896335"
# },
# "size": 152,
# "spot_price": "30178.53195697",
# "strike_price": "29500",
# "symbol": "C-BTC-29500-280723",
# "timestamp": 1689834695286094,
# "turnover": 4546.601744940001,
# "turnover_symbol": "USDT",
# "turnover_usd": 4546.601744940001,
# "volume": 0.15200000000000002
# }
#
timestamp = self.safe_integer_product(interest, 'timestamp', 0.001)
marketId = self.safe_string(interest, 'symbol')
return {
'symbol': self.safe_symbol(marketId, market),
'baseVolume': self.safe_number(interest, 'oi_value'),
'quoteVolume': self.safe_number(interest, 'oi_value_usd'),
'openInterestAmount': self.safe_number(interest, 'oi_contracts'),
'openInterestValue': self.safe_number(interest, 'oi'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': interest,
}
def fetch_leverage(self, symbol: str, params={}):
"""
fetch the set leverage for a market
see https://docs.delta.exchange/#get-order-leverage
:param str symbol: unified market symbol
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: a `leverage structure <https://github.com/ccxt/ccxt/wiki/Manual#leverage-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
}
#
# {
# "result": {
# "index_symbol": null,
# "leverage": "10",
# "margin_mode": "isolated",
# "order_margin": "0",
# "product_id": 84,
# "user_id": 30084879
# },
# "success": True
# }
#
return self.privateGetProductsProductIdOrdersLeverage(self.extend(request, params))
def set_leverage(self, leverage, symbol: Optional[str] = None, params={}):
"""
set the level of leverage for a market
see https://docs.delta.exchange/#change-order-leverage
:param float leverage: the rate of leverage
:param str symbol: unified market symbol
:param dict [params]: extra parameters specific to the delta api endpoint
:returns dict: response from the exchange
"""
self.check_required_symbol('setLeverage', symbol)
self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['numericId'],
'leverage': leverage,
}
#
# {
# "result": {
# "leverage": "20",
# "margin_mode": "isolated",
# "order_margin": "0",
# "product_id": 84
# },
# "success": True
# }
#
return self.privatePostProductsProductIdOrdersLeverage(self.extend(request, params))
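    # Usage sketch (authenticated; leverage value illustrative):
    #
    #     current = exchange.fetch_leverage('BTC/USDT:USDT')
    #     exchange.set_leverage(20, 'BTC/USDT:USDT')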
def fetch_settlement_history(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches historical settlement records
see https://docs.delta.exchange/#get-product-settlement-prices
:param str symbol: unified market symbol of the settlement history
:param int [since]: timestamp in ms
:param int [limit]: number of records
:param dict [params]: exchange specific params
        :returns dict[]: a list of settlement history objects
"""
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'states': 'expired',
}
if limit is not None:
request['page_size'] = limit
response = self.publicGetProducts(self.extend(request, params))
#
# {
# "result": [
# {
# "contract_value": "0.001",
# "basis_factor_max_limit": "10.95",
# "maker_commission_rate": "0.0003",
# "launch_time": "2023-07-19T04:30:03Z",
# "trading_status": "operational",
# "product_specs": {
# "backup_vol_expiry_time": 31536000,
# "max_deviation_from_external_vol": 0.75,
# "max_lower_deviation_from_external_vol": 0.75,
# "max_upper_deviation_from_external_vol": 0.5,
# "max_volatility": 3,
# "min_volatility": 0.1,
# "premium_commission_rate": 0.1,
# "settlement_index_price": "29993.536675710806",
# "vol_calculation_method": "orderbook",
# "vol_expiry_time": 31536000
# },
# "description": "BTC call option expiring on 19-7-2023",
# "settlement_price": "0",
# "disruption_reason": null,
# "settling_asset": {},
# "initial_margin": "1",
# "tick_size": "0.1",
# "maintenance_margin": "0.5",
# "id": 117542,
# "notional_type": "vanilla",
# "ui_config": {},
# "contract_unit_currency": "BTC",
# "symbol": "C-BTC-30900-190723",
# "insurance_fund_margin_contribution": "1",
# "price_band": "2",
# "annualized_funding": "10.95",
# "impact_size": 200,
# "contract_type": "call_options",
# "position_size_limit": 255633,
# "max_leverage_notional": "200000",
# "initial_margin_scaling_factor": "0.000002",
# "strike_price": "30900",
# "is_quanto": False,
# "settlement_time": "2023-07-19T12:00:00Z",
# "liquidation_penalty_factor": "0.5",
# "funding_method": "mark_price",
# "taker_commission_rate": "0.0003",
# "default_leverage": "100.000000000000000000",
# "state": "expired",
# "auction_start_time": null,
# "short_description": "BTC Call",
# "quoting_asset": {},
# "maintenance_margin_scaling_factor":"0.000002"
# }
# ],
# "success": True
# }
#
result = self.safe_value(response, 'result', [])
settlements = self.parse_settlements(result, market)
sorted = self.sort_by(settlements, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
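    # Usage sketch (public endpoint; the option symbol is illustrative only):
    #
    #     settlements = exchange.fetch_settlement_history('BTC/USDT:USDT-230719-30900-C')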
def parse_settlement(self, settlement, market):
#
# {
# "contract_value": "0.001",
# "basis_factor_max_limit": "10.95",
# "maker_commission_rate": "0.0003",
# "launch_time": "2023-07-19T04:30:03Z",
# "trading_status": "operational",
# "product_specs": {
# "backup_vol_expiry_time": 31536000,
# "max_deviation_from_external_vol": 0.75,
# "max_lower_deviation_from_external_vol": 0.75,
# "max_upper_deviation_from_external_vol": 0.5,
# "max_volatility": 3,
# "min_volatility": 0.1,
# "premium_commission_rate": 0.1,
# "settlement_index_price": "29993.536675710806",
# "vol_calculation_method": "orderbook",
# "vol_expiry_time": 31536000
# },
# "description": "BTC call option expiring on 19-7-2023",
# "settlement_price": "0",
# "disruption_reason": null,
# "settling_asset": {},
# "initial_margin": "1",
# "tick_size": "0.1",
# "maintenance_margin": "0.5",
# "id": 117542,
# "notional_type": "vanilla",
# "ui_config": {},
# "contract_unit_currency": "BTC",
# "symbol": "C-BTC-30900-190723",
# "insurance_fund_margin_contribution": "1",
# "price_band": "2",
# "annualized_funding": "10.95",
# "impact_size": 200,
# "contract_type": "call_options",
# "position_size_limit": 255633,
# "max_leverage_notional": "200000",
# "initial_margin_scaling_factor": "0.000002",
# "strike_price": "30900",
# "is_quanto": False,
# "settlement_time": "2023-07-19T12:00:00Z",
# "liquidation_penalty_factor": "0.5",
# "funding_method": "mark_price",
# "taker_commission_rate": "0.0003",
# "default_leverage": "100.000000000000000000",
# "state": "expired",
# "auction_start_time": null,
# "short_description": "BTC Call",
# "quoting_asset": {},
# "maintenance_margin_scaling_factor":"0.000002"
# }
#
datetime = self.safe_string(settlement, 'settlement_time')
marketId = self.safe_string(settlement, 'symbol')
return {
'info': settlement,
'symbol': self.safe_symbol(marketId, market),
'price': self.safe_number(settlement, 'settlement_price'),
'timestamp': self.parse8601(datetime),
'datetime': datetime,
}
def parse_settlements(self, settlements, market):
result = []
for i in range(0, len(settlements)):
result.append(self.parse_settlement(settlements[i], market))
return result
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
requestPath = '/' + self.version + '/' + self.implode_params(path, params)
url = self.urls['api'][api] + requestPath
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
timestamp = str(self.seconds())
headers = {
'api-key': self.apiKey,
'timestamp': timestamp,
}
auth = method + timestamp + requestPath
if (method == 'GET') or (method == 'DELETE'):
if query:
queryString = '?' + self.urlencode(query)
auth += queryString
url += queryString
else:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
headers['signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
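    # Signature sketch (standalone, illustrative only): private requests are
    # signed with HMAC-SHA256 over method + timestamp + path, plus the query
    # string for GET/DELETE or the JSON body otherwise; the key and path below
    # are hypothetical.
    #
    #     import hashlib, hmac, time
    #     secret = b'YOUR_SECRET'
    #     timestamp = str(int(time.time()))
    #     auth = 'GET' + timestamp + '/v2/orders' + '?product_ids=139'
    #     signature = hmac.new(secret, auth.encode(), hashlib.sha256).hexdigest()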
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None
#
# {"error":{"code":"insufficient_margin","context":{"available_balance":"0.000000000000000000","required_additional_balance":"1.618626000000000000000000000"}},"success":false}
#
error = self.safe_value(response, 'error', {})
errorCode = self.safe_string(error, 'code')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
return None
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vcenter.topology.
#---------------------------------------------------------------------------
"""
The ``com.vmware.vcenter.topology_client`` module provides classes to retrieve
all vCenter and Platform Services Controller nodes and replication status in
the topology.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Nodes(VapiInterface):
"""
The ``Nodes`` interface provides methods to retrieve vCenter and Platform
Services Controller nodes information in the topology. This class was added
in vSphere API 6.7.2.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.topology.nodes'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _NodesStub)
self._VAPI_OPERATION_IDS = {}
class ApplianceType(Enum):
"""
The ``Nodes.ApplianceType`` class defines values for valid appliance types
for the vCenter and Platform Services Controller node. See
:class:`Nodes.Info`. This enumeration was added in vSphere API 6.7.2.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
VCSA_EMBEDDED = None
"""
vCenter Server Appliance with an embedded Platform Services Controller.
This class attribute was added in vSphere API 6.7.2.
"""
VCSA_EXTERNAL = None
"""
vCenter Server Appliance with an external Platform Services Controller.
This class attribute was added in vSphere API 6.7.2.
"""
PSC_EXTERNAL = None
"""
An external Platform Services Controller. This class attribute was added in
vSphere API 6.7.2.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`ApplianceType` instance.
"""
Enum.__init__(string)
ApplianceType._set_values([
ApplianceType('VCSA_EMBEDDED'),
ApplianceType('VCSA_EXTERNAL'),
ApplianceType('PSC_EXTERNAL'),
])
ApplianceType._set_binding_type(type.EnumType(
'com.vmware.vcenter.topology.nodes.appliance_type',
ApplianceType))
class Info(VapiStruct):
"""
The ``Nodes.Info`` class contains vCenter or Platform Services Controller
node details. This class was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'type',
{
'VCSA_EMBEDDED' : [('replication_partners', True)],
'PSC_EXTERNAL' : [('replication_partners', True)],
'VCSA_EXTERNAL' : [('client_affinity', True)],
}
),
]
def __init__(self,
domain=None,
type=None,
replication_partners=None,
client_affinity=None,
):
"""
:type domain: :class:`str`
:param domain: Domain name of the node. This attribute was added in vSphere API
6.7.2.
:type type: :class:`Nodes.ApplianceType`
:param type: Appliance type of the node. This attribute was added in vSphere API
6.7.2.
:type replication_partners: :class:`list` of :class:`str`
:param replication_partners: List of replication partners' node identifiers. Identifiers can be
either IP address or DNS resolvable name of the partner node. This
attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will contain
identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``.
This attribute is optional and it is only relevant when the value
of ``type`` is one of :attr:`Nodes.ApplianceType.VCSA_EMBEDDED` or
:attr:`Nodes.ApplianceType.PSC_EXTERNAL`.
:type client_affinity: :class:`str`
:param client_affinity: Identifier of the affinitized Platform Services Controller node.
Identifier can be either IP address or DNS resolvable name of the
affinitized node. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.vcenter.VCenter.name``.
This attribute is optional and it is only relevant when the value
of ``type`` is :attr:`Nodes.ApplianceType.VCSA_EXTERNAL`.
"""
self.domain = domain
self.type = type
self.replication_partners = replication_partners
self.client_affinity = client_affinity
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.topology.nodes.info', {
'domain': type.StringType(),
'type': type.ReferenceType(__name__, 'Nodes.ApplianceType'),
'replication_partners': type.OptionalType(type.ListType(type.IdType())),
'client_affinity': type.OptionalType(type.IdType()),
},
Info,
False,
None))
class Summary(VapiStruct):
"""
The ``Nodes.Summary`` class contains commonly used information of vCenter
or Platform Services Controller node. This class was added in vSphere API
6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'type',
{
'VCSA_EMBEDDED' : [('replication_partners', True)],
'PSC_EXTERNAL' : [('replication_partners', True)],
'VCSA_EXTERNAL' : [('client_affinity', True)],
}
),
]
def __init__(self,
node=None,
type=None,
replication_partners=None,
client_affinity=None,
):
"""
:type node: :class:`str`
:param node: Identifier for the vCenter or Platform Services Controller node.
Identifier can be either IP address or DNS resolvable name of the
node. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.vcenter.VCenter.name``.
:type type: :class:`Nodes.ApplianceType`
:param type: Appliance type of the node. This attribute was added in vSphere API
6.7.2.
:type replication_partners: :class:`list` of :class:`str`
:param replication_partners: List of replication partners' node identifiers. Identifiers can be
either IP address or DNS resolvable name of the partner node. This
attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will contain
identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``.
This attribute is optional and it is only relevant when the value
of ``type`` is one of :attr:`Nodes.ApplianceType.VCSA_EMBEDDED` or
:attr:`Nodes.ApplianceType.PSC_EXTERNAL`.
:type client_affinity: :class:`str`
:param client_affinity: Identifier of the affinitized Platform Services Controller node.
Identifier can be either IP address or DNS resolvable name of the
affinitized node. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.vcenter.VCenter.name``.
This attribute is optional and it is only relevant when the value
of ``type`` is :attr:`Nodes.ApplianceType.VCSA_EXTERNAL`.
"""
self.node = node
self.type = type
self.replication_partners = replication_partners
self.client_affinity = client_affinity
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.vcenter.topology.nodes.summary', {
'node': type.IdType(resource_types='com.vmware.vcenter.VCenter.name'),
'type': type.ReferenceType(__name__, 'Nodes.ApplianceType'),
'replication_partners': type.OptionalType(type.ListType(type.IdType())),
'client_affinity': type.OptionalType(type.IdType()),
},
Summary,
False,
None))
class FilterSpec(VapiStruct):
"""
The ``Nodes.FilterSpec`` class contains the attribute used to filter the
results when listing vCenter and Platform Services Controller nodes (see
:func:`Nodes.list`). This class was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
types=None,
):
"""
:type types: :class:`set` of :class:`Nodes.ApplianceType` or ``None``
:param types: Types of the appliance that a vCenter and Platform Services
Controller node must be to match the filter (see
:class:`Nodes.ApplianceType`). This attribute was added in vSphere
API 6.7.2.
If None or empty, nodes of any ApplianceType match the filter.
"""
self.types = types
VapiStruct.__init__(self)
FilterSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.topology.nodes.filter_spec', {
'types': type.OptionalType(type.SetType(type.ReferenceType(__name__, 'Nodes.ApplianceType'))),
},
FilterSpec,
False,
None))
def list(self,
filter=None,
):
"""
Returns information about all vCenter and Platform Services Controller
nodes matching the :class:`Nodes.FilterSpec`. This method was added in
vSphere API 6.7.2.
:type filter: :class:`Nodes.FilterSpec` or ``None``
:param filter: Specification of matching vCenter and Platform Services Controller
nodes for which information should be returned.
If None, the behavior is equivalent to a :class:`Nodes.FilterSpec`
with all attributes None which means all nodes match the filter.
:rtype: :class:`list` of :class:`Nodes.Summary`
:return: commonly used information for all vCenter and Platform Services
Controller nodes matching the :class:`Nodes.FilterSpec`.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if the :attr:`Nodes.FilterSpec.types` attribute contains a value
that is not supported.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* Method execution requires ``System.Read``.
"""
return self._invoke('list',
{
'filter': filter,
})
def get(self,
node,
):
"""
Retrieve details for a given identifier of the vCenter or Platform
Services Controller node. This method was added in vSphere API 6.7.2.
:type node: :class:`str`
:param node: Identifier of the vCenter or Platform Services Controller node.
Identifier can be either IP address or DNS resolvable name of the
node.
The parameter must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``.
:rtype: :class:`Nodes.Info`
:return: vCenter or Platform Services Controller node details with
replication partners and client affinity information as applicable.
See :class:`Nodes.Info`.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if a node doesn't exist for given node identifier.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* Method execution requires ``System.Read``.
"""
return self._invoke('get',
{
'node': node,
})
class ReplicationStatus(VapiInterface):
"""
The ``ReplicationStatus`` interface provides methods to retrieve
replication status information of vCenter and Platform Services Controller
nodes of type VCSA_EMBEDDED/PSC_EXTERNAL (see :attr:`Nodes.Info.type`).
This class was added in vSphere API 6.7.2.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.topology.replication_status'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ReplicationStatusStub)
self._VAPI_OPERATION_IDS = {}
class Summary(VapiStruct):
"""
The ``ReplicationStatus.Summary`` class contains replication information of
partner vCenter or Platform Services Controller node of type
VCSA_EMBEDDED/PSC_EXTERNAL (see :attr:`Nodes.Info.type`). This class was
added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
node=None,
replication_partner=None,
partner_available=None,
status_available=None,
replicating=None,
change_lag=None,
):
"""
:type node: :class:`str`
:param node: Identifier for the vCenter or Platform Services Controller node.
Identifier can be either IP address or DNS resolvable name of the
node. This attribute was added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.vcenter.VCenter.name``.
:type replication_partner: :class:`str`
:param replication_partner: Identifier for the vCenter or Platform Services Controller
replication partner. Identifier can be either IP address or DNS
resolvable name of the replication partner. This attribute was
added in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.vcenter.VCenter.name``.
:type partner_available: :class:`bool`
:param partner_available: Indicates if the VMware Directory Service on partner is reachable
or not. This attribute was added in vSphere API 6.7.2.
:type status_available: :class:`bool`
:param status_available: Indicates if the replication status for the node with respect to
replication partner can be retrieved or not. This attribute was
added in vSphere API 6.7.2.
:type replicating: :class:`bool` or ``None``
:param replicating: Indicates if node is processing replication changes from the
replication partner. This attribute was added in vSphere API 6.7.2.
This attribute will be None if the partner host or replication
status is not available, i.e, if
:attr:`ReplicationStatus.Summary.partner_available` or
:attr:`ReplicationStatus.Summary.status_available` is false.
:type change_lag: :class:`long` or ``None``
:param change_lag: Number of replication changes node is behind the replication
partner. This attribute was added in vSphere API 6.7.2.
This attribute will be None if the partner host or replication
status is not available, i.e, if
:attr:`ReplicationStatus.Summary.partner_available` or
:attr:`ReplicationStatus.Summary.status_available` is false.
"""
self.node = node
self.replication_partner = replication_partner
self.partner_available = partner_available
self.status_available = status_available
self.replicating = replicating
self.change_lag = change_lag
VapiStruct.__init__(self)
Summary._set_binding_type(type.StructType(
'com.vmware.vcenter.topology.replication_status.summary', {
'node': type.IdType(resource_types='com.vmware.vcenter.VCenter.name'),
'replication_partner': type.IdType(resource_types='com.vmware.vcenter.VCenter.name'),
'partner_available': type.BooleanType(),
'status_available': type.BooleanType(),
'replicating': type.OptionalType(type.BooleanType()),
'change_lag': type.OptionalType(type.IntegerType()),
},
Summary,
False,
None))
class FilterSpec(VapiStruct):
"""
The ``ReplicationStatus.FilterSpec`` class contains the attribute used to
filter the results when listing replication status for the vCenter and
Platform Services Controller nodes (see :func:`ReplicationStatus.list`) of
type VCSA_EMBEDDED/PSC_EXTERNAL (see :attr:`Nodes.Info.type`). This class
was added in vSphere API 6.7.2.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
nodes=None,
):
"""
:type nodes: :class:`set` of :class:`str` or ``None``
:param nodes: Identifier that a vCenter and Platform Services Controller node
must have to match the filter. (see
:attr:`ReplicationStatus.Summary.node`). This attribute was added
in vSphere API 6.7.2.
When clients pass a value of this class as a parameter, the
attribute must contain identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``. When methods return a value of
this class as a return value, the attribute will contain
identifiers for the resource type:
``com.vmware.vcenter.VCenter.name``.
If None or empty, all vCenter and Platform Services Controller
nodes of type VCSA_EMBEDDED/PSC_EXTERNAL match the filter.
"""
self.nodes = nodes
VapiStruct.__init__(self)
FilterSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.topology.replication_status.filter_spec', {
'nodes': type.OptionalType(type.SetType(type.IdType())),
},
FilterSpec,
False,
None))
def list(self,
filter=None,
):
"""
Returns the replication information of vCenter and Platform Services
Controller nodes of type VCSA_EMBEDDED/PSC_EXTERNAL (see
:attr:`Nodes.Info.type`) matching the
:class:`ReplicationStatus.FilterSpec`. This method was added in vSphere
API 6.7.2.
:type filter: :class:`ReplicationStatus.FilterSpec` or ``None``
:param filter: Specification of matching vCenter and Platform Services Controller
nodes for which information should be returned.
If None, the behavior is equivalent to a
:class:`ReplicationStatus.FilterSpec` with all attributes None
which means all vCenter and Platform Services Controller nodes of
type VCSA_EMBEDDED/PSC_EXTERNAL match the filter.
:rtype: :class:`list` of :class:`ReplicationStatus.Summary`
:return: Commonly used replication information about vCenter and Platform
Services Controller nodes matching the
:class:`ReplicationStatus.FilterSpec`.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
if the user can not be authenticated.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user doesn't have the required privileges.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if the :attr:`ReplicationStatus.FilterSpec.nodes` attribute
contains an invalid value.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have all of the privileges described as follows:
* Method execution requires ``System.Read``.
"""
return self._invoke('list',
{
'filter': filter,
})
class _NodesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {
'filter': type.OptionalType(type.ReferenceType(__name__, 'Nodes.FilterSpec')),
})
list_error_dict = {
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/topology/nodes',
path_variables={
},
query_parameters={
}
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'node': type.IdType(resource_types='com.vmware.vcenter.VCenter.name'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/topology/nodes/{node}',
path_variables={
'node': 'node',
},
query_parameters={
}
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ListType(type.ReferenceType(__name__, 'Nodes.Summary')),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Nodes.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.topology.nodes',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _ReplicationStatusStub(ApiInterfaceStub):
def __init__(self, config):
# properties for list operation
list_input_type = type.StructType('operation-input', {
'filter': type.OptionalType(type.ReferenceType(__name__, 'ReplicationStatus.FilterSpec')),
})
list_error_dict = {
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/topology/replication-status',
path_variables={
},
query_parameters={
}
)
operations = {
'list': {
'input_type': list_input_type,
'output_type': type.ListType(type.ReferenceType(__name__, 'ReplicationStatus.Summary')),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'list': list_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.topology.replication_status',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class StubFactory(StubFactoryBase):
_attrs = {
'Nodes': Nodes,
'ReplicationStatus': ReplicationStatus,
}
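# A minimal usage sketch (not part of the generated bindings; the client helper
# and attribute path below are assumptions -- adapt to your own stub setup):
#
#   from vmware.vapi.vsphere.client import create_vsphere_client
#   client = create_vsphere_client(server='vc.example.com',
#                                  username='administrator@vsphere.local',
#                                  password='***')
#   for summary in client.vcenter.topology.Nodes.list():
#       print(summary.node, summary.type)
#   for status in client.vcenter.topology.ReplicationStatus.list():
#       print(status.node, status.replication_partner, status.replicating)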
===== pykeops/examples/pytorch/test_float16_2.py | MrHuff/keops | MIT =====
# Test for half precision support in KeOps.
# We perform a Gaussian kernel min-reduction in half and single precision
# and compare timings and accuracy.
import GPUtil
from threading import Thread
import time
class Monitor(Thread):
def __init__(self, delay):
super(Monitor, self).__init__()
self.stopped = False
self.delay = delay # Time between calls to GPUtil
self.start()
def run(self):
while not self.stopped:
GPUtil.showUtilization()
time.sleep(self.delay)
def stop(self):
self.stopped = True
backend = "torch" # "torch" or "numpy", but only "torch" works for now
device_id = 0
if backend == "torch":
import torch
from pykeops.torch import LazyTensor
else:
import numpy as np
from pykeops.numpy import LazyTensor
import timeit
def K(x,y,b,p,**kwargs):
x_i = LazyTensor( x[:,None,:] )
y_j = LazyTensor( y[None,:,:] )
b_j = LazyTensor( b[None,:,:] )
p = LazyTensor( p )
D_ij = ((x_i - y_j)**2).sum(axis=2)
K_ij = ((- p*D_ij).exp() * b_j)
K_ij = K_ij.min(axis=1,call=False,**kwargs)
return K_ij
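# Shape note (added comment, not in the original test): x_i is a virtual
# (M, 1, D) tensor and y_j is (1, N, D), so D_ij broadcasts to the (M, N)
# matrix of squared distances. The min reduction over axis=1 yields one value
# per row of x; call=False returns a deferred reduction object, so the actual
# GPU computation can be triggered and timed separately below.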
M, N, D = 1000000, 1000000, 3
if backend == "torch":
torch.manual_seed(1)
x = torch.randn(M, D, dtype=torch.float64).cuda(device_id)
y = torch.randn(N, D, dtype=torch.float64).cuda(device_id)
b = torch.randn(N, 1, dtype=torch.float64).cuda(device_id)
p = torch.randn(1, dtype=torch.float64).cuda(device_id)
xf = x.float()
yf = y.float()
bf = b.float()
pf = p.float()
xh = x.half()
yh = y.half()
bh = b.half()
ph = p.half()
else:
x = np.random.randn(M, D)
y = np.random.randn(N, D)
b = np.random.randn(N, 1)
xf = x.astype(np.float32)
yf = y.astype(np.float32)
bf = b.astype(np.float32)
xh = x.astype(np.float16)
yh = y.astype(np.float16)
bh = b.astype(np.float16)
Ntest_half, Ntest_float = 1, 1
# monitor = Monitor(1e-6)
# computation using float32
K_keops32 = K(xf,yf,bf,pf)
res_float = K_keops32()
res_float = res_float[:100,:]  # keep only the first 100 rows, matching the float16 run below
print("comp float, time : ",timeit.timeit("K_keops32()",number=Ntest_float,setup="from __main__ import K_keops32"))
# monitor.stop()
#print(res_float)
# computation using float16
# monitor = Monitor(1e-6)
K_keops16 = K(xh[:100,:],yh,bh,ph,sum_scheme="direct_sum")
K_ij = K_keops16()
res_half = K_ij
print("comp half, time : ",timeit.timeit("K_keops16()",number=Ntest_half,setup="from __main__ import K_keops16"))
# monitor.stop()
#print(res_half)
if backend == "torch":
print("relative mean error half vs float : ",((res_half.float()-res_float).abs().mean()/res_float.abs().mean()).item())
print("relative max error half vs float : ",((res_half.float()-res_float).abs().max()/res_float.abs().mean()).item())
===== volume.py | openwireless/raspi_samples | no license =====
# volume.py
import sys
import time
import mcp3008
CH = 0
try:
while True:
data = mcp3008.readAdcValue(CH)
print("adc: {:4} ".format(data))
mV = mcp3008.convertVoltage(data)
print("mV: {:4}".format(mV))
time.sleep(0.2)
except KeyboardInterrupt:
sys.exit(0)
===== 07-ObjectOrientedProgramming/communication.py | jan-podolanko/pp1 | no license =====
from message import Message
import sms
import email
x = sms.Sms()
y = email.Email()
x.send()
y.send()
===== LogTranslation/main.py | AngusGLChen/LearningTransfer | no license =====
'''
Created on Jul 24, 2015
@author: Angus
'''
import os
import LogTranslation.UserMode
import LogTranslation.CollaborationMode
import LogTranslation.SubmissionMode
import LogTranslation.ObservationMode
import LogTranslation.SurveyMode
course_path = "/Volumes/NETAC/EdX/Clear-out/FP101x/"
'''
# User mode
if os.path.isdir(course_path):
LogTranslation.UserMode.user_mode(course_path)
# Collaboration mode
if os.path.isdir(course_path):
LogTranslation.CollaborationMode.collaboration_mode(course_path)
# Submission mode
if os.path.isdir(course_path):
LogTranslation.SubmissionMode.submission_mode(course_path)
# Observation mode
if os.path.isdir(course_path):
LogTranslation.ObservationMode.observation_mode(course_path)
'''
# Survey mode
if os.path.isdir(course_path):
LogTranslation.SurveyMode.survey_mode(course_path)
print "All finished."
===== py/py_sparkling/ml/models.py | rkamath3/sparkling-water | Apache-2.0 =====
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaModel
from pysparkling.initializer import *
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import DoubleType
class H2OGBMModel(JavaModel, JavaMLWritable, JavaMLReadable):
pass
class H2ODeepLearningModel(JavaModel, JavaMLWritable, JavaMLReadable):
pass
class H2OAutoMLModel(JavaModel, JavaMLWritable, JavaMLReadable):
pass
class H2OXGBoostModel(JavaModel, JavaMLWritable, JavaMLReadable):
pass
class H2OMOJOModel(JavaModel, JavaMLWritable, JavaMLReadable):
@staticmethod
def create_from_mojo(path_to_mojo):
spark_session = SparkSession.builder.getOrCreate()
# We need to make sure that Sparkling Water classes are available on the Spark driver and executor paths
Initializer.load_sparkling_jar(spark_session._sc)
return H2OMOJOModel(spark_session._jvm.org.apache.spark.ml.h2o.models.JavaH2OMOJOModelHelper.createFromMojo(path_to_mojo))
def predict(self, dataframe):
return self.transform(dataframe)
def getConvertUnknownCategoricalLevelsToNa(self):
return self._java_obj.getConvertUnknownCategoricalLevelsToNa()
def setConvertUnknownCategoricalLevelsToNa(self, value):
self._java_obj.setConvertUnknownCategoricalLevelsToNa(value)
return self
class H2OMOJOPipelineModel(JavaModel, JavaMLWritable, JavaMLReadable):
@staticmethod
def create_from_mojo(path_to_mojo):
spark_session = SparkSession.builder.getOrCreate()
# We need to make sure that Sparkling Water classes are available on the Spark driver and executor paths
Initializer.load_sparkling_jar(spark_session._sc)
return H2OMOJOPipelineModel(spark_session._jvm.org.apache.spark.ml.h2o.models.JavaH2OMOJOPipelineModelHelper.createFromMojo(path_to_mojo))
def predict(self, dataframe):
return self.transform(dataframe)
def get_input_names(self):
return list(self._java_obj.getInputNames())
def get_input_types(self):
enum_list = list(self._java_obj.getInputTypes())
return [enum.name() for enum in enum_list]
def get_output_names(self):
return list(self._java_obj.getOutputNames())
def get_output_types(self):
enum_list = list(self._java_obj.getOutputTypes())
return [enum.name() for enum in enum_list]
def get_named_mojo_output_columns(self):
return self._java_obj.getNamedMojoOutputColumns()
def set_named_mojo_output_columns(self, value):
self._java_obj.setNamedMojoOutputColumns(value)
return self
def select_prediction_udf(self, column):
if column not in self.get_output_names():
raise ValueError("Column '" + column + "' is not defined as the output column in MOJO Pipeline.")
if self.get_named_mojo_output_columns():
func = udf(lambda d: d, DoubleType())
return func("prediction." + column).alias(column)
else:
idx = self.get_output_names().index(column)
func = udf(lambda arr: arr[idx], DoubleType())
return func("prediction.preds").alias(column)
===== simulation/code/data/interpret/rsi.py | artainmo/trading_bot | no license =====
import utils.global_variables as g
from utils.classes import *
from utils.file_handler import *
from utils.time_handler import *
def set_rsi_signal(coin, sell, buy, price=None):
coin.opp["rsi"]["sell_signal_amplifier"] = sell
coin.opp["rsi"]["buy_signal_amplifier"] = buy
coin.opp["rsi"]["proposed_buy_price"] = price
return coin
def rsi_sell_signal(coin, last_line):
if float(last_line["rsi"]) > g.SELL_RSI["level1"]["value"]:
if float(last_line["rsi"]) > g.SELL_RSI["level2"]["value"]:
coin = set_rsi_signal(coin, g.SELL_RSI["level2"]["amplifier"], None)
else:
coin = set_rsi_signal(coin, g.SELL_RSI["level1"]["amplifier"], None)
return coin
def rsi_buy_signal(coin, last_line):
if float(last_line["rsi"]) < g.BUY_RSI["level1"]["value"]:
if float(last_line["rsi"]) < g.BUY_RSI["level2"]["value"]:
coin = set_rsi_signal(coin, 1/g.SELL_RSI["level2"]["amplifier"], g.BUY_RSI["level2"]["amplifier"])
else:
coin = set_rsi_signal(coin, 1/g.SELL_RSI["level1"]["amplifier"], g.BUY_RSI["level1"]["amplifier"])
return coin
def get_rsi_signal(coin, account):
last_line = coin.last_line(g.BUY_RSI["type"])
coin = set_rsi_signal(coin, 1, None)
if account.euros["balance"] > 10:
coin = rsi_buy_signal(coin, last_line)
if account.coins[coin.market_name]["balance"]:
coin = rsi_sell_signal(coin, last_line)
if coin.opp["rsi"]["buy_signal_amplifier"] != None and g.BUY_RSI["buy"] == "trailing":
last_line = coin.last_line("_min")
coin.opp["rsi"]["proposed_buy_price"] = float(last_line["low"]) + (float(last_line["low"]) * (g.BUY_RSI["trailing"] / float(coin.opp["rsi"]["buy_signal_amplifier"])))
return coin
===== Assignment3/trainModel.py | SumanSudhir/Computer_Vision | no license =====
import argparse
import os
from shutil import copy2
import torchfile
import Model
from Linear import LinearLayer
from ReLu import ReLu
from Convolution import ConvolutionLayer,FlattenLayer
from Criterion import Criterion
import torch
import final
# suffix='.py'
parser = argparse.ArgumentParser()
parser.add_argument("-modelName", "--model_name")
parser.add_argument("-data", "--data_path")
parser.add_argument("-target", "--target_path")
args = parser.parse_args()
model_name = args.model_name
data_path = args.data_path
labels_path = args.target_path
# dir_name = modelName.rsplit(suffix,1)[0]
try:
os.mkdir(model_name)
print("Directory created")
except:
print("Directory already exists")
model_one = torch.load(model_name)
Train_Data = torchfile.load(data_path)
Train_Label = torchfile.load(labels_path)
# model_one=Model.Model()
# model_one.addLayer(ConvolutionLayer( (1,108,108) , 12 , 15, 6))
# model_one.addLayer(ReLu())
# model_one.addLayer(ConvolutionLayer( (15,17,17) , 5 , 9, 3)) #9,5,5
# model_one.addLayer(FlattenLayer())
# model_one.addLayer(ReLu())
# model_one.addLayer(LinearLayer(225,90))
# model_one.addLayer(ReLu())
# model_one.addLayer(LinearLayer(90,18))
# model_one.addLayer(ReLu())
# model_one.addLayer(LinearLayer(18,6))
model_one=final.train(model_one,Train_Data,Train_Label)
torch.save(model_one,'./'+model_name+'/'+model_name)
===== venv/lib/python2.7/site-packages/mbed_host_tests/host_tests_plugins/module_reset_stlink.py | ryankurte/mbed-node | no license =====
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
from host_test_plugins import HostTestPluginBase
class HostTestPluginResetMethod_Stlink(HostTestPluginBase):
# Plugin interface
name = 'HostTestPluginResetMethod_Stlink'
type = 'ResetMethod'
capabilities = ['stlink']
required_parameters = []
stable = False
def __init__(self):
""" ctor
"""
HostTestPluginBase.__init__(self)
def is_os_supported(self, os_name=None):
"""! In this implementation this plugin only is supporeted under Windows machines
"""
# If no OS name provided use host OS name
if not os_name:
os_name = self.mbed_os_support()
# This plugin only works on Windows
if os_name and os_name.startswith('Windows'):
return True
return False
def setup(self, *args, **kwargs):
"""! Configure plugin, this function should be called before plugin execute() method is used.
"""
# Note: you need to have ST-LINK_CLI.exe on your system path!
self.ST_LINK_CLI = 'ST-LINK_CLI.exe'
return True
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
@details Each capability e.g. may directly just call some command line program or execute building pythonic function
@return Capability call return value
"""
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
if capability == 'stlink':
# Example:
# ST-LINK_CLI.exe -Rst -Run
cmd = [self.ST_LINK_CLI,
'-Rst', '-Run']
result = self.run_command(cmd)
return result
def load_plugin():
""" Returns plugin available in this module
"""
return HostTestPluginResetMethod_Stlink()
===== clusterpost/bigchaindb/tests/web/test_block_tendermint.py | Lonero-Team/Decentralized-Internet | Apache-2.0 / CC-BY-4.0 / MIT =====
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import pytest
from bigchaindb.models import Transaction
from bigchaindb.lib import Block
BLOCKS_ENDPOINT = '/api/v1/blocks/'
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_endpoint(b, client, alice):
import copy
tx = Transaction.create([alice.public_key], [([alice.public_key], 1)], asset={'cycle': 'hero'})
tx = tx.sign([alice.private_key])
# with store_bulk_transactions we use `insert_many` where PyMongo
# automatically adds an `_id` field to the tx, therefore we need the
# deepcopy, for more info see:
# https://api.mongodb.com/python/current/faq.html#writes-and-ids
tx_dict = copy.deepcopy(tx.to_dict())
b.store_bulk_transactions([tx])
block = Block(app_hash='random_utxo',
height=31,
transactions=[tx.id])
b.store_block(block._asdict())
res = client.get(BLOCKS_ENDPOINT + str(block.height))
expected_response = {'height': block.height, 'transactions': [tx_dict]}
assert res.json == expected_response
assert res.status_code == 200
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_returns_404_if_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '123')
assert res.status_code == 404
res = client.get(BLOCKS_ENDPOINT + '123/')
assert res.status_code == 404
@pytest.mark.bdb
def test_get_block_containing_transaction(b, client, alice):
tx = Transaction.create([alice.public_key], [([alice.public_key], 1)], asset={'cycle': 'hero'})
tx = tx.sign([alice.private_key])
b.store_bulk_transactions([tx])
block = Block(app_hash='random_utxo',
height=13,
transactions=[tx.id])
b.store_block(block._asdict())
res = client.get('{}?transaction_id={}'.format(BLOCKS_ENDPOINT, tx.id))
expected_response = [block.height]
assert res.json == expected_response
assert res.status_code == 200
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_empty_list_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=')
assert res.status_code == 200
assert len(res.json) == 0
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123')
assert res.status_code == 200
assert len(res.json) == 0
===== data/coco.py | ZTao-z/multiflow-resnet-ssd | MIT =====
"""VOC Dataset Classes
Original author: Francisco Massa
https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
Updated by: Ellis Brown, Max deGroot
"""
import os
import pickle
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import cv2
import numpy as np
import json
import uuid
from utils.pycocotools.coco import COCO
from utils.pycocotools.cocoeval import COCOeval
from utils.pycocotools import mask as COCOmask
class COCODetection(data.Dataset):
"""VOC Detection Dataset Object
input is image, target is annotation
Arguments:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root, image_sets, preproc=None, target_transform=None,
dataset_name='COCO'):
self.root = root
self.cache_path = os.path.join(self.root, 'cache')
self.image_set = image_sets
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self.ids = list()
self.annotations = list()
self._view_map = {
'minival2014' : 'val2014', # 5k val2014 subset
'valminusminival2014' : 'val2014', # val2014 \setminus minival2014
'test-dev2015' : 'test2015',
'test-dev2017': 'test2017',
'val2017': 'val2017',
}
for (year, image_set) in image_sets:
coco_name = image_set+year
data_name = (self._view_map[coco_name]
if coco_name in self._view_map
else coco_name)
annofile = self._get_ann_file(coco_name)
_COCO = COCO(annofile)
self._COCO = _COCO
self.coco_name = coco_name
cats = _COCO.loadCats(_COCO.getCatIds())
self._classes = tuple(['__background__'] + [c['name'] for c in cats])
self.num_classes = len(self._classes)
self._class_to_ind = dict(zip(self._classes, range(self.num_classes)))
self._class_to_coco_cat_id = dict(zip([c['name'] for c in cats],
_COCO.getCatIds()))
indexes = _COCO.getImgIds()
self.image_indexes = indexes
self.ids.extend([self.image_path_from_index(data_name, index) for index in indexes ])
if image_set.find('test') != -1:
print('test set will not load annotations!')
else:
self.annotations.extend(self._load_coco_annotations(coco_name, indexes,_COCO))
def image_path_from_index(self, name, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
if(name=='test2017' or name=='val2017'):
file_name = (
str(index).zfill(12) + '.jpg')
image_path = os.path.join(self.root, 'images',
name, file_name)
else:
file_name = ('COCO_' + name + '_' +
str(index).zfill(12) + '.jpg')
image_path = os.path.join(self.root, 'images',
name, file_name)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _get_ann_file(self, name):
prefix = 'instances' if name.find('test') == -1 \
else 'image_info'
return os.path.join(self.root, 'annotations',
prefix + '_' + name + '.json')
def _load_coco_annotations(self, coco_name, indexes, _COCO):
cache_file=os.path.join(self.cache_path,coco_name+'_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(coco_name,cache_file))
return roidb
gt_roidb = [self._annotation_from_index(index, _COCO)
for index in indexes]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb,fid,pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _annotation_from_index(self, index, _COCO):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
im_ann = _COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = _COCO.getAnnIds(imgIds=index, iscrowd=None)
objs = _COCO.loadAnns(annIds)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
res = np.zeros((num_objs, 5))
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
res[ix, 0:4] = obj['clean_bbox']
res[ix, 4] = cls
return res
def __getitem__(self, index):
img_id = self.ids[index]
target = self.annotations[index]
img = cv2.imread(img_id, cv2.IMREAD_COLOR)
height, width, _ = img.shape
if self.target_transform is not None:
target = self.target_transform(target)
if self.preproc is not None:
img, target = self.preproc(img, target)
# target = self.target_transform(target, width, height)
#print(target.shape)
return img, target
def __len__(self):
return len(self.ids)
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
img_id = self.ids[index]
return cv2.imread(img_id, cv2.IMREAD_COLOR)
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
to_tensor = transforms.ToTensor()
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def _print_detection_eval_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
'~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))
print('{:.1f}'.format(100 * ap_default))
for cls_ind, cls in enumerate(self._classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
print('{:.1f}'.format(100 * ap))
print('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
def _do_detection_eval(self, res_file, output_dir):
ann_type = 'bbox'
coco_dt = self._COCO.loadRes(res_file)
coco_eval = COCOeval(self._COCO, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_eval_metrics(coco_eval)
eval_file = os.path.join(output_dir, 'detection_results.pkl')
with open(eval_file, 'wb') as fid:
pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
print('Wrote COCO eval results to: {}'.format(eval_file))
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_indexes):
dets = boxes[im_ind].astype(np.float64)
if dets.size == 0:
    continue
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results.extend(
[{'image_id' : index,
'category_id' : cat_id,
'bbox' : [xs[k], ys[k], ws[k], hs[k]],
'score' : scores[k]} for k in range(dets.shape[0])])
return results
def _write_coco_results_file(self, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(self._classes):
if cls == '__background__':
continue
print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
self.num_classes ))
coco_cat_id = self._class_to_coco_cat_id[cls]
results.extend(self._coco_results_one_category(all_boxes[cls_ind],
coco_cat_id))
'''
if cls_ind ==30:
res_f = res_file+ '_1.json'
print('Writing results json to {}'.format(res_f))
with open(res_f, 'w') as fid:
json.dump(results, fid)
results = []
'''
#res_f2 = res_file+'_2.json'
print('Writing results json to {}'.format(res_file))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def evaluate_detections(self, all_boxes, output_dir):
res_file = os.path.join(output_dir, ('detections_' +
self.coco_name +
'_results'))
res_file += '.json'
self._write_coco_results_file(all_boxes, res_file)
# Only do evaluation on non-test sets
if self.coco_name.find('test') == -1:
self._do_detection_eval(res_file, output_dir)
# Optionally cleanup results json file
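# A minimal usage sketch (assumption, not in the original file): constructing
# the dataset and reading one sample. The root path and image-set pairs are
# placeholders.
#
#   dataset = COCODetection(root='/data/COCO', image_sets=[('2014', 'train')])
#   img, target = dataset[0]   # target: ndarray of [x1, y1, x2, y2, class_ind]
#   print(len(dataset), img.shape, target.shape)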
===== preppy/util.py | phueb/Preppy | no license =====
from typing import List
import numpy as np
import random
from functools import reduce
from operator import iconcat
def shuffle_at_sentence_level(tokens: List[str],
shuffle_seed: int = 20,
) -> List[str]:
"""
shuffle at sentence-level (as opposed to document-level)
this remove clustering of same-age utterances within documents
"""
# TODO sentences are detected with punctuation, but periods can occur in numbers, not just at boundaries
# TODO: use a more sophisticated sentence boundary detector
random.seed(shuffle_seed)
print('WARNING: Shuffling sentences')
sentences: List[List[str]] = split_into_sentences(tokens)
random.shuffle(sentences)
res = reduce(iconcat, sentences, []) # flatten list of lists
return res
def split_into_sentences(tokens: List[str],
) -> List[List[str]]:
res = [[]]
for n, w in enumerate(tokens):
res[-1].append(w)
if (w.endswith('.') or w.endswith('?') or w.endswith('!')) and n < len(tokens) - 1:  # parentheses so the end-of-list guard applies to all three delimiters
res.append([])
return res
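# Quick illustration (not in the original file) of the helper above:
#
#   >>> split_into_sentences(['i', 'ran.', 'you', 'ran', 'too!'])
#   [['i', 'ran.'], ['you', 'ran', 'too!']]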
def chunk_sentences(sentences: List[List[str]],
split_size: int,
):
for i in range(0, len(sentences), split_size):
yield sentences[i:i + split_size]
def make_windows_mat(
part: List[int],
num_windows: int,
num_tokens_in_window: int,
) -> np.ndarray:
"""
return a matrix, where rows are windows.
each window is an ordered array of word IDs.
windows are created by sliding a moving window across tokens, moving one token at a time.
"""
result = np.zeros((num_windows, num_tokens_in_window), dtype=np.int64)
for window_id in range(num_windows):
window = part[window_id:window_id + num_tokens_in_window]
result[window_id, :] = window
return result
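# Worked example (not in the original file): sliding a 3-token window over
# five token IDs, one token at a time.
#
#   >>> make_windows_mat([1, 2, 3, 4, 5], num_windows=3, num_tokens_in_window=3)
#   array([[1, 2, 3],
#          [2, 3, 4],
#          [3, 4, 5]])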
===== 374 - Big Mod.py | jlhung/UVA-Python | no license =====
'''
20180108 jlhung v1.0
'''
def mod(b, p, m):
if p == 0:
return 1
elif p == 1:
return b % m
else:
result = mod(b, p//2, m)
if p % 2:
return result * result * b % m
else:
return result * result % m
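# Sanity check (added): the recursion above is exponentiation by squaring,
# so it must agree with Python's built-in three-argument pow:
#
#   >>> mod(2, 10, 1000)
#   24
#   >>> mod(2, 10, 1000) == pow(2, 10, 1000)
#   True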
while True:
try:
b = input()
if b == "":
b = int(input())
else:
b = int(b)
p = int(input())
m = int(input())
except EOFError:
break
print(mod(b, p, m))
===== lib/dev_kits/py/sample_random_bot.py | dbe/blobwars | no license =====
from sample_toolkit import SampleBotBase, GameStateSample, INVALID_COORDINATE
from random import randint
class RandomBot(SampleBotBase):
def get_move(self,game_state):
possible_moves = self.get_all_possible_moves(game_state)
if possible_moves:
return possible_moves[randint(0, len(possible_moves) - 1)]
bot = RandomBot()
for game_state in GameStateSample.get():
coord = bot.get_move(game_state) or INVALID_COORDINATE
game_state.send_move(coord.x, coord.y)
===== Intro to XLRD.py | jtsulliv/Data-Wrangling | no license =====
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 04 14:11:43 2016
@author: jsullivan
"""
'''Reading Excel files'''
import xlrd
datafile = "2013_ERCOT_Hourly_Load_Data.xls"
def parse_file(datafile):
workbook = xlrd.open_workbook(datafile) # reading into workbook
sheet = workbook.sheet_by_index(0) # specify which sheet
''' here we're looping through all rows and columns and reading
it into a python list called 'data'
'''
data = [[sheet.cell_value(r, col) # pulling in the value from the excel file
for col in range(sheet.ncols)] # looping through columns
for r in range(sheet.nrows)] # looping through rows
print "\nList Comprehension"
print "data[3][2]:",
print data[3][2] # printing value from row 3, column 2
'''printing all of the values
from row 50
'''
print "\nCells in a nested loop:"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
if row == 50:
print sheet.cell_value(row, col),
### other useful methods:
print "\nROWS, COLUMNS, and CELLS:"
print "Number of rows in the sheet:",
print sheet.nrows
print "Type of data in cell (row 3, col 2):",
print sheet.cell_type(3, 2) # type is 2, which indicates floating point number
print "Value in cell (row 3, col 2):",
print sheet.cell_value(3, 2)
print "Get a slice of values in column 3, from rows 1-3:"
print sheet.col_values(3, start_rowx=1, end_rowx=4)
print "\nDATES:"
print "Type of data in cell (row 1, col 0):",
print sheet.cell_type(1, 0)
exceltime = sheet.cell_value(1, 0)
print "Time in Excel format:",
print exceltime
print "Convert time to a Python datetime tuple, from the Excel float:",
print xlrd.xldate_as_tuple(exceltime, 0)
return data
data = parse_file(datafile)
===== config.py | mumblepins-docker/dev-images | MIT =====
class DockerConfig:
include_dirs = ['root-fs']
DOCKER_IMAGE = "mumblepins/circleci-dev"
fill_in_data = {
'Dockerfile.meta': '### Build-time metadata ###'
}
save_dir = 'workspace'
special_tags = {
'latest': 'stretch',
'ubuntu': 'bionic',
'ubuntu-LTS': 'xenial',
'debian': 'stretch',
'ubuntu-debuild': 'bionic-debuild',
'ubuntu-LTS-debuild': 'xenial-debuild'
}
latest = 'stretch'
ignore_lines = [
'Selecting previously unselected ',
'Preparing to unpack',
'update-alternatives'
]
@classmethod
def values(cls):
return {k: v for k, v in cls.__dict__.items() if not k.startswith('__') and k != 'values'}
===== 100 exercise/no52.py | phucduongBKDN/100exercies | no license =====
# Excel Sheet Column Number
thisdict = {
"A": 1,
"B": 2,
"C": 3
}
j = input("Enter char: ")
print(thisdict[j])
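# The lookup table above only covers the single letters A-C. A general solution
# to the "Excel Sheet Column Number" exercise (sketch added here, not in the
# original file) reads the title as a base-26 number with digits A=1..Z=26:
def column_number(title):
    result = 0
    for ch in title:
        result = result * 26 + (ord(ch) - ord('A') + 1)
    return result

# column_number("A") == 1, column_number("Z") == 26, column_number("AB") == 28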
===== firstPass.py | 53apnil/Assembler_Assignment | no license =====
import error as ERR
# returns the index of operand in symTab, or 0 if absent (caveat: 0 is also a valid index)
def check(operand,symTab):
for i in range(0,len(symTab)):
if operand==symTab[i][1]:
return i
return 0
def checkLit(lit,litTab):
for h in range(0 ,len(litTab)):
if lit==litTab[h][1]:
return 0
return 1
def checkAdd(tempStr,regTab,symTab,litTab,errorTab,size,litNo,lineNo):
operSplit=tempStr[1].split(',')
if operSplit[0] in regTab and operSplit[1] in regTab:
size+=2
return litNo
elif operSplit[0] in regTab and operSplit[1].isdecimal():
size+=3
if checkLit(operSplit[1],litTab):
litNo+=1
litList=[litNo,operSplit[1],(hex(int(operSplit[1])))[2:].upper()]
litTab.append(litList)
return litNo
return litNo
elif operSplit[0] in regTab and check(operSplit[1],symTab):
size+=6
return litNo
else:
ERR.putError(errorTab,'',lineNo,4)
return litNo
def checkMov(tempStr,regTab,symTab,litTab,errorTab,size,litNo,lineNo):
operSplit=tempStr[1].split(',')
if operSplit[0] in regTab and operSplit[1] in regTab:
size+=2
return litNo
elif operSplit[0] in regTab and operSplit[1].isdecimal():
size+=5
if checkLit(operSplit[1],litTab):
litNo+=1
litList=[litNo,operSplit[1],(hex(int(operSplit[1])))[2:].upper()]
litTab.append(litList)
return litNo
return litNo
elif operSplit[0] in regTab and check(operSplit[1],symTab):
size+=5
return litNo
else:
ERR.putError(errorTab,'',lineNo,4)
return litNo
def checkInc(tempStr,regTab,symTab,errorTab,size,lineNo):
if tempStr[1] in regTab:
size+=2
elif "dword" in tempStr[1]:
size+=6
else:
ERR.putError(errorTab,'',lineNo,4)
def checkJmp(tempStr,regTab,symTab,errorTab,size,lineNo,keyWords,i):
if tempStr[1] in keyWords:
ERR.putError(errorTab,tempStr[1],lineNo,4)
return i
elif (check(tempStr[1],symTab))==0:
i=i+1
emptyList=[i,tempStr[1],0,'t','','','','U',lineNo]
symTab.append(emptyList)
size+=2
return i
else:
size+=2
return i
===== plastiqpublicapi/http/requests_client.py | jeffkynaston/sdk-spike-python-apimatic | MIT =====
|
# -*- coding: utf-8 -*-

"""
plastiqpublicapi

This file was automatically generated by APIMATIC v3.0 (
 https://www.apimatic.io ).
"""

from cachecontrol import CacheControl
from requests import session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from plastiqpublicapi.http.http_client import HttpClient
from plastiqpublicapi.http.http_method_enum import HttpMethodEnum
from plastiqpublicapi.http.http_response import HttpResponse


class RequestsClient(HttpClient):
    """An implementation of HttpClient that uses Requests as its HTTP Client

    Attributes:
        timeout (int): The default timeout for all API requests.

    """

    def __init__(self,
                 timeout=60,
                 cache=False,
                 max_retries=None,
                 backoff_factor=None,
                 retry_statuses=None,
                 retry_methods=None,
                 verify=True):
        """The constructor.

        Args:
            timeout (float): The default global timeout (seconds).
            cache (bool): Whether to wrap the session in a CacheControl cache.
            max_retries (int): The total number of retries for failed requests.
            backoff_factor (float): The backoff factor applied between retries.
            retry_statuses (list): HTTP status codes that force a retry.
            retry_methods (list): HTTP methods that are allowed to be retried.
            verify (bool): Whether to verify TLS certificates.

        """
        self.timeout = timeout
        self.session = session()

        retries = Retry(total=max_retries, backoff_factor=backoff_factor,
                        status_forcelist=retry_statuses,
                        allowed_methods=retry_methods)
        self.session.mount('http://', HTTPAdapter(max_retries=retries))
        self.session.mount('https://', HTTPAdapter(max_retries=retries))

        if cache:
            self.session = CacheControl(self.session)

        self.session.verify = verify

    def execute_as_string(self, request):
        """Execute a given HttpRequest to get a string response back

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        """
        response = self.session.request(
            HttpMethodEnum.to_string(request.http_method),
            request.query_url,
            headers=request.headers,
            params=request.query_parameters,
            data=request.parameters,
            files=request.files,
            timeout=self.timeout
        )

        return self.convert_response(response, False, request)

    def execute_as_binary(self, request):
        """Execute a given HttpRequest to get a binary response back

        Args:
            request (HttpRequest): The given HttpRequest to execute.

        Returns:
            HttpResponse: The response of the HttpRequest.

        """
        response = self.session.request(
            HttpMethodEnum.to_string(request.http_method),
            request.query_url,
            headers=request.headers,
            params=request.query_parameters,
            data=request.parameters,
            files=request.files,
            timeout=self.timeout
        )

        return self.convert_response(response, True, request)

    def convert_response(self, response, binary, http_request):
        """Converts the Response object of the HttpClient into an
        HttpResponse object.

        Args:
            response (dynamic): The original response object.
            binary (bool): If True, keep the raw response body as bytes.
            http_request (HttpRequest): The original HttpRequest object.

        Returns:
            HttpResponse: The converted HttpResponse object.

        """
        if binary:
            return HttpResponse(
                response.status_code,
                response.reason,
                response.headers,
                response.content,
                http_request
            )
        else:
            return HttpResponse(
                response.status_code,
                response.reason,
                response.headers,
                response.text,
                http_request
            )
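

# A minimal construction sketch (illustrative only, not part of the generated
# SDK): enable retries on transient server errors with a 30-second timeout.
if __name__ == '__main__':
    client = RequestsClient(timeout=30, max_retries=3, backoff_factor=0.5,
                            retry_statuses=[429, 500, 502, 503, 504])
    print(client.timeout)  # the configured default timeout for all requests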
|
[
"jeff.kynaston@plastiq.com"
] |
jeff.kynaston@plastiq.com
|
1f68d063652856af257f1df8c6243c1690e0fe35
|
04a7b23a14722f213309a0a8cb1c146d8da3f1f9
|
/test/test_nestedtensor.py
|
ba70fbf9c7c97dd2b2e892cb7a56a0f9141cb26b
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
mingfeima/pytorch
|
017e79723d3510d011ad46e8b63bc900f7468b4e
|
a2f44d82f837500c9921ecf86b90fab0dbc27084
|
refs/heads/master
| 2023-02-20T04:30:34.439394
| 2023-02-16T21:16:03
| 2023-02-17T00:25:05
| 138,236,721
| 1
| 0
|
NOASSERTION
| 2021-05-17T01:51:42
| 2018-06-22T00:49:24
|
C++
|
UTF-8
|
Python
| false
| false
| 109,751
|
py
|
# Owner(s): ["module: nestedtensor"]
import unittest
import numpy as np
import torch
import torch.nn
from torch.testing._internal.common_device_type import (
dtypes,
dtypesIfCUDA,
instantiate_device_type_tests,
onlyCPU,
onlyCUDA,
skipMeta,
)
from torch.testing._internal.common_dtype import floating_types_and_half
from torch.testing._internal.common_utils import (
freeze_rng_state,
gradcheck,
instantiate_parametrized_tests,
IS_FBCODE,
parametrize,
run_tests,
subtest,
TestCase,
)
# Tests are ported from pytorch/nestedtensor.
# This makes porting as_nested_tensor easier in the future.
def _iter_constructors():
# yield as_nested_tensor
yield torch.nested.nested_tensor
# Helper function to generate a pair of random nested tensors
# one is contiguous, the other is not, but they appear to have same entries
# an output nested tensor consists of
# * `len(ragged_sizes)` matrices
# * matrices[i].shape == (20, ragged_sizes[i])
def random_nt_noncontiguous_pair(ragged_sizes, device="cpu", dtype=torch.float16):
xs = []
for size in ragged_sizes:
xs.append(torch.randn((size, 20), device=device, dtype=dtype))
# contiguous nested tensor
ys = []
for x in xs:
ys.append(x.transpose(-1, -2))
nt_contiguous = torch.nested.nested_tensor(ys)
# noncontiguous nested tensor
n = len(ragged_sizes)
nt_noncontiguous = torch.nested.nested_tensor(xs).transpose(-1, -2)
return nt_contiguous, nt_noncontiguous
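
# A quick illustration of the helper's contract (a sketch, assuming a CPU
# run): both tensors unbind to equal entries despite the layout difference.
def _demo_random_nt_noncontiguous_pair():
    nt_c, nt_nc = random_nt_noncontiguous_pair((2, 3))
    assert nt_c.is_contiguous() and not nt_nc.is_contiguous()
    for a, b in zip(nt_c.unbind(), nt_nc.unbind()):
        assert torch.equal(a, b)
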
# Helper functions to pad a noncontiguous nested tensor
# can be replaced once to_padded_tensor supports noncontiguous memory
def noncontiguous_to_padded_tensor(input, shape=None):
tensors = input.unbind()
ntensors = len(tensors)
assert ntensors > 0
if shape is None:
shape = []
for size in tensors[0].shape:
shape.append(size)
for i in range(1, ntensors):
new_shape = tensors[i].shape
for j in range(len(shape)):
shape[j] = max(shape[j], new_shape[j])
shape = [ntensors] + shape
result = tensors[0].new_zeros(shape)
for itensor in range(ntensors):
tensor = tensors[itensor]
view = result[itensor]
for idim in range(tensor.dim()):
view = view.narrow(idim, 0, tensor.size(idim))
view.copy_(tensor)
return result
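
# A small usage sketch (illustrative only): padding a noncontiguous nested
# tensor by hand matches to_padded_tensor applied to its contiguous twin,
# mirroring the check done in test_to_padded_tensor_noncontiguous below.
def _demo_noncontiguous_to_padded_tensor():
    nt_c, nt_nc = random_nt_noncontiguous_pair((2, 3), dtype=torch.float32)
    padded = noncontiguous_to_padded_tensor(nt_nc)
    assert torch.equal(padded, torch.nested.to_padded_tensor(nt_c, 0.0))
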
# Helper function to generate a random nested tensor
def random_nt(device, dtype, num_tensors, max_dims, min_dims=None):
if min_dims is None:
min_dims = tuple([0] * len(max_dims))
ts1 = []
for _ in range(num_tensors):
tensor_dims = tuple([torch.randint(low=min_dim, high=max_dim, size=(1,)).item()
for (min_dim, max_dim) in zip(min_dims, max_dims)])
t1 = torch.randn(tensor_dims, device=device, dtype=dtype)
ts1.append(t1)
return torch.nested.nested_tensor(ts1, device=device, dtype=dtype)
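
# A minimal sketch of random_nt: four CPU tensors whose per-dimension sizes
# are drawn from [0, 4), packed into a single nested tensor.
def _demo_random_nt():
    nt = random_nt(torch.device('cpu'), torch.float32, 4, (4, 4))
    assert nt.is_nested and nt.size(0) == 4
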
class TestNestedTensor(TestCase):
@parametrize("batch_size", [2, 4])
@parametrize("max_seq_len", [3, 5])
@parametrize("vocab_size", [10, 20])
def test_2d_nested_tensor(self, batch_size, max_seq_len, vocab_size):
data = []
nested_tensor_ref_list = []
for _ in range(batch_size):
if max_seq_len == 0:
length = 0
else:
length = np.random.randint(low=1, high=max_seq_len)
row = list(np.random.randint(low=0, high=vocab_size, size=(length,)))
data.append(row)
nested_tensor_ref_list.append(torch.tensor(row))
nested_tensor = torch.nested.nested_tensor(data, dtype=torch.int64)
nested_tensor_list = nested_tensor.unbind()
for id in range(batch_size):
self.assertEqual(
nested_tensor_list[id],
nested_tensor_ref_list[id].type(torch.int64)
)
@parametrize("batch_size", [2, 4])
@parametrize("max_seq_len", [3, 5])
@parametrize("vocab_size", [10, 20])
def test_3d_nested_tensor(self, batch_size, max_seq_len, vocab_size):
data = []
nested_tensor_ref_list = []
for _ in range(batch_size):
if max_seq_len == 0:
length = 0
else:
length = np.random.randint(low=1, high=max_seq_len)
row = list(np.random.randint(low=0, high=vocab_size, size=(length,)))
row = [list(item * np.arange(max_seq_len)) for item in row]
data.append(row)
nested_tensor_ref_list.append(torch.Tensor(row))
nested_tensor = torch.nested.nested_tensor(data, dtype=torch.int64)
nested_tensor_list = nested_tensor.unbind()
for id in range(batch_size):
self.assertEqual(
nested_tensor_list[id],
nested_tensor_ref_list[id].type(torch.int64)
)
@parametrize("batch_size", [2, 4])
@parametrize("max_seq_len", [3, 5])
@parametrize("vocab_size", [10, 20])
def test_3d_nested_tensor_float(self, batch_size, max_seq_len, vocab_size):
data = []
nested_tensor_ref_list = []
for _ in range(batch_size):
if max_seq_len == 0:
length = 0
else:
length = np.random.randint(low=1, high=max_seq_len)
row = list(
np.random.randint(low=0, high=vocab_size, size=(length,)).astype(float)
)
row = [list(item * np.arange(max_seq_len)) for item in row]
data.append(row)
nested_tensor_ref_list.append(torch.Tensor(row))
nested_tensor = torch.nested.nested_tensor(data, dtype=torch.float)
nested_tensor_list = nested_tensor.unbind()
for id in range(batch_size):
self.assertEqual(
nested_tensor_list[id],
nested_tensor_ref_list[id].type(torch.float)
)
@torch.inference_mode()
def _test_unbind_case(self, a, b):
nt = torch.nested.nested_tensor([a, b])
a1, b1 = nt.unbind()
self.assertTrue(a is not a1)
self.assertTrue(b is not b1)
nt = torch.nested.nested_tensor([a, b], dtype=a.dtype)
a1, b1 = nt.unbind(0)
self.assertEqual(a, a1)
self.assertEqual(b, b1)
a = torch.randn((2, 3)).add_(1)
nt = torch.nested.nested_tensor([a])
self.assertEqual(a, nt.unbind(0)[0])
@torch.inference_mode()
def test_unbind_0(self):
self._test_unbind_case(
torch.tensor([1, 2]), torch.tensor([7, 8]),
)
@torch.inference_mode()
def test_unbind_1(self):
self._test_unbind_case(
torch.tensor([1]), torch.tensor([7]),
)
@torch.inference_mode()
def test_unbind_3(self):
self._test_unbind_case(
torch.tensor([1.0]), torch.tensor([]),
)
@torch.inference_mode()
def test_unbind_4(self):
self._test_unbind_case(
torch.tensor([]), torch.tensor([]),
)
@torch.inference_mode()
def test_unbind_dim(self):
def _test_fn(unbind_fn):
a = torch.rand(3, 2)
b = torch.rand(2, 3)
nt = torch.nested.nested_tensor([a, b])
self.assertRaises(RuntimeError, lambda: unbind_fn(nt, 1))
# Both of these tests are necessary, because we're using
# torch_function.
_test_fn(lambda x, dim: x.unbind(dim))
# TODO: Re-enable this once using torch_dispatch
# _test_fn(lambda x, dim: torch.unbind(x, dim))
@torch.inference_mode()
def test_nested_tensor(self):
self.assertRaises(TypeError, lambda: torch.nested.nested_tensor(torch.tensor([3.0])))
self.assertRaises(TypeError, lambda: torch.nested.nested_tensor(4.0))
@torch.inference_mode()
def test_nested_tensor_matching_dim(self):
self.assertRaisesRegex(
RuntimeError,
"Found dimension 1 for Tensor at index 1 and dimension 0 for Tensor at index 0.",
lambda: torch.nested.nested_tensor([torch.tensor(1.0), torch.tensor([])]),
)
self.assertRaisesRegex(
RuntimeError,
"Found dimension 1 for Tensor at index 2 and dimension 0 for Tensor at index 1.",
lambda: torch.nested.nested_tensor(
[torch.tensor(1.0), torch.tensor(2.0), torch.tensor([])]
),
)
@torch.inference_mode()
def test_default_nested_tensor(self):
self.assertRaises(TypeError, lambda: torch.nested.nested_tensor())
default_nested_tensor = torch.nested.nested_tensor([])
default_tensor = torch.tensor([])
# self.assertEqual(default_nested_tensor.nested_dim(), 1)
# self.assertEqual(default_nested_tensor.nested_size(), ())
self.assertEqual(default_nested_tensor.dim(), default_tensor.dim())
self.assertEqual(default_nested_tensor.layout, default_tensor.layout)
self.assertEqual(default_nested_tensor.device, default_tensor.device)
self.assertEqual(default_nested_tensor.dtype, default_tensor.dtype)
self.assertEqual(
default_nested_tensor.requires_grad, default_tensor.requires_grad
)
self.assertIsNone(default_tensor.grad)
# TODO: Re-enable once we have a performance driven
# use case and implementation.
# self.assertEqual(default_nested_tensor.is_pinned(),
# default_tensor.is_pinned())
@torch.inference_mode()
def test_dim(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertEqual(a1.dim(), 1)
a1 = constructor([torch.tensor(3.0)])
self.assertEqual(a1.dim(), 1)
a1 = constructor([torch.tensor([1, 2, 3, 4])])
self.assertEqual(a1.dim(), 2)
@unittest.skipIf(IS_FBCODE, "numel is not virtual in fbcode.")
@torch.inference_mode()
def test_numel(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertEqual(a1.numel(), 0)
a1 = constructor([torch.tensor(3.0), torch.tensor(4.0)])
self.assertEqual(a1.numel(), 2)
a1 = constructor([torch.randn(2, 2, 2)])
self.assertEqual(a1.numel(), 8)
a1 = constructor([torch.randn([1, 2, 3]), torch.randn(3, 2, 1)])
self.assertEqual(a1.numel(), 12)
a1 = constructor([torch.randn([1, 1, 3]), torch.randn(3, 2, 4)])
self.assertEqual(a1.numel(), 27)
a1 = constructor([torch.randn([5, 5, 5]), torch.randn(6, 6, 6)])
self.assertEqual(a1.numel(), 341)
# Interesting edge case
a1 = constructor([torch.randn([1, 2, 3]), torch.randn(1, 2, 0)])
self.assertEqual(a1.numel(), 6)
@torch.inference_mode()
def test_size(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertRaisesRegex(
RuntimeError,
"NestedTensorImpl doesn't support sizes",
lambda: a1.size(),
)
def test_size_dim(self):
a = torch.nested.nested_tensor([])
self.assertEqual(a.size(0), 0)
a = torch.nested.nested_tensor([torch.tensor(1)])
self.assertEqual(a.size(0), 1)
a = torch.nested.nested_tensor([torch.tensor(1), torch.tensor(2)])
self.assertEqual(a.size(0), 2)
a = torch.nested.nested_tensor([torch.rand(1, 2),
torch.rand(1, 8)])
self.assertEqual(a.size(0), 2)
self.assertEqual(a.size(1), 1)
self.assertRaisesRegex(
RuntimeError, "Given dimension 2 is irregular and does not have a size", lambda: a.size(2))
a = torch.nested.nested_tensor([torch.rand(3, 4),
torch.rand(5, 4)])
self.assertEqual(a.size(0), 2)
self.assertRaisesRegex(
RuntimeError, "Given dimension 1 is irregular and does not have a size", lambda: a.size(1))
self.assertEqual(a.size(2), 4)
@unittest.skipIf(IS_FBCODE, "stride is not virtual in fbcode.")
@torch.inference_mode()
def test_stride(self):
for constructor in _iter_constructors():
a1 = constructor([])
self.assertRaisesRegex(
RuntimeError,
"NestedTensorImpl doesn't support strides",
lambda: a1.stride(),
)
@unittest.skipIf(IS_FBCODE, "is_contiguous is not virtual in fbcode.")
@torch.inference_mode()
def test_is_contiguous(self):
# Test empty case
nt_empty = torch.nested.nested_tensor([])
assert nt_empty.is_contiguous()
self.assertEqual(nt_empty, nt_empty.contiguous())
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7))
# Test contiguous case
assert nt_contiguous.is_contiguous()
self.assertEqual(nt_contiguous, nt_contiguous.contiguous())
# Test non_contiguous case
assert not nt_noncontiguous.is_contiguous()
self.assertEqual(nt_contiguous, nt_noncontiguous.contiguous())
@torch.inference_mode()
def test_repr_string(self):
a = torch.nested.nested_tensor([])
expected = "nested_tensor([" "\n\n])"
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
a = torch.nested.nested_tensor([torch.tensor(1.0)])
expected = "nested_tensor([" "\n tensor(1.)" "\n])"
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
a = torch.nested.nested_tensor([torch.tensor([[1, 2]]), torch.tensor([[4, 5]])])
expected = (
"nested_tensor([" "\n tensor([[1, 2]])" "," "\n tensor([[4, 5]])" "\n])"
)
self.assertEqual(str(a), expected)
self.assertEqual(repr(a), expected)
def test_to_padded_tensor_on_empty_tensor(self):
nt = torch.nested.nested_tensor([])
empty = torch.nested.to_padded_tensor(nt, 4)
self.assertEqual(empty, torch.tensor([]))
def test_nested_namespace(self):
nt = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 5)])
result = nt.to_padded_tensor(4)
nested_namespace_result = torch.nested.to_padded_tensor(nt, 4)
self.assertEqual(result, nested_namespace_result)
def test_to(self):
ntensors = 4
nt = random_nt(torch.device('cpu'), torch.float32, ntensors, (4, 4))
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, t.to(t, non_blocking=non_blocking))
self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking))
self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking))
self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True))
devices = [t.device]
if t.device.type == 'cuda':
if t.device.index == -1:
devices.append('cuda:{}'.format(torch.cuda.current_device()))
elif t.device.index == torch.cuda.current_device():
devices.append('cuda')
for device in devices:
self.assertIs(t, t.to(device, non_blocking=non_blocking))
self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking))
self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True))
self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True))
test_copy_behavior(nt)
self.assertEqual(nt.device, nt.to('cpu').device)
self.assertEqual(nt.device, nt.to('cpu', dtype=torch.float32).device)
self.assertIs(torch.float32, nt.to('cpu', dtype=torch.float32).dtype)
self.assertEqual(nt.device, nt.to(torch.float32).device)
self.assertIs(torch.float32, nt.to(dtype=torch.float32).dtype)
def test_data_ptr(getter):
self.assertEqual(getter(nt), getter(nt.to('cpu')))
self.assertEqual(getter(nt), getter(nt.to(dtype=nt.dtype, device=nt.device, copy=False)))
self.assertEqual(getter(nt), getter(nt.to('cpu', copy=False)))
self.assertNotEqual(getter(nt), getter(nt.to('cpu', copy=True)))
test_data_ptr(lambda nt: nt.data_ptr())
if torch.cuda.is_available():
for non_blocking in [True, False]:
for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
nt2 = random_nt(cuda, torch.float32, ntensors, (4, 4))
test_copy_behavior(nt2, non_blocking)
self.assertEqual(nt2.device, nt2.to(cuda, non_blocking=non_blocking).device)
self.assertEqual(nt.device, nt2.to('cpu', non_blocking=non_blocking).device)
self.assertEqual(nt2.device, nt.to(cuda, non_blocking=non_blocking).device)
self.assertIs(torch.int32, nt2.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype)
self.assertEqual(nt.device, nt2.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device)
self.assertIs(torch.int32, nt2.to(dtype=torch.int32).dtype)
self.assertEqual(nt2.device, nt2.to(dtype=torch.int32).device)
def test_copy_(self):
ntensors = 4
nt = random_nt(torch.device('cpu'), torch.float32, ntensors, (4, 4))
nt_copy = torch.empty_like(nt)
nt_copy.copy_(nt)
for (nt_ub, nt_copy_ub) in zip(nt.unbind(), nt_copy):
self.assertEqual(nt_ub, nt_copy_ub)
nt_error = torch.nested.nested_tensor([torch.tensor([0, 0])])
self.assertRaisesRegex(
RuntimeError,
"copy_ only supports tensors that are the same size for Nested implementations",
lambda: nt_error.copy_(nt)
)
if torch.cuda.is_available():
nt = random_nt(torch.device('cuda'), torch.float32, ntensors, (4, 4))
nt_copy = torch.empty_like(nt, device=torch.device('cpu'))
nt_copy.copy_(nt, non_blocking=True)
torch.cuda.current_stream(torch.cuda.current_device()).synchronize()
for (nt_ub, nt_copy_ub) in zip(nt.unbind(), nt_copy):
self.assertEqual(nt_ub, nt_copy_ub)
nt_copy = torch.empty_like(nt, device=torch.device('cpu'))
nt_copy.copy_(nt, non_blocking=False)
for (nt_ub, nt_copy_ub) in zip(nt.unbind(), nt_copy):
self.assertEqual(nt_ub, nt_copy_ub)
def test_fill_(self):
ntensors = 4
nt = random_nt(torch.device('cpu'), torch.float32, ntensors, (4, 4))
nt.fill_(10.)
for nt_ub in nt.unbind():
t = torch.empty_like(nt_ub)
t.fill_(10.)
self.assertEqual(nt_ub, t)
fill_tensor = torch.tensor([11.])
self.assertRaisesRegex(
RuntimeError,
"fill_ only supports 0-dimension value tensor",
lambda: nt.fill_(fill_tensor)
)
nt.fill_(fill_tensor[0])
for nt_ub in nt.unbind():
t = torch.empty_like(nt_ub)
t.fill_(11.)
self.assertEqual(nt_ub, t)
def test_ones_like(self):
ntensors = 4
nt = random_nt(torch.device('cpu'), torch.float32, ntensors, (4, 4))
ones_nt = torch.ones_like(nt)
for nt_ub in ones_nt.unbind():
t = torch.ones_like(nt_ub)
self.assertEqual(nt_ub, t)
class TestNestedTensorDeviceType(TestCase):
# Helper function to generate a pair of random nested tensors
# the 2 nested tensors have same shapes
def random_nt_pair(self, device, dtype, num_tensors, max_dims):
ts1 = []
ts2 = []
for _ in range(num_tensors):
tensor_dims = tuple([torch.randint(low=0, high=max_dim, size=(1,)).item() for max_dim in max_dims])
t1 = torch.randn(tensor_dims, device=device, dtype=dtype)
t2 = torch.randn(tensor_dims, device=device, dtype=dtype)
ts1.append(t1)
ts2.append(t2)
return (torch.nested.nested_tensor(ts1, device=device, dtype=dtype),
torch.nested.nested_tensor(ts2, device=device, dtype=dtype))
@dtypes(*floating_types_and_half())
def test_detach(self, device, dtype):
a = torch.randn(2, 4, device=device, dtype=dtype, requires_grad=False)
b = torch.randn(5, 4, device=device, dtype=dtype, requires_grad=False)
x = torch.nested.nested_tensor([a, b], requires_grad=True)
x_detach = x.detach()
z = x_detach * 4
self.assertFalse(x_detach.requires_grad)
self.assertFalse(z.requires_grad)
a = torch.randn(2, 4, device=device, dtype=dtype, requires_grad=True)
b = torch.randn(5, 4, device=device, dtype=dtype, requires_grad=True)
x = torch.nested.as_nested_tensor([a, b])
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
torch.nested.to_padded_tensor(z, 0).sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(a.grad, torch.ones(2, 4, device=device, dtype=dtype))
self.assertEqual(b.grad, torch.ones(5, 4, device=device, dtype=dtype))
@dtypes(torch.float, torch.float16, torch.double)
def test_unbind_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
ub_contiguous = nt_contiguous.unbind()
ub_noncontiguous = nt_noncontiguous.unbind()
self.assertEqual(len(ub_contiguous), len(ub_noncontiguous))
n = len(ub_contiguous)
for i in range(n):
self.assertEqual(ub_contiguous[i], ub_noncontiguous[i])
@dtypes(torch.float)
@skipMeta
def test_to_then_from_padded_tensor_no_transform0213(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
padded = torch.nested.to_padded_tensor(nt, 0)
nt_to = torch._nested_from_padded_and_nested_example(padded, nt)
for (t1, t2) in zip(nt.unbind(), nt_to.unbind()):
self.assertEqual(t1, t2)
self.assertEqual(nt.device, nt_to.device)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
@skipMeta
@torch.inference_mode()
def test_layer_norm(self, device, dtype):
def _test(size):
# Simple shapes test
t0 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(2, size, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t0, t1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
nt_result = layer_norm(nt)
for (nt_subresult, t) in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
self.assertEqual(nt_subresult, t_result)
# More complex nt test with different lengths for each tensor
t0 = torch.randn(4, size, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(10, size, device=device, dtype=dtype, requires_grad=False)
t2 = torch.randn(7, size, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm(size, device=device, dtype=dtype)
nt_result = layer_norm(nt)
for (nt_subresult, t) in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size).squeeze(0))
self.assertEqual(nt_subresult, t_result)
if size <= 128:
# Test with multidimensional tensors after irregular dim
# (run only with smaller dimensions to ensure fast execution)
t0 = torch.randn(4, size, size, 4, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(10, size, size, 4, device=device, dtype=dtype, requires_grad=False)
t2 = torch.randn(7, size, size, 4, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm((size, size, 4), device=device, dtype=dtype)
nt_result = layer_norm(nt)
for (nt_subresult, t) in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size, size, 4).squeeze(0))
self.assertEqual(nt_subresult, t_result)
# Test where the normalizing dimensions are not all
layer_norm = torch.nn.LayerNorm((size, 4), device=device, dtype=dtype)
nt_result = layer_norm(nt)
for (nt_subresult, t) in zip(nt_result.unbind(), ts):
t_result = layer_norm(t.reshape(1, -1, size, size, 4).squeeze(0))
self.assertEqual(nt_subresult, t_result)
for size in (1024, 1023, 513, 512, 256, 128, 2, 4, 32):
_test(size)
@dtypes(torch.float)
@dtypesIfCUDA(torch.float, torch.half)
@skipMeta
@torch.inference_mode()
def test_layer_norm_breaking(self, device, dtype):
size = 128
t0 = torch.randn(4, size, size, 4, device=device, dtype=dtype, requires_grad=False)
t1 = torch.randn(10, size, size, 4, device=device, dtype=dtype, requires_grad=False)
t2 = torch.randn(7, size, size, 4, device=device, dtype=dtype, requires_grad=False)
ts = [t0, t1, t2, t0, t2]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
layer_norm = torch.nn.LayerNorm((4, size, size, 4), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"normalized_shape extends into irregular dimensions for the nested tensor",
lambda: layer_norm(nt),
)
layer_norm = torch.nn.LayerNorm((size + 1, size, 4), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"The shape at dimension 0",
lambda: layer_norm(nt),
)
@skipMeta
@torch.inference_mode()
def test_embedding(self, device):
inputs = [
torch.randint(100, (L,), device=device, dtype=torch.int64)
for L in torch.randint(5, 50, (8,))
]
x = torch.nested.nested_tensor(inputs, device=device, dtype=torch.int64)
emb = torch.nn.Embedding(100, 8, device=device)
y = emb(x)
ys = y.unbind()
for i, inp in enumerate(inputs):
self.assertEqual(emb(inp), ys[i])
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_simple(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = torch.nested.to_padded_tensor(nt, padding_value)
correct_output = t.clone()
if padding_value == 0:
correct_output[0][-1] = torch.zeros_like(correct_output[0][-1])
else:
correct_output[0][-1] = torch.ones_like(correct_output[0][-1])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16)
def test_to_padded_tensor_output_size(self, device, dtype):
t = torch.randn(4, 4, 4, device=device, dtype=dtype)
output_size = (4, 6, 5)
ts = list(torch.unbind(t))
ts[0] = ts[0][:-1]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
for padding_value in (0, 1):
padded = torch.nested.to_padded_tensor(nt, padding_value, output_size=output_size)
correct_output = torch.ones(output_size, device=device, dtype=dtype) * padding_value
correct_output[:4, :4, :4] = t.clone()
if padding_value == 0:
correct_output[0][3] = torch.zeros_like(correct_output[0][3])
else:
correct_output[0][3] = torch.ones_like(correct_output[0][3])
self.assertEqual(padded, correct_output)
self.assertEqual(padded.device, torch.device(device))
self.assertEqual(padded.dtype, dtype)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim2(self, device, dtype):
ts = [
torch.randn(160, device=device, dtype=dtype),
torch.randn(1240, device=device, dtype=dtype),
torch.randn(2400, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim3(self, device, dtype):
ts = [
torch.randn(16, 21, device=device, dtype=dtype),
torch.randn(24, 32, device=device, dtype=dtype),
torch.randn(40, 53, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0), :t.size(1)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
@dtypes(torch.float, torch.float16, torch.double)
def test_to_padded_tensor_dim4(self, device, dtype):
ts = [
torch.randn(16, 21, 13, device=device, dtype=dtype),
torch.randn(24, 32, 14, device=device, dtype=dtype),
torch.randn(40, 53, 16, device=device, dtype=dtype),
]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
pad = 42
correct_output = []
for t in ts:
next_output = torch.ones_like(ts[2]) * pad
correct_output.append(next_output)
next_output[:t.size(0), :t.size(1), :t.size(2)].copy_(t)
correct_output = torch.stack(correct_output)
padded = torch.nested.to_padded_tensor(nt, pad)
self.assertEqual(padded, correct_output)
# TODO: test noncontiguous to_padded_tensor
# For now this tests the functionality of noncontiguous_to_padded_tensor
# and the error message of to_padded_tensor
# since to_padded_tensor does not support noncontiguous buffer yet
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_to_padded_tensor_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
# test noncontiguous_to_padded_tensor functionality
self.assertEqual(
torch.nested.to_padded_tensor(nt_contiguous, 0.0),
noncontiguous_to_padded_tensor(nt_noncontiguous))
# test to_padded_tensor error message
self.assertRaisesRegex(
RuntimeError,
r"for now to_padded_tensor only supports contiguous nested tensor",
lambda: torch.nested.to_padded_tensor(nt_noncontiguous, 0.0)
)
@skipMeta
def test_device_checks(self, device):
nt = torch.nested.nested_tensor([], device=device)
is_cuda = 'cuda' in str(device)
self.assertEqual(nt.is_cuda, is_cuda)
@dtypes(torch.float, torch.float16, torch.double)
def test_nested_tensor_indexing(self, device, dtype):
# edge case: empty nested tensor
nt0 = torch.nested.nested_tensor([])
self.assertRaises(IndexError, lambda: nt0[0])
# normal case
x0 = torch.randn((2, 5), device=device, dtype=dtype)
x1 = torch.randn((3, 4), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1])
# single index: only support integer in the batch dimension
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
self.assertRaises(IndexError, lambda: nt[2])
self.assertRaises(IndexError, lambda: nt[-3])
self.assertRaises(NotImplementedError, lambda: nt[:])
self.assertRaises(NotImplementedError, lambda: nt[...])
# tuple of indices: only support integer in the batch dimension
# + all possible indexing in the original tensor dimensions
self.assertEqual(nt[0, 0, 0], x0[0, 0])
self.assertEqual(nt[0, 1, :], x0[1, :])
self.assertEqual(nt[1, ...], x1)
self.assertRaises(IndexError, lambda: nt[1, 4, 2])
self.assertRaises(NotImplementedError, lambda: nt[:, 1, 1])
# test select on non-batch dimensions
self.assertEqual(nt.select(1, 0)[0], x0.select(0, 0))
self.assertEqual(nt.select(1, 0)[1], x1.select(0, 0))
self.assertRaises(IndexError, lambda: nt.select(1, 3))
self.assertEqual(nt.select(2, 0)[0], x0.select(1, 0))
self.assertEqual(nt.select(2, 0)[1], x1.select(1, 0))
self.assertRaises(IndexError, lambda: nt.select(2, 5))
# make sure indexing returns a view
nt[0].fill_(100.0)
answer = torch.tensor(100.0, device=device, dtype=dtype).expand((2, 5))
self.assertEqual(nt[0], answer)
nt[1, 1, :].fill_(200.0)
answer = torch.tensor(200.0, device=device, dtype=dtype).expand(4)
self.assertEqual(nt[1, 1, :], answer)
# Test that indexing works when requires_grad_(True)
# previously this was failing because the backward kernel for select.int uses .sizes()
nt = torch.nested.nested_tensor([x0, x1]).requires_grad_(True)
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
grad_x0 = torch.randn((2, 5), device=device, dtype=dtype)
nt[0].backward(grad_x0)
expected_grad = torch.nested.nested_tensor([grad_x0, torch.zeros((3, 4), device=device, dtype=dtype)])
self.assertEqual(nt.grad, expected_grad)
@parametrize("func", [subtest(torch.nn.functional.relu, name='relu'),
subtest(torch.nn.functional.relu_, name='relu_'),
subtest(torch.nn.functional.gelu, name='gelu'),
subtest(torch._C._nn.gelu_, name='gelu_'),
subtest(torch.tanh, name='tanh'),
subtest(torch.tanh_, name='tanh_'),
subtest(torch.neg, name='neg')])
def test_activations(self, device, func):
nt, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device=device, dtype=torch.float32)
nested_result = func(nt)
self.assertTrue(nested_result.is_nested)
for t, t_res in zip(nt.unbind(), nested_result.unbind()):
self.assertEqual(func(t), t_res)
self.assertRaisesRegex(
RuntimeError,
"NestedTensor must be contiguous to get buffer.",
lambda: func(nt_noncontiguous))
@dtypes(*floating_types_and_half())
def test_nested_tensor_chunk(self, device, dtype):
# Transformer use case
a = torch.randn(3, 3 * 4, device=device, dtype=dtype)
b = torch.randn(2, 3 * 4, device=device, dtype=dtype)
c = torch.randn(1, 3 * 4, device=device, dtype=dtype)
a_chunks = a.chunk(3, dim=-1)
b_chunks = b.chunk(3, dim=-1)
c_chunks = c.chunk(3, dim=-1)
a_nt = [a_chunks[0], b_chunks[0], c_chunks[0]]
b_nt = [a_chunks[1], b_chunks[1], c_chunks[1]]
c_nt = [a_chunks[2], b_chunks[2], c_chunks[2]]
nt = torch.nested.nested_tensor([a, b, c])
chunked = nt.chunk(3, dim=-1)
self.assertEqual(chunked[0], torch.nested.nested_tensor(a_nt))
self.assertEqual(chunked[1], torch.nested.nested_tensor(b_nt))
self.assertEqual(chunked[2], torch.nested.nested_tensor(c_nt))
for chunk in chunked:
self.assertFalse(chunk.is_contiguous())
# Failure chunking on ragged dimensions
self.assertRaisesRegex(
RuntimeError, "Chunk for nested tensors is currently only supported for the last dimension.",
lambda: torch.chunk(nt, 5, dim=1))
self.assertRaisesRegex(
RuntimeError, "Chunk for nested tensors is currently only supported for the last dimension.",
lambda: torch.chunk(nt, 5, dim=0))
# Failure on non-contiguous nt
_, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
self.assertRaisesRegex(
RuntimeError, "chunk expects `self` to be contiguous.", lambda: torch.chunk(nt_noncontiguous, 5, dim=-1))
# Failure when calling non divisible n_chunks
self.assertRaisesRegex(
RuntimeError, "Chunk for nested tensors is only supported for "
"nested tensors with trailing dimension divisible by chunks.",
lambda: torch.chunk(nt, 5, dim=-1))
# Failure when calling backward on a chunk
a = torch.randn(3, 3 * 4, device=device, dtype=dtype, requires_grad=True)
b = torch.randn(2, 3 * 4, device=device, dtype=dtype, requires_grad=True)
nt_grad = torch.nested.as_nested_tensor([a, b])
chunked = torch.chunk(nt_grad, 2, dim=-1)
self.assertRaisesRegex(RuntimeError, "derivative for aten::chunk is not implemented",
lambda: chunked[0].backward(chunked[0].clone()))
@dtypes(torch.float, torch.float16, torch.double)
@torch.inference_mode()
def test_nested_tensor_indexing_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
self.assertEqual(nt_contiguous.size(0), nt_noncontiguous.size(0))
n = nt_contiguous.size(0)
for i in range(n):
self.assertEqual(nt_contiguous[i], nt_noncontiguous[i])
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_add(self, device, dtype):
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor([t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
out = nt1 + nt2
self.assertEqual(ref, out)
@onlyCUDA
@dtypes(torch.float, torch.float16)
@torch.inference_mode()
@parametrize("embedding_dim", [8, 128, 256, 384])
def test_nested_tensor_dense_elementwise(self, device, dtype, embedding_dim):
batch_size = 32
seq_lens = torch.randint(low=0, high=10, size=(batch_size,))
ts = [torch.randn((seq_len, embedding_dim)) for seq_len in seq_lens]
nt = torch.nested.nested_tensor(ts, device=device, dtype=dtype)
t = torch.randn((batch_size, 1, embedding_dim), device=device, dtype=dtype)
ref_add = torch.nested.nested_tensor([t1 + t2 for (t1, t2) in zip(nt.unbind(), t.unbind())])
ref_mul = torch.nested.nested_tensor([t1 * t2 for (t1, t2) in zip(nt.unbind(), t.unbind())])
self.assertEqual(nt.add(t), ref_add)
self.assertEqual(nt.mul(t), ref_mul)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor([t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
out = nt1 * nt2
self.assertEqual(ref, out)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
out_number0 = nt1 * number
out_number1 = number * nt1
out_scalar0 = nt1 * scalar
out_scalar1 = scalar * nt1
self.assertEqual(out_number0, ref)
self.assertEqual(out_number1, ref)
self.assertEqual(out_scalar0, ref)
self.assertEqual(out_scalar1, ref)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul(vector)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul(nt1)
)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_div(self, device, dtype):
nt, nt2 = self.random_nt_pair(device, dtype, 4, (4, 4))
scale = 4.0
ref = torch.nested.nested_tensor([t / scale for t in nt.unbind()])
out = nt / 4.0
self.assertEqual(ref, out)
ref_transposed = ref.transpose(1, 2)
out = nt.transpose(1, 2) / 4.0
self.assertEqual(ref_transposed, out)
ref = torch.nested.nested_tensor([t / t2 for (t, t2) in zip(nt.unbind(), nt2.unbind())])
out = nt / nt2
self.assertEqual(ref, out)
out = nt.transpose(1, 2) / nt2.transpose(1, 2)
self.assertEqual(ref.transpose(1, 2), out)
nt_transpose_copy = torch.nested.nested_tensor([t.transpose(0, 1) for t in nt.unbind()])
self.assertRaisesRegex(
RuntimeError, "div requires strides to match when given NestedTensors",
lambda: nt_transpose_copy.transpose(1, 2) / nt2)
nt = torch.nested.nested_tensor([torch.randn(i, 4) for i in [3, 4, 5]], device=device, dtype=dtype)
nt_chunks = nt.chunk(2, -1)
self.assertRaisesRegex(
RuntimeError, "div requires offsets to match when given NestedTensors",
lambda: nt_chunks[0] / nt_chunks[1])
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_add_in_place(self, device, dtype):
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor([t1 + t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
nt1 += nt2
self.assertEqual(ref, nt1)
@dtypes(torch.float, torch.float16)
@skipMeta
@torch.inference_mode()
def test_nested_tensor_mul_in_place(self, device, dtype):
# nested tensor * nested tensor
(nt1, nt2) = self.random_nt_pair(device, dtype, 4, (4, 4))
ref = torch.nested.nested_tensor([t1 * t2 for (t1, t2) in zip(nt1.unbind(), nt2.unbind())])
nt1 *= nt2
self.assertEqual(ref, nt1)
# nested tensor * scalar
number = 10.0
scalar = torch.tensor(number).to(dtype).to(device)
ref = torch.nested.nested_tensor([t * number for t in nt1.unbind()])
out_number = nt1.clone()
out_number *= number
out_scalar = nt1.clone()
out_scalar *= scalar
self.assertEqual(out_number, ref)
self.assertEqual(out_scalar, ref)
self.assertRaisesRegex(
RuntimeError,
r"output with shape \[.*\] doesn't match the broadcast shape \[.*\]",
lambda: scalar.mul_(nt1)
)
# error case: numel == 1 but dim > 0
vector = torch.tensor([number]).to(dtype).to(device)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a nested self and non-nested other",
lambda: nt1.mul_(vector)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both self and other to be nested, but got a non-nested self and nested other",
lambda: vector.mul_(nt1)
)
@onlyCPU
@skipMeta
@dtypes(torch.float)
def test_nested_tensor_sum_dim(self, device, dtype):
params = ((2, (1, 1)), (4, (4, 4)), (10, (3, 5, 7)))
def test_sum(device, dtype, ntensors, max_sizes, dim, keepdim=True):
nt = random_nt(device, dtype, ntensors, max_sizes)
nt2 = nt.clone()
ub2 = nt2.unbind()
nt.requires_grad_(True)
[t.requires_grad_(True) for t in ub2]
nt_sum = nt.sum(dim=dim, keepdim=keepdim)
ub2_sum = [t.sum(-1, keepdim=keepdim) for t in ub2]
self.assertEqual(nt_sum, torch.nested.nested_tensor(ub2_sum))
# test backward
# generate gradient tensor that has the same size as the output
size = nt_sum._nested_tensor_size()
gt2 = []
for i in range(ntensors):
gt2.append(torch.randn(size[i].tolist(), device=device, dtype=dtype))
gt = torch.nested.nested_tensor(gt2).clone()
nt_sum.backward(gt)
for t2, g2 in zip(ub2_sum, gt2):
t2.backward(g2)
self.assertEqual(nt.grad, torch.nested.nested_tensor([t.grad for t in ub2]))
return
for ntensors, max_sizes in params:
test_sum(device, dtype, ntensors, max_sizes, len(max_sizes))
# Test error inputs
with self.assertRaisesRegex(RuntimeError, "NestedTensor can only be reduced across the last"):
torch.nested.nested_tensor([torch.tensor([3, 4, 5]), torch.tensor([1, 2])]).sum(0, keepdim=True)
with self.assertRaisesRegex(RuntimeError, "NestedTensor only allows reduction of a single"):
torch.nested.nested_tensor([torch.tensor([[3, 4, 5]]), torch.tensor([[1, 2]])]).sum([0, 1], keepdim=True)
with self.assertRaisesRegex(RuntimeError, "NestedTensor always requires keepdim=True for now."):
torch.nested.nested_tensor([torch.tensor([3, 4, 5]), torch.tensor([1, 2])]).sum(-1)
@dtypes(torch.float, torch.float16)
def test_contiguous(self, device, dtype):
# Since we don't have access to the buffer in python this is harder to show what
# we are testing for. When we call chunk on a consistent dim of a NT
# for chunk_size > 1 the resulting tensors are views of the original NT
# whose numels is now less than the size of the buffer. Clone was
# previously creating a new NT with a buffer that was the same size as the
# original.
nt_contiguous = torch.nested.nested_tensor([torch.randn(2, 20, device=device, dtype=dtype),
torch.randn(4, 20, device=device, dtype=dtype)])
# Split up the last dimension which has a consistent size of 20 into 5 chunks
chunks = nt_contiguous.chunk(5, dim=-1)
# Check chunks are noncontiguous, and become contiguous after calling contiguous()
for chunk in chunks:
self.assertFalse(chunk.is_contiguous())
self.assertTrue(chunk.contiguous().is_contiguous())
@dtypes(torch.float, torch.float16)
@skipMeta
def test_clone(self, device, dtype):
nt1 = random_nt(device, dtype, 4, (4, 4), (1, 1))
nt2 = nt1.clone()
# Verify the values match
self.assertEqual(nt1, nt2)
# Verify modifying nt2 doesn't affect nt1
nt2.mul_(nt1)
ub1 = nt1.unbind()
ub2 = nt2.unbind()
for i in range(len(ub1)):
self.assertNotEqual(ub1[i], ub2[i])
nt1.clone(memory_format=torch.preserve_format)
msg = "Nested tensor clone supports Preserve and Contiguous memory formats, called clone with memory format: ChannelsLast"
with self.assertRaisesRegex(RuntimeError, msg):
nt1.clone(memory_format=torch.channels_last)
# cannot test torch.float16 because: RuntimeError: "bernoulli_scalar_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_dropout(self, device, dtype):
# edge case: empty nested tensor
nt0 = torch.nested.nested_tensor([])
y = torch.nn.functional.dropout(nt0, 0.5)
self.assertEqual(nt0, y)
# normal nested tensor
ntensors = 4
nt = random_nt(device, dtype, ntensors, (4, 4))
# edge case: invalid dropout
self.assertRaises(ValueError, lambda: torch.nn.Dropout(-0.1))
self.assertRaises(ValueError, lambda: torch.nn.Dropout(1.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, -0.1))
self.assertRaises(ValueError, lambda: torch.nn.functional.dropout(nt, 1.1))
# edge case: no dropout
dropouter = torch.nn.Dropout(0.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 0.0)
self.assertEqual(nt, y0)
self.assertEqual(nt, y1)
# edge case: all dropout
dropouter = torch.nn.Dropout(1.0)
y0 = dropouter(nt)
y1 = torch.nn.functional.dropout(nt, 1.0)
nt0 = nt.clone()
for i in range(ntensors):
nt0[i].fill_(0.0)
self.assertEqual(nt0, y0)
self.assertEqual(nt0, y1)
# normal case: normal dropout
p = 0.2
y = torch.nn.functional.dropout(nt, p)
expect = nt.clone()
for i in range(ntensors):
actual_tensor = y[i].view(-1)
expect_tensor = expect[i].view(-1)
for j in range(actual_tensor.shape[0]):
if actual_tensor[j].item() == 0.0:
expect_tensor[j] = 0.0
else:
expect_tensor[j] /= 1.0 - p
self.assertEqual(y, expect)
with freeze_rng_state():
dropouter = torch.nn.Dropout(p)
y0 = dropouter(nt)
with freeze_rng_state():
y1 = torch.nn.functional.dropout(nt, p)
self.assertEqual(y0, y1)
@dtypes(torch.float, torch.double)
def test_dropout_noncontiguous(self, device, dtype):
ntensors = 4
nt0 = random_nt(device, dtype, ntensors, (4, 4))
nt1 = nt0.transpose(-1, -2)
p = 0.3
with freeze_rng_state():
dropouter = torch.nn.Dropout(p)
y0 = dropouter(nt0)
with freeze_rng_state():
y1 = torch.nn.functional.dropout(nt1, p).transpose(-1, -2)
self.assertEqual(y0, y1)
# cannot test torch.float16 because: RuntimeError: "softmax_kernel_impl" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_softmax(self, device, dtype):
# normal nested tensor
ntensors = 4
nt = random_nt(device, dtype, ntensors, (4, 4))
# error case: softmax across nested dimension
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, 0)
)
self.assertRaisesRegex(
RuntimeError,
"Cannot apply softmax across nested dimension 0",
lambda: torch.nn.functional.softmax(nt, -3)
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, 3))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt, -4))
# normal case: should equal to padding -inf
softmaxer = torch.nn.Softmax(1)
y0 = softmaxer(nt)
y1 = torch.nn.functional.softmax(nt, 1)
self.assertEqual(y0, y1)
pt = torch.nested.to_padded_tensor(nt, float("-inf"))
# if an entire slice is padded, then softmax will return 0.0 / 0.0 = nan
# however, physically speaking that should be 0.0
expect = torch.nn.functional.softmax(pt, 1).nan_to_num_(0.0)
self.assertEqual(torch.nested.to_padded_tensor(y0, 0.0), expect)
# edge case: empty nested tensor
nt0 = torch.nested.nested_tensor([])
y = torch.nn.functional.softmax(nt0, 1)
self.assertEqual(nt0, y)
# edge case: nesting scalars
nt1 = torch.nested.nested_tensor([torch.tensor(0.0), torch.tensor(1.0)])
self.assertRaises(RuntimeError, lambda: torch.nn.functional.softmax(nt1, 0))
self.assertRaises(IndexError, lambda: torch.nn.functional.softmax(nt1, 1))
@dtypes(torch.float, torch.double)
@torch.inference_mode()
def test_softmax_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
self.assertEqual(
torch.nn.functional.softmax(nt_contiguous, -1),
torch.nn.functional.softmax(nt_noncontiguous, -1))
def _test_bmm(self, device, dtype):
# error case: one is nested but the other is not
nt = torch.nested.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
t = torch.randn(4, device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a nested self and non-nested other",
lambda: nt.bmm(t)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a non-nested self and nested other",
lambda: t.bmm(nt)
)
# error case: not 3D tensors
nt0 = torch.nested.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
nt2 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt0.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"batch1 must be a 3D tensor",
lambda: nt1.bmm(nt2)
)
self.assertRaisesRegex(
RuntimeError,
"batch2 must be a 3D tensor",
lambda: nt2.bmm(nt0)
)
self.assertRaisesRegex(
RuntimeError,
"batch2 must be a 3D tensor",
lambda: nt2.bmm(nt1)
)
# error case: incompatible batch size
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((4, 6)),
torch.randn((4, 5)),
torch.randn((4, 7))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 2 but got: 3.",
lambda: nt0.bmm(nt1)
)
self.assertRaisesRegex(
RuntimeError,
"Expected size for the 1st dimension of batch2 tensor to be: 3 but got: 2.",
lambda: nt1.bmm(nt0)
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"0-th nested matrices in batch cannot be multiplied \(2x4 and 2x4\)",
lambda: nt0.bmm(nt0)
)
# normal nested tensor
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(torch.nested.to_padded_tensor(nt1, 0.0))
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
# test tensorcore path
nt0 = torch.nested.nested_tensor([torch.randn((2, 8)), torch.randn((3, 16))], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((8, 8)), torch.randn((16, 8))], device=device, dtype=dtype)
actual = torch.nested.to_padded_tensor(nt0.bmm(nt1), 0.0)
expect = torch.nested.to_padded_tensor(nt0, 0.0).bmm(torch.nested.to_padded_tensor(nt1, 0.0))
if dtype == torch.float16:
self.assertEqual(actual, expect, rtol=1e-3, atol=1e-3)
else:
self.assertEqual(actual, expect)
@onlyCUDA
@dtypes(torch.float, torch.double, torch.float16)
def test_bmm_cuda(self, device, dtype):
self._test_bmm(device, dtype)
@onlyCPU
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm_cpu(self, device, dtype):
self._test_bmm(device, dtype)
# cannot test torch.float16 because: RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_bmm_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair((6, 7), device, dtype)
self.assertEqual(
nt0_contiguous.transpose(-1, -2).bmm(nt1_contiguous),
nt0_noncontiguous.transpose(-1, -2).bmm(nt1_noncontiguous))
@dtypes(torch.float, torch.double)
def test_matmul_with_bmm_path(self, device, dtype):
def unbind_rebind_matmul(nt1, nt2):
t1s = nt1.unbind()
t2s = nt2.unbind()
out_ts = [t1.matmul(t2) for t1, t2 in zip(t1s, t2s)]
return torch.nested.nested_tensor(out_ts)
# [N, n_head, *, head_dim], [N, n_head, head_dim, *]
N = np.random.randint(2, 5)
n_heads = np.random.randint(2, 5)
head_dim = 3
t1s = []
t2s = []
for _ in range(N):
seq_len1 = np.random.randint(2, 5)
seq_len2 = np.random.randint(2, 5)
t1s.append(torch.randn(n_heads, seq_len1, head_dim))
t2s.append(torch.randn(n_heads, head_dim, seq_len2))
nt1 = torch.nested.nested_tensor(t1s, device=device, dtype=dtype)
nt2 = torch.nested.nested_tensor(t2s, device=device, dtype=dtype)
self.assertEqual(torch.matmul(nt1, nt2), unbind_rebind_matmul(nt1, nt2))
# test with noncontiguous
t3s = []
t4s = []
for _ in range(N):
seq_len = np.random.randint(2, 5)
t3s.append(torch.randn(seq_len, n_heads, head_dim))
t4s.append(torch.randn(seq_len, n_heads, head_dim))
nt3 = torch.nested.nested_tensor(t3s, device=device, dtype=dtype).transpose(1, 2)
nt4 = torch.nested.nested_tensor(t4s, device=device, dtype=dtype).transpose(1, 2).transpose(2, 3)
self.assertEqual(torch.matmul(nt3, nt4), unbind_rebind_matmul(nt3, nt4))
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul(self, device, dtype):
# error case: one is nested but the other is not
nt = torch.nested.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
t = torch.randn(4, device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a nested self and non-nested other",
lambda: torch.matmul(nt, t)
)
self.assertRaisesRegex(
RuntimeError,
"Expected both to be nested, but got a non-nested self and nested other",
lambda: torch.matmul(t, nt)
)
# error case: not 3+D tensors
nt0 = torch.nested.nested_tensor([], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn(2), torch.randn(3)], device=device, dtype=dtype)
nt2 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt0, nt2)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 1st input has rank: [0-9]+",
lambda: torch.matmul(nt1, nt2)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt0)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: For nested tensors, only inputs with >= 3 dims are currently supported. 2nd input has rank: [0-9]+",
lambda: torch.matmul(nt2, nt1)
)
# error case: incompatible batch size
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((4, 6)),
torch.randn((4, 5)),
torch.randn((4, 7))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt0, nt1)
)
self.assertRaisesRegex(
RuntimeError,
r"matmul: Expected size for the 1st dimension of 2nd input tensor to be: [0-9]+ but got: [0-9]+.",
lambda: torch.matmul(nt1, nt0)
)
# error case: incompatible (wrong) batch sizes that shouldn't even broadcast?
nt0 = torch.nested.nested_tensor([torch.randn((2, 2, 4)),
torch.randn((2, 3, 4))],
device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((3, 4, 6)),
torch.randn((3, 4, 5))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"matmul(): For nested tensors, batch dimensions must have the same sizes,",
lambda: torch.matmul(nt0, nt1)
)
# error case: incompatible batch sizes that should technically broadcast
nt0 = torch.nested.nested_tensor([torch.randn((2, 2, 4)),
torch.randn((1, 3, 4))],
device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((1, 4, 6)),
torch.randn((3, 4, 5))],
device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"matmul(): For nested tensors, batch dimensions must have the same sizes,",
lambda: torch.matmul(nt0, nt1)
)
# error case: underlying matrices cannot be multiplied
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 4))], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"matmul(): Nested tensors cannot be matrix multiplied",
lambda: torch.matmul(nt0, nt0)
)
# normal nested tensor: 3D
nt0 = torch.nested.nested_tensor([torch.randn((2, 4)), torch.randn((3, 7))], device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((4, 6)), torch.randn((7, 5))], device=device, dtype=dtype)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(torch.nested.to_padded_tensor(nt0, 0.0), torch.nested.to_padded_tensor(nt1, 0.0))
self.assertEqual(actual, expect)
# normal nested tensor: 4D (with testing for batch_size=1)
nt0 = torch.nested.nested_tensor([torch.randn((1, 2, 4)),
torch.randn((8, 3, 7))],
device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((1, 4, 6)),
torch.randn((8, 7, 5))],
device=device, dtype=dtype)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(torch.nested.to_padded_tensor(nt0, 0.0), torch.nested.to_padded_tensor(nt1, 0.0))
self.assertEqual(actual, expect)
# normal nested tensor: 5D
nt0 = torch.nested.nested_tensor([torch.randn((8, 9, 2, 4)),
torch.randn((8, 9, 3, 7))],
device=device, dtype=dtype)
nt1 = torch.nested.nested_tensor([torch.randn((8, 9, 4, 6)),
torch.randn((8, 9, 7, 5))],
device=device, dtype=dtype)
actual = torch.nested.to_padded_tensor(torch.matmul(nt0, nt1), 0.0)
expect = torch.matmul(torch.nested.to_padded_tensor(nt0, 0.0), torch.nested.to_padded_tensor(nt1, 0.0))
self.assertEqual(actual, expect)
# cannot test torch.float16 because: RuntimeError: "bmm" not implemented for 'Half'
@dtypes(torch.float, torch.double)
def test_matmul_noncontiguous(self, device, dtype):
nt0_contiguous, nt0_noncontiguous = random_nt_noncontiguous_pair((2, 3), device, dtype)
nt1_contiguous, nt1_noncontiguous = random_nt_noncontiguous_pair((6, 7), device, dtype)
self.assertEqual(
torch.matmul(nt0_contiguous.transpose(-1, -2), nt1_contiguous),
torch.matmul(nt0_noncontiguous.transpose(-1, -2), nt1_noncontiguous))
@dtypes(torch.float, torch.double)
def test_linear(self, device, dtype):
a = torch.randn(1, 2, device=device, dtype=dtype)
b = torch.randn(2, 2, device=device, dtype=dtype)
c = torch.randn(3, 2, device=device, dtype=dtype)
nt = torch.nested.nested_tensor([a, b, c])
weight = torch.randn(2, 2, device=device, dtype=dtype)
bias = torch.randn(2, device=device, dtype=dtype)
# success case
torch.functional.F.linear(nt, weight, bias)
# invalid nested tensor dimension
msg = r'Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 2. Dense tensor dim: 2'
nt1 = torch.nested.nested_tensor([torch.randn(1, device=device, dtype=dtype),
torch.randn(2, device=device, dtype=dtype)])
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt1, weight, bias)
# invalid weight shape
msg = r'Linear requires nested_tensor.dim == 3 and dense_matrix.dim == 2. Nested tensor dim: 3. Dense tensor dim: 3'
weight1 = torch.randn(2, 2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight1, bias)
# inconsistent last dim of nested tensor
msg = r"Expected all tensors in nested tensor to have the same trailing dimension, instead last dimension equals:"
nt2 = torch.nested.nested_tensor([torch.randn(1, 2, device=device, dtype=dtype),
torch.randn(2, 3, device=device, dtype=dtype)])
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt2, weight, bias)
# Mismatch of nested tensor last dim and weight dimension
weight2 = torch.randn(2, 4, device=device, dtype=dtype)
msg = r"Shape mismatch for NestedTensor Linear: Expected input's \(a nested tensor\) 'last_dim'" \
r" to equal 'weight.size\(1\), but got: last_dim = 2, and weight.size\(1\) = 4"
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, weight2, bias)
# Nested tensor input and nested weight
nt_weight = nt.clone()
msg = r"Linear does not support nested weight when input is a nested tensor."
with self.assertRaisesRegex(RuntimeError, msg):
torch.functional.F.linear(nt, nt_weight, bias)
# TODO: test noncontiguous linear
# For now this tests the error message of linear
# since linear does not support noncontiguous buffer yet
@dtypes(torch.float, torch.double)
def test_linear_noncontiguous(self, device, dtype):
nt_contiguous, nt_noncontiguous = random_nt_noncontiguous_pair((2, 3, 6, 7), device, dtype)
weight = torch.randn((8, 5), device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
r"for now linear only supports contiguous nested tensor",
lambda: torch.nn.functional.linear(nt_noncontiguous, weight)
)
@dtypes(torch.float, torch.float16, torch.double)
def test_transpose(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: transpose nested dimension
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(0, 1)
)
self.assertRaisesRegex(
RuntimeError,
"Nested tensor dimension 0 cannot be transposed",
lambda: nt.transpose(1, -3)
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: nt.transpose(1, 3))
self.assertRaises(IndexError, lambda: nt.transpose(-4, -1))
# normal case
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_squeeze_unsqueeze(self, device, dtype):
a = torch.arange(6).reshape(2, 3)
b = torch.arange(15).reshape(5, 3)
nt = torch.nested.nested_tensor([a, b], device=device, dtype=dtype)
# error case: squeeze no dimension
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeeze without the dim argument",
lambda: nt.squeeze()
)
# error case: squeeze nested dimension
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeezing dimension 0",
lambda: nt.squeeze(0)
)
# error case: dimension out of range
self.assertRaises(IndexError, lambda: nt.squeeze(3))
# error case: squeeze nested tensor of singleton tensors
c = torch.ones(1)
nt_singleton = torch.nested.nested_tensor([c, c], device=device, dtype=dtype)
self.assertRaisesRegex(
RuntimeError,
"For nested tensors, squeezing a nested tensor of singleton",
lambda: nt_singleton.squeeze(1)
)
# squeezing a dim which does not have size 1 should be a no-op
nt2 = nt.squeeze(-1)
self.assertEqual(nt, nt2)
# test cases that should work
nt_sizes = nt._nested_tensor_size()
nt_strides = nt._nested_tensor_strides()
for i in range(-2, 4):
if (i == 0):
# cannot unsqueeze batch dim
continue
nt_unsqueezed = nt.unsqueeze(i)
# negative dim will correspond to unsqueeze() applied at dim = dim + nt.dim() + 1
wrapped_i = i + nt.dim() + 1 if i < 0 else i
            # col_index into the nt size tensor requires subtracting 1 to ignore the batch dim
size_idx = wrapped_i - 1
self.assertEqual(nt_unsqueezed._nested_tensor_size()[:, size_idx], torch.ones(2, dtype=torch.long))
unsqueezed_stride = nt_unsqueezed._nested_tensor_strides()[:, size_idx]
if (i == nt.ndim or i == -1):
self.assertEqual(unsqueezed_stride, torch.ones(2, dtype=torch.long))
else:
stride_col_after = nt_strides[:, size_idx]
size_col_after = nt_sizes[:, size_idx]
self.assertEqual(unsqueezed_stride, stride_col_after * size_col_after)
nt_squeezed = nt_unsqueezed.squeeze(i)
self.assertEqual(nt_squeezed, nt)
self.assertEqual(nt_squeezed._nested_tensor_size(), nt_sizes)
self.assertEqual(nt_squeezed._nested_tensor_strides(), nt_strides)
@dtypes(torch.float, torch.float16, torch.double)
def test_transpose_inference_mode_interaction(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# Construct in default mode and transpose while in inference mode
with torch.inference_mode():
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
# Construct and transpose while in inference mode
with torch.inference_mode():
nt = random_nt(device, dtype, 4, (4, 4))
ntT = nt.transpose(-1, -2)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.transpose(-1, -2)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_view(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: empty shape
self.assertRaisesRegex(
RuntimeError,
r"shape '\[\]' is invalid for a nested tensor",
lambda: nt.view(())
)
# error case: empty nested tensor
nt_empty = torch.nested.nested_tensor([])
self.assertRaisesRegex(
RuntimeError,
"empty nested tensor cannot be reshaped",
lambda: nt_empty.view(-1)
)
# error case: -1 for batch size
self.assertRaisesRegex(
RuntimeError,
r"view: For now nested view cannot change or infer the implicit batch dimension",
lambda: nt.view(-1, 2, 3)
)
self.assertRaisesRegex(
RuntimeError,
r"shape '\[.*\]' is invalid for input of size [0-9]+",
lambda: nt.view(4, 2, 3)
)
# normal case
x0 = torch.randn((2, 20), device=device, dtype=dtype)
x1 = torch.randn((3, 20), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1])
pt = torch.nested.to_padded_tensor(nt, 0.0)
# error case, trying to reshape batch dim to a legit shape
self.assertRaisesRegex(
RuntimeError,
r"For now nested view cannot change or infer the implicit batch dimension",
lambda: nt.transpose(-1, -2).view(40, -1)
)
# inherit only the ragged dimension
# (2, 20) -> (2, 5, 4)
# (3, 20) -> (3, 5, 4)
nt1 = nt.view(2, -1, 5, 4)
# (2, 3, 20) -> (2, 3, 5, 4) -> (2, 4, 5, 4)
pt1 = pt.view(2, -1, 5, 4)
self.assertEqual(noncontiguous_to_padded_tensor(nt1), pt1)
# more than one -1 (even for "old" dims), should fail
        # this attempts to do (2, (2, 3), 5, 4) -> (2, (2, 3), 5, 2, 2)
# but we ban "inherit old behavior" for >1 dimension
self.assertRaisesRegex(
RuntimeError,
r"only one dimension can be inferred",
lambda: nt1.view(2, -1, -1, 2, 2)
)
@dtypes(torch.float, torch.float16, torch.double)
def test_view_inference_mode_interaction(self, device, dtype):
# Construct in default mode and view while in inference mode
nt = torch.nested.nested_tensor([torch.randn((2, 20)), torch.randn((3, 20))], device=device, dtype=dtype)
with torch.inference_mode():
ntT = nt.view(2, -1, 4, 5)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.view(2, -1, 4, 5)
self.assertEqual(ptT, ptT_from_ntT)
# Construct and view while in inference mode
with torch.inference_mode():
nt = torch.nested.nested_tensor([torch.randn((2, 20)), torch.randn((3, 20))], device=device, dtype=dtype)
ntT = nt.view(2, -1, 4, 5)
ptT_from_ntT = noncontiguous_to_padded_tensor(ntT)
pt = torch.nested.to_padded_tensor(nt, 0.0)
ptT = pt.view(2, -1, 4, 5)
self.assertEqual(ptT, ptT_from_ntT)
@dtypes(torch.float, torch.float16, torch.double)
def test_reshape(self, device, dtype):
nt = random_nt(device, dtype, 4, (4, 4))
# error case: empty shape
self.assertRaisesRegex(
RuntimeError,
r"shape '\[\]' is invalid for a nested tensor",
lambda: nt.reshape(())
)
# error case: empty nested tensor
nt_empty = torch.nested.nested_tensor([])
self.assertRaisesRegex(
RuntimeError,
"empty nested tensor cannot be reshaped",
lambda: nt_empty.reshape(-1)
)
# error case: -1 for batch size
self.assertRaisesRegex(
RuntimeError,
r"reshape: For now nested reshape cannot change or infer the implicit batch dimension",
lambda: nt.reshape(-1, 2, 3)
)
self.assertRaisesRegex(
RuntimeError,
r"shape '\[.*\]' is invalid for input of size [0-9]+",
lambda: nt.reshape(4, 2, 3)
)
# normal case
x0 = torch.randn((2, 20), device=device, dtype=dtype)
x1 = torch.randn((3, 20), device=device, dtype=dtype)
nt = torch.nested.nested_tensor([x0, x1]) # (2, (2, 3), 20)
pt = torch.nested.to_padded_tensor(nt, 0.0)
# error case, trying to reshape batch dim to a legit shape
self.assertRaisesRegex(
RuntimeError,
r"reshape: For now nested reshape cannot change or infer the implicit batch dimension",
lambda: nt.transpose(-1, -2).reshape(40, -1)
)
# inherit only the ragged dimension
# (2, 20) -> (2, 5, 4)
# (3, 20) -> (3, 5, 4)
nt1 = nt.reshape(2, -1, 5, 4)
# (2, 3, 20) -> (2, 3, 5, 4) -> (2, 4, 5, 4)
pt1 = pt.reshape(2, -1, 5, 4)
self.assertEqual(noncontiguous_to_padded_tensor(nt1), pt1)
# more than one -1 (even for "old" dims), should fail
        # this attempts to do (2, (2, 3), 5, 4) -> (2, (2, 3), 5, 2, 2)
# but we ban "inherit old behavior" for >1 dimension
self.assertRaisesRegex(
RuntimeError,
r"only one dimension can be inferred",
lambda: nt1.reshape(2, -1, -1, 2, 2)
)
@parametrize("input_dim", [3, 4])
def test_scaled_dot_product_attention(self, device, input_dim):
def rand_tensor(*shape):
return torch.randn(shape, device=device)
E = 8
if input_dim == 3:
# Shape: (N, L, E); ragged L
query = torch.nested.nested_tensor([rand_tensor(2, E), rand_tensor(3, E), rand_tensor(4, E)])
# Shape: (N, S, E); ragged S
key = torch.nested.nested_tensor([rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)])
value = torch.nested.nested_tensor([rand_tensor(3, E), rand_tensor(4, E), rand_tensor(5, E)])
elif input_dim == 4:
            # In the 4D case, L and S are ragged
# Shape: (N, N', L, E); ragged N' and L
query = torch.nested.nested_tensor([rand_tensor(2, 2, E), rand_tensor(3, 3, E), rand_tensor(4, 4, E)])
# Shape: (N, N', S, E); ragged N' and S
key = torch.nested.nested_tensor([rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)])
value = torch.nested.nested_tensor([rand_tensor(2, 3, E), rand_tensor(3, 4, E), rand_tensor(4, 5, E)])
else:
self.fail(f"Invalid input_dim {input_dim} encountered in SDP test")
def rand_mask(size):
return torch.randint(0, 2, size=size, dtype=torch.bool, device=device)
# Shape: (N, L, S); ragged L and S matching above
attn_mask = torch.nested.nested_tensor([rand_mask((2, 3)), rand_mask((3, 4)), rand_mask((4, 5))])
dropout_p = 0.0 # no dropout for reproducibility
# Success case: no attn_mask set and is_causal=False.
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, is_causal=False, dropout_p=dropout_p)
expected_outputs = []
for q, k, v in zip(query.unbind(), key.unbind(), value.unbind()):
output = torch.nn.functional.scaled_dot_product_attention(
q.unsqueeze(0), k.unsqueeze(0), v.unsqueeze(0), attn_mask=None, dropout_p=dropout_p)
expected_outputs.append(output.squeeze(0))
expected_output_nested = torch.nested.nested_tensor(expected_outputs)
self.assertEqual(actual, expected_output_nested)
# Error case: explicit attn_mask set.
with self.assertRaisesRegex(RuntimeError, "not supported when an explicit attn_mask is set"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=attn_mask, dropout_p=dropout_p)
# Error case: is_causal=True.
with self.assertRaisesRegex(RuntimeError, "not supported when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, dropout_p=dropout_p, is_causal=True)
@dtypes(torch.float, torch.float16, torch.double)
def test_empty_like(self, device, dtype):
ntensors = 4
nt = random_nt(device, dtype, ntensors, (4, 4))
# Create empty on same device as original nested tensor
nt_empty = torch.empty_like(nt)
assert nt.is_same_size(nt_empty)
self.assertEqual(nt.dtype, nt_empty.dtype)
self.assertEqual(nt.device, nt_empty.device)
self.assertEqual(nt.layout, nt_empty.layout)
if torch.cuda.is_available():
if device == "cpu":
nt_cuda = torch.empty_like(nt, device='cuda')
self.assertEqual(torch.device("cuda").type, nt_cuda.device.type)
else:
nt_cpu = torch.empty_like(nt, device='cpu')
self.assertEqual(torch.device("cpu").type, nt_cpu.device.type)
# Check changing dtype of empty_like nested tensor output
dtype_set = {torch.float, torch.float16, torch.double}
for other_dtype in dtype_set - {dtype}:
nt_empty_other_dtype = torch.empty_like(nt, dtype=other_dtype)
self.assertEqual(nt.dtype, dtype)
self.assertEqual(nt_empty_other_dtype.dtype, other_dtype)
self.assertEqual(nt.device, nt_empty.device)
self.assertEqual(nt.layout, nt_empty.layout)
# Create tensor for autograd
nt_empty_req_grad = torch.empty_like(nt, requires_grad=True)
self.assertEqual(nt_empty_req_grad.requires_grad, True)
# Test noncontiguous tensor fails to copy
nt_cont, nt_noncont = random_nt_noncontiguous_pair((2, 3, 6, 7))
nt_empty = torch.empty_like(nt_cont)
assert nt_cont.is_same_size(nt_empty)
with self.assertRaisesRegex(RuntimeError, "empty_like only supports contiguous memory format for Nested Tensors"):
nt_empty = torch.empty_like(nt_noncont)
class TestNestedTensorAutograd(TestCase):
    # Note [Gradcheck args check_batched_grad=False]: the common_utils testing version of gradcheck
    # includes the default parameters used for testing ops with gradcheck. However, nested tensor
    # does not support the stack op, so we turn it off for these tests.
def _create_leaf_nested_tensor_from_list(self, tensor_device, requires_grad=False):
return torch.nested.nested_tensor([torch.randn(1, 2,),
torch.randn(7, 8)], requires_grad=requires_grad, device=tensor_device)
def _create_nested_tensor_from_list(self, tensor_device, requires_grad=False):
return torch.nested.as_nested_tensor([torch.randn(1, 2, requires_grad=requires_grad),
torch.randn(7, 8, requires_grad=requires_grad)], device=tensor_device)
def _create_nested_tensor_from_mask(self, tensor_device, requires_grad=False):
data = torch.randn(2, 3, 4, requires_grad=requires_grad, device=tensor_device)
mask = torch.ones_like(data[:, :, 0]).bool()
return torch._nested_tensor_from_mask(data, mask)
def test_as_nested_tensor_propagates_gradients(self, device):
a = torch.arange(3, dtype=torch.float, device=device)
b = torch.arange(5, dtype=torch.float, device=device)
nt = torch.nested.as_nested_tensor([a, b])
# tensors with requires_grad=False are leaves
self.assertTrue(nt.is_leaf)
self.assertTrue(not nt.requires_grad)
a = torch.arange(3, dtype=torch.float, requires_grad=True, device=device)
b = torch.arange(5, dtype=torch.float, requires_grad=True, device=device)
nt2 = torch.nested.as_nested_tensor([a, b])
fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)], device=device)
nt2.backward(fake_grad)
self.assertEqual(a.grad, fake_grad[0])
self.assertEqual(b.grad, fake_grad[1])
def test_nested_tensor_generates_leaf(self, device):
a = torch.arange(3, dtype=torch.float, requires_grad=True, device=device)
b = torch.arange(5, dtype=torch.float, requires_grad=True, device=device)
nt = torch.nested.nested_tensor([a, b], requires_grad=False)
self.assertTrue(nt.is_leaf)
self.assertTrue(not nt.requires_grad)
nt2 = torch.nested.nested_tensor([a, b], requires_grad=True)
self.assertTrue(nt2.is_leaf)
self.assertTrue(nt2.requires_grad)
fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)], device=device)
nt2.backward(fake_grad)
self.assertEqual(nt2.grad, fake_grad)
self.assertEqual(a.grad, None)
self.assertEqual(b.grad, None)
def test_set_requires_grad_from_list(self, device):
nt = self._create_nested_tensor_from_list(device)
nt.requires_grad_()
assert nt.requires_grad
def test_set_requires_grad_from_mask(self, device):
nt = self._create_nested_tensor_from_mask(device)
nt.requires_grad_()
assert nt.requires_grad
def test_backward_for_add_op(self, device):
nt_1 = self._create_nested_tensor_from_mask(device)
nt_2 = self._create_nested_tensor_from_mask(device)
nt_1.requires_grad_()
c = nt_1 + nt_2
assert nt_1.requires_grad
assert c.requires_grad
grad_output = self._create_nested_tensor_from_mask(device)
c.backward(grad_output)
# Grad check doesn't work with nested yet.
# d/dnt_1 (nt + nt_1) = 1*grad_output
self.assertEqual(nt_1.grad, grad_output)
# Test Factory Functions
def test_nested_tensor_to_padded_tensor(self, device):
for padding_val in [0, 1]:
nt = self._create_leaf_nested_tensor_from_list(tensor_device=device, requires_grad=True)
out = torch.nested.to_padded_tensor(nt, padding_val)
grad_output = torch.ones(out.shape, device=device)
out.backward(grad_output)
self.assertEqual(nt.grad, torch.nested.nested_tensor([torch.ones(1, 2), torch.ones(7, 8)], device=device))
def test_nested_tensor_from_mask_and_to_padded(self, device):
N, L, D = 2, 4, 4
mask = torch.ones(N, L, device=device)
for i in range(1, N):
end = torch.randint(1, L - 1, (1,), device=device)
mask[i, end:] = 0
mask[0, :] = 1
mask = mask.bool()
data = torch.randn(N, L, D, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(inpt):
nt = torch._nested_tensor_from_mask(inpt, mask)
# This implicitly tests to_padded_tensor grads
return torch.nested.to_padded_tensor(nt, 0)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_padded(self, device):
nested_size = torch.tensor([[1, 2], [2, 2]])
padded_tensor = torch.randn(2, 2, 2, dtype=torch.float64, device=device)
padded_tensor[0, 1, :] = 0
padded_tensor.requires_grad_()
def grad_test_func(tensor, nested_size):
nt = torch._nested_from_padded(tensor, nested_size, fuse_transform_0213=False)
# This implicitly tests to_padded_tensor grads
return torch.nested.to_padded_tensor(nt, 0)
data = (padded_tensor, nested_size)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_padded_fused(self, device):
nested_size = torch.tensor([[1, 8], [2, 8]])
padded_tensor = torch.randn(2, 2, 2, 4, dtype=torch.float64, device=device)
padded_tensor[0, 1, :] = 0
padded_tensor.requires_grad_()
def grad_test_func(tensor, nested_size):
nt = torch._nested_from_padded(tensor, nested_size, fuse_transform_0213=True)
# This implicitly tests to_padded_tensor grads
return torch.nested.to_padded_tensor(nt, 0)
data = (padded_tensor, nested_size)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_from_list(self, device):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(10, 2, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
c = torch.nested.as_nested_tensor([a, b, c])
            # This implicitly tests to_padded_tensor grads
return torch.nested.to_padded_tensor(c, 0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_dropout_backward(self):
nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))], requires_grad=True)
p = 0.2
y = torch.nn.functional.dropout(nt, p)
y.backward(nt.clone().detach())
self.assertEqual(nt.grad, y)
def test_nested_tensor_bmm_gradcheck(self, device):
a = torch.randn(2, 6, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(3, 6, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(6, 4, requires_grad=True, dtype=torch.float64, device=device)
d = torch.randn(6, 5, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c, d):
nt0 = torch.nested.as_nested_tensor([a, b])
nt1 = torch.nested.as_nested_tensor([c, d])
result = nt0.bmm(nt1)
return torch.nested.to_padded_tensor(result, 0.0)
data = (a, b, c, d)
assert torch.autograd.gradcheck(grad_test_func, inputs=data)
def test_nested_tensor_bmm_backward(self, device):
nt0 = torch.nested.nested_tensor([torch.randn((2, 6)), torch.randn((3, 6))], requires_grad=True, device=device)
nt1 = torch.nested.nested_tensor([torch.randn((6, 4)), torch.randn((6, 5))], requires_grad=True, device=device)
with torch.no_grad():
pt0 = torch.nested.to_padded_tensor(nt0, 0.0).requires_grad_(True)
pt1 = torch.nested.to_padded_tensor(nt1, 0.0).requires_grad_(True)
ynt = nt0.bmm(nt1)
ypt = pt0.bmm(pt1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt0.grad, 0.0), pt0.grad)
self.assertEqual(torch.nested.to_padded_tensor(nt1.grad, 0.0), pt1.grad)
def test_nested_tensor_matmul_gradcheck(self, device):
a = torch.randn(2, 6, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(3, 6, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(6, 4, requires_grad=True, dtype=torch.float64, device=device)
d = torch.randn(6, 5, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c, d):
nt0 = torch.nested.as_nested_tensor([a, b])
nt1 = torch.nested.as_nested_tensor([c, d])
result = torch.matmul(nt0, nt1)
return torch.nested.to_padded_tensor(result, 0.0)
data = (a, b, c, d)
assert torch.autograd.gradcheck(grad_test_func, inputs=data)
def test_nested_tensor_matmul_backward(self, device):
nt0 = torch.nested.nested_tensor([torch.randn((7, 2, 6)), torch.randn((7, 3, 6))], requires_grad=True, device=device)
nt1 = torch.nested.nested_tensor([torch.randn((7, 6, 4)), torch.randn((7, 6, 5))], requires_grad=True, device=device)
with torch.no_grad():
pt0 = torch.nested.to_padded_tensor(nt0, 0.0).requires_grad_(True)
pt1 = torch.nested.to_padded_tensor(nt1, 0.0).requires_grad_(True)
ynt = torch.matmul(nt0, nt1)
ypt = torch.matmul(pt0, pt1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt0.grad, 0.0), pt0.grad)
self.assertEqual(torch.nested.to_padded_tensor(nt1.grad, 0.0), pt1.grad)
def test_nested_tensor_transpose_gradcheck(self, device):
a = torch.randn(2, 5, requires_grad=True, device=device)
b = torch.randn(3, 4, requires_grad=True, device=device)
def grad_test_func(a, b):
nt = torch.nested.as_nested_tensor([a, b])
result = nt.transpose(-2, -1).transpose(-2, -1)
return torch.nested.to_padded_tensor(result, 0.0)
data = (a, b)
assert torch.autograd.gradcheck(grad_test_func, inputs=data, eps=1e-3)
def test_nested_tensor_transpose_backward(self, device):
nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))], requires_grad=True, device=device)
with torch.no_grad():
pt = torch.nested.to_padded_tensor(nt, 0.0).requires_grad_(True)
ynt = nt.transpose(-2, -1)
ypt = pt.transpose(-2, -1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt.grad, 0.0), pt.grad)
def test_nested_tensor_reshape_gradcheck(self, device):
a = torch.randn(2, 6, requires_grad=True, device=device)
b = torch.randn(3, 6, requires_grad=True, device=device)
def grad_test_func(a, b):
nt = torch.nested.as_nested_tensor([a, b])
result = nt.reshape(2, -1, 2, 3)
return torch.nested.to_padded_tensor(result, 0.0)
data = (a, b)
assert torch.autograd.gradcheck(grad_test_func, inputs=data, eps=1e-3)
def test_nested_tensor_reshape_backward(self):
nt = torch.nested.nested_tensor([torch.randn((2, 6)), torch.randn((3, 6))], requires_grad=True)
with torch.no_grad():
pt = torch.nested.to_padded_tensor(nt, 0.0).requires_grad_(True)
ynt = nt.reshape(2, -1, 2, 3)
ypt = pt.reshape(2, -1, 2, 3)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt.grad, 0.0), pt.grad)
def test_nested_tensor_squeeze_backward(self, device):
nt = torch.nested.nested_tensor([torch.randn((2, 6, 1)), torch.randn((3, 6, 1))], requires_grad=True, device=device)
with torch.no_grad():
pt = torch.nested.to_padded_tensor(nt, 0.0).requires_grad_(True)
ynt = nt.squeeze(-1)
ypt = pt.squeeze(-1)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt.grad, 0.0), pt.grad)
def test_nested_tensor_squeeze_gradcheck(self, device):
a = torch.randn((2, 6, 1), dtype=torch.float64, requires_grad=True, device=device)
b = torch.randn((3, 6, 1), dtype=torch.float64, requires_grad=True, device=device)
def grad_test_func(a, b):
nt = torch.nested.as_nested_tensor([a, b])
result = nt.squeeze(-1)
return torch.nested.to_padded_tensor(result, 0.0)
assert torch.autograd.gradcheck(grad_test_func, inputs=(a, b), eps=1e-3)
def test_nested_tensor_unsqueeze_backward(self, device):
nt = torch.nested.nested_tensor([torch.randn((2, 6)), torch.randn((3, 6))], requires_grad=True, device=device)
with torch.no_grad():
pt = torch.nested.to_padded_tensor(nt, 0.0).requires_grad_(True)
ynt = nt.unsqueeze(2)
ypt = pt.unsqueeze(2)
ynt.backward(ynt.clone())
ypt.backward(ypt.clone())
self.assertEqual(torch.nested.to_padded_tensor(nt.grad, 0.0), pt.grad)
def test_nested_tensor_unsqueeze_gradcheck(self, device):
a = torch.randn((2, 6), dtype=torch.float64, requires_grad=True, device=device)
b = torch.randn((3, 6), dtype=torch.float64, requires_grad=True, device=device)
def grad_test_func(a, b):
nt = torch.nested.as_nested_tensor([a, b])
result = nt.unsqueeze(-1)
return torch.nested.to_padded_tensor(result, 0.0)
assert torch.autograd.gradcheck(grad_test_func, inputs=(a, b), eps=1e-3)
def test_nested_tensor_linear(self, device):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, requires_grad=True, dtype=torch.float64, device=device)
weight = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
bias = torch.randn(2, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c, weight, bias=None):
nt = torch.nested.as_nested_tensor([a, b, c])
# This implicitly tests to_padded_tensor grads
d = torch.functional.F.linear(nt, weight, bias)
return torch.nested.to_padded_tensor(d, 0)
data = (a, b, c, weight, bias)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
# Test linear with no bias added
data = (a, b, c, weight)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_linear_plus_transpose(self, device):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, requires_grad=True, dtype=torch.float64, device=device)
weight = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
bias = torch.randn(2, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c, weight, bias=None):
nt = torch.nested.as_nested_tensor([a, b, c])
# This implicitly tests to_padded_tensor grads
d = torch.functional.F.linear(nt, weight, bias)
d = d.transpose(-1, -2).contiguous()
return torch.nested.to_padded_tensor(d, 0)
data = (a, b, c, weight, bias)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
# Test linear with no bias added
data = (a, b, c, weight)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_softmax(self, device):
a = torch.randn(1, 2, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c, dim):
nt = torch.nested.as_nested_tensor([a, b, c])
# This implicitly tests to_padded_tensor grads
d = torch.functional.F.softmax(nt, dim=dim)
return torch.nested.to_padded_tensor(d, 0)
# softmax over last dim
data = (a, b, c, -1)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_nested_tensor_linear_backward(self, device):
a = torch.randn(1, 2, requires_grad=False, device=device)
b = torch.randn(2, 2, requires_grad=False, device=device)
c = torch.randn(3, 2, requires_grad=False, device=device)
weight = torch.randn(2, 2, requires_grad=True, device=device)
bias = torch.randn(2, requires_grad=True, device=device)
nt = torch.nested.as_nested_tensor([a, b, c], device=device)
out = torch.functional.F.linear(nt, weight, bias)
out.backward(out.clone())
assert weight.grad is not None
assert bias.grad is not None
assert a.grad is None
assert b.grad is None
assert c.grad is None
def test_values_grad_with_broadcast(self, device):
a = torch.randn(1, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
buffer = nt.values()
return buffer.sum()
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_to_buffer_series_ops_grad_with_broadcast(self, device):
a = torch.randn(1, 1, 2, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(1, 1, 2, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(1, 1, 2, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
buffer = nt.values()
buffer = buffer * 2
return buffer.exp()
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_unbind_flow_through(self, device):
a = torch.randn(1, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
ntT = nt.transpose(-1, -2)
unbound = ntT.unbind()
d = unbound[0]
d = torch.pow(d, 2)
return d
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_indexing_backward(self, device):
x0 = torch.randn((2, 5))
x1 = torch.randn((3, 4))
nt = torch.nested.nested_tensor([x0, x1], device=device, requires_grad=True)
self.assertEqual(nt[0], x0)
self.assertEqual(nt[-1], x1)
grad_x0 = torch.randn((2, 5), device=device)
nt[0].backward(grad_x0)
expected_grad = torch.nested.nested_tensor([grad_x0, torch.zeros((3, 4), device=device)])
self.assertEqual(nt.grad, expected_grad)
def test_gelu_backward(self, device):
a = torch.randn(1, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
nt_gelu = torch.nn.functional.gelu(nt)
return torch.nested.to_padded_tensor(nt_gelu, 0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
def test_relu_backward(self, device):
a = torch.randn(1, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, 4, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
nt_relu = torch.nn.functional.relu(nt)
return torch.nested.to_padded_tensor(nt_relu, 0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
@parametrize("size", [1024, 1023, 513, 512, 256, 128, 32, 4, 2])
def test_layer_norm_backward(self, device, size):
a = torch.randn(1, 2, size, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(2, 2, size, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(3, 2, size, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
layer_norm = torch.nn.LayerNorm(nt.size(-1), device=device, dtype=torch.float64)
nt_layer_norm = layer_norm(nt)
return torch.nested.to_padded_tensor(nt_layer_norm, 0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
# Could either mark slow or reduce size
@parametrize("size", [128, 32, 4, 2])
def test_layer_norm_backward_5d(self, device, size):
a = torch.randn(4, size, size, 4, requires_grad=True, dtype=torch.float64, device=device)
b = torch.randn(7, size, size, 4, requires_grad=True, dtype=torch.float64, device=device)
c = torch.randn(10, size, size, 4, requires_grad=True, dtype=torch.float64, device=device)
def grad_test_func(a, b, c):
nt = torch.nested.as_nested_tensor([a, b, c])
layer_norm = torch.nn.LayerNorm((size, size, nt.size(-1)), device=device, dtype=torch.float64)
nt_layer_norm = layer_norm(nt)
return torch.nested.to_padded_tensor(nt_layer_norm, 0)
data = (a, b, c)
assert gradcheck(grad_test_func, inputs=data, check_batched_grad=False)
instantiate_parametrized_tests(TestNestedTensor)
instantiate_device_type_tests(TestNestedTensorDeviceType, globals())
instantiate_device_type_tests(TestNestedTensorAutograd, globals())
if __name__ == '__main__':
run_tests()
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
5c7688a54ed8add5b8c146f0f4fa3bb0ac3a5514
|
ce37d136e932a08e82cae57aef67ced6d36f55fc
|
/elevennote/src/accounts/views.py
|
a31b8f5ea79b0291e86492dc1cbb146b3e56856d
|
[] |
no_license
|
vk59/cs102
|
33f16158c8b2ae117e04e74fc00e41420a8d737f
|
202536377147d8e92c82c4a1f02a0579044c1ebe
|
refs/heads/master
| 2022-12-22T10:29:16.646896
| 2020-08-23T17:21:10
| 2020-08-23T17:21:10
| 211,481,266
| 1
| 0
| null | 2022-12-08T09:53:46
| 2019-09-28T10:09:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
import random
from decouple import config
from django.contrib.auth import authenticate, login
from django.views.generic import FormView
from django.shortcuts import redirect
from .forms import UserCreationForm
from django.core.mail import send_mail
class RegisterView(FormView):
template_name = 'registration/register.html'
form_class = UserCreationForm
success_url = '/'
def form_valid(self, form):
form.save()
email = self.request.POST['email']
password = self.request.POST['password1']
user = authenticate(email=email, password=password)
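        # Build a 15-character confirmation key from the decimal digits of
        # str(random.random()) (the characters at indices 2..16).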
secret = str(random.random())
secret_key = ''
for i in range(2, 17):
secret_key += secret[i]
user.secret_key = secret_key
user.save()
login(self.request, user)
send_mail(
"Confirm your account",
f"To confirm your email in Elevennote, click this link:\nhttp://localhost:8000/accounts/confirm/{user.secret_key}",
'snegovivan78@gmail.com',
recipient_list=[email],
fail_silently=False
)
return super(RegisterView, self).form_valid(form)
def ConfirmView(request, secret_key):
msg = "fail"
if request.user.secret_key == secret_key:
request.user.is_confirmed = True
request.user.save()
msg = "success"
return redirect(f"/notes/?msg={msg}")
|
[
"jkostylev592@gmail.com"
] |
jkostylev592@gmail.com
|
646fdae2a5d2507b13d577d67ece3bb4d15666a1
|
711b0851cbfbf1c8aecdb772c921910e0d42e4c1
|
/app/base/base.py
|
b1f7e6cf4edb2ecc85fa2ffef706a6119cf59156
|
[] |
no_license
|
FlashXFork/falcon-example
|
652dc9780a6919d574cd8388bc7a446bb91e63b3
|
5c9475c06ae63d57b1142561cffdf703f24efd8b
|
refs/heads/master
| 2022-01-09T04:36:25.342467
| 2018-09-27T18:22:25
| 2018-09-27T18:22:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
class BaseAPIClass:
    pass
|
[
"connect.anirudh@gmail.com"
] |
connect.anirudh@gmail.com
|
296950f78c5f7d827df946aa9b259ea8dd33b247
|
3a440f725a95e0963fef518c05d1790034d75ba7
|
/slipy/spectrum/etc/getitem.py
|
43a7f43afbd55b524df14efc58fa1acb5eed88e5
|
[
"BSD-3-Clause"
] |
permissive
|
glentner/slipy-dev
|
ac6990a710306f31274d12487a421a4479c36e74
|
db8fa6ada3ce3246879d5aae9b0f150c56c23382
|
refs/heads/master
| 2021-01-17T23:38:19.511679
| 2016-01-14T18:59:08
| 2016-01-14T18:59:08
| 49,592,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Copyright (c) Geoffrey Lentner 2016. All Rights Reserved.
# slipy/spectrum/etc/getitem.py
# TODO: etc/getitem.py
"""
"""
def _getitem(self, key):
"""
"""
raise NotImplementedError()
|
[
"glentner@nd.edu"
] |
glentner@nd.edu
|
f43f7781fe91298f8cac39f9eca02476c6049fa9
|
2bf91522311f2f76ab2383bff6b87ef61b614030
|
/rango/models.py
|
f8a99e2864a53f12adbb22d2fa781b6e46ecb25a
|
[] |
no_license
|
AnnaB0/Tango_with_django_project
|
42ef5247ec5e43546150686d65a9bcb93d5fbf54
|
b6509a62a8dea6e512519d8706a760f2c03a8cce
|
refs/heads/master
| 2020-12-14T00:44:47.709702
| 2020-02-14T17:35:20
| 2020-02-14T17:35:20
| 234,581,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
from django.db import models
from django.contrib import admin
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
class Category(models.Model):
NAME_MAX_LENGTH=128
name=models.CharField(max_length=NAME_MAX_LENGTH, unique=True)
views=models.IntegerField(default=0)
likes=models.IntegerField(default=0)
slug=models.SlugField(unique=True)
def save(self,*args,**kwargs):
self.slug=slugify(self.name)
super(Category,self).save(*args,**kwargs)
class Meta:
verbose_name_plural = 'Categories'
def __str__(self):
return self.name
class Page (models.Model):
TITLE_MAX_LENGTH=128
URL_MAX_LENGTH=200
category=models.ForeignKey(Category, on_delete=models.CASCADE)
title=models.CharField(max_length=TITLE_MAX_LENGTH)
url=models.URLField()
views=models.IntegerField(default=0)
def __str__(self):
return self.title
class PageAdmin (admin.ModelAdmin):
list_display=('title','category','url')
class UserProfile(models.Model):
user=models.OneToOneField(User,on_delete=models.CASCADE)
website=models.URLField(blank=True)
picture=models.ImageField(upload_to='profile_images',blank=True)
def __str__(self):
return self.user.username
|
[
"2373329b@student.gla.ac.uk"
] |
2373329b@student.gla.ac.uk
|
672b680c38cdfd95904425dd31e0b8dd9d43dae0
|
d9fb2e6c1ae4ccc8901b61d2fae787603f28755e
|
/twitter_nlp_toolkit/tweet_sentiment_classifier/models/__init__.py
|
2526ee4027af336b77fdc7c407968b14fc962460
|
[
"MIT"
] |
permissive
|
markok20/twitter-toolbox
|
1f59d92aa242a2dbec481b6e1755991d46124ef0
|
5e4b7881923394392619a5ced22857772cccb08b
|
refs/heads/master
| 2022-12-29T20:26:57.405675
| 2020-10-05T23:22:39
| 2020-10-05T23:22:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
from .bow_models import *
from .bert_models import *
from .lstm_models import *
|
[
"46228485+eschibli@users.noreply.github.com"
] |
46228485+eschibli@users.noreply.github.com
|
a0a6adbb51fbc0d45b803132e2002d4d8fe25008
|
28691ec55ebce9ec7045d12ea9675932ce12d671
|
/py2ecotect-project/branches/sandbox/py2ecotect/application/timer.py
|
66c9eba970f638ae9c19e2b45578438bed4321b8
|
[] |
no_license
|
ianclarksmith/design-automation
|
1e71315193effc0c18b4a8b41300bda6f41a3f09
|
e27cc028fe582395f4a62f06697137867bb0fc33
|
refs/heads/master
| 2020-04-22T22:28:39.385395
| 2009-10-26T02:48:37
| 2009-10-26T02:48:37
| 37,266,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,503
|
py
|
import py2ecotect as p2e
class Timer(object):
#===========================================================================
# Commands
#===========================================================================
def restart(self):
"""
        Restarts the internal timer. The count property continues from its
last value.
Parameter(s)
There are no parameters for this command.
"""
p2e._app.Exec("timer.restart")
def start(self):
"""
Starts the internal timer, resetting the count property to zero.
Parameter(s)
There are no parameters for this command.
"""
p2e._app.Exec("timer.start")
def stop(self):
"""
Stops the internal timer.
Parameter(s)
There are no parameters for this command.
"""
p2e._app.Exec("timer.stop")
#===========================================================================
# Properties
#===========================================================================
@apply
def count():
def fget(self):
"""
Retrieves the number of times the OnTimer(count) event has been
triggered since it was first started.
Parameter(s)
There are no parameters for this property.
Return Value(s)
Getting this property returns the following value(s).
count
The number of times the timer has triggered since it was started.
"""
val = p2e._app.Request("get.timer.count")
return p2e._util._convert_str_to_type(val, int)
def fset(self, count):
"""
Sets the counter value for the number of times the OnTimer(count) event
has been triggered since it was first started.
Parameter(s)
This property takes the following parameters.
count
The number of timer triggers to report.
"""
arg_str = p2e._util._convert_args_to_string("set.timer.count", count)
p2e._app.Exec(arg_str)
return property(**locals())
@apply
def interval():
def fget(self):
"""
Retrieves the timer interval in milliseconds. This is basically the
time gap between each calling of the OnTimer(count) event.
Parameter(s)
There are no parameters for this property.
Return Value(s)
Getting this property returns the following value(s).
msec
The number of milliseconds (thousandths of a second) between each
            triggering of the timer. Thus, a value of 1000 would mean one call
every second.
"""
val = p2e._app.Request("get.timer.interval")
return p2e._util._convert_str_to_type(val, int)
def fset(self, msec):
"""
Sets the timer interval in milliseconds between each calling of the
OnTimer(count) event.
Parameter(s)
This property takes the following parameters.
msec
The number of milliseconds (thousandths of a second) between each
            triggering of the timer. Thus, a value of 1000 would mean one call
every second. The minimum time gap you can set is 50 milliseconds
(20 times per second).
"""
arg_str = p2e._util._convert_args_to_string("set.timer.interval", msec)
p2e._app.Exec(arg_str)
return property(**locals())
@apply
def running():
def fget(self):
"""
            Retrieves a value that shows whether the timer is currently running or
not.
Parameter(s)
There are no parameters for this property.
Return Value(s)
Getting this property returns the following value(s).
running
This is a boolean value where 1 means running and 0 means stopped.
"""
val = p2e._app.Request("get.timer.running")
return p2e._util._convert_str_to_type(val, int)
def fset(self, running):
"""
Sets the status of the timer to running or not. Sending 0 or false is
the same as calling timer.stop whilst 1 or true is the same as calling
timer.restart.
Parameter(s)
This property takes the following parameters.
running
This is a boolean value where 1 or true sets the timer running and 0 or
false stops it.
"""
arg_str = p2e._util._convert_args_to_string("set.timer.running",
running)
p2e._app.Exec(arg_str)
return property(**locals())
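# Hypothetical usage sketch (assumes a live Ecotect session via py2ecotect):
#   t = Timer()
#   t.interval = 1000  # trigger OnTimer(count) once per second
#   t.start()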
|
[
"manikewell@d56020b2-6ac5-11de-89a9-0b20f3e2dceb"
] |
manikewell@d56020b2-6ac5-11de-89a9-0b20f3e2dceb
|
04fb4b01c64b77e2d2a2dffc669bf5135d1cad0c
|
6dbe812830c74ba1ac9d651edb01fd2708c7416a
|
/renovateproject/cases/migrations/0030_service_type.py
|
69b181216b595cd5a97631eeb52eb982225e7e78
|
[] |
no_license
|
zhengjieAdriod/myproject
|
d60f78422e9b8f2db54e96a9545a7b97ec9bfde9
|
9a5b418b15e3a7705c15815a61d0e22f9862a427
|
refs/heads/master
| 2021-01-01T18:21:14.436932
| 2017-09-25T10:04:52
| 2017-09-25T10:04:52
| 98,318,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-08-23 05:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0029_schemeinservice'),
]
operations = [
migrations.AddField(
model_name='service',
name='type',
field=models.CharField(blank=True, max_length=70),
),
]
|
[
"zhangxiaoqi@ichinait.com"
] |
zhangxiaoqi@ichinait.com
|
a8ae10cb9b112f6f3e8db92fe4e479f238c984c2
|
66b4f2ff918ee9d3bf0e31a6cc8a08f4920fb497
|
/tool.py
|
eb25fcb7eb1f4e06e1e7ba9c136bfd2fa59fe1a6
|
[] |
no_license
|
svonton/Project--Memorization-Tool
|
dcd7b8a8a5e81fb1f7b0b76f833ae13f48b6cb6e
|
ccb6628987655d81bd856982c5c6d3a6f0cc0d28
|
refs/heads/main
| 2023-04-24T17:25:47.616761
| 2021-05-17T16:27:38
| 2021-05-17T16:27:38
| 368,252,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,269
|
py
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
invite_message = """
1. Add flashcards
2. Practice flashcards
3. Exit\n"""
sub_menu_message = """
1. Add a new flashcard
2. Exit\n"""
practice_menu_message = """press "y" to see the answer:
press "n" to skip:
press "u" to update:\n"""
practice_submenu_message = """press "d" to delete the flashcard:
press "e" to edit the flashcard:\n"""
learning_menu_message = """press "y" if your answer is correct:
press "n" if your answer is wrong:\n"""
possible_variants = [1, 2, 3]
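# check_same_thread=False lets the SQLite connection be shared across threads.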
engine = create_engine('sqlite:///flashcard.db?check_same_thread=False')
Base = declarative_base()
class FlashCard(Base):
__tablename__ = 'flashcard'
id = Column(Integer, primary_key=True)
question = Column(String)
answer = Column(String)
box = Column(Integer, default=0)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
def add_flashcard():
try:
user_choice = input(sub_menu_message)
if int(user_choice) in possible_variants[:2]:
if int(user_choice) == 1:
question = ''
answer = ''
while question == '':
question = input('Question:\n')
while answer == '':
answer = input('Answer:\n')
new_data = FlashCard(question=question, answer=answer)
session.add(new_data)
session.commit()
add_flashcard()
else:
return
else:
print(f'\n{user_choice} is not an option')
add_flashcard()
except ValueError:
print(f'\n{user_choice} is not an option')
add_flashcard()
def leitner_system(cntx):
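    # Leitner system: a correct answer promotes the card one box; after the third
    # correct answer (box == 3) the card counts as learned and is deleted.
    # A wrong answer sends the card back to box 0.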
choice = input(learning_menu_message)
if choice == 'y':
cntx.box += 1
if cntx.box == 3:
session.delete(cntx)
session.commit()
elif choice == 'n':
cntx.box = 0
session.commit()
else:
print(f'\n{choice} is not an option')
def practice_flashcards():
result_list = session.query(FlashCard).all()
for i in range(len(result_list)):
print(f'Question: {result_list[i].question}')
choice = input(practice_menu_message)
if choice == 'y':
print(f'Answer: {result_list[i].answer}')
leitner_system(result_list[i])
elif choice == 'n':
leitner_system(result_list[i])
elif choice == 'u':
sub_choice = input(practice_submenu_message)
if sub_choice == 'd':
session.delete(result_list[i])
session.commit()
elif sub_choice == 'e':
new_question = ''
new_answer = ''
while new_question == '':
print(f'current question: {result_list[i].question}')
new_question = input('please write a new question:\n')
while new_answer == '':
print(f'current answer: {result_list[i].answer}')
new_answer = input('please write a new answer:\n')
result_list[i].question = new_question
result_list[i].answer = new_answer
session.commit()
else:
print(f'\n{sub_choice} is not an option')
else:
print(f'\n{choice} is not an option')
return
while True:
try:
user_choice = input(invite_message)
if int(user_choice) in possible_variants:
if int(user_choice) == 1:
add_flashcard()
elif int(user_choice) == 2:
if len(session.query(FlashCard).all()) > 0:
practice_flashcards()
else:
print('There is no flashcard to practice!')
session.query(FlashCard).delete()
                session.commit()
else:
print('Bye!')
break
else:
print(f'\n{user_choice} is not an option')
except ValueError:
print(f'\n{user_choice} is not an option')
|
[
"56838368+svonton@users.noreply.github.com"
] |
56838368+svonton@users.noreply.github.com
|
6a089bb1e07bd373e43a4e28f81cfc6919ec446b
|
de296bcc3dda26dbaf876711c17d0bbfee342393
|
/run_configs_lstm.py
|
0108ab36a722a2f48bdf12022f5e03114f3f398d
|
[] |
no_license
|
vedhasua/tiny_emotionet
|
9d55f45256937b406e82c2257ecedc647f10e3ce
|
228aaf922833217ae2e0f159ebf933a377b07bb4
|
refs/heads/master
| 2021-05-21T01:01:46.618630
| 2020-08-31T09:24:40
| 2020-08-31T09:24:40
| 252,478,653
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,867
|
py
|
from deepemotion_keras import main
param_given = True #f
culture = 'German' #f
eval_cross = True #f
modality = 'audio' #f
get_turn_feature = True #f
uncertainty_target = False #f
invert_std = False #f
loss_unc = 'ccc_2' #f
weight_unc = 0.5 #f
balance_weight = False #f
uncertainty_weight = False #f
batch_sizes = [34] #PARAM 1 better for valence, 34 better for arousal
learning_rates = [0.001] #[0.00025,0.0005,0.001,0.002] ## PARAM
#max_num_epochs = 500 #f 500/100 (for BS 1)
first_lstm = True #f
num_cells_1 = 200 #f
num_cells_2 = 64 #f
num_cells_3 = 32 #f
num_cells_4 = 32 #f
#num_cells_1 = [200,5] #f
#num_cells_2 = [64,20] #f
#num_cells_3 = [32,30] #f
#num_cells_4 = [32,50] #f
last_lstm = False #f
batch_norm = False #f - requires a high learning rate, but no improvement
last_specific = False #f - no multi-task for the beginning
comb_smoothing = False #na
bidirectional = True #na
dropout = 0.0 #f - no big difference
final_activation = 'linear' #f - tanh does not work for CNN
loss_function = 'ccc_2' #f
shift_secs = [0.0,0.4,0.8,1.2,1.6,2.0,2.4,2.8,3.2,3.6,4.0,4.4,4.8,5.2,5.6,6.0] #PARAM - 0.05 opt for window size 0.1, uni-directional LSTM
targets_avls = ['A','V'] #f
feature_type_a = 'funcegem' #f 'mfcc' & 'funcegem' best, 'egem' works best for fusion, 'mfcccomp' worse, 'funccomp' better for valence, but bad for arousal on devel
feature_type_v = 'faus' #f 'faus+lips' have approx. the same performance
window_size = 0.5 #f
xbow_cs = 1000 #na
xbow_na = 10 #na
random_seeds = [0] ## PARAM
add_noise = False #f # not implemented
append_results_file = 'all_results_lstm.txt'
for targets_avl in targets_avls:
for shift_sec in shift_secs:
for batch_size in batch_sizes:
##
if batch_size==1:
max_num_epochs = 100
elif batch_size<10:
max_num_epochs = 250
else:
max_num_epochs = 200
##
for learning_rate in learning_rates:
for random_seed in random_seeds:
main(param_given,
culture=culture,
eval_cross=eval_cross,
modality=modality,
get_turn_feature=get_turn_feature,
uncertainty_target=uncertainty_target,
invert_std=invert_std,
loss_unc=loss_unc,
weight_unc=weight_unc,
balance_weight=balance_weight,
uncertainty_weight=uncertainty_weight,
batch_size=batch_size,
learning_rate=learning_rate,
max_num_epochs=max_num_epochs,
first_lstm=first_lstm,
num_cells_1=num_cells_1,
num_cells_2=num_cells_2,
num_cells_3=num_cells_3,
num_cells_4=num_cells_4,
last_lstm=last_lstm,
batch_norm=batch_norm,
last_specific=last_specific,
comb_smoothing=comb_smoothing,
bidirectional=bidirectional,
dropout=dropout,
final_activation=final_activation,
loss_function=loss_function,
shift_sec=shift_sec,
targets_avl=targets_avl,
feature_type_a=feature_type_a,
feature_type_v=feature_type_v,
window_size=window_size,
xbow_cs=xbow_cs,
xbow_na=xbow_na,
random_seed=random_seed,
add_noise=add_noise,
append_results_file=append_results_file)
|
[
"megonnashop@gmail.com"
] |
megonnashop@gmail.com
|
2a2073d56153bd0c9fbc7ba6d2c12d2005b41b1f
|
8e79b76bc1f67291cef9118eff4adf3f529235ba
|
/210110_졸업작품3.py
|
00304439eb05e619e5a6ba253136b503de244c13
|
[] |
no_license
|
hacks0921/2021year
|
cdac5f8218aa41d3b6fc551f2d72456ff04d5144
|
b375cf180c4f10f2c7fc41b2d5c06bb0bcb658f3
|
refs/heads/main
| 2023-06-06T17:47:20.817199
| 2021-07-08T23:16:19
| 2021-07-08T23:16:19
| 328,184,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,723
|
py
|
import sys
from PyQt5 import QtWidgets
from PyQt5 import uic
import os
import openpyxl
from openpyxl import Workbook
from openpyxl.drawing.image import Image
from openpyxl import load_workbook
import shutil
from PIL import Image
from PyQt5.QtCore import Qt,QThread,pyqtSignal
import time
class Form(QtWidgets.QDialog):
file_name = pyqtSignal(str)
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self.ui = uic.loadUi("1012_QT_UI_V0.1.ui")
self.ui.pushButton_1.clicked.connect(self.startProgressBar)
self.ui.show()
def filename(self):
a = self.ui.lineEdit_1.text()
return a
    def startProgressBar(self): # start the progress bar
print("startProgressBar 시작 ")
        self.thread = MyThread() # create the MyThread instance
        # receive the change_value pyqtSignal emitted by MyThread and hook it up
        self.thread.change_value.connect(self.setProgressVal) # when change_value fires, the value is passed to setProgressVal
        self.thread.start() # run the thread; the file name is handed over via the global below
        global xlsx_dir # declare the global variable
        xlsx_dir = self.ui.lineEdit_1.text() # store the Excel file name in the variable
# print(xlsx_dir)
print("startProgressBar 실행 ")
    def setProgressVal(self, Val): # receives Val and updates the progress bar
        self.ui.progressBar_1.setValue(Val) # set the progress bar value to Val
print("setProgressVal 실행 ")
class MyThread(QThread): # QThread subclass that does the actual work
    change_value = pyqtSignal(int) # signal carrying the progress value
print("MyThread 실행")
    def __init__(self): # form setup
super().__init__()
def run(self):
print("doaction")
# xlsx_dir = "test"
print(xlsx_dir)
try:
file_name = load_workbook("./" + xlsx_dir + ".xlsx")
worksheet = file_name._sheets[0] # sheet name or sheet number or list of sheet numbers and names
count = 0
fail = 0
for row in worksheet.iter_rows():
count += 1
                img_path = row[0].value # source file path
if os.path.isfile(img_path):
                    dir_path = './' + str(row[1].value) + '_' + str(row[2].value) # build the destination directory path
                    if not os.path.exists(dir_path): # create the destination directory if it does not exist
os.mkdir(dir_path)
                    name = os.path.split(row[0].value) # split the file path
                    name = os.path.splitext(name[1]) # split off the file extension
                    save_path = dir_path + '/' + str(name[0]) + str(name[1]) # build the destination file path
                    shutil.copy2(img_path, save_path) # copy the image at img_path into the destination folder
else:
print("FAIL : ", img_path)
fail += 1
print(count)
max_row = worksheet.max_row
percent = round(100 * (count / max_row))
self.change_value.emit(percent)
print("total iamges : {:5d}, fail images : {:5d}".format(count, fail))
print("완료")
self.ui.pushButton_1.setText('완료')
        except Exception:
            print("error")
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
w = Form()
sys.exit(app.exec())
|
[
"noreply@github.com"
] |
hacks0921.noreply@github.com
|
c46f8dae7fd09611eb31831a1cfcf7515a1fe8be
|
0726c911c1976871326aa05315b2c6203a08ca7e
|
/Part2/data_show/scatter_squares.py
|
b5d1b6ac57c85f3da9c95a2af2676492352e549d
|
[] |
no_license
|
chaojimali666/core_python
|
e40c96f86f5eb102ed0d37aa601110aae5680097
|
6dd9d070dc69431c2a81f686adb518c603555e14
|
refs/heads/master
| 2020-04-27T23:58:14.574105
| 2019-03-10T09:00:38
| 2019-03-10T09:00:38
| 174,798,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import matplotlib.pyplot as plt
x_values = list(range(1,1001))
y_values = [x**2 for x in x_values]
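# Color each point by its y-value with the Reds colormap; s=40 sets the marker size.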
plt.scatter(x_values,y_values,c=y_values,cmap=plt.cm.Reds,edgecolor='none',s=40)
plt.title("Square Numbers",fontsize=24)
plt.xlabel("Value",fontsize = 14)
plt.ylabel("Square of Value",fontsize = 14)
# set the tick-label size
plt.tick_params(axis='both',which='major',labelsize=14)
plt.axis([0,1100,0,1100000])
plt.savefig('squares_plot.png',bbox_inches='tight')
plt.show()
|
[
"wuyongqi@pku.edu.cn"
] |
wuyongqi@pku.edu.cn
|
d2452f37382def6e2f7783ebe775609f57ee6871
|
337d17b845f5fdd7f32f6a0607e494eed488a601
|
/exercism/python/allergies/allergies.py
|
94996a9f501f4e4cd5f60208ca0a0a5f52f0d875
|
[] |
no_license
|
karsibali/solutions
|
e6130abe026a26558434239cde39c6a14a9712ba
|
4ba5d7ac41fecc87491cae2c88293bd798db31fd
|
refs/heads/master
| 2020-04-29T00:13:34.168323
| 2018-12-27T15:43:26
| 2018-12-27T15:43:26
| 175,686,183
| 1
| 0
| null | 2019-03-14T19:27:00
| 2019-03-14T19:27:00
| null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
class Allergies(object):
ALLERGENS = (
'eggs',
'peanuts',
'shellfish',
'strawberries',
'tomatoes',
'chocolate',
'pollen',
'cats'
)
def __init__(self, score):
self._allergens = set(
a for i, a in enumerate(self.ALLERGENS)
if 1 << i & score > 0
)
def is_allergic_to(self, allergen):
return allergen in self._allergens
@property
def lst(self):
return list(self._allergens)
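# A minimal usage sketch (illustrative, not part of the exercism submission):
# score 34 = 32 + 2 sets bit 5 (chocolate) and bit 1 (peanuts).
# >>> a = Allergies(34)
# >>> a.is_allergic_to('peanuts')
# True
# >>> sorted(a.lst)
# ['chocolate', 'peanuts']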
|
[
"ozan.onay@gmail.com"
] |
ozan.onay@gmail.com
|
2df5454fd3d14ee509a6de1f6940893dc8473334
|
f1ee89e8ae5bcbe40602fd520df44b1ce10cfabc
|
/check.py
|
1f00caf8f7121a6d25473202b1021c205430959d
|
[
"CC0-1.0"
] |
permissive
|
fenajojuk/social-media-hacker-list
|
30400630510cd11492d335d9a6ba774a4729dac1
|
c725bead4e12c3b37b506338d4607bc9a4fb82f2
|
refs/heads/main
| 2023-06-23T22:53:39.004996
| 2021-07-27T19:13:12
| 2021-07-27T19:13:12
| 390,160,789
| 1
| 0
|
CC0-1.0
| 2021-07-28T00:04:24
| 2021-07-28T00:04:23
| null |
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
"""check all urls in readme for ~OK (acceptable) response"""
import re
import sys
from json import loads
from requests import request
from urllib.parse import urlparse
from dateutil.parser import parse
from datetime import datetime
TOKEN = sys.argv[1] if len(sys.argv) > 1 else None
LIMIT = int(sys.argv[2]) if len(sys.argv) > 2 else None
PATTERN = r'(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-z' \
r'A-Z0-9]\.[^\s)\']{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-' \
r'Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+' \
r'\.[^\s)\']{2,}|www\.[a-zA-Z0-9]+\.[^\s)\']{2,})'
def path(url):
return urlparse(url).path.split('/')
def host(url, domain):
return domain in urlparse(url).netloc
def ok(status, url):
return status in [200, 403, 406] or \
(status == 502 and host(url, 'reddit.com'))
def active(latest_change):
delta = datetime.now() - latest_change.replace(tzinfo=None)
return delta.days <= 365
def check_url(url, method, retry=0):
response = request(method, url, headers={'Accept': '*/*'})
success = ok(response.status_code, url)
if success or retry > 0:
return success, response.status_code
return check_url(url, "GET", 1)
def active_repo(url):
[owner, repo] = path(url)[1:3]
api_url = f'https://api.github.com/repos/{owner}/{repo}'
headers = {'Accept': 'application/vnd.github.v3+json'}
if TOKEN:
headers['Authorization'] = f'token {TOKEN}'
response = request('GET', api_url, headers=headers)
if not ok(response.status_code, url):
return False, response.status_code
if not active(parse(loads(response.content)['updated_at'])):
return False, "INACTIVE"
return True, 200
def main():
readme = open("README.md", "r").read()
urls = list(set(re.findall(PATTERN, readme)))[0:LIMIT]
fails, total, progress = [], len(urls), 0
print(f'Checking {total} entries...')
for index, url in enumerate(urls):
is_repo = host(url, 'github.com') and len(path(url)) > 2
try:
success, code = active_repo(url) if is_repo \
else check_url(url, "HEAD")
if not success:
fails.append((code, url))
except Exception as e:
fails.append((f'error: {e}', url))
percent = (index * 100) // total
if percent % 10 == 0 and percent > progress:
print(f'...{percent} % ({len(fails)})')
progress = percent
if fails:
output = '\n'.join([f'- {m}: {u}' for m, u in fails])
print(f'{len(fails)} failure(s):\n{output}')
sys.exit(1)
    print('no issues')  # plain string; the original f-prefix had no placeholders
if __name__ == '__main__':
main()
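# Run sketch (arguments as read from sys.argv above; both are optional):
#   python check.py <github-token> <limit>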
|
[
"neea.rusch@mobilefirst.me"
] |
neea.rusch@mobilefirst.me
|
2dac4e05c6c0a13aed4ccf25c89ee088301f0655
|
9bd634bcb47955c598f8134e98f3b3c4ec003bad
|
/Module3/practice/02_task_Fraction.py
|
3915b28d9802919372f60a22e6c9a8b768739aae
|
[] |
no_license
|
EgorF1/SpecialistPython2_v2
|
2ee1e1e9610772bea9d9083fc4d308500c8b91aa
|
36a7fb16c2fcd9ad0d100ff86c747a32f7fb6562
|
refs/heads/master
| 2023-04-02T08:27:44.483856
| 2021-04-05T12:02:27
| 2021-04-05T12:02:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
# Task: "Simple fractions"
# Submit the assignment with the fraction class here
class Fraction:
    def __init__(self, fract_str):  # the fraction is passed to the constructor as a string
        # We store the fraction as
        self.numerator = ...  # a numerator
        self.denominator = ...  # and a denominator
        # the whole part is folded into the numerator
        # the minus sign, if present, is also stored in the numerator
# Examples of creating fractions:
fract1 = Fraction("3 12/15")
fract2 = Fraction("-1 2/6")
fract3 = Fraction("2/4")
fract4 = Fraction("-2/4")
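# One possible parsing approach (illustrative only, not the graded solution):
# "3 12/15" -> whole=3, num=12, den=15 -> numerator = 3*15 + 12 = 57, denominator = 15
# "-1 2/6"  -> numerator = -(1*6 + 2) = -8, denominator = 6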
|
[
"t89236238012@gmail.com"
] |
t89236238012@gmail.com
|
cee5d8fad0b945c2ff08e6e8fc785439dfb414ca
|
35f0ca560a2815561a5a69d6b5435437a5386d82
|
/카펫.py
|
f26bfeded6ac1d5ddb336bb501cf5b0a77357d10
|
[] |
no_license
|
celeist666/ProgrammersAlgo
|
bb4a6363682a754b3a488c4355e3fe1a46e47e40
|
675dd27a02b9b50e6250125f33b18692a8b065b9
|
refs/heads/master
| 2022-12-11T15:42:13.858190
| 2020-09-11T01:21:43
| 2020-09-11T01:21:43
| 283,899,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
# One side of the red area is at most its square root, so brute-force divisors up to that bound to find red's width and height.
def solution(brown, red):
for i in range(1, int(red**(1/2))+1):
if red % i == 0:
if 2*(i + red//i) == brown-4:
return [red//i+2, i+2]
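# Worked example (illustrative): brown=10, red=2 -> i=1 divides 2 and
# 2*(1 + 2//1) == 10 - 4, so the carpet is [2//1 + 2, 1 + 2] == [4, 3].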
|
[
"athebate@gmail.com"
] |
athebate@gmail.com
|
6e7c08a6e30b14024e358e4c72154aa2fda938c6
|
d273f5b023f7d6e7e2a1050571ebd47f661f8200
|
/general_operations/geometry_shapes/circle.py
|
aead7db6a138197648d0388cfddea8693a68d174
|
[] |
no_license
|
TiphaineL/magnet_position
|
1d756e2b9d26776116a86e1d4ec559d096300e51
|
2133828ec34cb73dda18c8bc176ab1ac2a15b6b3
|
refs/heads/master
| 2020-09-01T05:14:27.660266
| 2020-05-01T04:28:35
| 2020-05-01T04:28:35
| 218,887,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
import numpy as np
import matplotlib.pyplot as plt
class circle:
def __init__(self, center,radius,orientation):
self.center = center
self.radius = radius
self.orientation = orientation
def calculate_circle_all_coordinates(self):
self.x_coordinates = np.arange(self.center[0] - self.radius, self.center[0] + self.radius, self.radius / 100000)
self.y_coordinates_negative = -np.sqrt(np.abs(self.radius ** 2 - (self.x_coordinates - self.center[0]) ** 2)) + self.center[1]
self.y_coordinates_positive = np.sqrt(np.abs(self.radius ** 2 - (self.x_coordinates - self.center[0]) ** 2)) + self.center[1]
def draw_circle(self,colour):
        self.calculate_circle_all_coordinates()  # call via self rather than through the class
plt.plot(self.x_coordinates, self.y_coordinates_negative,colour)
plt.plot(self.x_coordinates, self.y_coordinates_positive,colour)
plt.axis('scaled')
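# Usage sketch (assumed call pattern, not in the original file):
# c = circle(center=(0, 0), radius=1.0, orientation=0)
# c.draw_circle('b')
# plt.show()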
|
[
"lagadec.tiphaine@gmail.com"
] |
lagadec.tiphaine@gmail.com
|
3a1bed5164417b2da92caffdfc92db2b84c1a9ac
|
1b8d162160f5ab6d6a6b8940b8ab83b482abb409
|
/tests/query/test_bool.py
|
6dea261aa5c66f2089120c8aa168e698734ae55f
|
[
"Apache-2.0"
] |
permissive
|
jlinn/pylastica
|
f81e438a109dfe06adc7e9b70fdf794c5d01a53f
|
0fbf68ed3e17d665e3cdf1913444ebf1f72693dd
|
refs/heads/master
| 2020-05-19T14:07:38.794717
| 2014-07-23T23:43:00
| 2014-07-23T23:43:00
| 10,442,284
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
__author__ = 'Joe Linn'
import unittest
import pylastica
from tests.base import Base
class BoolTest(unittest.TestCase, Base):
def test_search(self):
client = self._get_client()
index = client.get_index('test')
index.create(options=True)
doc_type = index.get_doc_type('helloworld')
doc_type.add_document(pylastica.Document(1, {'id': 1, 'email': 'joe@test.com', 'username': 'joe', 'test': ['2', '3', '5']}))
doc_type.add_document(pylastica.Document(2, {'id': 2, 'email': 'bob@test.com', 'username': 'bob', 'test': ['1', '3', '6']}))
doc_type.add_document(pylastica.Document(3, {'id': 3, 'email': 'bill@test.com', 'username': 'bill', 'test': ['2', '3', '7']}))
index.refresh()
bool_query = pylastica.query.Bool()
term_query1 = pylastica.query.Term({'test': '2'})
bool_query.add_must(term_query1)
result_set = doc_type.search(bool_query)
self.assertEqual(2, len(result_set))
term_query2 = pylastica.query.Term({'test': '5'})
bool_query.add_must(term_query2)
result_set = doc_type.search(bool_query)
self.assertEqual(1, len(result_set))
term_query3 = pylastica.query.Term({'username': 'joe'})
bool_query.add_must(term_query3)
result_set = doc_type.search(bool_query)
self.assertEqual(1, len(result_set))
term_query4 = pylastica.query.Term({'username': 'bob'})
bool_query.add_must(term_query4)
result_set = doc_type.search(bool_query)
self.assertEqual(0, len(result_set))
index.delete()
if __name__ == '__main__':
unittest.main()
|
[
"joe@venturocket.com"
] |
joe@venturocket.com
|
4bd7b939b951f4344a99f0ce9271f9b5aca3f91f
|
573fa4524ea8be37d0bcc2e3195d9529235f577b
|
/uvicore/auth/authenticators/base.py
|
00e0788e542e4087320aad0db3122c3e0b19a2d4
|
[
"MIT"
] |
permissive
|
webclinic017/framework
|
72753f541f3c7f2cb13ae314785f23fb56ae5908
|
9c21b85e9e470c6d789899340332a9abd0b26ab1
|
refs/heads/master
| 2023-09-04T07:37:57.966539
| 2021-10-15T19:36:42
| 2021-10-15T19:36:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
import uvicore
from uvicore.support import module
from uvicore.support.dumper import dump, dd
from uvicore.http.request import HTTPConnection
from uvicore.contracts import UserInfo, UserProvider
from uvicore.typing import Dict, Optional, List, Tuple
from uvicore.contracts import Authenticator as AuthenticatorInterface
@uvicore.service()
class Authenticator(AuthenticatorInterface):
"""Base authenticator class"""
def __init__(self, config: Dict):
self.config = config
@property
def log(self):
return uvicore.log.name('uvicore.auth')
async def retrieve_user(self, username: str, password: str, provider: Dict, request: HTTPConnection, **kwargs) -> Optional[UserInfo]:
"""Retrieve user from User Provider backend"""
# Import user provider defined in auth config
user_provider: UserProvider = module.load(provider.module).object()
# Get user from user provider and validate password. User will be Anonymous
# if user not found, disabled or validation failed
user = await user_provider.retrieve_by_credentials(
# Require parameters
username=username,
password=password,
request=request,
# Pass in options from auth config
**provider.options,
# Pass in options from the calling authenticator
**kwargs,
)
# Do not throw error if no user or not validated here. We let the middleware handle that
return user
async def create_user(self, provider: Dict, request: HTTPConnection, **kwargs):
# Import user provider defined in auth config
user_provider: UserProvider = module.load(provider.module).object()
# Create user from user provider
# Returned user is actual backend user, NOT Auth User object
user = await user_provider.create_user(request, **kwargs)
return user
async def sync_user(self, provider: Dict, request: HTTPConnection, **kwargs):
# Import user provider defined in auth config
user_provider: UserProvider = module.load(provider.module).object()
# Create user from user provider
# Returned user is actual backend user, NOT Auth User object
user = await user_provider.sync_user(request, **kwargs)
return user
def auth_header(self, request) -> Tuple[str, str, str]:
"""Extract authorization header parts"""
authorization = request.headers.get('Authorization')
if not authorization: return (authorization, '', '')
        # Partition is a bit more performant than split
scheme, _, param = authorization.partition(' ')
return authorization, scheme.lower(), param
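    # Illustrative example (assumed header value): given "Authorization: Bearer abc123",
    # auth_header returns ('Bearer abc123', 'bearer', 'abc123').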
|
[
"mail@mreschke.com"
] |
mail@mreschke.com
|
1654d27a923d58b9d627a5fe61e68b0b1a04d947
|
c065ff2a6a377aea2303b7b8482558049958a7ec
|
/toydrill/1562239654/tactile.tac
|
23ebffe0c957aeec513c0a3051c9be9817663a24
|
[] |
no_license
|
waedbara/vision2tactile
|
7bc9861eecb4247fd254ea58dc508ed18a03b1af
|
edbc9dfee61b4a4b1f0caebb2f16faef090dff32
|
refs/heads/master
| 2022-04-02T20:43:16.621687
| 2019-12-11T08:07:39
| 2019-12-11T08:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
tac
|
,3595,3733,3650,3703,3508,3644,3718,3552,3572,3502,3583,3725,3510,3461,3386,3379,3285,3455,2995,2841,3738,3710,3697,3568,2008,2012,1859,2110,2628,3230,3458,3318,3390,3200,3240,3268,3288,3261,2972,3211,3470,3255,3348,3351,3302,3361,3304,2866,2734,3482,3468,3470,3374,1997,1996,2863,2093,2468
|
[
"brayan.inf@gmail.com"
] |
brayan.inf@gmail.com
|
b770a1573629fd73844895566f2af2b7f90b59b2
|
5db507026cf745767195c2134b3291f17abd7a1d
|
/수업/5주차 수업내용/ball.py
|
dc6a71bae978c2b1db56e560dd51e41e1e9f171f
|
[] |
no_license
|
LEEMIREUK/2DGP
|
1430ee01a1fad4e9ea568782d2b06092a036ef05
|
7112606622377e7f5e75db753102ed70a6820325
|
refs/heads/master
| 2023-01-29T08:04:43.458759
| 2020-12-04T09:00:21
| 2020-12-04T09:00:21
| 294,226,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
from pico2d import *
from gobj import *
import gfw_image
class Ball:
balls = []
def __init__(self, pos, delta, big=False):
imageName = '/ball41x41.png' if big else '/ball21x21.png'
self.image = gfw_image.load(RES_DIR + imageName)
self.pos = pos
self.delta = delta
self.radius = self.image.h // 2
print('Radius = %d' % self.radius)
def draw(self):
self.image.draw(*self.pos)
def update(self):
x, y = self.pos
dx, dy = self.delta
x += dx
y += dy
gravity = 0.1
dy -= gravity
bottom = y - self.radius
if bottom < 50 and dy < 0:
            dy *= -0.8  # bounce with damping; the original `rand(-0.8)` is undefined, and flipping with 20% energy loss is the likely intent
if dy <= 1:
dy = 0
if x < -100 or x > get_canvas_width() + 100:
Ball.balls.remove(self)
print("Ball count - %d" % len(Ball.balls))
self.pos = x, y
self.delta = dx, dy
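    # Usage sketch (assumed, not in the original file): a caller would do e.g.
    #   Ball.balls.append(Ball(pos=(100, 300), delta=(3, 5), big=False))
    # and then call update() and draw() on each ball every frame.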
|
[
"sslejds@naver.com"
] |
sslejds@naver.com
|
d1710bafd9de4a1e82dbe146ab0812f32a4d79fd
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/confirm_asset_upload_req.py
|
abcc2de26b8e169ce1f8a8e0688358f836ba95d3
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,796
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ConfirmAssetUploadReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_id': 'str',
'status': 'str'
}
attribute_map = {
'asset_id': 'asset_id',
'status': 'status'
}
def __init__(self, asset_id=None, status=None):
"""ConfirmAssetUploadReq - a model defined in huaweicloud sdk"""
self._asset_id = None
self._status = None
self.discriminator = None
self.asset_id = asset_id
self.status = status
@property
def asset_id(self):
"""Gets the asset_id of this ConfirmAssetUploadReq.
        Media asset ID.
:return: The asset_id of this ConfirmAssetUploadReq.
:rtype: str
"""
return self._asset_id
@asset_id.setter
def asset_id(self, asset_id):
"""Sets the asset_id of this ConfirmAssetUploadReq.
        Media asset ID.
:param asset_id: The asset_id of this ConfirmAssetUploadReq.
:type: str
"""
self._asset_id = asset_id
@property
def status(self):
"""Gets the status of this ConfirmAssetUploadReq.
        Upload status. Possible values: - CREATED: creation succeeded. - FAILED: creation failed. - CANCELLED: creation cancelled.
:return: The status of this ConfirmAssetUploadReq.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ConfirmAssetUploadReq.
        Upload status. Possible values: - CREATED: creation succeeded. - FAILED: creation failed. - CANCELLED: creation cancelled.
:param status: The status of this ConfirmAssetUploadReq.
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConfirmAssetUploadReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
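# Usage sketch (status values taken from the docstrings above):
# req = ConfirmAssetUploadReq(asset_id='<asset-id>', status='CREATED')
# req.to_dict()  # -> {'asset_id': '<asset-id>', 'status': 'CREATED'}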
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
5d3d54ab106ea1896ca64c8ccc21053587f74482
|
7bd2e8be5f7c85814b65ea89c8010579177c9bd7
|
/view.py
|
3fa5d0224688faacaa25e7026136572d30b66f49
|
[] |
no_license
|
13580769346/bj18
|
e7f9f339185302d3812c40808a69acc0564fe217
|
ada9e50d9f7cff44427da9d27e59539c0b520136
|
refs/heads/master
| 2020-08-23T07:53:58.937629
| 2019-10-21T14:49:01
| 2019-10-21T14:49:01
| 216,574,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from django.http import HttpResponse
import time
def index(request):
return HttpResponse('ok')
|
[
"495431861@qq.com"
] |
495431861@qq.com
|
209ab1d35b27d2962dc6d5a2f093da0fd655bb9a
|
3a80dc963659b662d289231f544f0ac60075e83c
|
/exercises/05_basic_scripts/task_5_3a.py
|
e8517344b7f10185db427c3223b8ba08d47a8fb6
|
[] |
no_license
|
notwhale/pyneng-examples-exercises
|
c1c6ee2d2fc935b63a7cd609900317d1ede574e7
|
8c3c997132096180f2e835e55c840a80fb513c58
|
refs/heads/master
| 2023-05-23T04:00:43.318285
| 2021-06-15T04:21:52
| 2021-06-15T04:21:52
| 287,457,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Task 5.3a
Extend the script from task 5.3 so that, depending on the selected mode,
a different question is asked for the VLAN number or the list of VLANs:
* for access: 'Enter VLAN number:'
* for trunk: 'Enter allowed VLANs:'
Restriction: all tasks must be solved using only the topics covered so far.
That is, this task can be solved without an if condition or for/while loops.
"""
access_template = [
"switchport mode access",
"switchport access vlan {}",
"switchport nonegotiate",
"spanning-tree portfast",
"spanning-tree bpduguard enable",
]
trunk_template = [
"switchport trunk encapsulation dot1q",
"switchport mode trunk",
"switchport trunk allowed vlan {}",
]
# Solution
mode = input('Enter interface mode (access/trunk): ')
intf = input('Enter interface type and number: ')
vlan_q = {
    "trunk": 'Enter allowed VLANs: ',
    "access": 'Enter VLAN number: '
}
vlan = input(vlan_q[mode])
result = {
"trunk": trunk_template,
"access" : access_template
}
print('interface {}'.format(intf))
print('\n'.join(result[mode]).format(vlan))
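# Sample session (illustrative): entering "access", "Fa0/1" and "5" prints
# "interface Fa0/1" followed by the access template with vlan 5 substituted.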
|
[
"notwhale@gmail.com"
] |
notwhale@gmail.com
|
5f603c43da8418866b1c607b364dd64f03f63bf2
|
06a76cc0c398240c04acee0d8514f5f047c9ab9b
|
/tests/test_cleaning.py
|
da0ff654d7c513e600ee46d6c692dcff3439e6bc
|
[] |
no_license
|
1eg1on/cs_go_parsing
|
aec5ce31eb50a2b51b939bb29f4bdc1717c4577e
|
d3e3176fe615d4947cb4ca5e250358a4acb508c9
|
refs/heads/main
| 2023-08-17T11:13:00.969395
| 2021-09-24T12:14:03
| 2021-09-24T12:14:03
| 409,950,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,794
|
py
|
import pytest
import pandas as pd
from csgo.parser.cleaning import associate_entities, replace_entities, remove_dupes
class TestCleaning:
"""Class to test CSGO data cleaning functions"""
def test_association(self):
"""Test entity association"""
a = ["misutaaa-", "ZyW0o//", "peeter"]
b = ["misuta", "Zywoo", "peter"]
c = associate_entities(a, b)
assert c["misutaaa-"] == "misuta"
def test_lcss_metric(self):
"""Test LCSS metric"""
a = ["misutaaa-", "ZyW0o//", "peeter"]
b = ["misuta", "Zywoo", "peter"]
c = associate_entities(a, b, metric="lcss")
assert c["misutaaa-"] == "misuta"
def test_hamming_metric(self):
"""Test Hamming metric"""
a = ["misutaaa-", "ZyW0o//", "peeter"]
b = ["misuta", "Zywoo", "peter"]
c = associate_entities(a, b, metric="hamming")
assert c["misutaaa-"] == "misuta"
def test_levenshtein_metric(self):
"""Test Levenshtein metric"""
a = ["misutaaa-", "ZyW0o//", "peeter"]
b = ["misuta", "Zywoo", "peter"]
c = associate_entities(a, b, metric="levenshtein")
assert c["misutaaa-"] == "misuta"
def test_jaro_metric(self):
"""Test Jaro-Winkler metric"""
a = ["misutaaa-", "ZyW0o//", "peeter"]
b = ["misuta", "Zywoo", "peter"]
c = associate_entities(a, b, metric="jaro")
assert c["misutaaa-"] == "misuta"
def test_wrong_metric(self):
"""Tests if submitting a wrong metric raises an error."""
a = ["misutaaa-", "ZyW0o//"]
b = ["misuta", "Zywoo", "peter"]
with pytest.raises(ValueError):
associate_entities(a, b, metric="bad_metric")
def test_entity_replace(self):
"""Tests if entity replacement works for a dataframe."""
df = pd.DataFrame(
{"Person": ["sid", "peter", "joao"], "Country": ["DE", "US", "BR"]}
)
entities = {"DE": "Germany", "US": "USA", "BR": "Brazil"}
new_df = replace_entities(df, "Country", entities)
assert new_df.Country.tolist() == ["Germany", "USA", "Brazil"]
def test_entity_replace_no_col(self):
"""Tests if entity replacement fails on a non-contained column."""
df = pd.DataFrame(
{"Person": ["sid", "peter", "joao"], "Country": ["DE", "US", "BR"]}
)
entities = {"DE": "Germany", "US": "USA", "BR": "Brazil"}
with pytest.raises(ValueError):
replace_entities(df, "Countryyy", entities)
def test_remove_dupes(self):
"""Tests remove dupes"""
df = pd.DataFrame({"Person": ["peter", "peter"], "Country": ["US", "US"]})
no_dupes = remove_dupes(df, cols=["Person", "Country"])
assert no_dupes.shape[0] == 1
|
[
"maxim.komatovskiy@skoltech.ru"
] |
maxim.komatovskiy@skoltech.ru
|
4b9296cf929939a105afe6f79ff869b0a5287bbf
|
e2197a4c8502138c0cf2d8219527aa7e2b5980c6
|
/assignments/six/q2.py
|
0b0866769abc11e56792ea56aa982925a85f1a0e
|
[] |
no_license
|
sircpl/stanford-algorithms-1
|
af5efc610648fca38e8b8447670ff9d4365a568a
|
7bb9b319b52080f6ddbc64566572c1ef36c60aa8
|
refs/heads/master
| 2021-05-29T15:12:06.516409
| 2015-07-28T04:24:29
| 2015-07-28T04:24:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
__author__ = 'cman'
from utils import data
import heapq
def _rebalance(minheap, maxheap):
minlen = len(minheap)
maxlen = len(maxheap)
if minlen > maxlen:
if (minlen - maxlen) > 1:
minval = heapq.heappop(minheap)
heapq.heappush(maxheap, -minval)
else:
if (maxlen - minlen) > 1:
maxval = -heapq.heappop(maxheap)
heapq.heappush(minheap, maxval)
def medians(ints):
minheap, maxheap = [], []
if len(ints) < 2:
raise Exception('median maintenance algo only relevant for >= 2 elements')
a = ints.pop(0)
meds = [a]
b = ints.pop(0)
# add smaller to the max heap, bigger to min heap
if a > b:
meds.append(b)
heapq.heappush(minheap, a)
heapq.heappush(maxheap, -b)
else:
meds.append(a)
heapq.heappush(minheap, b)
heapq.heappush(maxheap, -a)
for i in ints:
if i < -maxheap[0]:
heapq.heappush(maxheap, -i)
else:
heapq.heappush(minheap, i)
_rebalance(minheap, maxheap)
if len(minheap) > len(maxheap):
med = minheap[0]
elif len(maxheap) > len(minheap):
med = -maxheap[0]
else:
med = min(-maxheap[0], minheap[0])
meds.append(med)
return meds
if __name__ == '__main__':
ints = data.read_ints('q2.input')
meds = medians(ints)
sum = 0
for m in meds:
sum = (sum + m) % 10000
print(sum)
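# Quick sanity check (illustrative): medians([5, 1, 3]) returns [5, 1, 3] --
# the running ((k+1)//2)-th order statistics of the prefixes {5}, {5,1}, {5,1,3}.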
|
[
"colin.p.lancaster@gmail.com"
] |
colin.p.lancaster@gmail.com
|
3887ef5ba797cc8c6afd0e1c1b5fca46c761706d
|
f5e642d33aa2cbcc147646cf0c827bc2098e7eb9
|
/python3/varaiable_with_incremental_digit.py
|
d835bed0a6f51117a4692e48903ebceef1ca3f2c
|
[] |
no_license
|
keeeeeeeeeeta/MySandbox
|
9d194e5656d53df997b6266d5e3cd2e1062eec59
|
65fbb1f5e185d636819b3be20dcf5bb33aecee73
|
refs/heads/master
| 2020-03-27T07:18:53.859242
| 2018-08-26T13:17:03
| 2018-08-26T13:17:03
| 146,180,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
ans_candidate_list = ["cat",
"dog",
"bird",
"penguin",
]
num_answers = len(ans_candidate_list)
#num_answers = 4
#ans_candidate_0 = "cat"
#ans_candidate_1 = "dog"
#ans_candidate_2 = "bird"
#ans_candidate_3 = "penguin"
#ans_candidate_list = list("" * num_answers)
ans_candidate_list = [""] * num_answers
#print(ans_candidate_list)
#print("length of that is ...\n", len(ans_candidate_list))
for i in range(0, num_answers):
    # The original line `ans_candidate_list = ans_candidate_$i` is not valid Python;
    # there is no $-style name interpolation. Looking a variable up by a constructed
    # name needs globals():
    ans_candidate_list[i] = globals().get('ans_candidate_%d' % i, "")  # falls back to "" while ans_candidate_0..3 above stay commented out
#ans_candidate_list = [ans_candidate_0,
# ans_candidate_1,
# ans_candidate_2,
# ans_candidate_3
# ]
#
#target_answer = random.randint(0, num_answers)
#hangman(ans_candidate_list[target_answer])
|
[
"megalomania_12@hotmail.com"
] |
megalomania_12@hotmail.com
|
88c4468df4f2f59a62715b68ba38cca6fbdcf5cc
|
7bbcfd9a8ba15476cc1d186587120d46ffe08009
|
/TrdVwSpider/spiders/TradingView.py
|
e96d7790f27bd6233848947a2925b55236b0ef1d
|
[] |
no_license
|
rasoulkhaksari/Async_Scrap_Websocket
|
a8ffc30ed4e34eea01bf0131106704536a2ab4ee
|
8eb693ed695d5b7872e468ae51e3ebd4841db9d4
|
refs/heads/main
| 2023-07-16T00:37:50.204952
| 2021-08-27T00:24:10
| 2021-08-27T00:24:10
| 400,337,929
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
import scrapy
class TradingViewSpider(scrapy.Spider):
name = 'tradingviewspider'
start_urls=['https://www.tradingview.com/markets/cryptocurrencies/prices-all/']
def parse(self, response):
for tr in response.css("#js-screener-container table tbody tr"):
yield {
'Title': tr.css("td:nth-child(1) a::text").get().strip(),
'Mkt_Cap': tr.css("td:nth-child(2)::text").get(),
'FD_Mkt_Cap': tr.css("td:nth-child(3)::text").get(),
'LAST': tr.css("td:nth-child(4)::text").get(),
'Avail_Coins': tr.css("td:nth-child(5)::text").get(),
'Total_Coins': tr.css("td:nth-child(6)::text").get(),
'Traded_Vol': tr.css("td:nth-child(7)::text").get(),
'Chg': tr.css("td:nth-child(8)::text").get(),
}
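# Run sketch (assumes a standard Scrapy project layout around this spider):
#   scrapy crawl tradingviewspider -o prices.json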
|
[
"rasoulkhaksari@gmail.com"
] |
rasoulkhaksari@gmail.com
|
cb520c6f7226e6d93db405f5b980098484172815
|
fa05f4126ab47814159936e9e686dfd9519a1302
|
/MovieListProject/urls.py
|
626ebb48f844c22161c365f7d1fdcabb810d08e2
|
[] |
no_license
|
paridev/MoviesList
|
4192aaaa724bae348c7aed5efd2881fcb31205fd
|
2dab6f5ef609d48235cb7676f84020a1f4820e39
|
refs/heads/master
| 2022-12-30T12:34:08.773162
| 2020-10-04T20:36:43
| 2020-10-04T20:36:43
| 301,220,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 811
|
py
|
"""MovieListProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('MovieListApp.urls')),
]
|
[
"pari.sha.dev@gmail.com"
] |
pari.sha.dev@gmail.com
|
bf27f64704824bf3d156ec80e7c388c25163aad4
|
54b5dc2323a5166ab0a61acaa01794f9b14cb1df
|
/brute.py
|
a0abc02d9ca2d25dd10575f1c4fce49b082a3c34
|
[
"MIT"
] |
permissive
|
MikeWent/cf-hash-bruteforce
|
b7a2e46c7b79ad486611997779af0bf73bec11ed
|
d98131596c584fac16b670696548bb78b8895656
|
refs/heads/master
| 2021-09-02T09:33:37.686439
| 2018-01-01T13:38:07
| 2018-01-01T13:38:07
| 105,568,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,254
|
py
|
#!/usr/bin/env python3
import hashlib
class COLORS:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
ORANGE = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get_hashes(text):
text = text.encode('utf-8')
md5 = "MD5: " + hashlib.md5(text).hexdigest()
sha1 = "SHA1: " + hashlib.sha1(text).hexdigest()
sha256 = "SHA256: " + hashlib.sha256(text).hexdigest()
return md5, sha1, sha256
def number_generator(start, end):
n = start
while n <= end:
yield n
n += 1
PHRASE = input('Enter word to bruteforce: '+COLORS.BOLD)
print(COLORS.ENDC, end='')
RAW_CYCLES = input('Set maximum bruteforce cycles [500]: '+COLORS.BOLD)
if RAW_CYCLES == '':
CYCLES = 500
else:
try:
CYCLES = int(RAW_CYCLES)
except:
print(COLORS.RED+'Incorrect value!'+COLORS.ENDC)
exit()
print(COLORS.ENDC+'---')
for n in number_generator(0, CYCLES):
variant = PHRASE + str(n)
hashes = get_hashes(variant)
for hashstr in hashes:
if hashstr.endswith('cf'):
print(COLORS.BOLD+COLORS.GREEN+variant+COLORS.ENDC)
print(hashstr)
print('---')
print(COLORS.BLUE+'Done!'+COLORS.ENDC)
|
[
"git@meew.me"
] |
git@meew.me
|
fc1cd42e2c9a348ff35531e0e0e89433005fd76a
|
c2499b131c1936107638555eb524c733962455ec
|
/testcases/test_case_1822.sikuli/test_case_1822.py
|
b4fa613cc0a8da2613e79fed5f61163f67bee522
|
[] |
no_license
|
liupeng330/mac_automation
|
b9fbfcd1ea53691d22951ce82de30f0dace4b09e
|
a53f8a12d545f06fecb5bfae6d4cc3aab68bc2df
|
refs/heads/master
| 2021-01-20T09:01:52.383478
| 2015-07-28T10:16:39
| 2015-07-28T10:16:39
| 39,117,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
import os
from nose.plugins.attrib import attr
import time
import helper
from sikuli import *
from global_config import *
from controls.sign_in_control import *
from controls.rt_control import *
from controls.system_tray_control import *
import ops.operations as op
from base import *
class TestCase_1822(TestCase_Base):
""" Social Info: Like or unlike media in Shared with me """
def setUp(self):
try:
TestCase_Base.setUp(self)
log("Start to verify if RT is signed in or not")
if self.RT.is_sign_in:
log("RT has been signed in already, start to sign out")
self.RT.sign_out()
op.launch_RT_before_running_case()
self.RT.remove_all_from_cloud_view()
# test clip
test_name = "Download.mp4"
test_case_path = os.path.join(
test_content_path, "original", test_name)
assert os.path.isfile(test_case_path), "The media file doesn't exist in '" + test_case_path + "'"
# upload clip to cloud
assert helper.upload_video_via_API(
test_case_path), "Fail to upload test clip '" + self.test_name + "' via API"
assert_step(self.RT.switch_to_all_view())
# share the clip to account_username2
assert_step(self.RT.share_media_in_library_view(["Download"]))
op.switch_to_account2()
log("after sign in")
except:
TestCase_Base.tearDown(self)
raise
@attr('BVT')
def test_like_unlike_media(self):
assert_step(self.RT.switch_to_shared_with_me_view())
# verify the shared item is in the view
assert self.RT.does_exist_in_library(new_shared_media_item, "Download", default_wait_time), \
"The shared item doesn't exist in 'Share with me' view"
# like the shared album
log("Like a shared media")
assert_step(self.RT.like_media(new_shared_media_item))
# exit the gallery mode
log("Cloud gallery view")
self.RT.close_gallery_view()
time.sleep(2)
# unlike the same album
log("Unlike the shared media")
assert_step(self.RT.unlike_media(shared_media_item))
def tearDown(self):
TestCase_Base.tearDown(self)
|
[
"330liupeng@gmail.com"
] |
330liupeng@gmail.com
|
a5dfde187c82574476f24c98be4e7986b5c80b60
|
9f51f10153db959a9d57e98f51b69a1ec0b8a57f
|
/data/SegmentationDataset.py
|
7c0e922868b51a8e725beb8f23842f26db099766
|
[] |
no_license
|
StanfordDataScience/dssg_gsv
|
3b2b71feac57ba91ae94c7c6adc7bd02f8a8f39a
|
f64c688d7343a7c6606d5a1a4a894b26dc47791e
|
refs/heads/main
| 2023-07-27T19:47:26.072137
| 2021-09-10T20:55:23
| 2021-09-10T20:55:23
| 376,937,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,274
|
py
|
"""SegmentationDataset.py
-------------------------------------------------------------------------
Created by: Shubhang Desai
Date created: April 2021
Last revised: June 15, 2021
Project: GSV
Subproject: ml-buildings
-------------------------------------------------------------------------
Abstraction for a Cityscape dataset to be used for pre-training.
"""
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image, ImageDraw
import pandas as pd
import numpy as np
import os, json
labels = ['rider', 'persongroup', 'motorcycle', 'traffic sign', 'road', 'car', 'trailer', 'wall', 'license plate', 'bicyclegroup', 'motorcyclegroup', 'ridergroup', 'pole', 'vegetation', 'ground', 'ego vehicle', 'out of roi', 'rectification border', 'sidewalk', 'train', 'person', 'polegroup', 'bridge', 'caravan', 'bus', 'dynamic', 'truckgroup', 'rail track', 'guard rail', 'sky', 'tunnel', 'bicycle', 'building', 'terrain', 'cargroup', 'truck', 'traffic light', 'fence', 'parking', 'static']
class SegmentationDataset(Dataset):
def __init__(self, img_dir, setname):
"""
Initializes dataset of segmentation images/masks
Paramters
---------
img_dir : str
directories which contains images in `imgs/` subdir and `[setname].csv` for labels
setname : str
one of ['train', 'val', 'test']
"""
city_folders = [os.path.join(img_dir, 'imgs', setname, city) for city in os.listdir(os.path.join(img_dir, 'imgs', setname))]
self.images = []
for city_folder in city_folders: self.images.extend(os.path.join(city_folder, image_name) for image_name in os.listdir(city_folder))
self.masks = [path.replace('imgs', 'masks').replace('leftImg8bit.png', 'gtFine_polygons.json') for path in self.images]
self.transform = {
'train': transforms.Compose([
#transforms.RandomResizedCrop(224),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=.05),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}[setname]
def __len__(self):
return len(self.images)
def __getitem__(self, i):
image = Image.open(self.images[i])
image = self.transform(image)
mask_data = json.load(open(self.masks[i], 'r'))
mask = Image.new('RGB', (mask_data['imgWidth'], mask_data['imgHeight']))
draw = ImageDraw.Draw(mask)
for obj in mask_data['objects']: draw.polygon([tuple(coord) for coord in obj['polygon']], fill=(labels.index(obj['label']), 0, 0))
assert mask.size == (2048, 1024), 'Expected (2048, 1024), got ' + str(mask.size)
mask = np.array(mask.resize((512, 256)))[16:16+224, 144:144+224, 0]
return image, torch.Tensor(mask).long()
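# Usage sketch (the directory path is an assumption):
# ds = SegmentationDataset('data/cityscapes', 'train')
# image, mask = ds[0]  # image: 3x224x224 float tensor, mask: 224x224 LongTensor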
|
[
"acf67@cornell.edu"
] |
acf67@cornell.edu
|
9809a6eed241dc36cf145acf485b56a0c367148e
|
077dbaa15e31d0fab8e26cda8e59b0582a13a11b
|
/bios/urls.py
|
317886f62c6e735324bf95653b2c56d182b3dc29
|
[] |
no_license
|
thurloat/results
|
ad0794056f7bf215415935cb9db569ad5dcdeb61
|
183f6ef86ccc01242d1da98bc05d2f8bf17a2303
|
refs/heads/master
| 2021-01-19T04:52:20.688810
| 2009-08-14T14:36:57
| 2009-08-14T14:36:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
urls.py
Created by Adam on 2009-07-02.
Copyright (c) 2009 __MyCompanyName__. All rights reserved.
"""
from django.conf.urls.defaults import *
urlpatterns = patterns('bios.views',
(r'^$', 'show_bios_overview_mobile'),
(r'^img/(?P<id>.+)/$', 'image_view'),
(r'^flag/(?P<id>.+)/$', 'flag_view'),
(r'^athletes/(?P<country>[A-Z]{3})/$', 'show_athletes_all_country'),
(r'^athletes/(?P<country>[A-Z]{3})/(?P<crewNum>.+)/$', 'show_athletes_country_crew'),
(r'^athlete/(?P<identifier>.+)/$', 'show_athlete'),
(r'^upload/$', 'bio_upload'),
(r'^crew/(?P<key>.+)/$', 'show_crew'),
(r'^purgec$', 'bio_delete_country'),
(r'^purgea$', 'bio_delete_athlete'),
(r'^purgecr$', 'bio_delete_crew'),
)
|
[
"adam@Thurloat.local"
] |
adam@Thurloat.local
|
be242b756ac86cf439b6ea3566fdcc7c02f809ee
|
b51dae034662f95d34b79a3f89e234d077443433
|
/Codes/11_Modules&Packages.py
|
5ce00156307424652b0a6cd79f36837e4ca522be
|
[] |
no_license
|
busratican/PYTHON
|
555f6dfeb86e4503a6ec465670d87355a63bb533
|
a1265b92c4ffd0e4b90ef43186497b9b29c1ecba
|
refs/heads/master
| 2020-03-19T12:17:26.211510
| 2018-06-24T10:58:05
| 2018-06-24T10:58:05
| 136,508,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 21:02:48 2018
@author: Busra
"""
#Modules and Packages
#module: a piece of software that has specific functionality. Each module is a separate file.
#For example, let's say we modularize math operations like addition and subtraction:
#MODULES
#.../math
#../math/addition.py
#../math/substraction.py
#Please look at the math folder.
|
[
"busragul1022@gmail.com"
] |
busragul1022@gmail.com
|
6271b04087343064f71998a629e765387f373ed3
|
d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496
|
/com.CheckProofing/Test_w_04_Palette_PO_T4_LastChance/Utility_Page.py
|
76ab669f775bbd10411bd8701874ad2512ed09ac
|
[] |
no_license
|
ahmed-test001/python
|
21a27248c4571a13c0ed4dccab256aede1beea3a
|
eab59b9a54fae1a51fbc18c391599eb3b0e28b3d
|
refs/heads/master
| 2023-03-10T21:00:54.634028
| 2021-02-27T05:31:58
| 2021-02-27T05:31:58
| 342,778,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
import glob
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),".."))
class utilityPage:
unique_list = []
def write_Category_URL(self):
path = 'C:/Users/a.ferdous.CORP/PycharmProjects/com.CheckProofing/Test_w_04_Palette_PO_T4_LastChance/creative/*.htm'
with open('../TextFolder_Unique_URL/UniqueList_2.txt',"w")as f:
files = glob.glob(path)
for x in files:
# if "DD" in x:
self.unique_list.append(x)
someline = x + '\n'
f.writelines(someline)
print(someline)
def total_Count_URL(self):
count=0
with open('../TextFolder_Unique_URL/UniqueList_2.txt')as f:
for x in f:
count += 1
print("Total Number of URL: ", count)
if __name__ == '__main__':
util = utilityPage()
util.write_Category_URL()
util.total_Count_URL()
|
[
"ahmedu.ferdous@gmail.com"
] |
ahmedu.ferdous@gmail.com
|
b4fcabcc52d7108677b0248e2da7f62e36253e79
|
89e79c0a3f33de5fc03eec13c3346131b447a748
|
/searchAgents.py
|
95fa7a2539c5853b31eedb3efd29133b02fa9412
|
[] |
no_license
|
gutorsantos/pacman-berkeley
|
09ec7c8be4a6a9ef3b0a901a8167fab867cf0276
|
df13c0b83ed06bffa794fa7271e1eef94d5a82bc
|
refs/heads/master
| 2023-07-08T09:44:30.715966
| 2021-08-12T00:32:35
| 2021-08-12T00:32:35
| 395,149,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,793
|
py
|
# searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
"An agent that goes West until it can't."
def getAction(self, state):
"The agent receives a GameState (defined in pacman.py)."
if Directions.WEST in state.getLegalPacmanActions():
return Directions.WEST
else:
return Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
"""
This very general search agent finds a path using a supplied search
algorithm for a supplied search problem, then returns actions to follow that
path.
As a default, this agent runs DFS on a PositionSearchProblem to find
location (1,1)
Options for fn include:
depthFirstSearch or dfs
breadthFirstSearch or bfs
Note: You should NOT change any code in SearchAgent
"""
def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
# Warning: some advanced Python magic is employed below to find the right functions and problems
# Get the search function from the name and heuristic
if fn not in dir(search):
raise AttributeError, fn + ' is not a search function in search.py.'
func = getattr(search, fn)
if 'heuristic' not in func.func_code.co_varnames:
print('[SearchAgent] using function ' + fn)
self.searchFunction = func
else:
if heuristic in globals().keys():
heur = globals()[heuristic]
elif heuristic in dir(search):
heur = getattr(search, heuristic)
else:
raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
# Note: this bit of Python trickery combines the search algorithm and the heuristic
self.searchFunction = lambda x: func(x, heuristic=heur)
# Get the search problem type from the name
if prob not in globals().keys() or not prob.endswith('Problem'):
raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
self.searchType = globals()[prob]
print('[SearchAgent] using problem type ' + prob)
def registerInitialState(self, state):
"""
This is the first time that the agent sees the layout of the game
board. Here, we choose a path to the goal. In this phase, the agent
should compute the path to the goal and store it in a local variable.
All of the work is done in this method!
state: a GameState object (pacman.py)
"""
if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
starttime = time.time()
problem = self.searchType(state) # Makes a new search problem
self.actions = self.searchFunction(problem) # Find a path
totalCost = problem.getCostOfActions(self.actions)
print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
def getAction(self, state):
"""
Returns the next action in the path chosen earlier (in
registerInitialState). Return Directions.STOP if there is no further
action to take.
state: a GameState object (pacman.py)
"""
if 'actionIndex' not in dir(self): self.actionIndex = 0
i = self.actionIndex
self.actionIndex += 1
if i < len(self.actions):
return self.actions[i]
else:
return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
"""
A search problem defines the state space, start state, goal test, successor
function and cost function. This search problem can be used to find paths
to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
self.visualize = visualize
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print 'Warning: this does not look like a regular search maze'
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def getStartState(self):
return self.startState
def isGoalState(self, state):
isGoal = state == self.goal
# For display purposes only
if isGoal and self.visualize:
self._visitedlist.append(state)
import __main__
if '_display' in dir(__main__):
if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
__main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
return isGoal
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append( ( nextState, action, cost) )
# Bookkeeping for display purposes
self._expanded += 1 # DO NOT CHANGE
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999.
"""
if actions == None: return 999999
x,y= self.getStartState()
cost = 0
for action in actions:
            # Figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x,y))
return cost
class StayEastSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the West side of the board.
The cost function for stepping into a position (x,y) is 1/2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: .5 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
"""
An agent for position search with a cost function that penalizes being in
positions on the East side of the board.
The cost function for stepping into a position (x,y) is 2^x.
"""
def __init__(self):
self.searchFunction = search.uniformCostSearch
costFn = lambda pos: 2 ** pos[0]
self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def euclideanHeuristic(position, problem, info={}):
"The Euclidean distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return ( (xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2 ) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
"""
This search problem finds paths through all four corners of a layout.
You must select a suitable state space and successor function
"""
def __init__(self, startingGameState):
"""
Stores the walls, pacman's starting position and corners.
"""
self.walls = startingGameState.getWalls()
self.startingPosition = startingGameState.getPacmanPosition()
top, right = self.walls.height-2, self.walls.width-2
self.corners = ((1,1), (1,top), (right, 1), (right, top))
for corner in self.corners:
if not startingGameState.hasFood(*corner):
print 'Warning: no food in corner ' + str(corner)
self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
# Please add any code here which you would like to use
# in initializing the problem
"*** YOUR CODE HERE ***"
def getStartState(self):
"""
Returns the start state (in your state space, not the full Pacman state
space)
"""
"*** YOUR CODE HERE ***"
return (self.startingPosition, [])
def isGoalState(self, state):
"""
Returns whether this search state is a goal state of the problem.
"""
position = state[0]
visited = state[1]
if(position in self.corners):
if(not position in visited):
visited += [position]
if(len(visited) == 4):
return True
return False
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost'
is the incremental cost of expanding to that successor
"""
successors = []
x,y = state[0]
visited = state[1]
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
hitsWall = self.walls[nextx][nexty]
if(not hitsWall):
l = list(visited)
nextState = (nextx, nexty)
cost = 1
if nextState in self.corners:
if (not nextState in l):
l.append(nextState)
successors.append(((nextState, l), action, cost))
self._expanded += 1 # DO NOT CHANGE
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999. This is implemented for you.
"""
if actions == None: return 999999
x,y= self.startingPosition
for action in actions:
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
return len(actions)
def cornersHeuristic(state, problem):
"""
A heuristic for the CornersProblem that you defined.
state: The current search state
(a data structure you chose in your search problem)
problem: The CornersProblem instance for this layout.
This function should always return a number that is a lower bound on the
shortest path from the state to a goal of the problem; i.e. it should be
admissible (as well as consistent).
"""
from util import manhattanDistance
corners = problem.corners # These are the corner coordinates
walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
position = state[0]
visited = state[1]
unvisited = []
    if len(visited) == 4:  # goal state: all corners visited (the original passed only `position` to isGoalState, which expects the full state tuple)
        return 0
for c in corners:
if(c not in visited):
unvisited.append(c)
h = 0
distances = []
for u in unvisited:
d = manhattanDistance(position, u)
distances.append(d)
h = max(distances)
return h
class AStarCornersAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
self.searchType = CornersProblem
class FoodSearchProblem:
"""
A search problem associated with finding the a path that collects all of the
food (dots) in a Pacman game.
A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
foodGrid: a Grid (see game.py) of either True or False, specifying remaining food
"""
def __init__(self, startingGameState):
self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
self.walls = startingGameState.getWalls()
self.startingGameState = startingGameState
self._expanded = 0 # DO NOT CHANGE
self.heuristicInfo = {} # A dictionary for the heuristic to store information
def getStartState(self):
return self.start
def isGoalState(self, state):
return state[1].count() == 0
def getSuccessors(self, state):
"Returns successor states, the actions they require, and a cost of 1."
successors = []
self._expanded += 1 # DO NOT CHANGE
for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state[0]
dx, dy = Actions.directionToVector(direction)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextFood = state[1].copy()
nextFood[nextx][nexty] = False
successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
return successors
def getCostOfActions(self, actions):
"""Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999"""
x,y= self.getStartState()[0]
cost = 0
for action in actions:
# figure out the next state and see whether it's legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]:
return 999999
cost += 1
return cost
class AStarFoodSearchAgent(SearchAgent):
"A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
def __init__(self):
self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
"""
Your heuristic for the FoodSearchProblem goes here.
This heuristic must be consistent to ensure correctness. First, try to come
up with an admissible heuristic; almost all admissible heuristics will be
consistent as well.
    If using A* ever finds a solution that is worse than uniform cost search finds,
your heuristic is *not* consistent, and probably not admissible! On the
other hand, inadmissible or inconsistent heuristics may find optimal
solutions, so be careful.
The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid
(see game.py) of either True or False. You can call foodGrid.asList() to get
a list of food coordinates instead.
If you want access to info like walls, capsules, etc., you can query the
problem. For example, problem.walls gives you a Grid of where the walls
are.
If you want to *store* information to be reused in other calls to the
heuristic, there is a dictionary called problem.heuristicInfo that you can
use. For example, if you only want to count the walls once and store that
value, try: problem.heuristicInfo['wallCount'] = problem.walls.count()
Subsequent calls to this heuristic can access
problem.heuristicInfo['wallCount']
"""
position, foodGrid = state
food_list = foodGrid.asList()
distances = []
if(len(food_list) == 0):
return 0
for f in food_list:
distances.append(mazeDistance(position, f, problem.startingGameState))
return max(distances)
# food_list = foodGrid.asList()
# distances = []
# if(len(food_list) == 0):
# return 0
# for f in food_list:
# k = position + f
# if (k in problem.heuristicInfo):
# distances.append(problem.heuristicInfo[k])
# else:
# problem.heuristicInfo[k] = mazeDistance(position, f, problem.startingGameState)
# if(len(distances) > 0):
# return max(distances)
# else:
# return 0
class ClosestDotSearchAgent(SearchAgent):
"Search for all food using a sequence of searches"
def registerInitialState(self, state):
self.actions = []
currentState = state
while(currentState.getFood().count() > 0):
nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
self.actions += nextPathSegment
for action in nextPathSegment:
legal = currentState.getLegalActions()
if action not in legal:
t = (str(action), str(currentState))
raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
currentState = currentState.generateSuccessor(0, action)
self.actionIndex = 0
print 'Path found with cost %d.' % len(self.actions)
def findPathToClosestDot(self, gameState):
"""
Returns a path (a list of actions) to the closest dot, starting from
gameState.
"""
from search import breadthFirstSearch
# Here are some useful elements of the startState
startPosition = gameState.getPacmanPosition()
food = gameState.getFood()
walls = gameState.getWalls()
problem = AnyFoodSearchProblem(gameState)
return breadthFirstSearch(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but has a
different goal test, which you need to fill in below. The state space and
successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in the findPathToClosestDot
method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test that will
complete the problem definition.
"""
        x, y = state
        # O(1) grid lookup, equivalent to `state in self.food.asList()`
        return self.food[x][y]
def mazeDistance(point1, point2, gameState):
"""
Returns the maze distance between any two points, using the search functions
you have already built. The gameState can be any game state -- Pacman's
position in that state is ignored.
Example usage: mazeDistance( (2,4), (5,6), gameState)
This might be a useful helper function for your ApproximateSearchAgent.
"""
x1, y1 = point1
x2, y2 = point2
walls = gameState.getWalls()
assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
return len(search.bfs(prob))
|
[
"gustavo.r.santos@outlook.com"
] |
gustavo.r.santos@outlook.com
|
683c47aa2f3a2e18a964daa205fbf11433d07209
|
7e3d11d117aa42a729af2775aab66d9466a9b390
|
/stock_class(starterFile).py
|
8c7397ac1a55aaa9cd6e40aa0226129e27b4f468
|
[] |
no_license
|
ge8matt/CEIS150-Course-Project
|
87bb9562e1cb875f2c5e1eb2de4554121a9f0290
|
676b8ca7731e7e51d344502c065d5a8989e6dac2
|
refs/heads/main
| 2023-08-14T22:16:31.214000
| 2021-10-06T02:46:28
| 2021-10-06T02:46:28
| 402,296,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,012
|
py
|
# Summary: This module contains the class definitions that will be used in the stock analysis program
# Author:
# Date:
from datetime import datetime
# Create Stock class here
class Stock:
def __init__(self, symbol, name, shares):
self._symbol = symbol
self._name = name
self._shares = shares
self.DataList = [] # list of daily stock data
@property
def name(self):
return self._name
@name.setter
def name(self,name):
self._name = name
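    # --- Hedged completion sketch (not in the original starter) ---
    # The unit test in main() below expects a read-only symbol, shares that
    # change only through buy()/sell(), and an add_data() method; these names
    # are taken from that test, and this is one minimal way to satisfy it.
    @property
    def symbol(self):
        # read-only: with no setter, assignment raises AttributeError
        return self._symbol
    @property
    def shares(self):
        # read-only: share counts change only through buy() and sell()
        return self._shares
    def buy(self, quantity):
        self._shares = self._shares + quantity
    def sell(self, quantity):
        self._shares = self._shares - quantity
    def add_data(self, daily_data):
        self.DataList.append(daily_data)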
# Create DailyData class here.
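# Hedged sketch of DailyData (the starter leaves this for the student): main()
# below constructs DailyData(date, close, volume) and reads .date, .close and
# .volume, so those attribute names come straight from the test.
class DailyData:
    def __init__(self, date, close, volume):
        self.date = date
        self.close = close
        self.volume = volume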
# Unit Test - Do Not Change Code Below This Line *** *** *** *** *** *** *** *** ***
# main() is used for unit testing only. It will run when stock_class.py is run.
# Run this to test your class code. Once you have eliminated all errors, you are
# ready to continue with the next part of the project.
def main():
error_count = 0
error_list = []
print("Unit Testing Starting---")
# Test Add Stock
print("Testing Add Stock...",end="")
try:
testStock = Stock("TEST","Test Company",100)
print("Successful!")
except:
print("***Adding Stock Failed!")
error_count = error_count+1
error_list.append("Stock Constructor Error")
# Test Change Symbol
print("Testing Change Symbol...",end="")
try:
testStock.symbol = "NEWTEST"
print("***ERROR! Changing stock symbol should not be allowed.")
error_count = error_count+1
error_list.append("Stock symbol change allowed. Stock symbol changes should not be allowed.")
except:
print("Successful! - Stock symbol change blocked")
# Test Change Name
print("Test Change Name...",end="")
try:
testStock.name = "New Test Company"
if testStock.name == "New Test Company":
print("Successful!")
else:
print("***ERROR! Name change unsuccessful.")
error_count = error_count+1
error_list.append("Name Change Error")
except:
print("***ERROR! Name change failed.")
error_count = error_count+1
error_list.append("Name Change Failure")
# Test Change Shares
print("Test Change Shares...",end="")
try:
testStock.shares = 200
print("***ERROR! Changing stock shares directly should not be allowed.")
error_count = error_count+1
error_list.append("Stock shares change allowed. Change in shares should be done through buy() or sell().")
except:
print("Successful! - Stock shares change blocked")
# Test Buy and Sell
print("Test Buy shares...",end="")
try:
testStock.buy(50)
if testStock.shares == 150:
print("Successful!")
else:
print("***ERROR! Buy shares unsuccessful.")
error_count = error_count + 1
error_list.append("Buy Shares Failure!")
except:
print("***ERROR! Buy shares failed.")
error_count = error_count + 1
error_list.append("Buy Shares Failure!")
print("Test Sell shares...",end="")
try:
testStock.sell(25)
if testStock.shares == 125:
print("Successful!")
else:
print("***ERROR! Sell shares unsuccessful.")
error_count = error_count+1
error_list.append("Sell Shares Failure!")
except:
print("***ERROR! Sell shares failed.")
error_count = error_count + 1
error_list.append("Sell Shares Failure!")
# Test add daily data
print("Creating daily stock data...",end="")
daily_data_error = False
try:
dayData = DailyData(datetime.strptime("1/1/20","%m/%d/%y"),float(14.50),float(100000))
testStock.add_data(dayData)
if testStock.DataList[0].date != datetime.strptime("1/1/20","%m/%d/%y"):
error_count = error_count + 1
daily_data_error = True
error_list.append("Add Daily Data - Problem with Date")
if testStock.DataList[0].close != 14.50:
error_count = error_count + 1
daily_data_error = True
error_list.append("Add Daily Data - Problem with Closing Price")
if testStock.DataList[0].volume != 100000:
error_count = error_count + 1
daily_data_error = True
error_list.append("Add Daily Data - Problem with Volume")
except:
print("***ERROR! Add daily data failed.")
error_count = error_count + 1
error_list.append("Add daily data Failure!")
daily_data_error = True
if daily_data_error == True:
print("***ERROR! Creating daily data failed.")
else:
print("Successful!")
if (error_count) == 0:
print("Congratulations - All Tests Passed")
else:
print("-=== Problem List - Please Fix ===-")
for em in error_list:
print(em)
print("Goodbye")
# Program Starts Here
if __name__ == "__main__":
# run unit testing only if run as a stand-alone script
main()
|
[
"mlau@my.devry.edu"
] |
mlau@my.devry.edu
|
b1aa7ea3141770bd399797efad6d17bf39deb627
|
ef472552e618b5cb66e2d1b8040bcc99fae915c4
|
/generator_sample/gen_sample.py
|
0e01ef3817c1708a9e5451d9085cc8ce05cb210d
|
[] |
no_license
|
kumarhardik47/python-tutorials
|
544c1786081c6a2b7e40f2840f7d1745a146367b
|
e965febc907987b3b54ef16a9ec51bdc504fc1ed
|
refs/heads/master
| 2020-03-26T23:27:09.475974
| 2018-09-07T09:16:55
| 2018-09-07T09:16:55
| 145,542,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
import os
class Fib:
def __init__(self):
self.a = 0
self.b = 1
def __iter__(self):
return self
def next(self):
        retval = self.a + self.b
self.a = self.b
self.b = retval
return retval
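    __next__ = next  # hedged addition: alias so the iterator class also works under Python 3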
fib = iter(Fib())
print 0
print 1
for i in range(10):
print next(fib)
|
[
"kiran@localhost.localdomain"
] |
kiran@localhost.localdomain
|
f0e53c98f91c5f065040c630715d6ab7abe78881
|
2e5c0e502216b59a4e348437d4291767e29666ea
|
/Flask-Web/flasky/Lib/site-packages/dns/rdtypes/ANY/HIP.py
|
1c774bbff4eab998e5b99baee67f99272e514e84
|
[
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
fengzse/Feng_Repository
|
8881b64213eef94ca8b01652e5bc48e92a28e1f5
|
db335441fa48440e72eefab6b5fd61103af20c5d
|
refs/heads/master
| 2023-07-24T04:47:30.910625
| 2023-02-16T10:34:26
| 2023-02-16T10:34:26
| 245,704,594
| 1
| 0
|
Apache-2.0
| 2023-07-15T00:54:20
| 2020-03-07T20:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2010, 2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import base64
import binascii
import dns.exception
import dns.rdata
import dns.rdatatype
class HIP(dns.rdata.Rdata):
"""HIP record"""
# see: RFC 5205
__slots__ = ['hit', 'algorithm', 'key', 'servers']
def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
super().__init__(rdclass, rdtype)
object.__setattr__(self, 'hit', hit)
object.__setattr__(self, 'algorithm', algorithm)
object.__setattr__(self, 'key', key)
object.__setattr__(self, 'servers', dns.rdata._constify(servers))
def to_text(self, origin=None, relativize=True, **kw):
hit = binascii.hexlify(self.hit).decode()
key = base64.b64encode(self.key).replace(b'\n', b'').decode()
text = ''
servers = []
for server in self.servers:
servers.append(server.choose_relativity(origin, relativize))
if len(servers) > 0:
text += (' ' + ' '.join((x.to_unicode() for x in servers)))
return '%u %s %s%s' % (self.algorithm, hit, key, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
algorithm = tok.get_uint8()
hit = binascii.unhexlify(tok.get_string().encode())
if len(hit) > 255:
raise dns.exception.SyntaxError("HIT too long")
key = base64.b64decode(tok.get_string().encode())
servers = []
while 1:
token = tok.get()
if token.is_eol_or_eof():
break
server = tok.as_name(token, origin, relativize, relativize_to)
servers.append(server)
return cls(rdclass, rdtype, hit, algorithm, key, servers)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
lh = len(self.hit)
lk = len(self.key)
file.write(struct.pack("!BBH", lh, self.algorithm, lk))
file.write(self.hit)
file.write(self.key)
for server in self.servers:
server.to_wire(file, None, origin, False)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
(lh, algorithm, lk) = parser.get_struct('!BBH')
hit = parser.get_bytes(lh)
key = parser.get_bytes(lk)
servers = []
while parser.remaining() > 0:
server = parser.get_name(origin)
servers.append(server)
return cls(rdclass, rdtype, hit, algorithm, key, servers)
|
[
"fzhuse@gmail.com"
] |
fzhuse@gmail.com
|
ba454f85e30e19f707db52ff84fa97d079100090
|
8a8b0267c4db8847a898ac73ccb6e78e1744e24c
|
/Python_Net_Programming/pnp-ex01/broadcast/udpserver.py
|
b6270c96643b1e6cd819ae6a34c32278d5926f20
|
[] |
no_license
|
entirelymagic/Link_Academy
|
41ba890df6793924d186ea94dc8d13b0636c6679
|
844c39ff1281fae8406cd1a0dc06afd357f0bef3
|
refs/heads/master
| 2023-06-07T03:17:00.527924
| 2021-07-03T09:59:25
| 2021-07-03T09:59:25
| 314,755,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
import socket
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(("0.0.0.0", 8005))
msg = server.recvfrom(16)  # blocks until a datagram arrives; returns a (data, address) tuple
print(msg)
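# Hedged companion sketch (not part of the original file): a minimal sender
# this server would hear, assuming both run on the same machine:
#
# client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# client.sendto(b"hello", ("127.0.0.1", 8005))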
|
[
"elvislinkacademy"
] |
elvislinkacademy
|
697ae5846d96f604324973625902f39c27564424
|
c1127091c486afa9271ae4beca05b00aaa62ff06
|
/lockss_configuration/lockss-configuration-python/config_exchange.py
|
6120643be42ed52d623bf8d7ea28f685c91a4af5
|
[
"BSD-3-Clause"
] |
permissive
|
lockss/lockss-configuration-python
|
d747c6bc9cbf74748569597a951c0fc1df82d51a
|
d645b42e0fdae5ccfcaf80b26c09218c1e1c9c94
|
refs/heads/master
| 2020-03-28T13:58:12.286190
| 2018-09-12T08:22:07
| 2018-09-12T08:22:07
| 148,446,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,283
|
py
|
# coding: utf-8
"""
LOCKSS Configuration Service REST API
API of the LOCKSS Configuration REST Service # noqa: E501
OpenAPI spec version: 1.0.0
Contact: lockss-support@lockss.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ConfigExchange(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'props': 'dict(str, str)'
}
attribute_map = {
'props': 'props'
}
def __init__(self, props=None): # noqa: E501
"""ConfigExchange - a model defined in Swagger""" # noqa: E501
self._props = None
self.discriminator = None
self.props = props
@property
def props(self):
"""Gets the props of this ConfigExchange. # noqa: E501
The map of configuration items # noqa: E501
:return: The props of this ConfigExchange. # noqa: E501
:rtype: dict(str, str)
"""
return self._props
@props.setter
def props(self, props):
"""Sets the props of this ConfigExchange.
The map of configuration items # noqa: E501
:param props: The props of this ConfigExchange. # noqa: E501
:type: dict(str, str)
"""
if props is None:
raise ValueError("Invalid value for `props`, must not be `None`") # noqa: E501
self._props = props
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConfigExchange, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConfigExchange):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
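# Hedged usage sketch (not part of the generated module); the "props" name
# comes from the swagger definition above, the key/value pair is made up:
# ce = ConfigExchange(props={"org.lockss.log.default.level": "debug"})
# print(ce.to_dict())   # -> {'props': {'org.lockss.log.default.level': 'debug'}}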
|
[
"dlvargas@stanford.edu"
] |
dlvargas@stanford.edu
|
0c02fc3a75f4781dfaaf085875102433acfc575b
|
436a23cfa35885c919cc74e6a539a33ec383237c
|
/training/sock_utils.py
|
77445f8c014f220b885f3c3561cb3a622f536aa9
|
[] |
no_license
|
CameronFoss/SQLAlchemy-Client-Server
|
2bd6104fa7dc9662e07b29a20b2783f1567c3391
|
295273c8cb35d780af25bd170e3527f97d88067e
|
refs/heads/main
| 2023-04-26T14:16:35.790194
| 2021-05-26T20:46:04
| 2021-05-26T20:46:04
| 368,533,957
| 1
| 1
| null | 2021-05-25T18:55:17
| 2021-05-18T13:08:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
import socket
import json
def send_message(host, port, msg_dict):
"""Connect to sock via host and port and sends a message to sock."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
msg_json = json.dumps(msg_dict)
sock.sendall(msg_json.encode('utf-8'))
# Close the socket so 'data' will be null in get_data_from_connection
sock.close()
def decode_message_chunks(chunks):
"""Decode message chunks into a Python dictionary."""
msg_bytes = b''.join(chunks)
msg_str = msg_bytes.decode("utf-8")
# Note: caller needs to catch errors thrown by json.loads
return json.loads(msg_str)
def get_data_from_connection(sock):
"""Accept a client connection and get data until they close the socket."""
try:
clientsocket, address = sock.accept()
except socket.timeout:
return []
print("Connection from", address[0])
message_chunks = []
while True:
try:
data = clientsocket.recv(4096)
except socket.timeout:
continue
if not data:
break
message_chunks.append(data)
clientsocket.close()
return message_chunks
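# Hedged usage sketch (not part of the original module):
# listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# listener.bind(("localhost", 8000))
# listener.listen(5)
# send_message("localhost", 8000, {"type": "ping"})
# chunks = get_data_from_connection(listener)
# print(decode_message_chunks(chunks))   # -> {'type': 'ping'}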
|
[
"fossc@umich.edu"
] |
fossc@umich.edu
|
6d5b238a3ab38ee681515a4631abf1b6147f4397
|
2c59e943a98fe0c25b2f7267324fe850603def57
|
/leetcode/数组/remove_duplicates_from_sorted_array.py
|
5b12385139f7c3ffa4f8c445a947de82aad75cf6
|
[] |
no_license
|
xxNB/sword-offer
|
047dd6b3db7ea24af1a83d5a7a46ab47daa9ab85
|
e2699a43e9f7aecc475e21a4b2582c6ee3b41a1c
|
refs/heads/master
| 2021-07-06T18:44:47.074632
| 2019-01-16T15:57:27
| 2019-01-16T15:57:27
| 125,879,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
"""
100. Remove Duplicates from Sorted Array
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
Example
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
"""
class Solution:
"""
@param: nums: An ineger array
@return: An integer
"""
def removeDuplicates(self, A):
# write your code here
if A == []:
return 0
index = 0
for i in range(1, len(A)):
if A[index] != A[i]:
index += 1
                # keep the write index in step for the next comparison
A[index] = A[i]
return index + 1
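# Hedged usage sketch matching the docstring's example:
# A = [1, 1, 2]
# n = Solution().removeDuplicates(A)
# print(n, A[:n])   # -> 2 [1, 2]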
|
[
"zhangxin@juxinli.com"
] |
zhangxin@juxinli.com
|
bca68ca7795bed595f511a30b2a1ec6ba44148b9
|
4e308e8bb7056f1fd6914777b38d98b18254867f
|
/DECOMPYLED/FireOne/FireOne.py
|
5d6a9c07271a0c970312cc62a8b5afe6861d5c82
|
[] |
no_license
|
bschreck/cuttlefish
|
165aae651bf58c1142cc47934802a7a3614e39da
|
0f44ccca0ebf1a6f78165001586fcb67b98b406a
|
refs/heads/master
| 2020-05-19T23:07:11.520086
| 2014-02-25T05:26:18
| 2014-02-25T05:26:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,087
|
py
|
# emacs-mode: -*- python-*-
import Live
import MidiRemoteScript
NOTE_OFF_STATUS = 128
NOTE_ON_STATUS = 144
CC_STATUS = 176
NUM_NOTES = 128
NUM_CC_NO = 128
NUM_CHANNELS = 16
JOG_DIAL_CC = 60
RWD_NOTE = 91
FFWD_NOTE = 92
STOP_NOTE = 93
PLAY_NOTE = 94
REC_NOTE = 95
SHIFT_NOTE = 70
FIRE_ONE_TRANSPORT = [RWD_NOTE,
FFWD_NOTE,
STOP_NOTE,
PLAY_NOTE,
REC_NOTE]
FIRE_ONE_F_KEYS = range(54, 64)
FIRE_ONE_CHANNEL = 0
class FireOne:
__module__ = __name__
__doc__ = ' Small script for the Tascam FireOne mapping transport, jog dial, and shift '
def __init__(self, c_instance):
self._FireOne__c_instance = c_instance
self._FireOne__shift_pressed = False
self._FireOne__rwd_pressed = False
self._FireOne__ffwd_pressed = False
self._FireOne__jog_dial_map_mode = Live.MidiMap.MapMode.absolute
self._FireOne__spooling_counter = 0
self.song().add_is_playing_listener(self._FireOne__playing_status_changed)
self.song().add_record_mode_listener(self._FireOne__recording_status_changed)
self.song().add_tracks_listener(self._FireOne__tracks_changed)
self._FireOne__playing_status_changed()
self._FireOne__recording_status_changed()
def application(self):
"""returns a reference to the application that we are running in
"""
return Live.Application.get_application()
def song(self):
"""returns a reference to the Live song instance that we do control
"""
return self._FireOne__c_instance.song()
def disconnect(self):
"""Live -> Script
Called right before we get disconnected from Live.
"""
self.send_midi(((NOTE_OFF_STATUS + FIRE_ONE_CHANNEL),
PLAY_NOTE,
0))
self.send_midi(((NOTE_OFF_STATUS + FIRE_ONE_CHANNEL),
REC_NOTE,
0))
self.song().remove_is_playing_listener(self._FireOne__playing_status_changed)
self.song().remove_record_mode_listener(self._FireOne__recording_status_changed)
self.song().remove_tracks_listener(self._FireOne__tracks_changed)
def connect_script_instances(self, instanciated_scripts):
"""Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do it
connect the extension modules (MackieControlXTs).
"""
pass
def suggest_input_port(self):
"""Live -> Script
Live can ask the script for an input port name to find a suitable one.
"""
return str('FireOne Control')
def suggest_output_port(self):
"""Live -> Script
Live can ask the script for an output port name to find a suitable one.
"""
return str('FireOne Control')
def suggest_map_mode(self, cc_no, channel):
"""Live -> Script
Live can ask the script for a suitable mapping mode for a given CC.
"""
suggested_map_mode = Live.MidiMap.MapMode.absolute
if (cc_no == JOG_DIAL_CC):
suggested_map_mode = self._FireOne__jog_dial_map_mode
return suggested_map_mode
def can_lock_to_devices(self):
return False
def request_rebuild_midi_map(self):
"""Script -> Live
When the internal MIDI controller has changed in a way that you need to rebuild
the MIDI mappings, request a rebuild by calling this function
This is processed as a request, to be sure that its not too often called, because
its time-critical.
"""
self._FireOne__c_instance.request_rebuild_midi_map()
def send_midi(self, midi_event_bytes):
"""Script -> Live
Use this function to send MIDI events through Live to the _real_ MIDI devices
that this script is assigned to.
"""
self._FireOne__c_instance.send_midi(midi_event_bytes)
def refresh_state(self):
"""Live -> Script
Send out MIDI to completely update the attached MIDI controller.
Will be called when requested by the user, after for example having reconnected
the MIDI cables...
"""
pass
def build_midi_map(self, midi_map_handle):
"""Live -> Script
Build DeviceParameter Mappings, that are processed in Audio time, or
forward MIDI messages explicitly to our receive_midi_functions.
Which means that when you are not forwarding MIDI, nor mapping parameters, you will
never get any MIDI messages at all.
"""
script_handle = self._FireOne__c_instance.handle()
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, FIRE_ONE_CHANNEL, JOG_DIAL_CC)
for note in FIRE_ONE_TRANSPORT:
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, FIRE_ONE_CHANNEL, note)
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, FIRE_ONE_CHANNEL, SHIFT_NOTE)
for index in range(len(self.song().tracks)):
if (len(FIRE_ONE_F_KEYS) > index):
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, FIRE_ONE_CHANNEL, FIRE_ONE_F_KEYS[index])
else:
break
def update_display(self):
"""Live -> Script
Aka on_timer. Called every 100 ms and should be used to update display relevant
parts of the controller
"""
if self._FireOne__ffwd_pressed:
self._FireOne__spooling_counter += 1
if ((self._FireOne__spooling_counter % 2) == 0):
self.song().jump_by(self.song().signature_denominator)
elif self._FireOne__rwd_pressed:
self._FireOne__spooling_counter += 1
if ((self._FireOne__spooling_counter % 2) == 0):
self.song().jump_by((-1 * self.song().signature_denominator))
def receive_midi(self, midi_bytes):
"""Live -> Script
MIDI messages are only received through this function, when explicitly
forwarded in 'build_midi_map'.
"""
cc_or_note = midi_bytes[1]
if ((midi_bytes[0] & 240) == CC_STATUS):
if (cc_or_note is JOG_DIAL_CC):
self._FireOne__jog_dial_message(cc_or_note, midi_bytes[2])
elif ((midi_bytes[0] & 240) in (NOTE_ON_STATUS,
NOTE_OFF_STATUS)):
value = midi_bytes[2]
if ((midi_bytes[0] & 240) == NOTE_OFF_STATUS):
value = 0
if (cc_or_note is SHIFT_NOTE):
self._FireOne__shift_pressed = (value != 0)
elif (cc_or_note in FIRE_ONE_TRANSPORT):
self._FireOne__transport_message(cc_or_note, value)
elif (cc_or_note in FIRE_ONE_F_KEYS):
self._FireOne__f_key_message(cc_or_note, value)
def __playing_status_changed(self):
""" Update the LED accordingly """
status = NOTE_OFF_STATUS
note = PLAY_NOTE
value = 0
if self.song().is_playing:
status = NOTE_ON_STATUS
value = 127
status += FIRE_ONE_CHANNEL
self.send_midi((status,
note,
value))
def __recording_status_changed(self):
""" Update the LED accordingly """
status = NOTE_OFF_STATUS
note = REC_NOTE
value = 0
if self.song().record_mode:
status = NOTE_ON_STATUS
value = 127
status += FIRE_ONE_CHANNEL
self.send_midi((status,
note,
value))
def __tracks_changed(self):
self.request_rebuild_midi_map()
def __transport_message(self, note, value):
""" One of the transport buttons was pressed or release """
assert (note in FIRE_ONE_TRANSPORT)
if ((note is PLAY_NOTE) and (value != 0)):
if self._FireOne__shift_pressed:
self.song().continue_playing()
else:
self.song().is_playing = True
elif ((note is STOP_NOTE) and (value != 0)):
self.song().is_playing = False
elif ((note is REC_NOTE) and (value != 0)):
self.song().record_mode = (not self.song().record_mode)
elif (note is FFWD_NOTE):
if ((value != 0) and (not self._FireOne__rwd_pressed)):
if self._FireOne__shift_pressed:
self.song().jump_by(1)
else:
self.song().jump_by(self.song().signature_denominator)
self._FireOne__ffwd_pressed = True
self._FireOne__spooling_counter = 0
elif (value == 0):
self._FireOne__ffwd_pressed = False
elif (note is RWD_NOTE):
if ((value != 0) and (not self._FireOne__ffwd_pressed)):
if self._FireOne__shift_pressed:
self.song().jump_by(-1)
else:
self.song().jump_by((-1 * self.song().signature_denominator))
self._FireOne__rwd_pressed = True
self._FireOne__spooling_counter = 0
elif (value == 0):
self._FireOne__rwd_pressed = False
def __jog_dial_message(self, cc_no, cc_value):
""" Jog Dial: the function is based on the shift status and the active view """
assert (cc_value in range(1, 128))
moved_forward = (cc_value in range(1, 64))
if (not self._FireOne__shift_pressed):
if self.application().view.is_view_visible('Session'):
index = list(self.song().scenes).index(self.song().view.selected_scene)
if moved_forward:
if (index < (len(self.song().scenes) - 1)):
index = (index + 1)
elif (index > 0):
index = (index - 1)
self.song().view.selected_scene = self.song().scenes[index]
else:
value = cc_value
if (not moved_forward):
value -= 64
value *= -1
self.song().jump_by(value)
elif self.application().view.is_view_visible('Session'):
index = list(self.song().tracks).index(self.song().view.selected_track)
if moved_forward:
if (index < (len(self.song().tracks) - 1)):
index = (index + 1)
elif (index > 0):
index = (index - 1)
self.song().view.selected_track = self.song().tracks[index]
else:
value = cc_value
if (not moved_forward):
value -= 64
                value *= -0.1
            self.song().tempo = (self.song().tempo + (0.1 * value))
def __f_key_message(self, f_key, value):
index = list(FIRE_ONE_F_KEYS).index(f_key)
assert (index >= 0)
assert (len(self.song().tracks) > index)
track = self.song().tracks[index]
assert (track != None)
if (value > 0):
if self._FireOne__shift_pressed:
if track.can_be_armed:
track.arm = (not track.arm)
else:
track.mute = (not track.mute)
# local variables:
# tab-width: 4
|
[
"bschreck@mit.edu"
] |
bschreck@mit.edu
|
0435ada236ecf42249ff98d1e29cbc6e5e46c607
|
16030618fe1481bea86289a15c4d2f6e8bc3808e
|
/Django/finalwebsite/pension/views.py
|
42da8f7031581d4114aecb2d98d982148e5bbf82
|
[] |
no_license
|
vfloresp/Sistemas_distribuidos
|
d3607d05b395da458f7ad62adff427356c51a79f
|
8b8ae67c5351d8576c64f38e0a46f46c0cf12f56
|
refs/heads/main
| 2023-05-05T19:28:35.526239
| 2021-05-26T20:47:07
| 2021-05-26T20:47:07
| 331,456,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Pensionado
from django.template import loader
# Create your views here.
def index(request):
template = loader.get_template("pension/index.html")
contexto = {}
return HttpResponse(template.render(contexto, request))
def simulacion(request):
nombre = request.POST.get("nombre")
edad_actual = request.POST.get("edad_actual")
edad_retiro = request.POST.get("edad_retiro")
saldo_acumulado = request.POST.get("saldo_acumulado")
ahorro_mensual = request.POST.get("ahorro_mensual")
genero = request.POST.get("genero")
pensionado = Pensionado(
nombre=nombre,
edad_actual=edad_actual,
edad_retiro=edad_retiro,
saldo_acumulado=saldo_acumulado,
ahorro_mensual=ahorro_mensual,
genero=genero,
)
pensionado.save()
    return HttpResponse(
        # two %s placeholders need a tuple of two values; chaining % twice raises a TypeError
        "%s tendrás una pensión de %s pesos"
        % (pensionado.nombre, pensionado.pension_mensual)
    )
def listado(request):
    pensionados = Pensionado.objects.all()  # the queryset was built but never used
    template = loader.get_template("pension/listadod.html")
    # a Django view must return a response; "pensionados" is an assumed context key
    return HttpResponse(template.render({"pensionados": pensionados}, request))
|
[
"vflores@pop-os.localdomain"
] |
vflores@pop-os.localdomain
|
603dfd9080de185194161fca663cfce4d6ecf267
|
d7fcd508920f12bbb80c4f953b92d879758db376
|
/BasicConcepts/SyntaxErrors/Volume1_Chapter2_SyntaxErrors.py
|
682fdd42fdea897b54cd908258e602f6fef92ff1
|
[
"Unlicense"
] |
permissive
|
jpike/PythonProgrammingForKids
|
394c14a585f6fe779a571e4520fc6a59efa21af3
|
79a36d6db525d39f78e33b6f7b2d0da0d65a073c
|
refs/heads/master
| 2021-07-20T11:01:04.440281
| 2021-06-08T23:56:17
| 2021-06-08T23:56:17
| 108,654,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
# This program contains various "syntax" errors.
# It's your job to fix this program so that it runs correctly.
PRINT('Hello, world!')
primt('Hello, world!')
prin('Hello, world!')
print 'Hello, world!')
print['Hello, world!')
print('Hello, world!'
print(Hello, world!)
print('Hello, world!)
print 'Hello, world!'
print('Hello, world!")
print(''Hello, world!')
|
[
"jacob@jacobpike.com"
] |
jacob@jacobpike.com
|
3ec82d07bf02c984ab35547af825f962b899d545
|
d1b85f3093b5d6c9ba3a84c0a54f4dba8a8b0482
|
/src/models/_rl_helpers.py
|
d3d25241a3d26e09a1e75942c7f8da10cdcf085d
|
[
"MIT"
] |
permissive
|
qihongl/learn-hippo
|
dc3599b04a53f31e3ab0987693a392562a42a802
|
6a4a1be4fd6780d4c8413ffc6b1facade4741135
|
refs/heads/master
| 2023-09-04T21:13:47.143537
| 2022-02-18T22:03:53
| 2022-02-18T22:03:53
| 192,548,807
| 33
| 7
|
MIT
| 2021-09-21T04:50:59
| 2019-06-18T13:46:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,059
|
py
|
import numpy as np
import torch
from torch.nn.functional import smooth_l1_loss
'''helpers'''
eps = np.finfo(np.float32).eps.item()
def get_reward(a_t, y_t, penalty, allow_dk=True):
"""define the reward function at time t
Parameters
----------
a_t : int
action
    y_t : torch.FloatTensor
        one-hot target action vector (all zeros during the delay period)
penalty : int
the penalty magnitude of making incorrect state prediction
allow_dk : bool
if True, then activating don't know makes r_t = 0, regardless of a_t
Returns
-------
torch.FloatTensor, scalar
immediate reward at time t
"""
dk_id = y_t.size()[0]
# if y_t is all zeros (delay period), then action target DNE
if torch.all(y_t == 0):
# -1 is not in the range of a_t, so r_t = penalty unless a_t == dk
a_t_targ = torch.tensor(-1)
else:
a_t_targ = torch.argmax(y_t)
# compare action vs. target action
if a_t == dk_id and allow_dk:
r_t = 0
elif a_t_targ == a_t:
r_t = 1
else:
r_t = - penalty
return torch.from_numpy(np.array(r_t)).type(torch.FloatTensor).data
# return torch.tensor(r_t).type(torch.FloatTensor).clone().detach()
def compute_returns(rewards, gamma=0, normalize=False):
"""compute return in the standard policy gradient setting.
Parameters
----------
rewards : list, 1d array
immediate reward at time t, for all t
gamma : float, [0,1]
temporal discount factor
normalize : bool
whether to normalize the return
- default to false, because we care about absolute scales
Returns
-------
1d torch.tensor
the sequence of cumulative return
"""
# compute cumulative discounted reward since t, for all t
R = 0
returns = []
for r in rewards[::-1]:
R = r + gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
# normalize w.r.t to the statistics of this trajectory
if normalize:
returns = (returns - returns.mean()) / (returns.std() + eps)
return returns
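# Hedged worked example: compute_returns([1, 0, 1], gamma=0.5) walks the
# rewards back to front: R = 1, then 0 + 0.5*1 = 0.5, then 1 + 0.5*0.5 = 1.25,
# giving tensor([1.25, 0.50, 1.00]).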
def compute_a2c_loss(probs, values, returns, use_V=True):
"""compute the objective node for policy/value networks
Parameters
----------
probs : list
action prob at time t
values : list
state value at time t
returns : list
return at time t
Returns
-------
torch.tensor, torch.tensor
Description of returned object.
"""
policy_grads, value_losses = [], []
for prob_t, v_t, R_t in zip(probs, values, returns):
if use_V:
A_t = R_t - v_t.item()
value_losses.append(
smooth_l1_loss(torch.squeeze(v_t), torch.squeeze(R_t))
)
else:
A_t = R_t
value_losses.append(torch.FloatTensor(0).data)
# accumulate policy gradient
policy_grads.append(-prob_t * A_t)
policy_gradient = torch.stack(policy_grads).sum()
value_loss = torch.stack(value_losses).sum()
return policy_gradient, value_loss
|
[
"lvqihong1992@gmail.com"
] |
lvqihong1992@gmail.com
|
bf114a09d6a97cebd6a7d5f46428325554fd9c65
|
a6065fd0743734c4ad0619b066d583ea462c1bde
|
/DjangoTest/DjangoTest/__init__.py
|
3ef6feb4e947e6e8f93252fb6708fa9efd3af984
|
[] |
no_license
|
kangziwen/DjangoTest
|
4c3f1e25983a4b6553a9801f8221fa1970b843a2
|
c603f409ae1aaad5e64b3d7a48b58fd6103ec12e
|
refs/heads/master
| 2021-08-18T16:32:04.714241
| 2017-11-23T08:45:55
| 2017-11-23T08:45:55
| 111,491,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
'''
Django uses the MySQLdb module to connect to MySQL by default.
To switch to pymysql, add the following code to the __init__ file of the
package that shares the project's name:
'''
'''
django-admin startproject mysite
python manage.py startapp cmdb
'''
import pymysql
pymysql.install_as_MySQLdb()
|
[
"baobao.kang@godinsec.com"
] |
baobao.kang@godinsec.com
|
bc50fd1c8d8ed9f3119f0a1a543e344df1fb6491
|
5c7f6d5f17da26413698d64c51dda0743cdf41af
|
/9.little_knowledge/1.concate.py
|
97e4b3fd8ee3116ea8ea4e1993dd7d5e4b905a73
|
[] |
no_license
|
choococo/DeepLearning
|
349fb51761991a642c6d43b557f69b9a3d882c36
|
c28b3f7bfdf2d6238f829ecf2a23f0112913559f
|
refs/heads/master
| 2023-03-07T14:04:48.175679
| 2021-02-18T07:08:04
| 2021-02-18T07:08:04
| 316,958,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
import numpy as np
"""
cat (concatenation) is a very handy operation for stitching high-dimensional
data together; both the numpy and torch packages provide an API for it.
Where it comes up:
(1) Used when reimplementing the MTCNN code: mainly when building the data samples.
"""
np.random.seed(0)  # seed() returns None, so assigning its result is pointless
a = np.random.randint(0, 10, (10, 1))
b = np.random.randint(0, 10, (10, 1))
print(a)
print(b)
c = np.concatenate([a, b], axis=0)  # along axis 0 this behaves like appending to a list
print(c)
d = np.concatenate([a, b], axis=1)  # pairs up the rows of a and b one to one
print(d)
"""
[[5 7]
[0 6]
[3 8]
[3 8]
[7 1]
[9 6]
[3 7]
[5 7]
[2 8]
[4 1]]
"""
|
[
"lzy@liev.com"
] |
lzy@liev.com
|
f2be2563cae05a623512af9fc6b201160883394d
|
de945ae0b19c40b2c198ffea68cde5b2214a4a3d
|
/day01/01.py
|
5854f772b4167be97dfbe3923d90c36b80f0363f
|
[] |
no_license
|
bonanaaaaaa/AdventOfCode2018
|
bf5025260dedd7f1bee538d0006a1dbd04eafbba
|
110bf89240763485d1c3b6482db7f9f283ac31bd
|
refs/heads/master
| 2021-11-24T17:28:27.216700
| 2018-12-12T10:10:00
| 2018-12-12T10:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
file = open("input.txt", "r")
sum = 0
for line in file:
sum += int(line)
print(sum)
|
[
"pittawatm@gmail.com"
] |
pittawatm@gmail.com
|
a475af5cc275bf91e3cecce1e6d78442fa1eebbb
|
b53a98a19425c5f5ba20495bc1d8739c299b504f
|
/middleware/middle.py
|
fb3b0b6907742013564b526210dd9e5d0925cf45
|
[] |
no_license
|
davypython/py36_tests
|
bf8338812109c70437096eaa4b683aaf33b48b61
|
d63c36b3fc82774c7c02c5197ca53603b8a8717a
|
refs/heads/master
| 2023-03-13T05:15:32.498071
| 2021-02-26T07:02:31
| 2021-02-26T07:02:31
| 340,954,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,829
|
py
|
import os
import random
import re
from pymysql.cursors import DictCursor
from common.db_handler import DBHandler
from common.logger_handler import LoggerHandler
from common.yaml_handler import YamlHandler
from common.excel_handler import ExcelHandler
from config.path import logs_path, data_path
class MidDBHandler(DBHandler):
def __init__(self):
yaml_config = YamlHandler('config.yaml').yaml_load()
safe_config = YamlHandler('safe.yaml').yaml_load()
super().__init__(host=safe_config['db']['host'],
port=safe_config['db']['port'],
user=safe_config['db']['user'],
password=safe_config['db']['password'],
                         # note: pymysql expects 'utf8', not 'utf-8'
                         charset=safe_config['db']['charset'],
                         # specify the database
database=safe_config['db']['database'],
cursorclass=DictCursor)
class MidHandler():
"""任务:中间层。common和调用层,使用项目的配置数据,填充common模块"""
# 设置属性
new_phone = ''
investor_user_id = ''
investor_user_token = ''
admin_user_id = ''
admin_user_token = ''
load_id = ''
load_token = ''
yaml_config = YamlHandler('config.yaml').yaml_load()
safe_config = YamlHandler('safe.yaml').yaml_load()
    # build the logger
log_file = os.path.join(logs_path, yaml_config['logger']['File'])
logger = LoggerHandler(Logger_Name=yaml_config['logger']['Logger_Name'],
File=log_file,
Logger_Level=yaml_config['logger']['Logger_Level'],
Hand_Level=yaml_config['logger']['Hand_Level'],
File_Hand_Level=yaml_config['logger']['File_Hand_Level'])
    # data available for placeholder substitution
investor_phone =safe_config['investor_user']['mobile_phone']
investor_pwd = safe_config['investor_user']['pwd']
admin_phone = safe_config['admin_user']['mobile_phone']
admin_pwd = safe_config['admin_user']['pwd']
loan_phone = safe_config['loan_user']['mobile_phone']
loan_pwd = safe_config['loan_user']['pwd']
@classmethod
def replace_data(cls, string):
        '''Replace #key# placeholders in a spreadsheet string with class attributes.'''
pattern = '#(.*?)#'
results = re.finditer(pattern=pattern, string=string)
for result in results:
old = result.group()
key = result.group(1)
new = str(getattr(cls, key, ''))
string = string.replace(old, new)
return string
    # excel object
excel_file = os.path.join(data_path, 'cases.xlsx')
excel = ExcelHandler(excel_file)
# excelwrite = ExcelHandler(excel_file).write('', '哈哈', row='', column='')
    # database
db_class = MidDBHandler
@classmethod
def random_number_1(cls):
        '''Generate a random mobile number that does not yet exist in the member table.'''
while True:
mobile_number = '1' + random.choice(['3', '5'])
for i in range(9):
mobile_number += str(random.randint(1, 9))
            sql = 'SELECT mobile_phone FROM member WHERE mobile_phone={};'.format(str(mobile_number))
db = MidDBHandler()
db_num = db.connect(sql, fetchone=True)
if not db_num:
# cls.new_phone = mobile_number
return mobile_number
if __name__ == '__main__':
sql = 'select leave_amount from member where id=2067;'
data = MidHandler.db_class()
info = data.connect(sql, fetchone=True)
print(info)
    da = MidHandler.replace_data('{"mobile_phone":"#investor_phone#","pwd":"#investor_pwd#","mobile_phone":"#admin_phone#","mobile_phone":"#loan_phone#","pwd":"#loan_pwd#"}')
print(da)
new_phone = MidHandler.random_number_1()
print(new_phone)
|
[
"809021517@qq.com"
] |
809021517@qq.com
|