Dataset columns (29 total):

blob_id: string, length 40
directory_id: string, length 40
path: string, length 2 to 616
content_id: string, length 40
detected_licenses: list, 0 to 69 items
license_type: string, 2 distinct values
repo_name: string, length 5 to 118
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, length 4 to 63
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64, 2.91k to 686M, may be null
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: string, 23 distinct values
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string, 213 distinct values
src_encoding: string, 30 distinct values
language: string, 1 distinct value
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 2 to 10.3M
extension: string, 246 distinct values
content: string, length 2 to 10.3M
authors: list, 1 item
author_id: string, length 0 to 212

The records in the remainder of this page list each row's fields in this column order, separated by "|".
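As a rough illustration of how records with this schema can be consumed, the sketch below reads a local Parquet shard with the `datasets` library and prints a few of the fields listed above. The file name `data.parquet` is a hypothetical placeholder, not something named on this page, and the sketch assumes the rows happen to be stored in Parquet form.

```python
# Minimal sketch, assuming the records above sit in a local Parquet shard
# named "data.parquet" (hypothetical path). Only standard `datasets` calls
# are used; each record is a dict keyed by the column names in the schema.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")

for record in ds.select(range(5)):  # inspect the first five rows
    print(record["repo_name"], record["path"])
    print("  license:", record["license_type"], record["detected_licenses"])
    print("  size:", record["length_bytes"], "bytes, stars:", record["star_events_count"])
```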
960ffd5a1114323566d13ce38ba5cf51302f2e5b
|
e5091c3a8477fa12e1adfdb1f3d826eb6e9bb2be
|
/Other/intergalactic_bidding.py
|
f984457c5e665269999ef48ee9d0fb5f7225cee3
|
[] |
no_license
|
leonardoAnjos16/Competitive-Programming
|
1db3793bfaa7b16fc9a2854c502b788a47f1bbe1
|
4c9390da44b2fa3c9ec4298783bfb3258b34574d
|
refs/heads/master
| 2023-08-14T02:25:31.178582
| 2023-08-06T06:54:52
| 2023-08-06T06:54:52
| 230,381,501
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
n, s = input().split()
n, s = int(n), int(s)
bidders = []
for i in range(n):
t, b = input().split()
bidders.append((int(b), t))
bidders.sort(reverse=True)
ans = []
for i in range(n):
if bidders[i][0] <= s:
ans.append(bidders[i][1])
s -= bidders[i][0]
if s > 0:
ans = []
print(len(ans))
for name in ans:
print(name)
|
[
"leonardoanjos.1a2015@gmail.com"
] |
leonardoanjos.1a2015@gmail.com
|
0b2a1b09f993eb6e2b2ef1d40bd379881a10aca3
|
c788a1ab2fe51ca73fdcf59103691a2836571765
|
/strong_password.py
|
ee0d0106f5ceacfd33587bd702b990de5489ec81
|
[] |
no_license
|
Navyashree008/if_else_2
|
81da4133e40c2e1be40c2e441c315c88469eb0a7
|
fcaefc19b37ea654ea8ba9ee219bb3393e83d2ba
|
refs/heads/main
| 2023-01-31T07:48:39.592170
| 2020-12-14T15:00:13
| 2020-12-14T15:00:13
| 321,382,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
alphabet=input("enter a alphabet")
num=int(input("enter a number"))
num2=int(input("enter another num"))
special_char=input("enter a special character")
if alphabet >="A"and alphabet <="Z":
if num>=0 and num<=9:
if num2>=0 and num2<=9:
if special_char== "@" or special_char=="$" or special_char == "&":
print("its a strong password")
else:
print("enter a special charecter")
else:
print("enter another no")
else:
print("enter proper number")
else:
print("enter any capital alphabet")
# password="N98@"
# alphabet=input("enter a alphabet")
# num=input("enter a number")
# special_char=input("enter a special character")
# if alphabet in password:
# print("wait its in prosess")
# if num in password:
# print("one more step to login")
# if special_char in password:
# print("its a strong password")
# else:
# print("check the password once again")
# else:
# print("check the password once again")
# else:
# print("check the password once again")
|
[
"you@example.com"
] |
you@example.com
|
011604ab86feec9546a04148b9cb87f5d86bd5ee
|
94c01ae8de8dd0fcee5d7ff92d1d41c2fa419eba
|
/Driver.py
|
10d5182e1463c859fb05273c8b0728e4d5bac5ba
|
[] |
no_license
|
sawrov/Ebay-Scraper
|
19a0de8dba6aba7ae5115c35472bd0191c093fa9
|
42d087bb112b91c07f6df50670a9e567859e2cb6
|
refs/heads/master
| 2023-01-18T21:41:20.109068
| 2020-11-20T04:19:51
| 2020-11-20T04:19:51
| 291,286,158
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class Driver:
def __init__(self):
self.driver=webdriver.Chrome(ChromeDriverManager().install())
# self.driver.set_window_position(0,0)
self.driver.maximize_window()
def loadurl(self,url):
try:
self.driver.get(url)
# _=wait(self.driver, 5).until(EC.element_to_be_clickable((By.XPATH, "//*[@id='prcIsum']")))
return True
except:
print("Invalid Url")
self.terminate()
quit()
def terminate(self):
self.driver.quit()
|
[
"sawrov@hotmail.com"
] |
sawrov@hotmail.com
|
d40a00ad19ac0a3ebecd9179e679ea0c18b4bcaa
|
9f6814e23c49a5afa6d06e7dacbd8e119f72e587
|
/Labcli/bin/python-config
|
dddadb47f7ef55b63d97109e17e048701819b680
|
[] |
no_license
|
solazverULA/Labcli
|
5bfcf5e1d3664d3c077a2aaee17f3ba153a01625
|
0738c0c45a69de37c2806ea6675c8fc75d0d7402
|
refs/heads/master
| 2020-03-19T00:13:08.687863
| 2018-06-27T04:50:50
| 2018-06-27T04:50:50
| 135,463,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,348
|
#!/home/astrid/virtualenv/Labcli/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"astrid.rodriguez15@gmail.com"
] |
astrid.rodriguez15@gmail.com
|
|
01ef188532c003cbc646c055d9eaa0860fbc9e9c
|
ebfa6975e2951e61cc9e701de482d83c949f65e2
|
/Batch_Render/TransfromUI.py
|
45e9cab68221b5188770204dd995d3ec4b40a3b8
|
[] |
no_license
|
317431629/Nuke_Tool
|
e6d0bcd30ab34053f9b7ea422cca4d14787a46f9
|
c44fdd4133dc08c817aaae9751fb3264cb305f11
|
refs/heads/master
| 2020-05-16T14:55:32.095363
| 2019-07-19T09:06:47
| 2019-07-19T09:06:47
| 183,116,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : TransfromUI.py
@Author: Fighter~
@Date : 2019/4/23 22:01
@Desc :
'''
from PyQt5 import uic
with open("C:/Users/Administrator/.nuke/Batch_Render\MainUI.py","w") as f:
uic.compileUi("C:/Users/Administrator/.nuke/Batch_Render\UI\MainUI.ui",f)
with open("C:/Users/Administrator/.nuke/Batch_Render\SubUI.py","w") as f:
uic.compileUi("C:/Users/Administrator/.nuke/Batch_Render\UI\SubUI.ui",f)
with open("C:/Users/Administrator/.nuke/Batch_Render\SrogressBar.py", "w") as f:
uic.compileUi("C:/Users/Administrator/.nuke/Batch_Render\UI\ProgressBar.ui", f)
|
[
"noreply@github.com"
] |
317431629.noreply@github.com
|
ddaa51e87f3dea75a23e6de7437d498db478de5c
|
ddd54e481c12104bd53ec7cb2a627fb0646d5f3c
|
/AI in Healthcare/Project-3D_Hypocampal_segmentation/Model_training/src/data_prep/SlicesDataset.py
|
22619d9ae0aadd067eea771b4a09c4362898512a
|
[
"MIT"
] |
permissive
|
abhishekdiphu/AI_for_Medicine_Specialization_Coursera
|
e811b94914503d4d956da403a23b6a6deb1e3671
|
75a8fd1effc385a8c5fb34ee2a274a7dfe6580f5
|
refs/heads/master
| 2023-03-28T22:05:08.080661
| 2021-04-07T19:32:47
| 2021-04-07T19:32:47
| 266,577,227
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
"""
Module for Pytorch dataset representations
"""
import torch
from torch.utils.data import Dataset
class SlicesDataset(Dataset):
"""
This class represents an indexable Torch dataset
which could be consumed by the PyTorch DataLoader class
"""
def __init__(self, data):
self.data = data
self.slices = []
for i, d in enumerate(data):
for j in range(d["image"].shape[0]):
self.slices.append((i, j))
def __getitem__(self, idx):
"""
This method is called by PyTorch DataLoader class to return a sample with id idx
Arguments:
idx {int} -- id of sample
Returns:
Dictionary of 2 Torch Tensors of dimensions [1, W, H]
"""
slc = self.slices[idx]
sample = dict()
sample["id"] = idx
# You could implement caching strategy here if dataset is too large to fit
# in memory entirely
# Also this would be the place to call transforms if data augmentation is used
# TASK: Create two new keys in the "sample" dictionary, named "image" and "seg"
# The values are 3D Torch Tensors with image and label data respectively.
# First dimension is size 1, and last two hold the voxel data from the respective
# slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors.
# Your tensor needs to be of shape [1, patch_size, patch_size]
# Don't forget that you need to put a Torch Tensor into your dictionary element's value
# Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array
# and the slice number are in the slc variable.
# Hint2: You can use None notation like so: arr[None, :] to add size-1
# dimension to a Numpy array
# <YOUR CODE GOES HERE>
sample["image"] = torch.from_numpy(self.data[slc[0]]['image'][slc[1]][None, :]).type(torch.cuda.FloatTensor)
sample["seg"] = torch.from_numpy(self.data[slc[0]]['seg'][slc[1]][None, :]).type(torch.cuda.LongTensor)
return sample
def __len__(self):
"""
This method is called by PyTorch DataLoader class to return number of samples in the dataset
Returns:
int
"""
return len(self.slices)
|
[
"abhishekdiphu@gmail.com"
] |
abhishekdiphu@gmail.com
|
bbcc380c2c8e11ce64219feba0f9de012fbe618d
|
196f04b443e0f79ff131f18baa8c78d1dfa036d2
|
/tests/updateLength_test.py
|
2f977b3d6ccde44484b230bdfd0471955cf0c5bf
|
[] |
no_license
|
Highstaker/Python-LinkedList-studies
|
b252881cba25813b9b74e9d59ebb26527e49098f
|
f761a52bac93c4736f210140c510906718ced089
|
refs/heads/master
| 2021-01-10T17:49:09.132200
| 2016-01-25T14:23:46
| 2016-01-25T14:23:46
| 50,355,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
#!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
from unittest import TestCase
from LinkedList import LinkedList
class TestLinkedList(TestCase):
def test__updateLength(self):
ll = LinkedList()
ll._updateLength()
self.assertEqual(ll.getLength(), 0)
ll.insert("cero")
self.assertEqual(ll.getLength(), 1)
ll._updateLength()
self.assertEqual(ll.getLength(), 1)
ll.insert("uno")
ll.insert("dos")
ll.insert("tres")
ll.insert("cuatro")
ll.insert("cinco")
self.assertEqual(ll.getLength(), 6)
ll._updateLength()
self.assertEqual(ll.getLength(), 6)
|
[
"heights999@yandex.ru"
] |
heights999@yandex.ru
|
4bb888070e1232caddf94f6d9cd66a889ff46eea
|
bd327e439bd231efb32a39ce2a0eeb376c015c09
|
/onlinesalesproject/OnlineSales/app/migrations/0003_auto_20191108_1302.py
|
c4c439e65dc1b19019cb296a1cf36fb744c17bbd
|
[] |
no_license
|
rudved/OnlineSales
|
883f2aa4eb670388e0cf6df2af5e07a96186b2f9
|
5add27d93be5187bb7a8e7030c79d8aabbaefc58
|
refs/heads/master
| 2020-09-26T19:28:39.768904
| 2019-12-06T12:35:07
| 2019-12-06T12:35:07
| 226,326,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# Generated by Django 2.2.5 on 2019-11-08 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_marchentmodel_password'),
]
operations = [
migrations.AlterField(
model_name='marchentmodel',
name='mrt_id',
field=models.IntegerField(default=False, primary_key=True, serialize=False),
),
]
|
[
"57656235+krudved@users.noreply.github.com"
] |
57656235+krudved@users.noreply.github.com
|
7630d8d78c12533b4d771f6fecc0374ead28fbc5
|
ce4d87abfc3b6d7a0b6ff82a0d99074cdb317b49
|
/selectiveHearing/selectivehearing/controllers/audioFiles.py
|
c62fa6a742489883753c7a3a76cbb44a496031ce
|
[] |
no_license
|
knorby/shearing
|
00c6b6fb22a4f20df3277ad9fa92f7189c389fa3
|
8f05ce802ed3120f8fb201200d64b368e598f182
|
refs/heads/master
| 2021-05-15T01:42:26.557661
| 2012-07-01T08:42:10
| 2012-07-01T08:42:10
| 4,848,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
from pylons import config
from paste.fileapp import DirectoryApp
class AudiofilesController(object):
def audioFiles(self, environ, start_response):
app = DirectoryApp(config["selectivehearing.audio_upload_dir"])
return app(environ, start_response)
__call__ = audioFiles
|
[
"kali.norby@gmail.com"
] |
kali.norby@gmail.com
|
562f901ed91f295fdbd2e5d1f0fac070dc8d90f3
|
f53bdd2b62dae9afd76bbfa983a2853b88d78c97
|
/dash_obtainer.py
|
1941efbff476f93243dd4de506a6a3f1b82deea4
|
[] |
no_license
|
ianbloom/kerry_dash
|
5cc38975adeb8c8bdf6453cf1e4778b2d0e213f4
|
cb31c787735d8d61189ac08185f592750ed97523
|
refs/heads/master
| 2020-04-01T18:57:13.531593
| 2018-10-24T18:31:38
| 2018-10-24T18:31:38
| 153,525,344
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
from api_helpers.super_func import *
from api_helpers.lm_api import *
from pprint import pprint
import argparse
import os
import sys
def DASH_OBTAIN(lm_id, lm_key, lm_company, dash_id):
resource_path = f'/dashboard/dashboards/{dash_id}'
query_params = ''
data = ''
# Obtain response
return_dict = LM_GET(lm_id, lm_key, lm_company, resource_path, query_params, data)
dash_body = json.loads(return_dict['body'].decode())
# Remove identifying information
dash_body.pop('id')
dash_body.pop('groupId')
dash_body.pop('groupName')
dash_body.pop('fullName')
dash_body.pop('widgetTokens')
# Obtain widgetsConfig which will help us build a widgets array
widgets_config = dash_body['widgetsConfig']
resource_path = f'/dashboard/dashboards/{dash_id}/widgets'
query_params = ''
data = ''
# Obtain response
return_dict = LM_GET(lm_id, lm_key, lm_company, resource_path, query_params, data)
widget_list = json.loads(return_dict['body'].decode())
# widget_items will be iterated through and searched for id
widget_items = widget_list['items']
# Iterate through widgets_config and initialize widgets_array
widgets_array = []
for widget_id, position in widgets_config.items():
# iterate through widget_items and match on id
for item in widget_items:
# One is a string and one is an int, normalize
if(int(widget_id) == int(item['id'])):
# Remove identifying information
item.pop('dashboardId')
# Initialize dictionary to place in widgets array
widget_array_dict = {}
widget_array_dict['config'] = item
widget_array_dict['position'] = position
widgets_array.append(widget_array_dict)
dash_body['widgets'] = widgets_array
# Replaced with widget property
dash_body.pop('widgetsConfig')
dash_body.pop('groupFullPath')
# Iterate through widgets, pop id from each config object post build
for widget in dash_body['widgets']:
# It's been done already?
widget['config'].pop('id', None)
widget['config'].pop('dataSourceId', None)
# Remove ids from datapoint objects
if('dataPoint' in widget['config'].keys()):
widget['config']['dataPoint'].pop('dataPointId', None)
widget['config']['dataPoint'].pop('dataSourceId', None)
# Remove ids from graphInfo -> dataPoints
if('graphInfo' in widget['config'].keys()):
widget['config']['graphInfo'].pop('id', None)
for dp in widget['config']['graphInfo']['dataPoints']:
dp.pop('id', None)
dp.pop('customGraphId', None)
dp.pop('dataPointId', None)
dp.pop('dataSourceId', None)
# Remove ids from bigNumberInfo
if('bigNumberInfo' in widget['config'].keys()):
for dp in widget['config']['bigNumberInfo']['dataPoints']:
dp.pop('id', None)
dp.pop('customGraphId', None)
dp.pop('dataPointId', None)
dp.pop('dataSourceId', None)
if('columns' in widget['config'].keys()):
for cl in widget['config']['columns']:
cl.pop('dataPointId', None)
# Collect name for use as filename
dash_name = dash_body['name']
dash_name = dash_name.replace(':', '_')
# Convert the dash_body dictionary back into a string
dash_body_string = json.dumps(dash_body)
cwd = os.getcwd()
file = open(f'{cwd}/dashboards/{dash_name}.json', 'w')
file.write(dash_body_string)
file.close()
|
[
"ian.bloom@gmail.com"
] |
ian.bloom@gmail.com
|
1d05631df31cdc2d73d8ef2445b26ad916a8fea5
|
d29b63ff2cbf1aa0441ca88d96d0f959acadee9e
|
/order/views.py
|
d35b39dec1bd9defb5a14258913196a50b6bebd7
|
[] |
no_license
|
AiperiAkhumbai/online_store
|
5737d2e8f52dcfdfbd15eb9769d9fd9da1a5a717
|
f4f7e85de5aa116d789324f5adf441f6af7551b4
|
refs/heads/master
| 2023-01-19T17:51:17.020575
| 2020-12-01T11:21:38
| 2020-12-01T11:21:38
| 316,841,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from .models import Order
from .serializers import OrderSerializers
class OrderViewSet(viewsets.ModelViewSet):
serializer_class = OrderSerializers
queryset = Order.objects.all()
permission_classes = (IsAuthenticated,)
filter_backends = (filters.SearchFilter,)
search_fields = ('created_at')
|
[
"aiperiahumbaeva@gmail.com"
] |
aiperiahumbaeva@gmail.com
|
3b44a1973c54eba885e0e6267d56783db8b817cc
|
bda06272e940ea9721b64637011e24f681059f83
|
/config.py
|
44307248f003d6002136e6a43a200f654a7af841
|
[
"MIT"
] |
permissive
|
melisajuma/Pitch
|
26cdb2760d5f105a8dc68287253fd13324e3928c
|
eb09d918afdee1541b7a90be1978061d707a3fb9
|
refs/heads/master
| 2021-06-14T20:04:22.514001
| 2019-08-06T12:17:43
| 2019-08-06T12:17:43
| 200,617,753
| 0
| 0
|
MIT
| 2021-06-02T21:49:10
| 2019-08-05T08:45:34
|
Python
|
UTF-8
|
Python
| false
| false
| 982
|
py
|
import os
#from sqlalchemy import create_engine
class Config:
SQLACHEMY_TRACK_MODIFICATIONS = False
#SQLALCHEMY_DATABASE_MODIFICATIONS = 'postgresql+psycopg2://moringa:mel123\q@localhost/pitch'
SECRET_KEY = 'happy'
# MAIL_SERVER = 'smtp.gmail.com'
# MAIL_PORT = 587
# MAIL_USE_TLS = True
# MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
# MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:mel123@localhost/pitch'
class DevConfig(Config):
# SQLALCHEMY_DATABASE_URI='postgresql+psycopg2://moringa:mel123@localhost/pitch'
DEBUG = True
#engine = create_engine('postgresql://moringa:mel123@localhost/pitch')
#class TestConfig(Config):
#SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:mel123@localhost/pitch'
config_options = {
'development': DevConfig,
'production': ProdConfig,
}
|
[
"Melisaakinyi95@gmail.com"
] |
Melisaakinyi95@gmail.com
|
d7d8cf1d86e120a7693cff3bc74ec011c58d2ec5
|
209a7a4023a9a79693ec1f6e8045646496d1ea71
|
/COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/extension/base/ops.py
|
23d3baaf3f83c148bcce7453dd98117e1924f28c
|
[
"MIT"
] |
permissive
|
anzhao920/MicrosoftProject15_Invictus
|
5e2347015411bbffbdf0ceb059df854661fb240c
|
15f44eebb09561acbbe7b6730dfadf141e4c166d
|
refs/heads/main
| 2023-04-16T13:24:39.332492
| 2021-04-27T00:47:13
| 2021-04-27T00:47:13
| 361,913,170
| 0
| 0
|
MIT
| 2021-04-26T22:41:56
| 2021-04-26T22:41:55
| null |
UTF-8
|
Python
| false
| false
| 6,907
|
py
|
from typing import Optional, Type
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
def get_op_from_name(self, op_name):
return tm.get_op_from_name(op_name)
def check_opname(self, s, op_name, other, exc=Exception):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, op_name, exc)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
if isinstance(s, pd.DataFrame):
if len(s.columns) != 1:
raise NotImplementedError
expected = s.iloc[:, 0].combine(other, op).to_frame()
self.assert_frame_equal(result, expected)
else:
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=Exception):
# divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
class BaseArithmeticOpsTests(BaseOpsUtil):
"""
Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc: Optional[Type[TypeError]] = TypeError
frame_scalar_exc: Optional[Type[TypeError]] = TypeError
series_array_exc: Optional[Type[TypeError]] = TypeError
divmod_exc: Optional[Type[TypeError]] = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)
@pytest.mark.xfail(run=False, reason="_reduce needs implementation")
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(
s, op_name, pd.Series([s.iloc[0]] * len(s)), exc=self.series_array_exc
)
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc)
self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc)
def test_divmod_series_array(self, data, data_for_twos):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data)
other = data_for_twos
self._check_divmod_op(other, ops.rdivmod, s)
other = pd.Series(other)
self._check_divmod_op(other, ops.rdivmod, s)
def test_add_series_with_extension_array(self, data):
s = pd.Series(data)
result = s + data
expected = pd.Series(data + data)
self.assert_series_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op_name = all_arithmetic_operators
with pytest.raises(AttributeError):
getattr(data, op_name)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
# EAs should return NotImplemented for ops with Series/DataFrame
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if box is pd.DataFrame:
other = other.to_frame()
if hasattr(data, "__add__"):
result = data.__add__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement add")
class BaseComparisonOpsTests(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
assert not op(s, other).all()
elif op_name == "__ne__":
assert op(s, other).all()
else:
# array
assert getattr(data, op_name)(other) is NotImplemented
# series
s = pd.Series(data)
with pytest.raises(TypeError):
op(s, other)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, 0)
def test_compare_array(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
other = pd.Series([data[0]] * len(data))
self._compare_other(s, data, op_name, other)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
# EAs should return NotImplemented for ops with Series/DataFrame
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if box is pd.DataFrame:
other = other.to_frame()
if hasattr(data, "__eq__"):
result = data.__eq__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement __eq__")
if hasattr(data, "__ne__"):
result = data.__ne__(other)
assert result is NotImplemented
else:
raise pytest.skip(f"{type(data).__name__} does not implement __ne__")
class BaseUnaryOpsTests(BaseOpsUtil):
def test_invert(self, data):
s = pd.Series(data, name="name")
result = ~s
expected = pd.Series(~data, name="name")
self.assert_series_equal(result, expected)
|
[
"ana.kapros@yahoo.ro"
] |
ana.kapros@yahoo.ro
|
f1295a102241d36828688531a187d86701051309
|
bbfa02377b18ee606f9d888c6d7f4635889938b8
|
/manage.py
|
20a885c4baaa6502165dfdfa79cfe689cd078d6d
|
[] |
no_license
|
wengyin777/DockerDjangoPostGreSQL
|
e9e05fb1a478069e65c7b7cb16f8ef53795f0330
|
4fafc1abac7a598a773c056cfff29bfda32603fa
|
refs/heads/main
| 2023-06-08T00:43:15.363513
| 2021-06-24T14:25:30
| 2021-06-24T14:25:30
| 379,948,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangodockerBLOG.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"holeyiho@gmail.com"
] |
holeyiho@gmail.com
|
3e2366528a198804fad9c195c700eb8b647db602
|
72e45ceed81324cd8d719483fede4c010fca1ccb
|
/test.py
|
bbaac87fb02e31e595661b3260fad4fbcc29560d
|
[] |
no_license
|
jingxm/RssReader
|
be5bceface2039b99f97ef55b235337fc0a48799
|
17eca96c3bc386c46b329ac296b974d16faaa789
|
refs/heads/master
| 2020-12-30T23:37:06.619081
| 2017-03-29T18:02:46
| 2017-03-29T18:02:46
| 86,613,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
from werkzeug.security import generate_password_hash, check_password_hash
pw = '123456'
pw_hash = generate_password_hash(pw)
print pw_hash
|
[
"hzmingjimmy@hotmail.com"
] |
hzmingjimmy@hotmail.com
|
546e6a53e7a616482335e6390aa31a35a8528bfc
|
9025e719e334557827707e15ae8fcfde4b8151cf
|
/action-recognition/src/utils.py
|
5a8f42a64e467221aa0aea858e21d89114d4c87a
|
[] |
no_license
|
feliferr/computer-vision
|
01e1423dc57462908e7f21f1c557c69b752277df
|
a957587e9b09c26a983b887e5a82876b08d4cb9a
|
refs/heads/master
| 2021-09-07T14:20:21.203339
| 2021-09-03T21:03:46
| 2021-09-03T21:03:46
| 216,015,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
import os
from google.cloud import storage
BUCKET_NAME = os.getenv("BUCKET_NAME")
SPLIT_PATTERN = f"gs://{BUCKET_NAME}/"
client = storage.Client()
bucket = client.get_bucket(BUCKET_NAME)
def download_gs_file(gs_file_path):
query_path = gs_file_path.split(SPLIT_PATTERN)[1]
blob = bucket.get_blob(query_path)
os.makedirs(os.path.dirname(query_path))
with open(f"./{query_path}",'wb') as f:
f.write(blob.download_as_bytes())
return query_path
def upload_to_gs(file_path):
blob = bucket.blob(file_path)
blob.upload_from_filename(filename=f"./{file_path}")
def list_gs_files(gs_path):
query_path = gs_path.split(SPLIT_PATTERN)[1]
blobs = list(bucket.list_blobs(prefix=query_path))
gs_files_list = [f"gs://{BUCKET_NAME}/{blob.name}" for blob in blobs]
return gs_files_list
|
[
"feliferrgo@gmail.com"
] |
feliferrgo@gmail.com
|
37a347338614bf509c5ecdd47760cb7ee414efda
|
0874ecce812388593a34014cad26d3b4959d07ac
|
/awards/migrations/0022_remove_rate_rate.py
|
39c6e7f3c27d989ca01d3e057b456dcc8a7ab2a2
|
[
"MIT"
] |
permissive
|
melissa-koi/awwardsclone
|
d990f82eb9d1354a54af68d7fa61fe5856bfd2c1
|
b82447cea82672038ea9fa9d9ca9867bff3c35f0
|
refs/heads/main
| 2023-06-01T12:20:05.315928
| 2021-06-03T15:31:21
| 2021-06-03T15:31:21
| 372,236,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Generated by Django 3.2.3 on 2021-06-02 04:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('awards', '0021_profile_location'),
]
operations = [
migrations.RemoveField(
model_name='rate',
name='rate',
),
]
|
[
"melissawangui3@gmail.com"
] |
melissawangui3@gmail.com
|
69c4491d020596e64934f5c8a00289741f8a56d2
|
75eca2144c3c740c1e1a13b9ecc7670de7dc2b25
|
/budget-backend/src/services/transactions.py
|
cacb1da8dca761e0b378c7627cd78241854b312e
|
[] |
no_license
|
salty-armadillo/budget
|
66e3aa45cb1e6298378bbe4ed2b405ea16120726
|
6fab280a41715712d1b71f8cf58e7fba25a66d98
|
refs/heads/main
| 2023-03-09T09:17:52.448433
| 2021-02-14T05:51:55
| 2021-02-14T05:51:55
| 321,654,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
from mysql import connector
import configparser
def fetch_transactions(offset, length):
config = configparser.ConfigParser()
config.read('config.ini')
db = connector.connect(
host="localhost",
user=config["database"]["username"],
password=config["database"]["password"],
database='budgeting_db'
)
dbCursor = db.cursor()
dbCursor.execute(
f"SELECT * FROM transactions ORDER BY create_time DESC LIMIT {offset}, {length};"
)
results = [list(i) for i in dbCursor.fetchall()]
dbCursor.close()
db.close()
return results
def fetch_transactions_between(start, end):
config = configparser.ConfigParser()
config.read('config.ini')
db = connector.connect(
host="localhost",
user=config["database"]["username"],
password=config["database"]["password"],
database='budgeting_db'
)
dbCursor = db.cursor()
dbCursor.execute(
f"SELECT * FROM transactions WHERE create_time BETWEEN '{start}' AND '{end}' ORDER BY create_time DESC;"
)
results = [list(i) for i in dbCursor.fetchall()]
dbCursor.close()
db.close()
return results
def insert_transaction(create_time, amount, description, category):
config = configparser.ConfigParser()
config.read('config.ini')
db = connector.connect(
host="localhost",
user=config["database"]["username"],
password=config["database"]["password"],
database='budgeting_db'
)
dbCursor = db.cursor()
dbCursor.execute(
f"INSERT INTO transactions (create_time, amount, description, category) VALUES ('{create_time}', {amount}, '{description}', '{category}');"
)
db.commit()
dbCursor.close()
db.close()
return
|
[
"kamanchan27@gmail.com"
] |
kamanchan27@gmail.com
|
2d609b843b0027415ca77a5ba27308972efddf2f
|
d6a87754a86ed91eb3a71c8ec69fdb415a87ac34
|
/apache/apr_tables.py
|
46ae0ec9635e80bbd8b641f3001bc1020f86c31d
|
[
"Apache-2.0"
] |
permissive
|
GrahamDumpleton-abandoned/apswigpy
|
8505f3bb99c45b75dfe5da6e3a53ff3c7d4fd4cd
|
b821b94a78ceed5b8991f7c345aeeadca3729a90
|
refs/heads/master
| 2021-01-05T11:19:43.498210
| 2009-12-01T10:41:37
| 2009-12-01T10:41:37
| 241,006,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
import apache
if apache.version == (2, 2):
from apache22.apr_tables import *
else:
raise RuntimeError('Apache version not supported.')
|
[
"Graham.Dumpleton@gmail.com"
] |
Graham.Dumpleton@gmail.com
|
4516b0662054384e88012bb63a54923c9cb9062f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2501/60586/311798.py
|
a5059b14e231f288884cef6e4628d790888c410d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
x=int(input())
input()
if x==3:
print(1)
elif x==5:
print(3)
elif x==8 and input()=="8 1 2 3 4 5 6 7":
print(7)
print(input())
elif x==30:
print(15)
else:
print(x)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2ae0d10e42b22fb1cb5d6d1517d8248f2646b176
|
4072be1b88309cd354ffae005fd22d8b56b845ff
|
/user_test_python/test_case_2.py
|
3f70836a2d992c15b577e635f21a0f117d4087c9
|
[] |
no_license
|
chernenko-art/tests_user_api
|
4bfce916e5e9f1a1f259d1c64f90d45a57e79cd8
|
adaeec35962d13aba8cb8f77a8ff9cbef556e189
|
refs/heads/master
| 2023-06-03T09:28:46.896333
| 2021-06-18T10:55:07
| 2021-06-18T10:55:07
| 372,411,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,883
|
py
|
import pytest
import logging
import time
from user_api import *
from conftest import *
# Конфигурация логов
FORMAT = '%(asctime)s,%(msecs)d %(levelname)-8s \
[%(filename)s:%(lineno)d:%(funcName)-20s] %(message)s'
logging.basicConfig(level=level_logging(),
format=FORMAT,
datefmt='%m-%d %H:%M',
filename='user_tests.log'
)
def test_case_2():
""" Тест-кейс test_case_2 включает в себя следующую последовательность действий:
1. Создание 5 пользователей и задач для них, с помощью метода create_user_with_task()
2. Изменение 1 поля каждого пользователя
3. Вход в систему под менеджером
4. Поиск пользователей по созданной задаче
5. Проверка выполнения задач пользователями
6. Добавление новой задачи пользователям
"""
logging.info('-'*15 + 'Запуск test_case_2')
# Заводим таймер для защиты от ошибок на сервере
timing = time.time()
# Цикл с таймером на 10 сек
while True:
try:
# Проверка времени выполнения цикла
if time.time() - timing > 10.0:
logging.error('Превышено время ожидания')
assert False
# Создание 5 пользователей и задач для них
logging.info('1. Создание 5 пользователей и задач для них')
# Получение данных о задаче
params_test = get_params_test()
task_1 = params_test['task_json']
# Массив для хранения данных созданных пользователей
user_list = []
for _ in range(5):
# Запрос случайных данных пользователей
user_email, user_name, password = random_user_generator()
c_u_w_t_json = create_user_with_task(user_email, user_name, [task_1])
if 'type' in c_u_w_t_json:
raise Exception(f'Error key "type" in response create_user_with_task(): {c_u_w_t_json}')
# Добавление email пользователя в массив
user_list.append(user_email)
# Изменение поля 'hobby' каждого пользователя
logging.info('2. Изменение 1 поля каждого пользователя')
for email in user_list:
u_o_f_json = user_one_field(email)
if u_o_f_json['message'] == 'Пользователь с таким email не найден!':
raise Exception(f'Error key "message" in response user_one_field(): {u_o_f_json}')
# Вход в систему под менеджером
logging.info('3. Вход в систему под менеджером')
# Получение данных авторизации менеджера
manager_email = params_test['manager_email']
manager_password = params_test['manager_password']
login_json = do_login(manager_email, manager_password)
if login_json["result"] == False:
raise Exception(f'Error key "result" in response json do_login(): {login_json}')
# Поиск пользователей по созданной задаче
logging.info('4. Поиск пользователей по созданной задаче')
search_params = ' '.join(user_list)
search_json = magic_search(search_params)
if 'code_error' in search_json:
raise Exception(f'Key "code_error" in response magic_search(): {search_json}')
# Проверка выполнения задач пользователями
logging.info('5. Проверка выполнения задач пользователями')
# Проверка задач всех пользователей
for i in range(len(search_json['results'])):
user = search_json['results'][i]['email']
task = search_json['results'][i]['tasks'][0]
if 'status' in task:
logging.info(f"Пользователь - '{user}', \
задача - '{task['name']}' \
статус - {task['status']}")
else:
raise Exception(f'Key "status" not in response magic_search(): {user}')
# Добавление новой задачи пользователям
logging.info('6. Добавление новой задачи пользователям')
task_2 = {"title": "Спринт 85", "description": "Провести fuctional test"}
for email in user_list:
task_json = create_task(task_2['title'], task_2['description'], manager_email, email)
if task_json['message'] != 'Задача успешно создана!':
raise Exception(f'Error key "message" in response json create_task(): {task_json}')
# Проверка успешности выполения test_case_2
# Обновление поиска пользователей по созданым задачам
search_json = magic_search(search_params)
if 'code_error' in search_json:
raise Exception(f'Key "code_error" in response magic_search(): {search_json}')
# Определение заданного массива задач и пользователей
spec_list = {}
for email in user_list:
# Определение заданного перечня задач
spec_tasks = [task_1['title'], task_2['title']]
spec_list.update({email: spec_tasks})
# Определение фактического массива задач и пользователей
result_list = {}
# Сбор полученных данных пользователей
if search_json['foundCount'] >= 1:
for i in range(len(search_json['results'])):
email = search_json['results'][i]['email']
if email in user_list:
tasks = search_json['results'][i]['tasks']
task_list = []
for task in tasks:
task_list.append(task['name'])
# реверс в списке, т.к. в response обратный порядок задач
result_list.update({email: task_list[::-1]})
# Сравнение заданного массива задач и пользователей с фактическим
assert spec_list == result_list
logging.info(f'test_case_2 успешно пройден: \
spec_list = {spec_list}, result_list = {result_list}')
break
else:
logging.error(f'test_case_2 провален \
spec_list = {spec_list}, result_list = {result_list}')
assert False
except Exception as err:
logging.error(err)
assert False
|
[
"ac.chernenko@gmail.com"
] |
ac.chernenko@gmail.com
|
df8485b15d3e4486fe165c69ac19f217a8e0386a
|
3981410f2a5f4911b0cdcfb4731b45aa4e0eb01a
|
/webapp/getting2.py
|
63618cb6e8361196d6474f1fb1fecf971b83565f
|
[] |
no_license
|
shakarbhattarai/MoodLamp
|
cf945eb99ec290f55d12d129b233ab8bd4bf65c1
|
eab4cbfa0ad739d9709d2130f624f7951a3a041f
|
refs/heads/master
| 2021-01-13T07:04:35.242532
| 2017-02-11T14:17:08
| 2017-02-11T14:17:08
| 81,600,945
| 10
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,438
|
py
|
import httplib, urllib, base64,json
#This gets emotions from givenurl
class imageProcess:
def __init__(self,imageurl):
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': 'a0087b0f59144ae0a40ab33cdcdbec50',
}
self.imageurl=imageurl
params = urllib.urlencode({
})
try:
conn = httplib.HTTPSConnection('westus.api.cognitive.microsoft.com')
print self.imageurl
conn.request("POST", "/emotion/v1.0/recognize?%s" % params, "{'url':'"+self.imageurl+"'}", headers)
response = conn.getresponse()
self.data = response.read()
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def get_json(self):
return self.data
def get_emotions(self):
answer=json.loads(self.data)
return answer
a=imageProcess("https://ig-s-c-a.akamaihd.net/hphotos-ak-xfa1/t51.2885-15/sh0.08/e35/p750x750/16464986_380891058936170_3981819473807015936_n.jpg?ig_cache_key=MTQ0NDY2OTExODA1MzQ4Nzk0Mg%3D%3D.2")
print a.get_emotions()
|
[
"shakarbhattarainp@gmail.com"
] |
shakarbhattarainp@gmail.com
|
1871ae0b900590bb3835539d692b3dfb8c5af04a
|
7fa4330f2167d25b41485b4fbcee867d5bb85c8e
|
/n.py
|
b70600d8c290b8c5bf7ed118e9665a2fda1515e4
|
[] |
no_license
|
StyleGame/pp.py
|
923380f095755cc37fce4cfb795ff195b7c0c9af
|
12b315556901a8bc5b39057395c6df11932ae041
|
refs/heads/main
| 2023-08-14T08:00:37.786472
| 2021-10-02T11:22:35
| 2021-10-02T11:22:35
| 349,479,061
| 0
| 1
| null | 2021-08-19T00:14:22
| 2021-03-19T15:59:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
import random,string
import requests,hashlib,random,string,time
import telebot
r = requests.session()
print("""
--------------------------------------------------
██████╗ ██╗ ██╗██████╗ ██████╗
██╔══██╗██║ ██║██╔══██╗██╔════╝
██████╔╝██║ ██║██████╔╝██║ ███╗
██╔═══╝ ██║ ██║██╔══██╗██║ ██║
██║ ╚██████╔╝██████╔╝╚██████╔╝
╚═╝ ╚═════╝ ╚═════╝ ╚═════╝
BY : @StyleGame
--------------------------------------------------
""")
#############
ID='1346823622'
token ='1806649080:AAFm6VdpWvxM1_1X2Htc4VIHbEPqF7hM80Y'
bot = telebot.TeleBot(token)
headPUB = {
"Content-Type": "application/json; charset=utf-8","User-Agent": f"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-G973N Build/PPR1.910397.817)","Host": "igame.msdkpass.com","Connection": "Keep-Alive","Accept-Encoding": "gzip","Content-Length": "126"}
def CHECK(email,pess):
eml = email
pas = pess
YES = f"""
\033[0;32m[✓] Hacked PUBG :
[✓] Email: {eml}
[✓] Pass: {pas}
━━━━━━━━━━━━━"""
NO = f"""
\033[0;31m[-] NOT Hacked PUBG :
[-] Email: {eml}
[-] Pass: {pas}
━━━━━━━━━━━━━"""
pes = hashlib.md5(bytes(f'{pas}', encoding='utf-8')).hexdigest()
J = hashlib.md5(bytes("/account/login?account_plat_type=3&appid=dd921eb18d0c94b41ddc1a6313889627&lang_type=tr_TR&os=1{\"account\":\""+eml+"\",\"account_type\":1,\"area_code\":\"\",\"extra_json\":\"\",\"password\":\""+pes+"\"}3ec8cd69d71b7922e2a17445840866b26d86e283", encoding="utf-8")).hexdigest()
url = f"https://igame.msdkpass.com/account/login?account_plat_type=3&appid=dd921eb18d0c94b41ddc1a6313889627&lang_type=tr_TR&os=1&sig={J}"
daPU = "{\"account\":\""+eml+"\",\"account_type\":1,\"area_code\":\"\",\"extra_json\":\"\",\"password\":\""+pes+"\"}"
time.sleep(0.5)
GO=r.get(url, data=daPU,headers=headPUB).text
if '"Success"' in GO:
print(YES)
r.post(f'https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text={YES}\nBY @Style_Game 💸')
with open('NWE-PUBG.txt', 'a') as x:
x.write(eml+':'+pas+' |@StyleGame @Style_Game0\n')
else:
print(NO)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, "Hi StyleGame, how are you doing?")
@bot.message_handler(func=lambda message: True)
def echo_all(message):
if message.text=="Checker":
bot.reply_to(message, "ok")
#F = "p.text"
#def FILname():
F = "p.txt"
try:
for x in open(F,'r').read().splitlines():
email = x.split(":")[0]
pess = x.split(":")[1]
CHECK(email,pess)
except FileNotFoundError:
print('\n[-] The file name is incorrect !\n')
return FILname()
else:
bot.reply_to(message, "Not Checker")
#FILname()
@bot.message_handler(content_types=['document'])
def name(c):
print("Go")
print(c.document.file_id)
raw=c.document.file_id
path = raw + ".txt"
file_info = bot.get_file(raw)
downloaded_file = bot.download_file(file_info.file_path)
with open("p.txt", 'wb') as new_file:
new_file.write(downloaded_file)
bot.reply_to(c, "Downloaded")
bot.polling()
|
[
"noreply@github.com"
] |
StyleGame.noreply@github.com
|
c625242064f1cc62043c3d10ce2128fa96f6c215
|
ede5d010543eb12221a664a6d7fce4d5b277baf3
|
/apiCentral/manage.py
|
91d08e339d2d4de987ee667063915991129af5ce
|
[
"MIT"
] |
permissive
|
tiagopossato/iot.central.interface
|
d1fa5886b54fe2c4a10f918faf60bcb04c24e987
|
a3b55aa8a1260a6823998e82b2c6bed9675a4543
|
refs/heads/master
| 2021-04-12T15:10:44.577412
| 2017-06-23T02:03:45
| 2017-06-23T02:03:45
| 94,553,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apiCentral.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"tiago@debian"
] |
tiago@debian
|
4e0eafad69109e8c08e62e688f262b2e5bc9f9f6
|
b38abdc3e28e15d53c9173752421f11ee3f5319e
|
/users/migrations/0001_initial.py
|
d33a624c6cd89ae5a104e68e7da9203218487fd7
|
[
"MIT"
] |
permissive
|
stuartelimu/curly-robot
|
021b066e5273e131c0af6c8317359ce8369e446d
|
c170cb95f3fe6c806263edc977f039e11d84fbe0
|
refs/heads/master
| 2023-07-09T23:14:27.499624
| 2021-08-19T13:35:09
| 2021-08-19T13:35:09
| 397,565,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
# Generated by Django 3.2.6 on 2021-08-18 11:17
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
[
"stuartelimu@gmail.com"
] |
stuartelimu@gmail.com
|
8fcd74b148e19229dbb9dfb47b1bbca367df9e69
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnsantissima.py
|
543cf26c394dc5e4ca24b64951402154862496ac
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 44
|
py
|
ii = [('ClarGE2.py', 3), ('CoopJBT2.py', 3)]
|
[
"varunwachaspati@gmail.com"
] |
varunwachaspati@gmail.com
|
6558622836654c5378bd38de0253eb25a5b41d6a
|
7cf3ebd6c7b365e8e8f791304a4e7ac0095ec362
|
/src/foreign_if/python/UT/src/gmm/test009.py
|
5727c5ed42a68cbb1e5849107a506742e5e595ec
|
[
"BSD-2-Clause"
] |
permissive
|
KhushbooSanole/frovedis
|
22805039b54b215ca8bf01ab5683c19e1c0a276c
|
dcbff3609bddb30ceed755028aff4cf4203a4bf5
|
refs/heads/master
| 2023-06-19T08:30:16.651958
| 2021-07-21T05:15:53
| 2021-07-21T05:15:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
# Test frovedis lowerbound and sklearn lowerbound
import sys
import numpy as np
from frovedis.exrpc.server import FrovedisServer
from frovedis.matrix.dense import FrovedisRowmajorMatrix
from frovedis.mllib.gmm import GaussianMixture
import sklearn.mixture as sk
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 -x /opt/nec/nosupport/frovedis/ve/bin/frovedis_server")')
quit()
FrovedisServer.initialize(argvs[1])
train_mat = np.loadtxt("./input/gmm_data.txt")
# creating spectral agglomerative object
n_components = 2
try:
f_model = GaussianMixture(n_components=n_components)
# fitting the training matrix on gaussian mixture object
f_model.fit(train_mat)
f_lb = f_model.lower_bound_
except Exception as e:
print ("status=Exception: " + str(e))
sys.exit(1)
try:
sk_model = sk.GaussianMixture(n_components=n_components, random_state=0).fit(train_mat)
s_lb = sk_model.lower_bound_
except Exception as e:
print ("status=Exception: " + str(e))
sys.exit(1)
if(f_lb == s_lb):
print("status=Passed")
else:
print("status=Failed")
|
[
"takuy_araki@nec.com"
] |
takuy_araki@nec.com
|
e7c681e3f1bb4a3a767be6ee771e7ae6b2aa560c
|
0e6d0db669707edc925897c7bfe4599407d2a5c4
|
/projects/golem_integration/tests/actions/cookies/delete_cookie.py
|
0e0305df8ad899f1cb2242148298014eeb8f3d3f
|
[
"MIT"
] |
permissive
|
kangchenwei/keyautotest2
|
0f82b0c0811487953551f3a4d7daea5a92f390b8
|
f980d46cabfc128b2099af3d33968f236923063f
|
refs/heads/master
| 2020-04-05T19:41:41.520077
| 2019-01-18T09:20:58
| 2019-01-18T09:20:58
| 157,146,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
description = 'Verify golem action delete_cookie'
def test(data):
navigate('https://google.com')
add_cookie({'name': 'foo', 'value': 'bar'})
delete_cookie('foo')
assert get_cookie('foo') == None
|
[
"kangxinxin@testin.cn"
] |
kangxinxin@testin.cn
|
149dc5a5f54885b9b82d6e55af3d65f47f93a215
|
ef96f648d8ecf10c95c34b578c99001c0e9080f8
|
/tests/test_cli.py
|
37365717379121153abd31ae9da03675670e5fc0
|
[] |
no_license
|
eteeselink/ci-workshop
|
7314be3bafb148f437e3b406e7201cf71cf99102
|
b0a8430e856124fe9ea313a5e172295ab3adf54a
|
refs/heads/master
| 2021-01-01T05:37:25.645953
| 2014-10-17T09:51:14
| 2014-10-17T09:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
import unittest
import cli
class TestParser(unittest.TestCase):
def test_hello(self):
self.assertEqual(cli.parse([]), [])
None
|
[
"skrebbel@gmail.com"
] |
skrebbel@gmail.com
|
95235e3018bd6e772c58855ce50e8c6d8abe593e
|
398e3a7e8425a5b16487654d62c77eb2c1881091
|
/api/models/PaintJobModel.py
|
6845cd63f0e1f83727932797755038b5bf04843a
|
[] |
no_license
|
paarshan4800/car-paint-shop
|
c1b61ba7dbf84846d97b486be937345999974b9d
|
5c875bfb9637674347240b57d34ba03f6bd736e4
|
refs/heads/master
| 2023-06-08T15:07:19.977790
| 2021-06-25T18:05:31
| 2021-06-25T18:05:31
| 365,522,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
from api import db
from datetime import datetime
class PaintJob(db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
color = db.Column(db.String(100), nullable=False)
model = db.Column(db.String(255), nullable=False)
painted_time = db.Column(db.DateTime, default=datetime.now(), nullable=False)
user_email = db.Column(db.String(255), db.ForeignKey('user.email'), nullable=True)
def __repr__(self):
return "ID - {} Color - {} Time - {}".format(self.id, self.color, self.painted_time)
|
[
"paargav.shuttle@gmail.com"
] |
paargav.shuttle@gmail.com
|
cbbcb9d8e818cbc600ffcb9dc72505548efffff2
|
10537e7616456f8c21d0e2c4dc47c1bb92ef4990
|
/Adapter/Python/RoundPeg.py
|
a514610da5b6c7da8552b5de5d00bed0a33658f4
|
[
"MIT"
] |
permissive
|
DanielEIlie/Patterns
|
85d882bc22226e654113ab92a2f013de08b60f73
|
5c22674f089b3dd33ee368488a6ec9945defd5d5
|
refs/heads/develop
| 2021-12-28T03:10:11.376196
| 2021-12-09T10:45:09
| 2021-12-09T10:45:09
| 173,938,838
| 0
| 0
|
NOASSERTION
| 2021-12-09T10:45:10
| 2019-03-05T12:03:55
|
Fortran
|
UTF-8
|
Python
| false
| false
| 165
|
py
|
class RoundPeg:
def __init__(self, radius:float) -> None:
self.__radius__ = radius
@property
def Radius(self) -> float:
return self.__radius__
|
[
"daniel.ilie@woodplc.com"
] |
daniel.ilie@woodplc.com
|
62c32c2afec959c1510c6994c30db096919e9948
|
0da7a35582a87bcf3375fb7ffd36247dd1253a25
|
/Python/Day7/RecursionMultiplication.py
|
a684a4bb19a9ec4ddacd56eea2c179db322c2443
|
[] |
no_license
|
swapnadeepmohapatra/competitive-programming
|
08fa4b2894fd0195be9f5b9ef286fd4b61685055
|
9da873f352ab6551db141f7eed39957dd26143aa
|
refs/heads/master
| 2020-06-27T04:31:50.899259
| 2020-04-22T13:17:04
| 2020-04-22T13:17:04
| 199,844,850
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
def multiply(a, b):
if a == 0:
return 0
elif b == 0:
return 0
elif a == 1:
return b
elif b == 1:
return a
elif a < 0:
return - (b - multiply(b, a+1))
else:
return multiply(b, a-1) + b
print(multiply(1200000, 365))
|
[
"swapnadeep456@gmail.com"
] |
swapnadeep456@gmail.com
|
71efced43f44c1d48d6bab770ae7233c299c25c2
|
ea05e0a23987bf31019ef94108df2cb44147d536
|
/SingleLinearRegression/linearRegressionTemp.py
|
0164c4a93406ecbce2dc0bf7ee454f0df7aede0b
|
[] |
no_license
|
vidyesh95/TemperatureEstimation
|
1ae24adb72615ad53ab0c7251fd75da9448fe7db
|
57458adbf6f37e4ff140f22eda893a3a6075c254
|
refs/heads/master
| 2023-07-31T21:11:55.893712
| 2021-09-10T11:45:45
| 2021-09-10T11:45:45
| 405,060,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import sklearn
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn import model_selection
from sklearn import linear_model
# y = mx + c
# F = 1.8 * C + 32
x = list(range(0, 30)) # C(Celsius)
# y = [1.8 * F + 32 for F in x] # F(Fahrenheit)
y = [1.8 * F + 32 + random.randint(-3, 3) for F in x] # F(Fahrenheit)
print(f'X:{x}')
print(f'Y:{y}')
plt.plot(x, y, '-*r')
# plt.show()
x = np.array(x).reshape(-1, 1)
y = np.array(y).reshape(-1, 1)
print(f'X:{x}')
print(f'Y:{y}')
xTrain, xTest, yTrain, yTest = model_selection.train_test_split(x, y, test_size=0.2)
print(f'Shape:{xTrain.shape}')
model = linear_model.LinearRegression()
model.fit(xTrain, yTrain)
print(f'Coefficient:{model.coef_}')
print(f'Intercept:{model.intercept_}')
accuracy = model.score(xTest, yTest)
print(f'Accuracy:{round(accuracy * 100, 2)}')
x = x.reshape(1, -1)[0]
m = model.coef_[0][0]
c = model.intercept_[0]
y = [m * F + c for F in x] # F(Fahrenheit)
plt.plot(x, y, '-*b')
plt.show()
|
[
"vidyesh95@gmail.com"
] |
vidyesh95@gmail.com
|
a57704dec76aa98e24a71da7bdb8a1bff8fa307c
|
fe5dd90085f6cde53aeab68fa6c70eb9a70a1d0a
|
/task_1_1.py
|
b6db207aefb97d7a1ece52ef7b506fe0f16bec37
|
[] |
no_license
|
youngcardinal/MFI_Labs_Robot
|
0045c373a9797495663292e9a8819052c8db7d26
|
0ef8697f2e52a963a89ff88642405f7b6a97f19a
|
refs/heads/master
| 2023-01-02T06:35:03.644948
| 2020-10-27T08:22:25
| 2020-10-27T08:22:25
| 306,322,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
#!/usr/bin/python3
from pyrob.api import *
@task
def task_1_1():
move_right()
move_down()
move_right()
if __name__ == '__main__':
run_tasks()
|
[
"siberia.on@gmail.com"
] |
siberia.on@gmail.com
|
218eed066a088d623136d6770b4d968334072c16
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_325/ch8_2020_03_02_19_18_22_057001.py
|
b4916fbd233446feb0c19469831a4bb984024cfc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
def calcula_posicao(t,s,v):
return s + (v * t)
|
[
"you@example.com"
] |
you@example.com
|
6eb0972b0a6b9800e31f1f1e623a454308a16be4
|
efde71fc3e296804a9e5cb6bc2ab48ad575b7faa
|
/applications/ajax/coupon.py
|
bbc0873798767159e088eb8161fce704f4d0abbb
|
[] |
no_license
|
denispan1993/vitaliy
|
597cb546c9d1a14d7abc2931eb71fab38b878ec4
|
764d703ffc285f13a9f05e4c197bc75b495b5ff7
|
refs/heads/master
| 2021-04-29T21:54:45.095807
| 2018-02-10T19:05:23
| 2018-02-10T19:05:23
| 121,627,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
# -*- coding: utf-8 -*-
try:
from django.utils.simplejson import dumps
# import simplejson as json
except ImportError:
from json import dumps
# import json
from django.http import HttpResponse
from django.utils import timezone
from applications.cart.utils import get_cart_or_create
from applications.coupon.models import Coupon
__author__ = 'AlexStarov'
def coupon_test(request, ):
if request.is_ajax() and request.method == 'POST':
response = {'result': 'Bad', }
coupon_key = request.POST.get(u'value', None, )
if coupon_key:
try:
coupon = Coupon.objects.get(key=coupon_key, )
except Coupon.DoesNotExist:
response.update({'help_text': u'Номер купона не действительный', }, )
except Coupon.MultipleObjectsReturned:
response.update({'help_text': u'Странный какой-то купон', }, )
else:
if not coupon.start_of_the_coupon < timezone.now():
response.update({'help_text': u'Время использования этого купона еще не настало', }, )
else:
if not timezone.now() < coupon.end_of_the_coupon:
response.update({'help_text': u'Купон просрочен', }, )
else:
if not coupon.number_of_uses < coupon.number_of_possible_uses:
response.update({'help_text': u'Превышен лимит количества использований купона', }, )
else:
                            ''' Get the current cart '''
cart = get_cart_or_create(request,
user_object=None,
created=False, )
if cart:
                                ''' Do any coupons already point at this cart? '''
coupons = cart.Cart_child.all()
if not coupons:
                                    ''' If not, point this coupon at this cart '''
coupon.child_cart.add(cart, )
coupon.number_of_uses += 1
coupon.save()
response.update({'result': 'Ok',
'coupon_pk': coupon.pk,
'percentage_discount': coupon.percentage_discount,
'help_text': u'Этот купон предоставляет скидку в %d%% от суммы корзины' % coupon.percentage_discount, }, )
else:
response.update({'help_text': u'К этой корзине уже привязан купон со скидкой %d%%' % coupons[0].percentage_discount, }, )
else:
response.update({'help_text': u'Номер купона не задан', }, )
return HttpResponse(content=dumps(response, ),
content_type='application/javascript', )
return HttpResponse(status=400, )
|
[
"alex.starov@gmail.com"
] |
alex.starov@gmail.com
|
701e61b82f89ad94acd32860ae9a5237c8d1a504
|
a6287d01a2bfb7a846557a049130f4183587eac8
|
/neurst/neurst/layers/quantization/quant_dense_layer.py
|
59058303692e9e87ab455c4e2ec4b0fd86c1f365
|
[
"Apache-2.0"
] |
permissive
|
ohlionel/Prune-Tune
|
2737145589de5407699f9c6cd383112ec2f6a098
|
06613a99305f02312a0e64ee3c3c50e7b00dcf0e
|
refs/heads/main
| 2023-04-03T06:49:59.502938
| 2021-04-01T10:13:01
| 2021-04-01T10:13:01
| 321,608,509
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
# Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from neurst.layers.quantization.quant_layers import QuantLayer
class QuantDense(tf.keras.layers.Dense, QuantLayer):
""" `tf.keras.layers.Dense` with quantization. """
def __init__(self, activation_quantizer=None, *args, **kwargs):
tf.keras.layers.Dense.__init__(self, *args, **kwargs)
QuantLayer.__init__(self, name=self.name)
self._quant_op = None
if activation_quantizer is not None:
self._quant_op = self.add_activation_quantizer(self.name + "_activ", activation_quantizer)
def build(self, input_shape):
tf.keras.layers.Dense.build(self, input_shape)
self.add_weight_quantizer(self.kernel)
self.v = self.kernel
self.built = True
def call(self, inputs):
self.kernel = tf.cast(self.quant_weight(self.v), inputs.dtype)
return tf.keras.layers.Dense.call(self, inputs)
def __call__(self, *args, **kwargs):
output = tf.keras.layers.Dense.__call__(self, *args, **kwargs)
if self._quant_op is None:
return output
return self._quant_op(output)
|
[
"liangjianze@bytedance.com"
] |
liangjianze@bytedance.com
|
aeaf44071c3f2543f95c49e07c90cf44102e7699
|
4d84b6991399e597c2b909e50f6cf17c4fc80c15
|
/courses/nlp/assignment_1/generate.py
|
30f6770fd4f49f054f34c98ca444b94c53a9b503
|
[] |
no_license
|
Byron-Edwards/NTU
|
4af328c75c429414f3283abb1e252efa7dd19ded
|
f3e5306edc1711f7471cd84f815697a02e97a02e
|
refs/heads/master
| 2021-01-03T17:42:15.534926
| 2020-11-15T09:24:21
| 2020-11-15T09:24:21
| 240,173,560
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,538
|
py
|
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default='500',
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
# is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
# if not is_transformer_model:
# hidden = model.init_hidden(1)
# input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, 'w',encoding='utf-8') as outf:
with torch.no_grad(): # no tracking history
print('-' * 89)
input_vector = input('Please input your words: ')
if len(input_vector) == 0:
input_vector = "You will never know what happened here"
outf.write('-' * 45 + 'Your input' + '-' * 45 + "\n")
outf.write(input_vector + " \n")
outf.write('-' * 45 + 'Your input' + '-' * 45 + "\n")
input_vector = torch.tensor([corpus.dictionary.word2idx[i] for i in input_vector.split()], dtype=torch.long).unsqueeze(dim=0).to(device)[:,-model.ngram:]
output_word = ""
for i in range(args.words):
# if is_transformer_model:
output = model(input_vector)
word_weights = output[-1].squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
word_tensor = torch.Tensor([[word_idx]]).long().to(device)
input_vector = torch.cat([input_vector, word_tensor], 1)[:,-model.ngram:]
word = corpus.dictionary.idx2word[word_idx]
output_word = output_word + " " + word
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
print('The following are generated words: \n {}'.format(output_word))
|
[
"byron_edwards@outlook.com"
] |
byron_edwards@outlook.com
|
345c7a7d5c51a0757e8d4395154aad955a8e5be1
|
941a5ba27b599553d8d21f12ebc9eeae4677d31b
|
/app01/stark.py
|
583ab6324c91eafccf651de9fa91ecab82751291
|
[] |
no_license
|
AIF333/Mystark
|
9326b375b86313472508972e1fa052486e15e056
|
959e5e26c9edd193d8471370f753934d20a14551
|
refs/heads/master
| 2020-05-01T13:32:09.033817
| 2019-03-27T13:12:51
| 2019-03-27T13:12:51
| 177,494,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,485
|
py
|
from django.shortcuts import render,HttpResponse,redirect
from django.urls import path
from stark.service.V1 import site, StarkConfig
from app01 import models
# Custom configuration class for the UserInfo model
class UserInfoConfig(StarkConfig):
    def extend_url(self):  # method for adding extra url routes
path_list=[
path('sayHi/', self.sayHi),
]
return path_list
def sayHi(self,request):
return HttpResponse("Hi!")
def gender_display(self,is_header=False,row=None):
if is_header:
return "性别"
else:
'''
if row.gender == 1 :
return "男"
else:
return "女"
        For a choices field, this kind of branching is equivalent to row.get_<field>_display()
'''
return row.get_gender_display()
def dp_display(self,is_header=False,row=None):
if is_header:
return "部门"
else:
return row.dp.title
def hobby_display(self,is_header=False,row=None):
if is_header:
return "爱好"
else:
return row.hobby.all().values_list("title")
'''
    change_func is passed in as a function object: inside the class body the class has not been created yet, so the bare name is used
    (outside the class it would normally be written as UserInfoConfig.change_func)
    Display fields: if list_display is omitted, all fields are shown, but without translation -- e.g. gender shows 0/1 instead of male/female, and foreign keys show the model object instead of the wanted field
'''
list_display=['id','username','email',gender_display,dp_display]
    # combined search fields
comb_list=['gender','dp']
    # search columns
search_list=["username","email"]
class HobbyConfig(StarkConfig):
search_list = ["title"]
    ##################### bulk operation configuration #################################
mutil_list=[
{"func":"mutil_install","name":"批量装机"},
{"func":"mutil_export","name":"批量导出"},
{"func":"mutil_del","name":"批量删除"},
]
def mutil_del(self,select_value,pk_list):
print("批量删除开始")
print(select_value)
print(pk_list)
obj=self.mcls.objects.filter(pk__in=pk_list)
obj.delete()
    ##################### end of bulk operation configuration #################################
# register with site, i.e. add the models into the site registry dict
site.registry(models.UserInfo,UserInfoConfig)
site.registry(models.Role)
site.registry(models.Department)
site.registry(models.Hobby,HobbyConfig)
|
[
"1606896936@qq.com"
] |
1606896936@qq.com
|
b2555ced9051315bb27cd9fc944627d3f9946d73
|
0c296aa7a51011416fa1688b92e12f546b80018a
|
/app.py
|
a722703b07c82216990496e4f839183c41fd7932
|
[] |
no_license
|
apoorva2014/rpiwalker
|
e757d05114354ea653a02502c30658a2d3ca3658
|
2ee3253e9c82abb3da513c2bca9ab054558faaa0
|
refs/heads/master
| 2016-08-12T04:24:55.347590
| 2015-11-09T06:13:50
| 2015-11-09T06:13:50
| 45,809,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # needed for SQLAlchemy(app) below
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
from views import *
if __name__ == '__main__':
app.run()
|
[
"mahaja2@rpi.edu"
] |
mahaja2@rpi.edu
|
55eb7669469e7a9bdfc29138b1127a490533fec3
|
d866a082ad836283183a72adc18fe8022d925772
|
/intersection.py
|
ece994538c629fdfb580acf13f7c4908882dd9bf
|
[
"Apache-2.0"
] |
permissive
|
ralgond/SortedListIntersection
|
8a920dc2ef2ddfb93a7f7eb57dd9e4eb6117843f
|
1118de51d82f63f6a6536054b6cb1548bcb680a0
|
refs/heads/main
| 2023-02-20T03:52:11.974433
| 2021-01-17T09:26:40
| 2021-01-17T09:26:40
| 326,561,659
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
import sys
# Print every line that appears in both input files.
# Usage: python intersection.py <file_a> <file_b>
d = {}
for line in open(sys.argv[1]):
    w = line.strip()
    d[w] = 1
for line in open(sys.argv[2]):
    w = line.strip()
    if d.get(w) is not None:
        print(w)
|
[
"ht201509@163.com"
] |
ht201509@163.com
|
d8810763bbeebc82f38ce67a85bff4cc0449ac5f
|
a413623cb895eaeddcca92287caa29a49847a2d8
|
/manage.py
|
e20392d6d6edf11701d9cdc77d48956586c89efa
|
[] |
no_license
|
dgomezc1/R-Oil
|
8aebe9bbd40c6a6f86fc1740d455b9ffddb79263
|
3eb8ecd21a054572a40d59d56009aea64432c495
|
refs/heads/master
| 2023-09-03T12:20:55.650824
| 2021-11-18T03:06:20
| 2021-11-18T03:06:20
| 400,905,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Roil.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"dgomezc10@eafit.educ.co"
] |
dgomezc10@eafit.educ.co
|
7891dc4255d44a33b1a58dc2039f2567e9c41997
|
f952f548ef0a54afb0d34ee77f4a7914d6c1f9a9
|
/extract_imagenet_activation.py
|
c862819cc27c86cc3d6636b03f6971646d547d37
|
[
"MIT"
] |
permissive
|
helloTC/SemanticRelation
|
28ae0bad0eb86c46d2f9f49e3d0606d73233e4e8
|
d8a24ae825cd48c98ac2d4a463b6b46f72f2242c
|
refs/heads/master
| 2023-08-21T18:13:02.289528
| 2021-09-28T14:20:56
| 2021-09-28T14:20:56
| 277,487,713
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
import torch
from torchvision import models, transforms, datasets
import os
from scipy import stats, special
import numpy as np
from dnnbrain.dnn import analyzer as dnn_analyzer
def pearsonr(A, B):
"""
A broadcasting method to compute pearson r and p
-----------------------------------------------
Parameters:
A: matrix A, (i*k)
B: matrix B, (j*k)
Return:
rcorr: matrix correlation, (i*j)
pcorr: matrix correlation p, (i*j)
Example:
>>> rcorr, pcorr = pearsonr(A, B)
"""
if isinstance(A,list):
A = np.array(A)
if isinstance(B,list):
B = np.array(B)
if np.ndim(A) == 1:
A = A[None,:]
if np.ndim(B) == 1:
B = B[None,:]
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
rcorr = np.dot(A_mA, B_mB.T)/np.sqrt(np.dot(ssA[:,None], ssB[None]))
df = A.T.shape[1] - 2
r_forp = rcorr*1.0
r_forp[r_forp==1.0] = 0.0
t_squared = rcorr.T**2*(df/((1.0-rcorr.T)*(1.0+rcorr.T)))
pcorr = special.betainc(0.5*df, 0.5, df/(df+t_squared))
return rcorr, pcorr
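# Quick sanity check of the broadcasting behaviour (illustrative usage):
#   A = np.array([[1., 2., 3.], [2., 4., 6.]]); B = np.array([[1., 2., 3.]])
#   r, p = pearsonr(A, B)   # r has shape (2, 1): one value per (row of A, row of B) pair
#   np.allclose(r, 1.0)     # True -- both rows of A are perfectly correlated with B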
def dnn_activation(data, model, layer_loc, channels=None):
"""
Extract DNN activation from the specified layer
This code is from the DNNBrain toolbox https://github.com/BNUCNL/dnnbrain
For readability, I separate it from the DNNBrain and directly call it for activation.
Parameters:
----------
data[tensor]: input stimuli of the model with shape as (n_stim, n_chn, n_r, n_c)
model[model]: DNN model
layer_loc[sequence]: a sequence of keys to find the location of
the target layer in the DNN model.
channels[list]: channel indices of interest
Return:
------
dnn_acts[array]: DNN activation
a 4D array with its shape as (n_stim, n_chn, n_r, n_c)
"""
# change to eval mode
model.eval()
# prepare dnn activation hook
dnn_acts = []
def hook_act(module, input, output):
act = output.detach().numpy().copy()
if channels is not None:
act = act[:, channels]
dnn_acts.append(act)
module = model
for k in layer_loc:
module = module._modules[k]
hook_handle = module.register_forward_hook(hook_act)
# extract dnn activation
model(data)
dnn_acts = dnn_acts[0]
hook_handle.remove()
return dnn_acts
if __name__ == '__main__':
parpath = '/nfs/e3/ImgDatabase/ImageNet_2012/ILSVRC2012_img_val/'
transform = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])])
# Extract activation
layer_loc = [('fc')]
imagefolder = datasets.ImageFolder(parpath, transform=transform)
dataloader = torch.utils.data.DataLoader(imagefolder, batch_size=50, shuffle=False, num_workers=30)
cnnmodel = models.alexnet(pretrained=False)
# Could be directly downloaded from pytorch by setting pretrained=True
cnnmodel.load_state_dict(torch.load('/nfs/a1/userhome/huangtaicheng/workingdir/models/DNNmodel_param/alexnet.pth'))
cnnmodel.eval()
output_act = []
output_target = []
for i, (image, target) in enumerate(dataloader):
print('Iterate {}'.format(i+1))
outact = dnn_activation(image, cnnmodel, layer_loc)
# FC
outact = outact.mean(axis=0)
# Conv
# outact = np.mean(outact,axis=0)
# outact = outact.reshape(outact.shape[0], outact.shape[1]*outact.shape[2])
output_act.append(outact)
# break
output_act = np.array(output_act)
r, _ = pearsonr(output_act.reshape(1000,-1), output_act.reshape(1000,-1))
np.save('data/DCNNsim/valiation_corr_alexnet_fc.npy', r)
|
[
"taicheng_huang@sina.cn"
] |
taicheng_huang@sina.cn
|
90830dd8e0342d5d9af278ca6d2ecbf56abac769
|
699f51e33de1e73ccc871d5b473fc30fc015e7f6
|
/twillo_sms.py
|
d3ff45ce643b973e78748051ba8dad87a12b0784
|
[] |
no_license
|
arolson/MachineLearningForStocks
|
95025d1f212293a16ab490bc007486a7e66efef7
|
09c8f48a5436b767e4fee05dff848c9c16f63cc6
|
refs/heads/master
| 2021-08-30T03:56:24.180856
| 2017-12-15T23:32:21
| 2017-12-15T23:32:21
| 113,937,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from twilio.rest import Client
# Your Account SID from twilio.com/console
account_sid = "AC3f0931dee2fb0698409a1185a150fe86"
# Your Auth Token from twilio.com/console
auth_token = "369e82850dd35b2f5c577ae2c57d95bc"
client = Client(account_sid, auth_token)
message = client.messages.create(
to="+19512839806",
from_="+15622739442",
body="Hello from Python!")
print(message.sid)
|
[
"arolson56@gmail.com"
] |
arolson56@gmail.com
|
5542f58f54a21369edbbba27fc812778c4d113d2
|
34306cc8bab39d1da55479fde38245c3b29d22c4
|
/Graduate/DS501/vanand_HW2/problem3.py
|
a9716783c69e61dfb9a99681cc2bd3f6bcf074d8
|
[] |
no_license
|
vanand23/WPI_Projects
|
ebba325fa9359155606d815219fa2c75f31ecf5c
|
4c18cc432ced229bb421a7b21978b8e8a88108b5
|
refs/heads/master
| 2023-01-21T10:34:56.783105
| 2022-01-02T05:57:00
| 2022-01-02T05:57:00
| 214,215,289
| 1
| 0
| null | 2023-01-09T11:58:45
| 2019-10-10T15:12:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,999
|
py
|
import pandas as pd
#-------------------------------------------------------------------------
'''
Problem 3: getting familiar with pandas package.
In this problem, please install the following python package:
* pandas
Pandas is the library for tabular data analysis in Python.
It provides fast, flexible, and expressive data structures designed to make working with tabular and multidimensional data both easy and intuitive.
To install numpy using pip, you could type `pip3 install pandas` in the terminal.
Reference: you could read the tutorials for Pandas:
https://www.learndatasci.com/tutorials/python-pandas-tutorial-complete-introduction-for-beginners/
https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html
'''
#--------------------------
def dataframe():
'''
Create the following data frame using Pandas:
|'height'| 'width' |
|--------|---------|
| 1 | 4 |
| 2 | 5 |
| 3 | 6 |
Output:
X: a pandas dataframe with two columns and 3 rows,
the first column is "height" including 3 records with values 1, 2, 3
the second column is "width" including 3 records with values 4, 5, 6
'''
#########################################
## INSERT YOUR CODE HERE
data = {
'height': [1,2,3],
'width': [4,5,6]
}
X = pd.DataFrame(data)
#########################################
return X
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_dataframe' in the terminal. '''
#--------------------------
def load_csv(filename="A.csv"):
'''
Load a data frame from CSV file.
The CSV file contains a header line (the first row), indicating the names of all the columns.
Input:
filename: a string indicating the filename of the CSV file.
Output:
X: a pandas dataframe loaded from the CSV file
Hint: you could solve this problem using one line of code with a function in pandas package.
'''
#########################################
## INSERT YOUR CODE HERE
X = pd.read_csv(filename)
#########################################
return X
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_load_csv' in the terminal. '''
#--------------------------
def search_height(X, t=2):
'''
    Search for all the records in a dataframe whose height (column) is greater than or equal to the threshold value
Input:
X: a dataframe
t: an integer scalar, the threshold of the height.
Output:
        Y: the result dataframe, containing only the records with height greater than or equal to the threshold
Hint: you could solve this problem using one line of code using pandas package.
'''
#########################################
## INSERT YOUR CODE HERE
Y = X[X['height'] >= t]
#########################################
return Y
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_search_height' in the terminal. '''
#--------------------------
def save_csv(X, filename="A2.csv"):
'''
save a data frame into a CSV file.
Note, the CSV file should contain no index column.
Input:
X: a pandas dataframe to be saved into the CSV file
filename: a string indicating the filename of the CSV file.
Hint: You could solve this problem using one line of code with a function in pandas package.
You could set the index parameter to avoid adding an index column in the CSV file.
'''
#########################################
## INSERT YOUR CODE HERE
X.to_csv(filename, index=False)
#########################################
return
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_save_csv' in the terminal. '''
#--------------------------
def sum_column(X, key='count'):
'''
Compute the sum of values in the key column of a data frame.
Suppose we have the following data frame X:
| 'ID' | 'count' |
|--------|---------|
| 1 | 4 |
| 1 | 5 |
| 2 | 6 |
| 2 | 7 |
and if key = 'count', we want to compute the sum of all values in the 'count' column: 4+5+6+7 = 22
The result in this case should be 22.
Input:
X: a dataframe
key: a string indicating the column to be used for summing the values.
Output:
S: an integer scalar, the sum of the values in the column
Hint: you could solve this problem using one line of code using pandas package.
'''
#########################################
## INSERT YOUR CODE HERE
S = X[key].sum()
#########################################
return S
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_sum_column' in the terminal. '''
#--------------------------
def aggregate(X, key = 'ID'):
'''
Suppose we have the following data frame X:
| 'ID' | 'count' |
|--------|---------|
| 1 | 4 |
| 1 | 5 |
| 2 | 6 |
| 2 | 7 |
We have duplicated values in ID column. Now we want to aggregate the 'count' values according to their 'ID's.
So that the record with ID=1, should have a count = 4+5
and the record with ID=2, should have a count = 6+7
The output should be:
| 'ID' | 'count' |
|--------|---------|
| 1 | 9 |
| 2 | 13 |
Input:
X: a pandas dataframe with duplicated key values
key: a string indicating the column to be used for grouping the rows.
Output:
Y: the aggregated dataframe, containing no duplicated ID's.
        Hint: you could use the groupby() function of pandas and solve this problem using two lines of code.
To convert an index into a column, you could use reset_index() method in pandas.
'''
#########################################
## INSERT YOUR CODE HERE
df = X.groupby(key).sum()
Y = df.reset_index()
#########################################
return Y
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_aggregate' in the terminal. '''
#--------------------------
def join(X,Y, key = 'ID'):
'''
Suppose we have the following data frame X:
| 'ID' | 'count' |
|--------|---------|
| 1 | 9 |
| 2 | 13 |
and we have another data frame Y:
| 'ID' | 'name' |
|--------|---------|
| 1 | 'Alex' |
| 2 | 'Bob' |
| 3 | 'Tom' |
Join the two tables with 'ID'. The output should be:
| 'ID' | 'count' | 'name' |
|--------|---------| ---------|
| 1 | 9 | 'Alex' |
| 2 | 13 | 'Bob' |
Input:
X: a pandas dataframe
Y: another pandas dataframe
key: a string indicating the column to be used for joining the tables
Output:
Z: the result dataframe, containing the join of the two tables.
        Hint: you could use the merge() function of pandas and solve this problem using one line of code.
'''
#########################################
## INSERT YOUR CODE HERE
Z = pd.merge(X,Y, on=key)
#########################################
return Z
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_join' in the terminal. '''
#--------------------------
def filtering(X, key = 'ID', values=[1,3]):
'''
Suppose we have the following data frame X:
| 'ID' | 'name' |
|--------|---------|
| 1 | 'Alex' |
| 2 | 'Bob' |
| 3 | 'Tom' |
Filter the table with 'ID' (key), the values should be in the list "values".
If the value list is [1,3], which means that we only want to keep the rows with ID=1 or ID=3.
The output should be:
| 'ID' | 'name' |
|--------|---------|
| 1 | 'Alex' |
| 3 | 'Tom' |
Input:
X: a pandas dataframe
key: a string indicating the column to be used for filtering the tables
values: a list of values to keep in the table
Output:
Y: the result dataframe, containing the filtered table.
Hint: you could use the isin() function of pandas and solve this problem using one line of code.
'''
#########################################
## INSERT YOUR CODE HERE
Y = X[X[key].isin(values)]
#########################################
return Y
''' TEST: Now you can test the correctness of your code above by typing `nosetests -v test3.py:test_filtering' in the terminal. '''
#--------------------------------------------
''' TEST ALL functions in Problem 3:
Now you can test the correctness of all the above functions by typing `nosetests -v test3.py' in the terminal.
If your code passed all the tests, you will see the following message in the terminal:
---------- Problem 3 (10 points in total) ------------ ... ok
(1 points) dataframe ... ok
(1 points) load_csv ... ok
(1 points) search_height ... ok
(1 points) save_csv ... ok
(1 points) sum_column ... ok
(2 points) aggregate ... ok
(2 points) join ... ok
(1 points) filtering ... ok
----------------------------------------------------------------------
Ran 6 tests in 0.758s
OK
'''
#--------------------------------------------
|
[
"vandana1anand@gmail.com"
] |
vandana1anand@gmail.com
|
19290029eb01d0c3265d54c055a1f982f4ed1e36
|
486b3c2a20fcaa266a2ff1616b06d9a705550b27
|
/regression/single/neural_net.py
|
f6bcccb91b740974615bedc523671ff78c7b7c9f
|
[] |
no_license
|
cy18cn/ai_learning
|
81a82761fb29fa6a1d2eca90be8b1e32d674ad04
|
c13dbf919fc9bead8044d35acd927d525aa6c820
|
refs/heads/master
| 2022-10-11T17:15:50.649602
| 2020-06-05T08:51:54
| 2020-06-05T08:51:54
| 269,578,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,795
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
from regression.single.training_history import TrainingHistory
class NeuralNet(object):
def __init__(self, w, b, params):
self.params = params
self.w = w
self.b = b
def forward_batch(self, batch_x):
return np.dot(batch_x, self.w) + self.b
def backward_batch(self, batch_x, batch_y, batch_z):
m = batch_x.shape[0]
dz = batch_z - batch_y
db = dz.sum(axis=0, keepdims=True) / m
dw = np.dot(batch_x.T, dz) / m
return dw, db
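    # Gradients above follow from the mean squared error loss J = sum((Z - Y)**2) / (2m)
    # with Z = X.W + b:  dW = X^T (Z - Y) / m  and  db = mean(Z - Y) over the batch.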
def update(self, dw, db):
self.w = self.w - self.params.eta * dw
self.b = self.b - self.params.eta * db
def inference(self, batch_x):
return self.forward_batch(batch_x)
def check_loss(self, data_reader):
x, y = data_reader.get_whole_samples()
m = x.shape[0]
z = self.forward_batch(x)
return ((y - z) ** 2).sum() / 2 / m
def train(self, data_reader):
loss_history = TrainingHistory()
        # a batch_size of -1 defaults to the full training set
if self.params.batch_size == -1:
self.params.batch_size = data_reader.num_train
        # number of iterations per epoch
max_iteration = int(data_reader.num_train / self.params.batch_size)
for epoch in range(self.params.max_epoch):
print("epoch=%d" % epoch)
data_reader.shuffle()
for iteration in range(max_iteration):
batch_x, batch_y = data_reader.get_batch_samples(self.params.batch_size, iteration)
batch_z = self.forward_batch(batch_x)
dw, db = self.backward_batch(batch_x, batch_y, batch_z)
self.update(dw, db)
if iteration % 2 == 0:
loss = self.check_loss(data_reader)
print(epoch, iteration, loss)
loss_history.add(epoch * max_iteration + iteration, loss, self.w, self.b)
if loss < self.params.eps:
break
if loss < self.params.eps:
break
loss_history.show_history(self.params)
print(self.w, self.b)
self.show_contour(data_reader, loss_history, self.params.batch_size)
def show_contour(self, data_reader, loss_history, batch_size):
latest_loss, latest_iteration, latest_w, latest_b = loss_history.get_latest()
len1 = 50
len2 = 50
        # w coordinate vector, e.g. [1, 2, 3]
w = np.linspace(latest_w - 1, latest_w + 1, len1)
        # b coordinate vector, e.g. [4, 5]
b = np.linspace(latest_b - 1, latest_b + 1, len2)
        # build coordinate matrices from the coordinate vectors: w and b give 6 grid points (1,4) (2,4) (3,4) (1,5) (2,5) (3,5)
        # the returned coordinate matrices: [[1, 2, 3], [1, 2, 3]], [[4, 4, 4], [5, 5, 5]]
w, b = np.meshgrid(w, b)
len = len1 * len2
x, y = data_reader.get_whole_samples()
m = x.shape[0]
        # ravel flattens: w.ravel() -> [1, 2, 3, 1, 2, 3]
z = np.dot(x, w.ravel().reshape(1, len)) + b.ravel().reshape(1, len)
loss = (z - y) ** 2
loss = loss.sum() / 2 / m
loss = loss.reshape(len1, len2)
plt.contour(w, b, loss, levels=np.logspace(-5, 5, 100), norm=LogNorm(), cmap=plt.cm.jet)
#
w_history = loss_history.w_history
b_history = loss_history.b_history
plt.plot(w_history, b_history)
plt.xlabel("w")
plt.ylabel("b")
plt.title(str.format("batchsize={0}, iteration={1}, eta={2}, w={3:.3f}, b={4:.3f}",
batch_size, latest_iteration, self.params.eta, latest_w, latest_b))
plt.axis([latest_w - 1, latest_w + 1, latest_b - 1, latest_b + 1])
plt.show()
|
[
"ryan.cao@airparking.cn"
] |
ryan.cao@airparking.cn
|
b9573afae9b48591e7bc0499cea8f203459554d8
|
ea4567b4388ea97c8ca718d9e331dc796439ee44
|
/exercise_learn/new_selenium_project/bdd_project/features/steps/simple_register.py
|
4805d2f7ac88a0148471f082bcb848f14bddf71e
|
[] |
no_license
|
Kingwolf9527/python_knowledge
|
ace65470ec706cae195b228b8e8d6ca8db574db8
|
1ccb3a788c172f3122a7c119d0607aa90934e59b
|
refs/heads/master
| 2020-12-04T06:12:49.809020
| 2020-02-10T18:22:36
| 2020-02-10T18:22:44
| 231,647,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# ! /usr/bin/env python
# - * - coding:utf-8 - * -
# __author__ : KingWolf
# createtime : 2019/12/5 3:10
import sys
sys.path.append(r'F:\GitExtensions_python\project_spider\exercise_learn\new_selenium_project\bdd_project\features')
from behave import given,when,then,step_matcher
# use regular-expression step matching
step_matcher('re')
@when('I open the register website')
def step_register_browser(context):
context.driver.get('http://www.5itest.cn/register?goto=/')
@then(u'I expect that the title is "([^\s]*)"')
def step_register_get_title(context,title_name):
title = context.driver.title
assert title_name in title
|
[
"lccr777@163.com"
] |
lccr777@163.com
|
8e8d4069dc3605e1f533561875aa4d70d189d33b
|
90d139b4ba5052fcb6a7cd5b36fd25135f8d7d30
|
/home2/bin/distro-3.8
|
d5854e2e8486089c3e5d7937a52b648730b4e273
|
[] |
no_license
|
Rithurajmgclt/flaskProject
|
7f995bcc13b17fce398e5102b5f9fd90a21b43a7
|
eb1382e320e2c28dc3ba41a5c6fd3fb1fcba50f4
|
refs/heads/master
| 2022-12-12T15:26:02.367309
| 2020-09-03T05:31:35
| 2020-09-03T05:31:35
| 285,311,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
8
|
#!/home/rithurajmg/Desktop/myproject/home2/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from distro import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rithurajmg2015@gmail.com"
] |
rithurajmg2015@gmail.com
|
2bd5a77cd774823656aa9505437757beee47d01e
|
0191678f9e4bc9bf3487c18304a5b67704b485ae
|
/private/templates/EVASS/controllers.py
|
6b29b0f92a04b7022633cc6227949d45788f5593
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
code-for-india/sahana_shelter_worldbank
|
fba11d93e1af332afb3eb4fc3f62e1cece48ceaa
|
0034fc69e37f8719e7870e28cc576d9af05fc5b2
|
HEAD
| 2016-09-06T13:46:25.994167
| 2014-05-10T12:22:34
| 2014-05-10T12:22:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,619
|
py
|
# -*- coding: utf-8 -*-
from os import path
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3 import S3FieldSelector, S3CustomController
from s3theme import formstyle_foundation_inline
THEME = "EVASS"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
T = current.T
request = current.request
s3 = current.response.s3
# Check logged in and permissions
auth = current.auth
settings = current.deployment_settings
roles = current.session.s3.roles
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
# Login/Registration forms
self_registration = current.deployment_settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration is True:
# Provide a Registration box on front page
register_form = auth.s3_registration_form()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# s3.js_global.append(feed_control)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
else:
output["event_list"] = self.event_list()
output["shelter_list"] = self.shelter_list()
output["organizations_btn"] = self.organizations_btn()
output["events_btn"] = self.events_btn()
output["incident_reports_btn"] = self.incident_reports_btn()
output["staff_btn"] = self.staff_btn()
output["volunteers_btn"] = self.volunteers_btn()
# @todo: implement evr module
#output["evacuees_btn"] = self.evacuees_btn()
output["warehouses_btn"] = self.warehouses_btn()
output["shelters_btn"] = self.shelters_btn()
output["hospitals_btn"] = self.hospitals_btn()
output["self_registration"] = self_registration
output["registered"] = registered
output["login_div"] = login_div
output["login_form"] = login_form
output["register_div"] = register_div
output["register_form"] = register_form
if settings.frontpage.rss:
s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
counter = 0
feeds = ""
for feed in settings.frontpage.rss:
counter += 1
feeds = "".join((feeds,
"{title:'%s',\n" % feed["title"],
"url:'%s'}" % feed["url"]))
# Don't add a trailing comma for old IEs
if counter != len(settings.frontpage.rss):
feeds += ",\n"
# feedCycleTime: milliseconds before feed is reloaded (5 minutes)
feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:3,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
s3.js_global.append(feed_control)
self._view(THEME, "index.html")
return output
# -------------------------------------------------------------------------
def shelter_list(self):
""" Provide a dropdown of links to shelters """
T = current.T
s3db = current.s3db
resource = s3db.resource("cr_shelter",
filter = S3FieldSelector("status")
.belongs([2, None]))
data = resource.select(["id", "name"])
shelter_list = UL(_id = "shelter_list",
_class = "f-dropdown",
data = {"dropdown-content": ""})
rows = data["rows"]
if rows:
for row in rows:
shelter_list.append(LI(A(row["cr_shelter.name"],
_href=URL(c="cr",
f="shelter",
args=[row["cr_shelter.id"]])
)
)
)
return LI(A(T("Shelters"),
_class="button dropdown",
data = {"dropdown": "shelter_list"}),
shelter_list
)
else:
# @todo: check permission and provide an "Add Shelter" button
            # if no shelters are registered yet
return ""
# -------------------------------------------------------------------------
def event_list(self):
""" Provide a dropdown of links to events """
T = current.T
s3db = current.s3db
resource = s3db.resource("event_event")
data = resource.select(["id", "name"])
event_list = UL(_id = "event_list",
_class = "f-dropdown",
data = {"dropdown-content": ""})
rows = data["rows"]
if rows:
for row in rows:
event_list.append(LI(A(row["event_event.name"],
_href=URL(c="event",
f="event",
args=[row["event_event.id"]])
)
)
)
return LI(A(T("Events"),
_class="button dropdown",
data = {"dropdown": "event_list"}),
event_list
)
else:
# @todo: check permission and provide an "Add Event" button
            # if no events are registered yet?
return ""
# -------------------------------------------------------------------------
def organizations_btn(self):
return LI(A("Organizations",
_href=URL(c="org", f="organisation"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def events_btn(self):
return LI(A("Events",
_href=URL(c="event", f="event"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def incident_reports_btn(self):
return LI(A("Incident Reports",
_href=URL(c="irs", f="ireport"),
_class="button button-home",
_id="incident-report-btn")
)
# -------------------------------------------------------------------------
def staff_btn(self):
return LI(A("Staff",
_href=URL(c="hrm", f="staff", args=["summary"]),
_class="button button-home")
)
# -------------------------------------------------------------------------
def volunteers_btn(self):
return LI(A("Volunteers",
_href=URL(c="vol", f="volunteer"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def evacuees_btn(self):
return LI(A("Evacuees",
_href=URL(c="evr", f="person"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def warehouses_btn(self):
return LI(A("Warehouse",
_href=URL(c="inv", f="warehouse"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def shelters_btn(self):
return LI(A("Shelters",
_href=URL(c="cr", f="shelter"),
_class="button button-home")
)
# -------------------------------------------------------------------------
def hospitals_btn(self):
return LI(A("Hospitals",
_href=URL(c="hms", f="hospital"),
_class="button button-home")
)
# END =========================================================================
|
[
"claudiococciarelli@gmail.com"
] |
claudiococciarelli@gmail.com
|
ac1491a43c6cbe6bd39017888d5ed97a4f9ffb3a
|
9900578dc0d8b4012bb5a03dd49a890d7a17ea5a
|
/keras_retinanet/utils/box.py
|
77f380dbb6b4b6d90722b0b6af606493d49e4786
|
[
"Apache-2.0"
] |
permissive
|
Dref360/keras-retinanet
|
28b86edd1d6694ac935f9efa0d1c0090288c1391
|
12953b160ccfbe535d73dc0c8be5d628a3cd1c65
|
refs/heads/master
| 2021-08-30T16:22:42.292649
| 2017-12-18T16:22:35
| 2017-12-18T16:22:35
| 114,407,190
| 1
| 0
| null | 2017-12-15T20:05:51
| 2017-12-15T20:05:51
| null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
import numpy as np
class BoundBox:
def __init__(self, classes):
self.x, self.y = float(), float()
self.w, self.h = float(), float()
self.angle = float()
self.c = float()
self.class_num = classes
self.probs = np.zeros((classes,))
def overlap(x1, w1, x2, w2):
    # length of the 1-D overlap between two centre/width intervals (negative if disjoint)
    l1 = x1 - w1 / 2.
    l2 = x2 - w2 / 2.
    left = max(l1, l2)
    r1 = x1 + w1 / 2.
    r2 = x2 + w2 / 2.
    right = min(r1, r2)
    return right - left
def box_intersection(a, b):
    w = overlap(a.x, a.w, b.x, b.w)
    h = overlap(a.y, a.h, b.y, b.h)
    if w < 0 or h < 0:
        return 0
    area = w * h
    return area
def box_union(a, b):
    i = box_intersection(a, b)
    u = a.w * a.h + b.w * b.h - i
    return u
def box_iou(a, b):
    return box_intersection(a, b) / box_union(a, b)
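# Worked example: two 1x1 boxes whose centres are offset by 0.5 along x
#   a = BoundBox(1); a.x, a.y, a.w, a.h = 0.0, 0.0, 1.0, 1.0
#   b = BoundBox(1); b.x, b.y, b.w, b.h = 0.5, 0.0, 1.0, 1.0
#   intersection = 0.5 * 1.0 = 0.5, union = 1 + 1 - 0.5 = 1.5, box_iou(a, b) = 1/3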
def prob_compare(box):
return box.probs[box.class_num]
def prob_compare2(boxa, boxb):
if (boxa.pi < boxb.pi):
return 1
elif(boxa.pi == boxb.pi):
return 0
else:
return -1
|
[
"frederic.branchaud-charron@usherbrooke.ca"
] |
frederic.branchaud-charron@usherbrooke.ca
|
21e64dca429efbe840b32f4192b4d23f5381e3a1
|
b102dc30de767ec3189b38f133fc1f4193e26526
|
/Python Code/Assignment 7/RoomsInfo.py
|
79d0e7846c0d5e7cf0898660e02e3cdf10fa51fb
|
[] |
no_license
|
Ardric/School-Projects
|
7b2d456bed9f7be146b81c133e6463985cb044dc
|
9923ae13b1d2a6bcc926f50e695a215671077c6e
|
refs/heads/master
| 2020-03-16T21:05:41.660196
| 2018-11-30T04:51:27
| 2018-11-30T04:51:27
| 132,984,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
#Daniel Lowdermilk
from RoomsFunctions import *
roomlist = [ {"number": 100, "name":"Jones Room", "sqft": 150, "seating": 9},
{"number": 105, "name":"Smith Room", "sqft": 550, "seating": 50},
{"number": 107, "sqft": 150, "seating": 12},
{"number": 109, "name":"Thomas Room", "sqft": 200, "seating": 18},
{"number": 111, "sqft": 150, "seating": 9},
{"number": 115, "name":"Scott's closet", "sqft": 12}
]
print("How much total sq feet:" , totalSqFt(roomlist))
print("Largest room:" , numberOfLargestRoom(roomlist))
print("How many named:" , howmanyNamed(roomlist))
print("How many total seats: " , totalSeats(roomlist))
|
[
"noreply@github.com"
] |
Ardric.noreply@github.com
|
a57ef56195c31db990a6ba23f191faef05f9101a
|
cb55aa5b76a69399cd9511901ffc4208e486cd1a
|
/main.py
|
c806782b235b7cea06bc1e2a6e49afc22f12cb14
|
[
"MIT"
] |
permissive
|
indmind/AutoTypeRacer
|
29bc51e863814c4019e93b5d28576f0e579d661b
|
c9608313bcb5f6b57e5576c882d540917e149efb
|
refs/heads/master
| 2020-04-11T11:48:42.296663
| 2020-02-16T00:29:38
| 2020-02-16T00:29:38
| 161,760,273
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,682
|
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from dotenv import load_dotenv
from time import sleep
from os import getenv
from tabulate import tabulate
import sys
import pyautogui
# save each race result
history = []
# elements selector
signin_selector = '#tstats > table > tbody > tr.datarow > td:nth-child(1) > table > tbody > tr > td:nth-child(1) > a'
username_selector = 'body > div.DialogBox.trPopupDialog.editUserPopup > div > div > div.dialogContent > div > div.bodyWidgetHolder > div > table.gwt-DisclosurePanel.gwt-DisclosurePanel-open > tbody > tr:nth-child(2) > td > div > table > tbody > tr:nth-child(1) > td:nth-child(2) > input'
password_selector = 'body > div.DialogBox.trPopupDialog.editUserPopup > div > div > div.dialogContent > div > div.bodyWidgetHolder > div > table.gwt-DisclosurePanel.gwt-DisclosurePanel-open > tbody > tr:nth-child(2) > td > div > table > tbody > tr:nth-child(2) > td:nth-child(2) > table > tbody > tr:nth-child(1) > td > input'
signinconfirm_selector = 'body > div.DialogBox.trPopupDialog.editUserPopup > div > div > div.dialogContent > div > div.bodyWidgetHolder > div > table.gwt-DisclosurePanel.gwt-DisclosurePanel-open > tbody > tr:nth-child(2) > td > div > table > tbody > tr:nth-child(4) > td:nth-child(2) > table > tbody > tr > td:nth-child(1) > button'
play_selector = '#dUI > table > tbody > tr:nth-child(2) > td:nth-child(2) > div > div.mainViewport > div > table > tbody > tr:nth-child(2) > td > table > tbody > tr > td:nth-child(2) > table > tbody > tr:nth-child(1) > td > a'
# just to check if the race page is loaded
banner_selector = 'body > div.countdownPopup.horizontalCountdownPopup > div > table > tbody > tr > td > table > tbody > tr > td:nth-child(2)'
# this selector needs #gwt-uid-{uid} >
text_selector = 'table > tbody > tr:nth-child(2) > td > table > tbody > tr:nth-child(1) > td > table > tbody > tr:nth-child(1) > td > div > div'
input_selector = 'table > tbody > tr:nth-child(2) > td > table > tbody > tr:nth-child(2) > td > input'
raceagain_selector = 'table > tbody > tr:nth-child(3) > td > table > tbody > tr > td:nth-child(2) > a'
# after race selector
wpm_selector = 'table > tbody > tr:nth-child(4) > td > div > table > tbody > tr:nth-child(2) > td > table > tbody > tr > td:nth-child(2) > table > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(1) > td:nth-child(2) > table > tbody > tr > td:nth-child(1) > div > div'
time_selector = 'table > tbody > tr:nth-child(4) > td > div > table > tbody > tr:nth-child(2) > td > table > tbody > tr > td:nth-child(2) > table > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(2) > td:nth-child(2) > div > span'
point_selector = 'table > tbody > tr:nth-child(4) > td > div > table > tbody > tr:nth-child(2) > td > table > tbody > tr > td:nth-child(2) > table > tbody > tr:nth-child(4) > td > table > tbody > tr:nth-child(4) > td:nth-child(2) > div > div'
# check if element exist using css selector
def isElementExist(selector):
try:
browser.find_element_by_css_selector(selector)
except NoSuchElementException:
return False
return True
# get uid where race element nested
def bruteUID():
print("bruteforce-ing uid...")
uid = 0
# try checking the input selector element
while uid < 10000:
input_selector = '#gwt-uid-%d > table > tbody > tr:nth-child(2) > td > table > tbody > tr:nth-child(2) > td > input' % uid
if isElementExist(input_selector):
break
uid += 1
print("uid found:", uid)
return uid
# get text, input, and race-again element
def getRaceElementsSelector():
uid = "#gwt-uid-%d > " % bruteUID()
selectors = {
'text': uid + text_selector,
'input': uid + input_selector,
'raceagain': uid + raceagain_selector,
'wpm': uid + wpm_selector,
'time': uid + time_selector,
'point': uid + point_selector,
}
return selectors
# get and wait an element using css selector
def getAndWait(selector, key, max=60):
print('get and wait:', key)
return WebDriverWait(browser, max).until(EC.presence_of_element_located((By.CSS_SELECTOR, selector)))
# find an element using css selector
def find(selector, key):
print('find:', key)
return browser.find_element_by_css_selector(selector)
def secureClick(element, key):
while not element.is_displayed():
print(key, 'is not visible, waiting for 1s')
sleep(1)
print('click:', key)
element.send_keys(Keys.TAB)
element.click()
# login using data from .env
def login():
print("login...")
getAndWait(signin_selector, 'sigin').click()
getAndWait(username_selector, 'username').send_keys(getenv("username"))
find(password_selector, 'password').send_keys(getenv("password"))
find(signinconfirm_selector, 'signinconfirm').click()
sleep(5)
print("done login...")
# self explanatory
def race(count):
try:
#page loading check
getAndWait(banner_selector, 'banner')
selectors = getRaceElementsSelector()
# select text element
text = find(selectors['text'], 'text').text
print("text:", text)
# select text input element where we need to type the text
text_input = find(selectors['input'], "input")
# wait for game to start
while text_input.get_attribute('disabled'):
print("wait the race to start for 1s...")
sleep(1)
# after countdown is done, click the element (47)
text_input.click()
# type using pyautogui because I dont know how to set the typing speed
print("typing...")
pyautogui.typewrite(text, interval=0.14)
# save the result
result = [
text[:10] + '...' + text[-10:],
getAndWait(selectors['wpm'], 'wpm').text,
getAndWait(selectors['time'], 'time').text,
getAndWait(selectors['point'], 'point').text
]
history.append(result)
count -= 1
if count:
secureClick(find(selectors['raceagain'], "raceagain"), "raceagain")
race(count)
except TimeoutException:
        print('kelamaan')  # Indonesian: "took too long" (timed out)
if __name__ == "__main__":
load_dotenv()
count = 1
guestMode = False
if len(sys.argv) > 1:
count = int(sys.argv[1])
if len(sys.argv) > 2:
if sys.argv[2] == "g":
print('Start in guest mode...')
guestMode = True
# disable image load and idk what disk-cache-size used for
prefs = {'profile.managed_default_content_settings.images':2, 'disk-cache-size': 4096}
options = webdriver.ChromeOptions()
options.add_experimental_option("prefs", prefs)
browser = webdriver.Chrome(chrome_options=options)
browser.get('https://play.typeracer.com/')
if not guestMode:
login()
# click the "enter typing race button"
getAndWait(play_selector, 'playbutton').click()
# RACE!!!!
race(count)
print('\nRESULTS:')
print(tabulate(history, headers=['text', 'speed', 'time', 'point'], showindex=True))
wpms = [int(res[1].split()[0]) for res in history]
points = sum([int(res[3]) for res in history])
print('\nAVERAGE WPM:', sum(wpms) / len(wpms))
print('TOTAL POINTS:', points)
|
[
"sattanationmail@gmail.com"
] |
sattanationmail@gmail.com
|
3f7af8186863e30d0b685bdc989acb234eaa86c1
|
138c4f45483128ac64376cb87c3e66bedffae85b
|
/pickle_test.py
|
cc5c970a6a71da0ef2f598ebd7435595814cf552
|
[] |
no_license
|
lddsdu/Cameratest
|
6844e5325f34c42b6b69737b9d72b3156782a762
|
e2de487bd23bec155cbb7d2240eccf93a69e3dce
|
refs/heads/master
| 2020-03-10T18:23:42.506341
| 2018-05-09T13:46:51
| 2018-05-09T13:46:51
| 129,524,853
| 0
| 0
| null | 2018-05-09T13:46:52
| 2018-04-14T14:32:36
|
Python
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
import pickle
class Person:
def __init__(self,name,age,gender):
self.name = name
self.age = age
self.gender = gender
def intro(self):
print(self.name+" "+str(self.age)+" "+self.gender)
def main():
"""
deal with some type of data
:return:
"""
str2ser = "this is a fucking donkey"
dict2ser = {'age':13,'name':'jack'}
byte_byte = pickle.dumps(dict2ser)
obj = pickle.loads(byte_byte)
print(obj)
scr = Person("suncerui",22,"female")
byte_scr = pickle.dumps(scr)
newscr = pickle.loads(byte_scr)
newscr.intro()
if __name__ == '__main__':
main()
|
[
"201500130096@sdu.edu.cn"
] |
201500130096@sdu.edu.cn
|
281c25c74d7881528beaa1d83f626930a80f928b
|
79f0e12dd8a7aca0ea5afdcd4b259b3daec41765
|
/blog/models.py
|
01aa9bb6037c2034c764f3c1062b5ce385eb2ab6
|
[] |
no_license
|
mbabikir4/portfoliomo
|
0f35ac55e72e2fd13c2fec3fc15ade6c96c351ff
|
693e9186b4edbf0c0ef356eaa344e74b149bb7be
|
refs/heads/master
| 2022-11-27T19:47:06.354369
| 2020-08-06T16:31:40
| 2020-08-06T16:31:40
| 285,619,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from django.db import models
# Create your models here.
class Blag(models.Model):
title = models.CharField(max_length=60)
text = models.TextField()
time = models.DateField()
def __str__(self):
return self.title
|
[
"mbabiker530@gmail.com"
] |
mbabiker530@gmail.com
|
5dfe3e9d7031248f08542ea207698a79be6ee5f4
|
2ee46c87820d4f63f207e88ba099ccf042a5cd27
|
/lib/sibra/ext/util.py
|
8caa59a85569688feccd629ddf6f7788afeaa90e
|
[
"Apache-2.0"
] |
permissive
|
marcoeilers/scion
|
38f4d04a31a116bba945a710a66b15c6a0953628
|
06f3f0b82dc8a535ce8b0a128282af00a8425a06
|
refs/heads/master
| 2022-09-16T00:10:57.258140
| 2017-06-09T07:51:24
| 2017-06-09T07:51:24
| 74,899,812
| 1
| 1
|
Apache-2.0
| 2021-07-20T14:38:16
| 2016-11-27T16:29:41
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`util` --- SIBRA extension utilities
=========================================
"""
# SCION
from lib.sibra.ext.ext import FLAG_STEADY
from lib.sibra.ext.steady import SibraExtSteady
from lib.sibra.ext.ephemeral import SibraExtEphemeral
def parse_sibra_ext(raw): # pragma: no cover
flag = raw[0]
if flag & FLAG_STEADY:
return SibraExtSteady(raw)
else:
return SibraExtEphemeral(raw)
|
[
"kormat@gmail.com"
] |
kormat@gmail.com
|
022b31ddccb04273494fa2f2b62023044fc447a9
|
1ded6c4aeeee677925d3a951b2c85b4f3e8cb772
|
/Python自动化开发/day16/Django/Django/settings.py
|
4e72747efa923d27d630f229d5be02038bba431d
|
[] |
no_license
|
zhangyu-yaoshen/Python
|
90ec2aafcfaeabcdf2df66688be2d27e7062a021
|
be7d3e5cc80d4a961fc0fe44e4dbafe318e7fdec
|
refs/heads/master
| 2021-01-18T16:37:51.692730
| 2019-09-16T00:49:51
| 2019-09-16T00:49:51
| 100,464,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,238
|
py
|
"""
Django settings for Django project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^el6xoti+qx$87=@y0%$_d(x)@ned#9@gv!36ab)8p179*#yow'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Set the path where static files are served from
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
|
[
"494167883@qq.com"
] |
494167883@qq.com
|
c08eb5d024ad2583194e20ecffba206e0f2b10bb
|
f9954c6e9092b3b5d36385ae3e0b6507ea5553f1
|
/accounts/models.py
|
98d26d78e8d6261e49a32012e3e34932d001014d
|
[] |
no_license
|
vixen-python/student_db
|
a1d1912c5f7f7343de9bb4809d4297e06b2f1e97
|
d7327638c66de3f1e5fe1c90cbe2161d7a7d55d9
|
refs/heads/master
| 2023-06-27T04:52:58.095083
| 2021-07-24T13:10:14
| 2021-07-24T13:10:14
| 389,551,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from django.contrib.auth.models import User
from django.db.models import Model, OneToOneField, CASCADE, TextField, ForeignKey, PROTECT, CharField
from student.models import Address
class Profile(Model):
user = OneToOneField(User, on_delete=CASCADE)
biography = TextField()
personal_phone = CharField(max_length=32, default='', null=False, blank=False)
permanent_address = ForeignKey(Address, on_delete=PROTECT, null=True)
|
[
"dominika.pupakova@localhost"
] |
dominika.pupakova@localhost
|
89dfc85dcf519e47155a9816a032b0c255263dd4
|
60dc29006d19fa1a7415426c1862aad3022cda68
|
/CineList/CineList/cinelist/settings.py
|
9442c59ef71958a0b8ff928cedd67006c54e3b6a
|
[] |
no_license
|
garyjohnson96/CineList
|
60db0395370907c110539446e6615e4acd6f414f
|
c72d7945afa7d35750aa94aec52e2760d8c67ccc
|
refs/heads/master
| 2020-05-18T14:20:40.704075
| 2019-05-01T19:45:51
| 2019-05-01T19:45:51
| 184,467,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,694
|
py
|
"""
Django settings for cinelist project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ws^o5a33jk!@t*ial@%r^s_$2hwsbpuuckb$n10*%#_pm)id6c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'main.apps.MainConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cinelist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), '/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cinelist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
# PostgreSQL Config
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cinelist_dev',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432',
}
# SQLite Config
# 'default': {
# 'ENGINE' : 'django.db.backends.sqlite3',
# 'NAME' : os.path.join(BASE_DIR, 'db.sqlite3')
# }
}
AUTH_USER_MODEL = 'main.User'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_URL = '/static/'
# Redirect to home URL after login
LOGIN_REDIRECT_URL = '/user/dashboard'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
[
"gary3450@gmail.com"
] |
gary3450@gmail.com
|
f687983c959fc0273ff0f5d8c7a4813ba0a2a6a3
|
8c2fd5158db5558adc25b0c230a819841a7b8bbd
|
/ReversegamAISimulation-CH16/AIsim3.py
|
cd7fc185fde54f20a8ddd350eae9dcd92385d28e
|
[] |
no_license
|
OmarAlmighty/Invent-your-game-with-python-4th-edition
|
cacaa64e2520857ceaedd160e6cf10e387de05f5
|
8690ccf607963ce657489767cc4ab2b7596a4bd3
|
refs/heads/master
| 2020-06-05T07:58:10.589837
| 2019-08-31T23:53:00
| 2019-08-31T23:53:00
| 192,368,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,049
|
py
|
# Reversegam: a clone of Othello/Reversi
import random
import sys
WIDTH = 8 # board is 8 spaces wide
HEIGHT = 8 # board is 8 spaces tall
# Print the board passed to this function. Return None.
def drawBoard(board):
print(" 12345678")
print(" +--------+")
for y in range(HEIGHT):
print('%s|' % (y + 1), end='')
for x in range(WIDTH):
print(board[x][y], end='')
print('%s|' % (y + 1))
print(" +--------+")
print(" 12345678")
# Create a brand-new, blank board data structure.
def getNewBoard():
board = []
for i in range(WIDTH):
board.append([' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '])
return board
# Return False if the player's move on space xstart, ystart is invalid.
# If it is a valid move, return a list of spaces that would become the player's if they made a move here.
def isValidMove(board, tile, xstart, ystart):
if board[xstart][ystart] != ' ' or not isOnBoard(xstart, ystart):
return False
if tile == 'X':
otherTile = 'O'
else:
otherTile = 'X'
tilesToFlip = []
for xdirection, ydirection in [[0, 1], [1, 1], [1, 0], [1, -1],
[0, -1], [-1, -1], [-1, 0], [-1, 1]]:
x, y = xstart, ystart
x += xdirection # first move in x direction
y += ydirection # first move in y direction
while isOnBoard(x, y) and board[x][y] == otherTile:
# Keep moving in this x & y direction.
x += xdirection
y += ydirection
if isOnBoard(x, y) and board[x][y] == tile:
# There are pieces to flip over. Go in the reverse direction
# until we reach the original space, noting all the tiles along the way.
while True:
x -= xdirection
y -= ydirection
if x == xstart and y == ystart:
break
tilesToFlip.append([x, y])
if len(tilesToFlip) == 0: # If no tiles were flipped, this is not a valid move.
return False
return tilesToFlip
# Return True if the coordinates are located on the board.
def isOnBoard(x, y):
return x >= 0 and x <= WIDTH - 1 and y >= 0 and y <= HEIGHT - 1
# Return a new board with periods marking the valid moves the player can make.
def getBoardWithValidMoves(board, tile):
boardCopy = getBoardCopy(board)
for x, y in getValidMoves(boardCopy, tile):
boardCopy[x][y] = '.'
return boardCopy
# Return a list of [x,y] lists of valid moves for the given player on the given board.
def getValidMoves(board, tile):
validMoves = []
for x in range(WIDTH):
for y in range(HEIGHT):
if isValidMove(board, tile, x, y) != False:
validMoves.append([x, y])
return validMoves
# Determine the score by counting the tiles. Return a dictionary with keys 'X' and 'O'.
def getScoreOfBoard(board):
xscore = 0
oscore = 0
for x in range(WIDTH):
for y in range(HEIGHT):
if board[x][y] == 'X':
xscore += 1
if board[x][y] == 'O':
oscore += 1
return {'X': xscore, 'O': oscore}
# Let the player enter which tile they want to be.
# Return a list with the player's tile as the first item and the computer's tile as the second.
def enterPlayerTile():
tile = ''
while not (tile == 'X' or tile == 'O'):
print("Do you want to be X or O")
tile = input().upper()
# The first element in the list is the player's tile, and the second is the computer's tile.
if tile == 'X':
return ['X', 'O']
else:
return ['O', 'X']
# Randomly choose who goes first.
def whoGoesFirst():
    if random.randint(0, 1) == 0:  # compare to the int 0, not the string '0'
return "Computer"
else:
return "Player"
# Place the tile on the board at xstart, ystart and flip any of the opponent's pieces.
# Return False if this is an invalid move; True if it is valid.
def makeMove(board, tile, xstart, ystart):
tilesToFlip = isValidMove(board, tile, xstart, ystart)
if tilesToFlip == False:
return False
board[xstart][ystart] = tile
for x, y in tilesToFlip:
board[x][y] = tile
return True
# Make a duplicate of the board list and return it.
def getBoardCopy(board):
boardCopy = getNewBoard()
for x in range(WIDTH):
for y in range(HEIGHT):
boardCopy[x][y] = board[x][y]
return boardCopy
# Return True if the position is in one of the four corners.
def isOnCorner(x, y):
return (x == 0 or x == WIDTH - 1) and (y == 0 or y == HEIGHT - 1)
# Let the player enter their move.
# Return the move as [x, y] (or return the strings 'hints' or 'quit').
def getPlayerMove(board, playerTile):
DIGITS1TO8 = '1 2 3 4 5 6 7 8'.split()
while True:
print('Enter your move, "quit" to end the game, or "hints" to toggle hints.')
move = input().lower()
if move == 'quit' or move == 'hints':
return move
if len(move) == 2 and move[0] in DIGITS1TO8 and move[1] in DIGITS1TO8:
x = int(move[0]) - 1
y = int(move[1]) - 1
if isValidMove(board, playerTile, x, y) == False:
continue
else:
break
else:
            print('That is not a valid move. Enter the column (1-8) and then the row (1-8).')
            print('For example, 81 will move to the top-right corner.')
return [x, y]
# Given a board and the computer's tile, determine where to
# move and return that move as an [x, y] list.
def getCornerBestMove(board, computerTile):
possibleMoves = getValidMoves(board, computerTile)
random.shuffle(possibleMoves) # Randomize the order of the moves.
# Always go for a corner if available.
for x, y in possibleMoves:
if isOnCorner(x, y):
return [x, y]
# Find the highest-scoring move possible.
bestScore = -1
for x, y in possibleMoves:
boardCopy = getBoardCopy(board)
makeMove(boardCopy, computerTile, x, y)
score = getScoreOfBoard(boardCopy)[computerTile]
if score > bestScore:
bestMove = [x, y]
bestScore = score
return bestMove
# Return the move that flips the least number of tiles.
def getWorstMove(board, tile):
possibleMoves = getValidMoves(board, tile)
random.shuffle(possibleMoves) # Randomize the order of the moves.
# Find the lowest-scoring move possible.
worstScore = 64
for x, y in possibleMoves:
boardCopy = getBoardCopy(board)
makeMove(boardCopy, tile, x, y)
        score = getScoreOfBoard(boardCopy)[tile]  # score the copy that includes this move
if score < worstScore:
worstMove = [x, y]
worstScore = score
return worstMove
def getRandomMove(board, tile):
possibleMoves = getValidMoves(board, tile)
return random.choice(possibleMoves)
def isOnSide(x, y):
return x == 0 or x == WIDTH - 1 or y == 0 or y == HEIGHT - 1
# Return a corner move, a side move, or the best move.
def getCornerSideBestMove(board, tile):
possibleMoves = getValidMoves(board, tile)
random.shuffle(possibleMoves) # Randomize the order of the moves.
# Always go for a corner if available.
for x, y in possibleMoves:
if isOnCorner(x, y):
return [x, y]
# If there is no corner move to make, return a side move.
for x, y in possibleMoves:
if isOnSide(x, y):
return [x, y]
# Do what the normal AI would do.
return getCornerBestMove(board, tile)
def printScore(board, playerTile, computerTile):
scores = getScoreOfBoard(board)
print("You: %s points. Computer: %s points" % (scores[playerTile], scores[computerTile]))
def playGame(playerTile, computerTile):
showHints = False
turn = whoGoesFirst()
print("The " + turn + " will go first")
# Clear the board and place starting pieces.
board = getNewBoard()
board[3][3] = 'X'
board[3][4] = 'O'
board[4][3] = 'O'
board[4][4] = 'X'
while True:
playerValidMoves = getValidMoves(board, playerTile)
computerValidMoves = getValidMoves(board, computerTile)
if playerValidMoves == [] and computerValidMoves == []:
return board # No one can move, so end the game.
elif turn == 'Player': # player's turn
if playerValidMoves != []:
'''if showHints:
validMovesBoard = getBoardWithValidMoves(board, playerTile)
drawBoard(validMovesBoard)
else:
drawBoard(board)
printScore(board, playerTile, computerTile)'''
move = getCornerBestMove(board, playerTile)
'''if move == 'quit':
print("Thanks for playing")
sys.exit() # terminate the program
elif move == 'hints':
showHints = not showHints
continue
else:'''
makeMove(board, playerTile, move[0], move[1])
turn = 'Computer'
        elif turn == 'Computer': # Computer's turn
if computerValidMoves != []:
''' drawBoard(board)
printScore(board, playerTile, computerTile)
input("Press enter to see computer\'s move.")'''
move = getWorstMove(board, computerTile)
makeMove(board, computerTile, move[0], move[1])
turn = 'Player'
NUM_GAMES = 250
xWins = oWins = ties = 0
print("Welcome to reversgam!")
playerTile, computerTile = ['X', 'O'] # enterPlayerTile()
for i in range(NUM_GAMES): # while True:
finalboard = playGame(playerTile, computerTile)
# Display the final score.
# drawBoard(finalboard)
scores = getScoreOfBoard(finalboard)
print('#%s: X scored %s points. O scored %s points.' % (i + 1, scores['X'], scores['O']))
if scores[playerTile] > scores[computerTile]:
xWins += 1
# print('You beat the computer by %s points! Congratulations!' %(scores[playerTile] - scores[computerTile]))
elif scores[playerTile] < scores[computerTile]:
oWins += 1
# print('You lost. The computer beat you by %s points.' %(scores[computerTile] - scores[playerTile]))
else:
ties += 1
# print("The game was a tie!")
# print("Do you want to play again ( yes or no)")
# if not input().lower().startswith('y'):
# break
print('X wins: %s (%s%%)' % (xWins, round(xWins / NUM_GAMES * 100, 1)))
print('O wins: %s (%s%%)' % (oWins, round(oWins / NUM_GAMES * 100, 1)))
print('Ties: %s (%s%%)' % (ties, round(ties / NUM_GAMES * 100, 1)))
|
[
"noreply@github.com"
] |
OmarAlmighty.noreply@github.com
|
b73f3c3982b8529e06f018d793b2bf8460f06409
|
2d7eededd805ba786d529cc49ba5e971f95a2c93
|
/sphinx_intl/utils.py
|
2799c5625576301caa861206c057a38f60789e22
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
tychoish/sphinx-intl
|
aa2dff841253ae93092610deadd066adc3db9e12
|
6f1acedd8281d1da334b84c6c7779db98677e351
|
refs/heads/master
| 2016-09-06T07:28:49.810051
| 2014-02-25T17:41:31
| 2014-02-25T17:41:31
| 17,294,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
import os
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
def expand_tree(path, extension='pot'):
for root, sub_folders, files in os.walk(path):
for file in files:
if file.startswith('.#'):
continue
elif file.endswith('swp'):
continue
else:
f = os.path.join(root, file)
                if extension is not None:
if isinstance(extension, list):
if os.path.splitext(f)[1][1:] not in extension:
continue
else:
if not f.endswith(extension):
continue
yield f
class WorkerPool(object):
def __init__(self, size=None):
if size is None:
self.size = cpu_count()
else:
self.size = size
def __exit__(self, *args):
self.p.close()
self.p.join()
class ProcessPool(WorkerPool):
def __enter__(self):
self.p = Pool(self.size)
return self.p
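# Usage sketch (editor's addition, not part of the original module): the source
# path and the size-summing worker below are hypothetical and for illustration only.
if __name__ == '__main__':
    pot_files = list(expand_tree('source', extension='pot'))
    with ProcessPool() as pool:
        sizes = pool.map(os.path.getsize, pot_files)
    print('%d bytes across %d pot files' % (sum(sizes), len(pot_files)))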
|
[
"samk@10gen.com"
] |
samk@10gen.com
|
70ff638556ab19a45188805b904fd714427b1743
|
63a737196ad1b58448e89ab65d72ec6b17fc6232
|
/cpp/a.py
|
af245a114cad08aa163e9fa0e9268978e16ff752
|
[] |
no_license
|
varunragu23/Template
|
bc4db626742003982e6958110a73997c27d1699a
|
2620447afee53db3003dea4909b2aa42cb75d7f5
|
refs/heads/master
| 2021-06-25T07:22:40.688602
| 2021-04-03T01:15:27
| 2021-04-03T01:15:27
| 219,389,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
import sys
input = sys.stdin.readline
############ ---- Input Functions ---- ############
def inp():
return(int(input()))
def inlt():
return(list(map(int, input().split())))
def insr():
s = input()
return(list(s[:len(s) - 1]))
def invr():
return(map(int, input().split()))
### Your Code ###
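# Usage sketch (editor's addition): a hypothetical problem that reads an
# integer n and then a line of n integers, and prints their sum. The input
# format is assumed for illustration only.
#     n = inp()
#     values = inlt()
#     print(sum(values))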
|
[
"varunragu23@gmail.com"
] |
varunragu23@gmail.com
|
4b7657c2c6efc0fff42e63dcb8bbc2c3589dfdc7
|
aca6313e34bdb4a67be1cddbf96bdd020901ecb1
|
/server.py
|
1476f55187fd7aa7398917f1a9fd6333097c7f18
|
[] |
no_license
|
DanielGomes14/Distributed_Object_Detection
|
311826c79c7d714010a9c72e0917f55dabfeea66
|
78a324bac3cdaf79a232ce4f527392a8c01b33e7
|
refs/heads/master
| 2023-01-07T09:56:38.158701
| 2020-10-25T21:09:51
| 2020-10-25T21:09:51
| 277,298,806
| 1
| 0
| null | 2020-07-08T12:22:37
| 2020-07-05T12:35:22
| null |
UTF-8
|
Python
| false
| false
| 4,594
|
py
|
import argparse
import json
from flask import Flask,request, redirect, url_for
from flask import render_template
import os
from collections import Counter
from werkzeug.utils import secure_filename
import tempfile
import requests
from worker import recv_Img
import pickle
import cv2
from threading import Lock, Thread
class FlaskServer():
def __init__(self, port, max_persons ):
self.app = Flask(__name__)
self.max_persons=int(max_persons)
self.port=port
self.lock = Lock()
self.UPLOAD_FOLDER = './static'
self.app.config['UPLOAD_FOLDER'] = self.UPLOAD_FOLDER
self.app.route('/', methods=['POST'])(self.show_img)
self.app.route('/result',methods=['POST'])(self.get_imageinfo)
self.video_map={}
self.process_counter=0
self.app.run(host='127.0.0.1' ,port=self.port, threaded=True)
def show_img(self):
if request.files['video']:
video = request.files['video'].read()
            fp = tempfile.NamedTemporaryFile(dir=self.UPLOAD_FOLDER)  # save the upload to a temporary file in the static directory
fp.write(video)
fp.seek(0)
vidcap = cv2.VideoCapture(fp.name)
#count number of frames
total = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
success,image = vidcap.read()
            # guard the shared process counter against concurrent requests
self.lock.acquire()
self.process_counter += 1
proc = self.process_counter
self.lock.release()
            # for each video, store its total frame count and a counter of frames already processed by the worker(s)
if proc not in self.video_map:
self.video_map[proc] = {"total" : total, "count" : 0 , "classes": {}, "timestamp" : 0}
count = 0
while success:
data = {'proc': proc,'frame': count}
img = pickle.dumps(image)
recv_Img.apply_async((data,img), serializer='pickle')
success,image = vidcap.read()
count += 1
self.video_map[proc]["total"]=count
fp.close()
return "Thanks :)"
else:
return "Could not read any files:/"
def get_imageinfo(self):
if request.method == 'POST':
data=request.json
frame_id = data['frame']
frame_proc = data['proc']
classes = data['classes']
timestamp = data['timestamp']
total = self.video_map[frame_proc]['total']
self.video_map[frame_proc]["count"] += 1
count = self.video_map[frame_proc]["count"]
self.video_map[frame_proc]["timestamp"] += float(timestamp)
lst=self.video_map[frame_proc]["classes"]
self.video_map[frame_proc]["classes"] = self.mergeDict(lst,classes)
if "person" in classes:
if classes["person"]>self.max_persons:
print("Frame "+str(frame_id)+ ": " + str(classes["person"]) + " <person> detected")
if total == count:
print("Processed frames: "+str(total))
print("Average processing time per frame: "+str(int(self.video_map[frame_proc]["timestamp"]/count*1000))+"ms")
print("Person objects detected: "+str(classes["person"]))
print("Total classes detected: " + str(len(self.video_map[frame_proc]['classes'])))
k = Counter(self.video_map[frame_proc]["classes"])
top = k.most_common(3)
print("Top 3 objects detected: "+ self.printTop3(top))
return ""
def printTop3(self,lst):
string=""
for i in lst:
string += i[0] + ", "
string=string[:len(string)-2]
return string
    # update the dictionary of detected classes with their frequencies
def mergeDict(self,dict1, dict2):
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
dict3[key] = value + dict1[key]
return dict3
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--max", dest='max',help="maximum number of persons in a frame", default=10)
parser.add_argument("-p", dest='port', type=int, help="HTTP port", default=5000)
args = parser.parse_args()
#pass the port and max number of persons to the Server
FlaskServer(args.port, args.max)
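    # Client-side usage sketch (editor's addition; the file name is hypothetical
    # and this assumes the server above is running on the default port 5000):
    #   import requests
    #   with open('sample.mp4', 'rb') as f:
    #       r = requests.post('http://127.0.0.1:5000/', files={'video': f})
    #   print(r.text)  # -> "Thanks :)"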
|
[
"noreply@github.com"
] |
DanielGomes14.noreply@github.com
|
f3a8ccb58daf70b7536db1eb9f47ed5b11d88863
|
329d6f3916154ba78be81e726d5a7aced93f8cb1
|
/leprikon/cms_plugins/courses.py
|
b458eda18994e4f3820517fe4a73476f974eb82d
|
[
"BSD-3-Clause"
] |
permissive
|
misli/leprikon
|
0ef4f5430cb773e48cd87a6676448ecfcc099365
|
f8c70a73897dc2dfa661a151688228b87d2aac0d
|
refs/heads/master
| 2022-12-29T23:08:17.613658
| 2020-10-18T07:47:34
| 2020-10-18T07:47:34
| 63,190,910
| 0
| 0
| null | 2016-07-12T20:39:53
| 2016-07-12T20:39:53
| null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext as _
from ..models.courses import (
CourseListPlugin, CoursePlugin, FilteredCourseListPlugin,
)
from .base import LeprikonPluginBase
@plugin_pool.register_plugin
class LeprikonCoursePlugin(LeprikonPluginBase):
name = _('Course')
model = CoursePlugin
raw_id_fields = ('course',)
def get_render_template(self, context, instance, placeholder):
return 'leprikon/cms/course/%s.html' % instance.template
@plugin_pool.register_plugin
class LeprikonCourseListPlugin(LeprikonPluginBase):
name = _('Course list')
model = CourseListPlugin
filter_horizontal = ('age_groups', 'target_groups', 'groups', 'leaders')
def get_render_template(self, context, instance, placeholder):
return 'leprikon/cms/course_list/%s.html' % instance.template
@plugin_pool.register_plugin
class LeprikonFilteredCourseListPlugin(LeprikonPluginBase):
name = _('Course list with search form')
model = FilteredCourseListPlugin
render_template = 'leprikon/cms/course_list_filtered.html'
|
[
"jakub.dornak@misli.cz"
] |
jakub.dornak@misli.cz
|
83f21c82e5aa27e69b1832cd8594a6d0dc3cc8f4
|
daf8abfcbbbff0dfe0425608af291f4be0a6bea7
|
/depot_tools/git_footers.py
|
3df4f82a6ad0594c3b7551b87a3ff319e41ca913
|
[
"BSD-3-Clause"
] |
permissive
|
mvines/cef
|
68389e7b0cfe0caf5cd61c80055b01382d34000d
|
a3faf18e55dc258ce7783c39c7ed22d7cecd4e74
|
refs/heads/master
| 2022-10-24T17:06:15.960761
| 2017-01-13T19:01:58
| 2017-01-13T19:06:55
| 78,868,621
| 0
| 1
| null | 2022-10-22T09:12:09
| 2017-01-13T17:05:59
| null |
UTF-8
|
Python
| false
| false
| 6,329
|
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import re
import sys
from collections import defaultdict
import git_common as git
FOOTER_PATTERN = re.compile(r'^\s*([\w-]+): (.*)$')
CHROME_COMMIT_POSITION_PATTERN = re.compile(r'^([\w/\-\.]+)@{#(\d+)}$')
def normalize_name(header):
return '-'.join([ word.title() for word in header.strip().split('-') ])
def parse_footer(line):
"""Returns footer's (key, value) if footer is valid, else None."""
match = FOOTER_PATTERN.match(line)
if match:
return (match.group(1), match.group(2))
else:
return None
def parse_footers(message):
"""Parses a git commit message into a multimap of footers."""
_, _, parsed_footers = split_footers(message)
footer_map = defaultdict(list)
if parsed_footers:
    # Read footers from bottom to top, because the latter takes precedence,
# and we want it to be first in the multimap value.
for (k, v) in reversed(parsed_footers):
footer_map[normalize_name(k)].append(v.strip())
return footer_map
def split_footers(message):
"""Returns (non_footer_lines, footer_lines, parsed footers).
Guarantees that:
(non_footer_lines + footer_lines) == message.splitlines().
parsed_footers is parse_footer applied on each line of footer_lines.
"""
message_lines = list(message.splitlines())
footer_lines = []
for line in reversed(message_lines):
if line == '' or line.isspace():
break
footer_lines.append(line)
else:
    # The whole description consisted of footers,
    # which means those aren't actually footers.
footer_lines = []
footer_lines.reverse()
footers = map(parse_footer, footer_lines)
if not footer_lines or not all(footers):
return message_lines, [], []
return message_lines[:-len(footer_lines)], footer_lines, footers
def get_footer_change_id(message):
"""Returns a list of Gerrit's ChangeId from given commit message."""
return parse_footers(message).get(normalize_name('Change-Id'), [])
def add_footer_change_id(message, change_id):
"""Returns message with Change-ID footer in it.
Assumes that Change-Id is not yet in footers, which is then inserted at
earliest footer line which is after all of these footers:
Bug|Issue|Test|Feature.
"""
assert 'Change-Id' not in parse_footers(message)
return add_footer(message, 'Change-Id', change_id,
after_keys=['Bug', 'Issue', 'Test', 'Feature'])
def add_footer(message, key, value, after_keys=None):
"""Returns a message with given footer appended.
If after_keys is None (default), appends footer last.
Otherwise, after_keys must be iterable of footer keys, then the new footer
would be inserted at the topmost position such there would be no footer lines
after it with key matching one of after_keys.
For example, given
message='Header.\n\nAdded: 2016\nBug: 123\nVerified-By: CQ'
after_keys=['Bug', 'Issue']
the new footer will be inserted between Bug and Verified-By existing footers.
"""
assert key == normalize_name(key), 'Use normalized key'
new_footer = '%s: %s' % (key, value)
top_lines, footer_lines, parsed_footers = split_footers(message)
if not footer_lines:
if not top_lines or top_lines[-1] != '':
top_lines.append('')
footer_lines = [new_footer]
elif not after_keys:
footer_lines.append(new_footer)
else:
after_keys = set(map(normalize_name, after_keys))
# Iterate from last to first footer till we find the footer keys above.
for i, (key, _) in reversed(list(enumerate(parsed_footers))):
if normalize_name(key) in after_keys:
footer_lines.insert(i + 1, new_footer)
break
else:
footer_lines.insert(0, new_footer)
return '\n'.join(top_lines + footer_lines)
def get_unique(footers, key):
key = normalize_name(key)
values = footers[key]
assert len(values) <= 1, 'Multiple %s footers' % key
if values:
return values[0]
else:
return None
def get_position(footers):
"""Get the commit position from the footers multimap using a heuristic.
Returns:
A tuple of the branch and the position on that branch. For example,
Cr-Commit-Position: refs/heads/master@{#292272}
would give the return value ('refs/heads/master', 292272).
"""
position = get_unique(footers, 'Cr-Commit-Position')
if position:
match = CHROME_COMMIT_POSITION_PATTERN.match(position)
assert match, 'Invalid Cr-Commit-Position value: %s' % position
return (match.group(1), match.group(2))
raise ValueError('Unable to infer commit position from footers')
def main(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ref', nargs='?', help="Git ref to retrieve footers from."
" Omit to parse stdin.")
g = parser.add_mutually_exclusive_group()
g.add_argument('--key', metavar='KEY',
help='Get all values for the given footer name, one per '
'line (case insensitive)')
g.add_argument('--position', action='store_true')
g.add_argument('--position-ref', action='store_true')
g.add_argument('--position-num', action='store_true')
g.add_argument('--json', help="filename to dump JSON serialized headers to.")
opts = parser.parse_args(args)
if opts.ref:
message = git.run('log', '-1', '--format=%B', opts.ref)
else:
message = '\n'.join(l for l in sys.stdin)
footers = parse_footers(message)
if opts.key:
for v in footers.get(normalize_name(opts.key), []):
print v
elif opts.position:
pos = get_position(footers)
print '%s@{#%s}' % (pos[0], pos[1] or '?')
elif opts.position_ref:
print get_position(footers)[0]
elif opts.position_num:
pos = get_position(footers)
assert pos[1], 'No valid position for commit'
print pos[1]
elif opts.json:
with open(opts.json, 'w') as f:
json.dump(footers, f)
else:
for k in footers.keys():
for v in footers[k]:
print '%s: %s' % (k, v)
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
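# Usage sketch (editor's addition): typical invocations, based on the argparse
# options defined in main() above; the ref and footer key are illustrative.
#   git_footers.py HEAD                    -> print every footer of HEAD
#   git_footers.py --key Change-Id HEAD    -> print the Change-Id value(s)
#   git log -1 --format=%B | git_footers.py --position
#                                          -> e.g. refs/heads/master@{#292272}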
|
[
"mvines@silklabs.com"
] |
mvines@silklabs.com
|
e85b526dfe87681dd0b50c48dc746005e90ef526
|
717f5324f8d4ce44a94e2c0b654a2d2a4f0a3c74
|
/dwi_ml/models/projects/stacked_rnn.py
|
3773e99c3447328f360a6f70f3489c4955bc40fb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jhlegarreta/dwi_ml
|
3ac7ef28f3bba13f34a8f38a9f910cf2946dcc7b
|
a7c03f26780677e4eaccff9b381d5a8ec6120293
|
refs/heads/master
| 2023-03-04T07:22:15.737775
| 2023-02-23T18:20:53
| 2023-02-23T18:20:53
| 242,525,253
| 0
| 0
|
MIT
| 2020-06-10T00:53:06
| 2020-02-23T13:49:01
|
Python
|
UTF-8
|
Python
| false
| false
| 11,527
|
py
|
# -*- coding: utf-8 -*-
import logging
from typing import List, Tuple, Union
import torch
from torch import Tensor
from torch.nn.utils.rnn import PackedSequence
keys_to_rnn_class = {'lstm': torch.nn.LSTM,
'gru': torch.nn.GRU}
# Note. This logger's logging level can be modified through the main model,
# Learn2trackModel.
logger = logging.getLogger('model_logger') # Same logger as main dwi_ml.
class StackedRNN(torch.nn.Module):
"""
Recurrent model with recurrent layer sizes, and optional skip connections.
Needed because Pytorch does not provide a variable layer RNN, nor skip
connections.
"""
def __init__(self, rnn_torch_key: str, input_size: int,
layer_sizes: List[int], use_skip_connection: bool,
use_layer_normalization: bool, dropout: float):
"""
Parameters
----------
rnn_torch_key : str
Pytorch class of RNN to instantiate at each layer. Choices are
'lstm' or 'gru'.
input_size : int
Size of each step of the input to the model, i.e. the number of
features at each step. Note that the complete input will be of
shape (batch, seq, input_size).
layer_sizes : list of int
Size of each hidden layer. The real size will depend
on the skip_connection parameter.
use_skip_connection : bool, optional
If true, concatenate the model input to the input of each hidden
layer, and concatenate all hidden layers output as the output of
the model. See [1] (Figure 1) to visualize the architecture.
use_layer_normalization : bool, optional
If true, apply layer normalization to the forward connections. See
[2].
dropout : float
If non-zero, introduces a `Dropout` layer on the outputs of each
RNN layer except the last layer, with given dropout probability.
---
[1] https://arxiv.org/pdf/1308.0850v5.pdf
[2] https://arxiv.org/pdf/1607.06450.pdf
"""
if not isinstance(dropout, float) or not 0 <= dropout <= 1:
raise ValueError("dropout should be a rate in range [0, 1] "
"representing the probability of an element "
"being zeroed")
if dropout > 0 and len(layer_sizes) == 1:
logging.warning("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} "
"and len(layer_sizes)={}"
.format(dropout, len(layer_sizes)))
super().__init__()
self.rnn_torch_key = rnn_torch_key
self.input_size = input_size
self.layer_sizes = layer_sizes
self.use_skip_connection = use_skip_connection
self.use_layer_normalization = use_layer_normalization
self.dropout = dropout
self.rnn_layers = []
self.layer_norm_layers = []
if self.dropout and self.dropout != 0:
self.dropout_module = torch.nn.Dropout(self.dropout)
else:
self.dropout_module = None
self.relu_sublayer = torch.nn.ReLU()
# Initialize model
rnn_cls = keys_to_rnn_class[self.rnn_torch_key]
last_layer_size = input_size
for i, layer_size in enumerate(layer_sizes):
# Instantiate RNN layer
# batch_first: If True, then the input and output tensors are
# provided as (batch, seq, feature), not (seq, batch, feature)
rnn_layer = rnn_cls(input_size=last_layer_size,
hidden_size=layer_size,
num_layers=1,
batch_first=True)
# Explicitly add module because it's not a named variable
self.add_module("rnn_{}".format(i), rnn_layer)
self.rnn_layers.append(rnn_layer)
if self.use_layer_normalization:
layer_norm = torch.nn.LayerNorm(layer_size)
self.add_module("layer_norm_{}".format(i), layer_norm)
self.layer_norm_layers.append(layer_norm)
last_layer_size = layer_size
# Account for skip connections in layer size. Last layer is
# different, see self.output_size().
if self.use_skip_connection:
last_layer_size += self.input_size
@property
def params(self):
"""All parameters necessary to create again the same model. Will be
used in the trainer, when saving the checkpoint state. Params here
will be used to re-create the model when starting an experiment from
checkpoint. You should be able to re-create an instance of your
model with those params."""
params = {
'rnn_torch_key': self.rnn_torch_key,
'input_size': self.input_size,
'output_size': self.output_size,
'layer_sizes': list(self.layer_sizes),
'use_skip_connections': self.use_skip_connection,
'use_layer_normalization': self.use_layer_normalization,
'dropout': self.dropout,
}
return params
@property
def output_size(self):
"""Returns the size of the last layer. If using skip connections, it is
the sum of all layers' sizes."""
if self.use_skip_connection:
return sum(self.layer_sizes)
else:
return self.layer_sizes[-1]
def forward(self, inputs: Union[Tensor, PackedSequence],
hidden_states: Tuple[Tensor, ...] = None):
"""
Parameters
----------
inputs : torch.Tensor or PackedSequence
Batch of input sequences. Size (seq, features).
Current implementation of the learn2track model calls this using
packed sequence. We run the RNN on the packed data, but the
normalization and dropout of their tensor version.
hidden_states : list[states]
One value per layer.
LSTM: States are tuples; (h_t, C_t)
Size of tensors are each [1, nb_streamlines, nb_neurons].
GRU: States are tensors; h_t.
Size of tensors are [1, nb_streamlines, nb_neurons].
Returns
-------
last_output : Tensor
The results. Shape is [nb_points, last layer size], or
[nb_points, sum of layer sizes] if skip_connections.
* If inputs was a PackedSequence, you can get the packed results:
last_output = PackedSequence(last_output,
inputs.batch_sizes,
inputs.sorted_indices,
inputs.unsorted_indices)
But this can't be used in the direction getter for the next step.
In our case, skipping.
out_hidden_states : tuple of Tensor
The last step hidden states (h_(t-1), C_(t-1) for LSTM) for each
layer.
"""
# If input is a tensor: RNN simply runs on it.
# Else: RNN knows what to do.
# We need to concatenate initial inputs with skip connections.
if isinstance(inputs, Tensor):
was_packed = False
init_inputs = inputs
elif isinstance(inputs, list):
raise TypeError("Unexpected input type! Data should not be a list."
"You could try using PackedSequences.")
elif isinstance(inputs, PackedSequence):
was_packed = True
init_inputs = inputs.data
else:
raise TypeError("Unexpected input type!")
# Arranging states
if hidden_states is None:
hidden_states = [None for _ in range(len(self.rnn_layers))]
# Initializing variables that we will want to return
out_hidden_states = []
# If skip connection, we need to keep in memory the output of all
# layers
outputs = []
# Running forward on each layer:
# linear --> layer norm --> dropout --> skip connection
last_output = inputs
for i in range(len(self.rnn_layers)):
logger.debug('Applying StackedRnn layer #{}. Layer is: {}'
.format(i, self.rnn_layers[i]))
if i > 0 and was_packed:
# Packing back the output tensor from previous layer;
# only the .data was kept for the direction getter.
last_output = PackedSequence(last_output, inputs.batch_sizes,
inputs.sorted_indices,
inputs.unsorted_indices)
# ** RNN **
# Either as 3D tensor or as packedSequence
last_output, new_state_i = self.rnn_layers[i](last_output,
hidden_states[i])
out_hidden_states.append(new_state_i)
# ** Other sub-layers **
# Forward functions for layer_norm, dropout and skip take tensors
# Does not matter if order of datapoints is not kept, applied on
# each data point separately
if was_packed:
last_output = last_output.data
logger.debug(' Output size after main sub-layer: {}'
.format(last_output.shape))
# Apply layer normalization
if self.use_layer_normalization:
last_output = self.layer_norm_layers[i](last_output)
logger.debug(' Output size after normalization: {}'
.format(last_output.shape))
if i < len(self.rnn_layers) - 1:
# Apply dropout except on last layer
if self.dropout > 0:
last_output = self.dropout_module(last_output)
logger.debug(' Output size after dropout: {}'
.format(last_output.shape))
# Apply ReLu activation except on last layer
last_output = self.relu_sublayer(last_output)
logger.debug(' Output size after reLu: {}'
.format(last_output.shape))
# Saving layer's last_output and states for later
if self.use_skip_connection:
# Keeping memory for the last layer's concatenation of all
# outputs.
outputs.append(last_output)
# Intermediate layers:
# Adding skip connection, i.e. initial input.
# See here: https://arxiv.org/pdf/1308.0850v5.pdf
if i < len(self.rnn_layers) - 1:
last_output = torch.cat((last_output, init_inputs),
dim=-1)
logger.debug(' Output size after skip connection: {}'
.format(last_output.shape))
# Final last_output
if self.use_skip_connection:
last_output = torch.cat(outputs, dim=-1)
logger.debug(
'Final skip connection: concatenating all outputs but not '
'input. Final shape is {}'.format(last_output.shape))
return last_output, out_hidden_states
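# Usage sketch (editor's addition): instantiate the model with hypothetical
# sizes and run one batch of packed sequences; the shapes below are
# illustrative and not taken from the original project.
if __name__ == '__main__':
    from torch.nn.utils.rnn import pack_sequence
    model = StackedRNN('lstm', input_size=4, layer_sizes=[8, 8],
                       use_skip_connection=True, use_layer_normalization=True,
                       dropout=0.1)
    # Two "streamlines" of different lengths, 4 features per step.
    seqs = [torch.rand(5, 4), torch.rand(3, 4)]
    packed = pack_sequence(seqs, enforce_sorted=False)
    out, states = model(packed)
    print(out.shape)  # torch.Size([8, 16]): 8 points, 8 + 8 with skip connections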
|
[
"emmanuelle.renauld@usherbrooke.ca"
] |
emmanuelle.renauld@usherbrooke.ca
|
fb0b11da96cba44f0143963cff2fba448d91ca69
|
8fcae139173f216eba1eaa01fd055e647d13fd4e
|
/.history/scraper_20191220145250.py
|
77934478bcb784eb070135bbdbd1526238341e03
|
[] |
no_license
|
EnriqueGalindo/backend-web-scraper
|
68fdea5430a0ffb69cc7fb0e0d9bcce525147e53
|
895d032f4528d88d68719838a45dae4078ebcc82
|
refs/heads/master
| 2020-11-27T14:02:59.989697
| 2019-12-21T19:47:34
| 2019-12-21T19:47:34
| 229,475,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module docstring: One line description of what your program does.
There should be a blank line in between description above, and this
more detailed description. In this section you should put any caveats,
environment variable expectations, gotchas, and other notes about running
the program. Author tag (below) helps instructors keep track of who
wrote what, when grading.
"""
__author__ = "Enrique Galindo"
# Imports go at the top of your file, after the module docstring.
# One module per import line. These are for example only.
import sys
import requests
import re
regex_email = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
regex_phone = r'''(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)'''
def main(args):
"""Main function is declared as standalone, for testability"""
url = args[0]
response = requests.get(url)
response.raise_for_status()
url_list = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', response.text)
email_list = set(re.findall(regex_email, response.text))
phone_list = set(re.findall(regex_phone, response.text))
for number in phone_list:
print(number[0])
print(email_list)
if __name__ == '__main__':
"""Docstring goes here"""
main(sys.argv[1:])
|
[
"egalindo@protonmail.com"
] |
egalindo@protonmail.com
|
4ef0b0b47c1614f2b84ff94635894f71988ece90
|
1852be4726dc1d83780740678819192277159e0f
|
/LC/398.py
|
31a847bbec409721c4f60cdb00f2601a3a0177b2
|
[
"MIT"
] |
permissive
|
szhu3210/LeetCode_Solutions
|
f0a32e30df54b655fdb9c7d48622382f29781409
|
64747eb172c2ecb3c889830246f3282669516e10
|
refs/heads/master
| 2020-06-30T05:45:40.550146
| 2017-08-11T04:10:25
| 2017-08-11T04:10:25
| 74,389,515
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
import random
class Solution(object):
    def __init__(self, nums):
        self.nums = nums
    def pick(self, target):
        # Reservoir sampling: each occurrence of target replaces the current
        # pick with probability 1/c, where c is the number of occurrences seen so far.
        c = 0
        for i, num in enumerate(self.nums):
            if num != target:
                continue
            c += 1
            n = random.randint(1, c)
            if c == n:
                res = i
        return res
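# Usage sketch (editor's addition, hypothetical values): each call to pick(3)
# returns one of the indices 2, 3 or 4 with equal probability.
#     s = Solution([1, 2, 3, 3, 3])
#     print(s.pick(3))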
|
[
"troy@Troys-MacBook-Pro.local"
] |
troy@Troys-MacBook-Pro.local
|
bbc04c1a859ea9c13a40dd1dbbbd963a17e8088c
|
991a62edfd8f4acba6dbe5213a51be33702c3d74
|
/tests/10-deploy
|
8447817a63cda4f9458ae6028e6a42e0fa29fc3f
|
[] |
no_license
|
isabella232/cka-ubuntu-cni
|
d1d2e2cdd6a525f8ebbe2920043934a073ab7f29
|
d1b51091829d76c8c93f65a6b8add745b570d915
|
refs/heads/master
| 2023-03-17T14:50:25.355495
| 2017-05-19T01:17:40
| 2017-05-19T01:17:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
#!/usr/bin/python3
import amulet
import requests
import unittest
class TestCharm(unittest.TestCase):
def setUp(self):
self.d = amulet.Deployment()
self.d.add('cka-ubuntu-cni')
self.d.expose('cka-ubuntu-cni')
self.d.setup(timeout=900)
self.d.sentry.wait()
self.unit = self.d.sentry['cka-ubuntu-cni'][0]
def test_service(self):
# test we can access over http
page = requests.get('http://{}'.format(self.unit.info['public-address']))
self.assertEqual(page.status_code, 200)
# Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
# more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
# - .info - An array of the information of that unit from Juju
# - .file(PATH) - Get the details of a file on that unit
# - .file_contents(PATH) - Get plain text output of PATH file from that unit
# - .directory(PATH) - Get details of directory
# - .directory_contents(PATH) - List files and folders in PATH on that unit
# - .relation(relation, service:rel) - Get relation data from return service
if __name__ == '__main__':
unittest.main()
|
[
"ryeterrell@ryeterrell.net"
] |
ryeterrell@ryeterrell.net
|
|
340ae959c6b00d971f8f4050a2db26eca1f59498
|
da5c15b48a307aa6f849c3f1de4eb95c686ea714
|
/LRCphonebook_main.py
|
d11e3b77a3f44b75eddd15dfc818777484abd000
|
[] |
no_license
|
lenniecottrell/tkinter-Phonebook
|
c5f68448790425530c085bc2a7783d52c5b7f84f
|
41da0026f70eab968d0506a3567fc442e8773cf9
|
refs/heads/main
| 2023-02-18T10:13:25.765980
| 2021-01-21T16:22:04
| 2021-01-21T16:22:04
| 329,700,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
from tkinter import *
import tkinter as tk
from tkinter import messagebox
#import other modules to make sure we have them
import LRCphonebook_gui
import LRCphonebook_func
class ParentWindow(Frame):
def __init__ (self, master, *args, **kwargs):
Frame.__init__(self, master, *args, **kwargs)
# define our master frame config
self.master = master
        self.master.minsize(500,300) # (width, height)
self.master.maxsize(500,300)
# This center_window method will center our app on the user's screen
LRCphonebook_func.center_window(self,500,300)
self.master.title("The Tkinter Phonebook")
self.master.configure(bg='#F0F0F0')
# This protocol method is a tkinter built-in method to catch if
# the user clicks the upper corner, 'X' on Windows OS
self.master.protocol('WM_DELETE_WINDOW', lambda: LRCphonebook_func.ask_quit(self))
arg = self.master
# load in the GUI widgets from a separate module
# keeping your code compartmentalized and clutter free
LRCphonebook_gui.load_gui(self)
if __name__ == "__main__":
root = tk.Tk() #this is the syntax to call a tkinter window, put in the variable root
App = ParentWindow(root) #instantiating a class of ParentWindow called "App"
root.mainloop() #this keeps the window up until the user closes it
|
[
"noreply@github.com"
] |
lenniecottrell.noreply@github.com
|
bddb42ee8969f6b771690e3a872b74642d430ee9
|
7ff5767f2e249948f958facc86a92151f899617d
|
/python/测试.py
|
2b24ec2463a03f5fb1ef64a590f2ef881f0d6f0c
|
[] |
no_license
|
fairyang/01
|
3ba8026f0e6f3caa883f395f353270a19c36926b
|
5bb96ba9a3fa40a55289fff8a8c38554f6d9c495
|
refs/heads/master
| 2020-08-01T12:40:18.518522
| 2019-11-20T11:46:59
| 2019-11-20T11:46:59
| 210,999,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
chars=set()
for i in range(26):
chars.add(chr(ord("a")+i))
chars.add(chr(ord("A")+i))
a = input()
for word in a:
if word in chars:
print(word,end="")
|
[
"3345660949@qq.com"
] |
3345660949@qq.com
|
383ca50a8e75c8d8d520dafd5915341662956e4b
|
9bd76957feae4b01f961a968c6d89eda0c704a5d
|
/locations/api/views.py
|
6856a6a75f1f705578150d1a2a0692f12f1650d8
|
[] |
no_license
|
deepakkumar96/flirtjar
|
4312e64f6d87d9810d96724e0b766521a3a7d8bf
|
349e65b1b5f994b1efa2829600fbec150c5cf961
|
refs/heads/master
| 2020-05-02T22:09:38.316047
| 2019-04-01T03:36:33
| 2019-04-01T03:36:33
| 178,243,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,473
|
py
|
from django.db.models import Q
from rest_framework import views, viewsets, generics, status
from rest_framework.exceptions import NotFound
from accounts.models import Account
from profiles.serializers import UserInfoSerializer
from locations.serializers import UserLocationSerializer
from accounts.serializers import UserSerializer
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from django.contrib.gis import measure, geos
from locations.api.utils import get_default_units, is_valid_status, is_valid_gender
from django.contrib.gis.geos import GEOSException
class UserLocationDetail(generics.RetrieveAPIView):
"""
# Return information including location of a particular user. (http://geojson.org/)
### __1. Description__
    Takes the id of a user and returns their location detail:
    the user's location plus the details needed for
    showing the user on the map.
### __2. Parameters__(URL) :
User id must be passed in the url itself.
### __3. Response__
Return the information to represent a user in the map.
* `id` : Unique id of the user
* `email`: Email of the user
* `first_name`: First Name of the user
* `picture`: Profile Picture of the user
* `location`: Latest location of the user
### __4. Example :__
* `Request - `
    <pre> GET /api/location/user/2/ </pre>
* `Response - `
<pre>
{
"errors": {},
"result": {
"id": 20,
"email": "example@gmail.com",
"first_name": "example",
"picture": "http://facebook/com/picture",
"location": {
"type": "Point",
"coordinates": [
68.500879846386,
10.617657679834
]
}
}
}
</pre>
"""
queryset = Account.objects.all()
serializer_class = UserLocationSerializer
lookup_field = 'pk'
class UserLocationDetailByEmail(generics.RetrieveAPIView):
"""
Docs
"""
queryset = Account.objects.all()
serializer_class = UserLocationSerializer
lookup_field = 'email'
class NearByLocationUsers(generics.RetrieveAPIView):
"""
# To fetch user profile nearby currently logged-in user.
### __1. Description__
    This returns a list (JSON array) of all the other users who are near the currently
    logged-in user; the response always includes the current user's location along with the others.
    It returns each user's location and the details needed for showing them on the map.
### __2. Parameters__(URL) :
* `{near_by_distance}`: Specifies the distance under which other profile will be returned.
* `{distance_unit}`: Specifies unit of distance`(`Possible units are `km` - Kilometer, `m` - meters, `mm` - milli-meter`)`
### __2.1. Query Parameters__(URL) :
* `status`: specify map filters
* `gender`: Set gender(M, F, M_F) to filter nearby users by gender
### __3. Response__
Return An Array of user profile with the following information.
* `id` : Unique id of the user
* `email`: Email of the user
* `first_name`: First Name of the user
* `last_name`: First Name of the user
* `profile_picture`: Profile Picture of the user
* `location`: Latest location of the user
### __4. Example :__
* `Request - `
<pre> GET /api/location/nearby/100/km/ </pre>
* `Response - `
<pre>
{
"errors": {},
"result": [
{
"id": 20,
"email": "example@gmail.com",
"first_name": "example",
"picture": "http://facebook/com/picture",
"location": {
"type": "Point",
"coordinates": [
68.500879846386,
10.617657679834
]
},
...
]
}
</pre>
### __5. Possible Errors__
1. `404` if unit of distance is invalid
2. `404` if given user's location is undefined
"""
queryset = Account.objects.all()
serializer_class = UserInfoSerializer
# renderer_classes = (JSONRenderer,)
def get(self, request, *args, **kwargs):
if kwargs['unit'] not in get_default_units():
raise NotFound(kwargs['unit'] + ' is not a valid unit.')
try:
user = request.user
distance_from_point = {kwargs['unit']: kwargs['near_by']}
if not user.location:
raise NotFound('Given users location is undefined.')
near_by_users = Account.gis.filter(
location__distance_lte=(user.location, measure.D(**distance_from_point)),
show_me_on_nearby=True
)
            status_filter = request.query_params.get('status', None)
            gender_filter = request.query_params.get('gender', None)
            age_filter = request.query_params.get('age', None)
            if status_filter and is_valid_status(status_filter):
                near_by_users = near_by_users.filter(status=status_filter)
            if gender_filter and is_valid_gender(gender_filter):
                near_by_users = near_by_users.filter(gender=gender_filter)
except Account.DoesNotExist:
raise NotFound('User not found.')
serializer = UserInfoSerializer(near_by_users, many=True)
return Response(serializer.data)
class NearByCustomLatLong(generics.RetrieveAPIView):
"""
# To fetch user profile nearby custom latitude & longitude.
### __1. Description__
    This endpoint returns a list (JSON array) of users near a custom latitude & longitude.
    The latitude and longitude are provided in query parameters named 'lat' and
    'long'.
### __2. Parameters__(URL) :
* `{near_by_distance}`: Specifies the distance under which other profile will be returned.
* `{distance_unit}`: Specifies unit of distance`(`Possible units are `km` - Kilometer, `m` - meters, `mm` - milli-meter`)`
### __3. Response__
Return An Array of user profile with the following information nearby given lat & long.
* `id` : Unique id of the user
* `email`: Email of the user
* `first_name`: First Name of the user
* `last_name`: First Name of the user
* `profile_picture`: Profile Picture of the user
* `location`: Latest location of the user
### __4. Example :__
* `Request - `
<pre> GET /api/location/nearby/1000000/m/?lat=72&long=23 </pre>
* `Response - `
<pre>
{
"errors": {},
"result": [
{
"id": 20,
"email": "example@gmail.com",
"first_name": "example",
"picture": "http://facebook/com/picture",
"location": {
"type": "Point",
"coordinates": [
68.500879846386,
10.617657679834
]
},
...
]
}
</pre>
### __5. Possible Errors__
1. `404` if unit of distance is invalid
2. `404` if given user's location is undefined
"""
serializer_class = UserSerializer
queryset = Account.objects.all()
def get(self, request, *args, **kwargs):
if kwargs['unit'] not in get_default_units():
raise NotFound(kwargs['unit']+' is not a valid unit.')
lati = request.query_params.get('lat', None)
longi = request.query_params.get('long', None)
try:
distance_from_point = {kwargs['unit']: kwargs['near_by']}
point = "POINT(%s %s)" % (lati, longi)
location = geos.fromstr(point)
near_by_users = Account.gis.filter(location__distance_lte=(location, measure.D(**distance_from_point)))
except Account.DoesNotExist:
raise NotFound('User not found.')
except GEOSException:
raise NotFound('lat or long or both not specified in url query parameters.')
serializer = UserInfoSerializer(near_by_users, many=True)
return Response(serializer.data)
|
[
"deepakkumar21120.dk@gmail.com"
] |
deepakkumar21120.dk@gmail.com
|
75e9270bd3c7116428f73f0fa3140d49cfbe75c0
|
7250fee18d67327d53aed964bef24f68178f6954
|
/problem_1/Fellow Codes Go Here/lena_bartell.py
|
5767b881715b67cf40ffb5554852d76f80ea5f75
|
[] |
no_license
|
mlpaff/insight_bos_coding_challenges
|
c154832acc7a7072405b13abe37f4e2f7f7d1284
|
4eeed843b66f3f1246c1ad8a6ff652c2f0c11076
|
refs/heads/master
| 2021-05-02T08:01:53.141603
| 2018-02-11T22:57:01
| 2018-02-11T22:57:01
| 120,843,554
| 2
| 0
| null | 2018-02-09T02:07:18
| 2018-02-09T02:07:17
| null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
"""
Odd Even Linked List
Given a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.
You should try to do it in place. The program should run in O(1) space complexity and O(nodes) time complexity.
Example:
Given 1->2->3->4->5->NULL,
return 1->3->5->2->4->NULL.
Note:
The relative order inside both the even and odd groups should remain as it was in the input.
The first node is considered odd, the second node even and so on ...
"""
#constructor for a Node of singly linked list
class ListNode:
def __init__(self, data):
self.data = data
self.next = None
def oddEvenList_Helper(head):
    # Guard against an empty list
    if not head:
        return head
    # Set odd start and current odd value
odd_head = head
odd_curr = odd_head
# Set even start and current even value
even_head = head.next
even_curr = even_head
while even_curr and even_curr.next:
# advance the odd track
odd_curr.next = even_curr.next
odd_curr = odd_curr.next
# advance the even track
even_curr.next = odd_curr.next
even_curr = even_curr.next
# add even head to the end of the odd track
odd_curr.next = even_head
# return the now-full odd track
return odd_head
#DO NOT CHANGE THIS FUNCTION
def oddEvenList(head):
return oddEvenList_Helper(head)
#test case
def main():
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
head = oddEvenList(head)
print ("Expected result: 1, 3, 5, 2, 4")
print ("Your result is {}, {}, {}, {}, {}".format(head.data, head.next.data, head.next.next.data, head.next.next.next.data, head.next.next.next.next.data))
if __name__ == "__main__":
main()
|
[
"lenabartell@gmail.com"
] |
lenabartell@gmail.com
|
f166cfc951ed9447469d351c36478a9aa96458ac
|
8d490df45de03ac7e22a1bbd8adb75acc4efca5b
|
/data_upload/model/isbcgc_cloudsql_mock_model.py
|
c9f4ca42dfa4e13460ec1a12cc0d4522aaf28573
|
[
"Apache-2.0"
] |
permissive
|
snamburi3/ISB-CGC-data-proc
|
0f0e6cadc766f54c363a653f192a0934a1dc1d4c
|
32de04f22c356bbceb27344822ad5e7df1f861f0
|
refs/heads/master
| 2020-06-29T12:21:53.327380
| 2016-11-21T23:31:29
| 2016-11-21T23:31:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,183
|
py
|
'''
a mock wrapper to google cloud sql.
Copyright 2015, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
["limitations", "under", "the", "None", "License.", "None"],
'''
class ISBCGC_database_helper():
"""
this class mocks the cloud sql metadata upload
"""
self = None
def __init__(self, config, log):
pass
@classmethod
def initialize(cls, config, log):
pass
@classmethod
def select(cls, config, stmt, log, params = []):
if 'metadata_datadictionary' in stmt:
return [
["age_at_initial_pathologic_diagnosis", "metadata_clinical", "int"],
["anatomic_neoplasm_subdivision", "metadata_clinical", "controlled vocabulary text"],
["batch_number", "metadata_clinical", "controlled vocabulary text"],
["bcr", "metadata_clinical", "controlled vocabulary text"],
["clinical_M", "metadata_clinical", "controlled vocabulary text"],
["clinical_N", "metadata_clinical", "controlled vocabulary text"],
["clinical_stage", "metadata_clinical", "controlled vocabulary text"],
["clinical_T", "metadata_clinical", "controlled vocabulary text"],
["colorectal_cancer", "metadata_clinical", "controlled vocabulary text"],
["country", "metadata_clinical", "controlled vocabulary text"],
["days_to_birth", "metadata_clinical", "number"],
["days_to_death", "metadata_clinical", "number"],
["days_to_initial_pathologic_diagnosis", "metadata_clinical", "number"],
["days_to_last_followup", "metadata_clinical", "number"],
["days_to_submitted_specimen_dx", "metadata_clinical", "number"],
["Study", "metadata_clinical", "controlled vocabulary text"],
["ethnicity", "metadata_clinical", "controlled vocabulary text"],
["frozen_specimen_anatomic_site", "metadata_clinical", "controlled vocabulary text"],
["gender", "metadata_clinical", "controlled vocabulary text"],
["gleason_score_combined", "metadata_clinical", "number"],
["height", "metadata_clinical", "number"],
["histological_type", "metadata_clinical", "controlled vocabulary text"],
["history_of_colon_polyps", "metadata_clinical", "controlled vocabulary text"],
["history_of_neoadjuvant_treatment", "metadata_clinical", "controlled vocabulary text"],
["history_of_prior_malignancy", "metadata_clinical", "controlled vocabulary text"],
["hpv_calls", "metadata_clinical", "controlled vocabulary text"],
["hpv_status", "metadata_clinical", "controlled vocabulary text"],
["icd_10", "metadata_clinical", "controlled vocabulary text"],
["icd_o_3_histology", "metadata_clinical", "controlled vocabulary text"],
["icd_o_3_site", "metadata_clinical", "controlled vocabulary text"],
["lymphatic_invasion", "metadata_clinical", "controlled vocabulary text"],
["lymphnodes_examined", "metadata_clinical", "controlled vocabulary text"],
["lymphovascular_invasion_present", "metadata_clinical", "controlled vocabulary text"],
["menopause_status", "metadata_clinical", "controlled vocabulary text"],
["mononucleotide_and_dinucleotide_marker_panel_analysis_status", "metadata_clinical", "controlled vocabulary text"],
["mononucleotide_marker_panel_analysis_status", "metadata_clinical", "controlled vocabulary text"],
["neoplasm_histologic_grade", "metadata_clinical", "controlled vocabulary text"],
["new_tumor_event_after_initial_treatment", "metadata_clinical", "controlled vocabulary text"],
["number_of_lymphnodes_examined", "metadata_clinical", "number"],
["number_of_lymphnodes_positive_by_he", "metadata_clinical", "int"],
["number_pack_years_smoked", "metadata_clinical", "int"],
["ParticipantBarcode", "metadata_clinical", "text"],
["ParticipantUUID", "metadata_clinical", "UUID"],
["pathologic_M", "metadata_clinical", "controlled vocabulary text"],
["pathologic_N", "metadata_clinical", "controlled vocabulary text"],
["pathologic_stage", "metadata_clinical", "controlled vocabulary text"],
["pathologic_T", "metadata_clinical", "controlled vocabulary text"],
["person_neoplasm_cancer_status", "metadata_clinical", "controlled vocabulary text"],
["pregnancies", "metadata_clinical", "controlled vocabulary text"],
["primary_neoplasm_melanoma_dx", "metadata_clinical", "controlled vocabulary text"],
["primary_therapy_outcome_success", "metadata_clinical", "controlled vocabulary text"],
["prior_dx", "metadata_clinical", "controlled vocabulary text"],
["psa_value", "metadata_clinical", "number"],
["race", "metadata_clinical", "controlled vocabulary text"],
["residual_tumor", "metadata_clinical", "controlled vocabulary text"],
["TSSCode", "metadata_clinical", "controlled vocabulary text"],
["tobacco_smoking_history", "metadata_clinical", "controlled vocabulary text"],
["tumor_tissue_site", "metadata_clinical", "controlled vocabulary text"],
["TumorType", "metadata_clinical", "controlled vocabulary text"],
["vital_status", "metadata_clinical", "controlled vocabulary text"],
["weight", "metadata_clinical", "int"],
["weiss_venous_invasion", "metadata_clinical", "controlled vocabulary text"],
["year_of_initial_pathologic_diagnosis", "metadata_clinical", "int"],
["avg_percent_lymphocyte_infiltration", "metadata_biospecimen", "number"],
["avg_percent_monocyte_infiltration", "metadata_biospecimen", "number"],
["avg_percent_necrosis", "metadata_biospecimen", "number"],
["avg_percent_neutrophil_infiltration", "metadata_biospecimen", "number"],
["avg_percent_normal_cells", "metadata_biospecimen", "number"],
["avg_percent_stromal_cells", "metadata_biospecimen", "number"],
["avg_percent_tumor_cells", "metadata_biospecimen", "number"],
["avg_percent_tumor_nuclei", "metadata_biospecimen", "number"],
["batch_number", "metadata_biospecimen", "controlled vocabulary text"],
["bcr", "metadata_biospecimen", "controlled vocabulary text"],
["days_to_collection", "metadata_biospecimen", "number"],
["max_percent_lymphocyte_infiltration", "metadata_biospecimen", "number"],
["max_percent_monocyte_infiltration", "metadata_biospecimen", "number"],
["max_percent_necrosis", "metadata_biospecimen", "number"],
["max_percent_neutrophil_infiltration", "metadata_biospecimen", "number"],
["max_percent_normal_cells", "metadata_biospecimen", "number"],
["max_percent_stromal_cells", "metadata_biospecimen", "number"],
["max_percent_tumor_cells", "metadata_biospecimen", "number"],
["max_percent_tumor_nuclei", "metadata_biospecimen", "number"],
["min_percent_lymphocyte_infiltration", "metadata_biospecimen", "number"],
["min_percent_monocyte_infiltration", "metadata_biospecimen", "number"],
["min_percent_necrosis", "metadata_biospecimen", "number"],
["min_percent_neutrophil_infiltration", "metadata_biospecimen", "number"],
["min_percent_normal_cells", "metadata_biospecimen", "number"],
["min_percent_stromal_cells", "metadata_biospecimen", "number"],
["min_percent_tumor_cells", "metadata_biospecimen", "number"],
["min_percent_tumor_nuclei", "metadata_biospecimen", "number"],
["ParticipantBarcode", "metadata_biospecimen", "text"],
["Project", "metadata_biospecimen", "text"],
["SampleBarcode", "metadata_biospecimen", "text"],
["SampleUUID", "metadata_biospecimen", "UUID"],
["Study", "metadata_biospecimen", "controlled vocabulary text"],
["AliquotBarcode", "metadata_data", "text"],
["AliquotUUID", "metadata_data", "UUID"],
["analysis_id", "metadata_data", "UUID"],
["analyte_code", "metadata_data", "single letter code"],
["AnnotationCategory", "metadata_data", "controlled vocabulary text"],
["AnnotationClassification", "metadata_data", "controlled vocabulary text"],
["DataArchiveName", "metadata_data", "filename"],
["DataArchiveURL", "metadata_data", "hyperlink"],
["DataArchiveVersion", "metadata_data", "text"],
["DataCenterCode", "metadata_data", "controlled vocabulary text"],
["DataCenterName", "metadata_data", "text"],
["DataCenterType", "metadata_data", "controlled vocabulary text"],
["DataCGHubID", "metadata_data", "UUID"],
["DatafileMD5", "metadata_data", "32 digit hex number"],
["DatafileName", "metadata_data", "filename"],
["DatafileNameKey", "metadata_data", "GCS path"],
["DatafileUploaded", "metadata_data", "controlled vocabulary text"],
["DataLevel", "metadata_data", "controlled vocabulary text"],
["Datatype", "metadata_data", "controlled vocabulary text"],
["Disease Code", "metadata_data", "controlled vocabulary text"],
["GenomeReference", "metadata_data", "controlled vocabulary text"],
["IncludeForAnalysis", "metadata_data", "controlled vocabulary text"],
["last_modified", "metadata_data", "DATE"],
["library_strategy", "metadata_data", "controlled vocabulary text"],
["MAGETabArchiveName", "metadata_data", "filename"],
["MAGETabArchiveURL", "metadata_data", "hyperlink"],
["ParticipantBarcode", "metadata_data", "text"],
["Pipeline", "metadata_data", "controlled vocabulary text"],
["Platform", "metadata_data", "controlled vocabulary text"],
["platform_full_name", "metadata_data", "controlled vocabulary text"],
["Project", "metadata_data", "controlled vocabulary text"],
["reason_for_state", "metadata_data", "text"],
["Repository", "metadata_data", "controlled vocabulary text"],
["SampleBarcode", "metadata_data", "text"],
["SampleTypeCode", "metadata_data", "controlled vocabulary text"],
["SDRFFileName", "metadata_data", "filename"],
["SDRFFileNameKey", "metadata_data", "GCS path"],
["SecurityProtocol", "metadata_data", "controlled vocabulary text"],
["Species", "metadata_data", "controlled vocabulary text"],
["state", "metadata_data", "controlled vocabulary text"],
["Study", "metadata_data", "controlled vocabulary text"],
["wasDerivedFrom", "metadata_data", "text list"]
]
elif 'desc metadata_clinical' in stmt:
return [
["metadata_clinical_id", "int(11)", "NO", "PRI", "NULL", "auto_increment"],
["adenocarcinoma_invasion", "varchar(10)", "YES", "None", "NULL", "None"],
["age_at_initial_pathologic_diagnosis", "int(11)", "YES", "None", "NULL", "None"],
["anatomic_neoplasm_subdivision", "varchar(63)", "YES", "None", "NULL", "None"],
["batch_number", "int(11)", "YES", "None", "NULL", "None"],
["bcr", "varchar(63)", "YES", "None", "NULL", "None"],
["clinical_M", "varchar(12)", "YES", "None", "NULL", "None"],
["clinical_N", "varchar(12)", "YES", "None", "NULL", "None"],
["clinical_stage", "varchar(12)", "YES", "None", "NULL", "None"],
["clinical_T", "varchar(12)", "YES", "None", "NULL", "None"],
["colorectal_cancer", "varchar(10)", "YES", "None", "NULL", "None"],
["country", "varchar(63)", "YES", "None", "NULL", "None"],
["country_of_procurement", "varchar(63)", "YES", "None", "NULL", "None"],
["days_to_birth", "int(11)", "YES", "MUL", "NULL", "None"],
["days_to_death", "int(11)", "YES", "MUL", "NULL", "None"],
["days_to_initial_pathologic_diagnosis", "int(11)", "YES", "None", "NULL", "None"],
["days_to_last_followup", "int(11)", "YES", "None", "NULL", "None"],
["days_to_submitted_specimen_dx", "int(11)", "YES", "None", "NULL", "None"],
["Disease_Code", "varchar(6)", "YES", "MUL", "NULL", "None"],
["ethnicity", "varchar(20)", "YES", "MUL", "NULL", "None"],
["frozen_specimen_anatomic_site", "varchar(63)", "YES", "None", "NULL", "None"],
["gender", "varchar(15)", "YES", "MUL", "NULL", "None"],
["gleason_score_combined", "int(11)", "YES", "None", "NULL", "None"],
["height", "int(11)", "YES", "None", "NULL", "None"],
["histological_type", "varchar(63)", "YES", "MUL", "NULL", "None"],
["history_of_colon_polyps", "varchar(8)", "YES", "None", "NULL", "None"],
["history_of_neoadjuvant_treatment", "varchar(63)", "YES", "None", "NULL", "None"],
["history_of_prior_malignancy", "varchar(25)", "YES", "MUL", "NULL", "None"],
["hpv_calls", "varchar(20)", "YES", "None", "NULL", "None"],
["hpv_status", "varchar(20)", "YES", "None", "NULL", "None"],
["icd_10", "varchar(8)", "YES", "MUL", "NULL", "None"],
["icd_o_3_histology", "varchar(10)", "YES", "MUL", "NULL", "None"],
["icd_o_3_site", "varchar(8)", "YES", "MUL", "NULL", "None"],
["lymphatic_invasion", "varchar(8)", "YES", "MUL", "NULL", "None"],
["lymphnodes_examined", "varchar(8)", "YES", "None", "NULL", "None"],
["lymphovascular_invasion_present", "varchar(63)", "YES", "None", "NULL", "None"],
["menopause_status", "varchar(30)", "YES", "None", "NULL", "None"],
["mononucleotide_and_dinucleotide_marker_panel_analysis_status", "varchar(20)", "YES", "MUL", "NULL", "None"],
["mononucleotide_marker_panel_analysis_status", "varchar(20)", "YES", "MUL", "NULL", "None"],
["neoplasm_histologic_grade", "varchar(15)", "YES", "MUL", "NULL", "None"],
["new_tumor_event_after_initial_treatment", "varchar(8)", "YES", "MUL", "NULL", "None"],
["number_of_lymphnodes_examined", "int(11)", "YES", "None", "NULL", "None"],
["number_of_lymphnodes_positive_by_he", "int(11)", "YES", "MUL", "NULL", "None"],
["number_pack_years_smoked", "int(11)", "YES", "None", "NULL", "None"],
["ParticipantBarcode", "varchar(12)", "NO", "None", "NULL", "None"],
["ParticipantUUID", "varchar(36)", "NO", "None", "NULL", "None"],
["pathologic_M", "varchar(12)", "YES", "MUL", "NULL", "None"],
["pathologic_N", "varchar(12)", "YES", "MUL", "NULL", "None"],
["pathologic_stage", "varchar(10)", "YES", "MUL", "NULL", "None"],
["pathologic_T", "varchar(12)", "YES", "MUL", "NULL", "None"],
["person_neoplasm_cancer_status", "varchar(15)", "YES", "MUL", "NULL", "None"],
["pregnancies", "varchar(35)", "YES", "MUL", "NULL", "None"],
["primary_neoplasm_melanoma_dx", "varchar(10)", "YES", "MUL", "NULL", "None"],
["primary_therapy_outcome_success", "varchar(35)", "YES", "None", "NULL", "None"],
["prior_dx", "varchar(50)", "YES", "MUL", "NULL", "None"],
["psa_value", "float", "YES", "None", "NULL", "None"],
["race", "varchar(30)", "YES", "MUL", "NULL", "None"],
["residual_tumor", "varchar(5)", "YES", "None", "NULL", "None"],
["tobacco_smoking_history", "varchar(30)", "YES", "MUL", "NULL", "None"],
["TSSCode", "varchar(2)", "YES", "MUL", "NULL", "None"],
["tumor_tissue_site", "varchar(20)", "YES", "MUL", "NULL", "None"],
["tumor_type", "varchar(4)", "YES", "None", "NULL", "None"],
["venous_invasion", "varchar(63)", "YES", "None", "NULL", "None"],
["vital_status", "varchar(63)", "YES", "MUL", "NULL", "None"],
["weight", "varchar(63)", "YES", "None", "NULL", "None"],
["year_of_initial_pathologic_diagnosis", "varchar(63)", "YES", "MUL", "NULL", "None"]
]
elif "desc metadata_biospecimen" in stmt:
return [
["metadata_biospecimen_id", "int(11)", "NO", "None", "PRI", "auto_increment"],
["ParticipantBarcode", "varchar(12)", "NO", "None", "NULL", "None"],
["SampleBarcode", "varchar(16)", "NO", "None", "NULL", "None"],
["SampleUUID", "varchar(36)", "YES", "None", "NULL", "None"],
["batch_number", "int(11)", "YES", "None", "NULL", "None"],
["bcr", "varchar(63)", "YES", "None", "MUL", "None"],
["days_to_collection", "int(11)", "YES", "None", "NULL", "None"],
["days_to_sample_procurement", "int(11)", "YES", "None", "NULL", "None"],
["Disease_Code", "varchar(20)", "YES", "None", "MUL", "None"],
["Study", "varchar(20)", "YES", "None", "MUL", "None"],
["is_ffpe", "varchar(4)", "YES", "None", "NULL", "None"],
["preservation_method", "varchar(20)", "YES", "None", "NULL", "None"],
["Project", "varchar(20)", "NO", "None", "NULL", "None"],
["tissue_type", "varchar(15)", "YES", "None", "MUL", "None"],
["tumor_pathology", "varchar(50)", "YES", "None", "MUL", "None"],
["avg_percent_lymphocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["avg_percent_monocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["avg_percent_necrosis", "float", "YES", "None", "NULL", "None"],
["avg_percent_neutrophil_infiltration", "float", "YES", "None", "NULL", "None"],
["avg_percent_normal_cells", "float", "YES", "None", "NULL", "None"],
["avg_percent_stromal_cells", "float", "YES", "None", "NULL", "None"],
["avg_percent_tumor_cells", "float", "YES", "None", "NULL", "None"],
["avg_percent_tumor_nuclei", "float", "YES", "None", "NULL", "None"],
["max_percent_lymphocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["max_percent_monocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["max_percent_necrosis", "float", "YES", "None", "NULL", "None"],
["max_percent_neutrophil_infiltration", "float", "YES", "None", "NULL", "None"],
["max_percent_normal_cells", "float", "YES", "None", "NULL", "None"],
["max_percent_stromal_cells", "float", "YES", "None", "NULL", "None"],
["max_percent_tumor_cells", "float", "YES", "None", "NULL", "None"],
["max_percent_tumor_nuclei", "float", "YES", "None", "NULL", "None"],
["min_percent_lymphocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["min_percent_monocyte_infiltration", "float", "YES", "None", "NULL", "None"],
["min_percent_necrosis", "float", "YES", "None", "NULL", "None"],
["min_percent_neutrophil_infiltration", "float", "YES", "None", "NULL", "None"],
["min_percent_normal_cells", "float", "YES", "None", "NULL", "None"],
["min_percent_stromal_cells", "float", "YES", "None", "NULL", "None"],
["min_percent_tumor_cells", "float", "YES", "None", "NULL", "None"],
["min_percent_tumor_nuclei", "float", "YES", "None", "NULL", "None"],
]
elif "desc metadata_data" in stmt:
return [
["metadata_data_id", "int(11)", "NO", "None", "PRI", "auto_increment"],
["ParticipantBarcode", "varchar(12)", "NO", "None", "MUL", "None"],
["SampleBarcode", "varchar(16)", "NO", "None", "MUL", "None"],
["AliquotBarcode", "varchar(28)", "NO", "None", "NULL", "None"],
["AliquotUUID", "varchar(36)", "YES", "None", "NULL", "None"],
["AnnotationCategory", "varchar(100)", "YES", "None", "NULL", "None"],
["AnnotationClassification", "varchar(100)", "YES", "None", "NULL", "None"],
["DataArchiveName", "varchar(100)", "YES", "None", "NULL", "None"],
["DataArchiveURL", "varchar(300)", "YES", "None", "NULL", "None"],
["DataArchiveVersion", "varchar(20)", "YES", "None", "NULL", "None"],
["DataCenterCode", "varchar(2)", "YES", "None", "MUL", "None"],
["DataCenterName", "varchar(20)", "YES", "None", "MUL", "None"],
["DataCenterType", "varchar(4)", "YES", "None", "MUL", "None"],
["DataCGHubID", "varchar(36)", "YES", "None", "NULL", "None"],
["DatafileMD5", "varchar(32)", "YES", "None", "NULL", "None"],
["DatafileName", "varchar(100)", "NO", "None", "MUL", "None"],
["DatafileNameKey", "varchar(200)", "NO", "None", "NULL", "None"],
["DatafileUploaded", "varchar(5)", "NO", "None", "MUL", "None"],
["DataLevel", "varchar(7)", "NO", "None", "NULL", "None"],
["Datatype", "varchar(30)", "YES", "None", "MUL", "None"],
["Disease_Code", "varchar(6)", "YES", "None", "NULL", "None"],
["GenomeReference", "varchar(32)", "YES", "None", "NULL", "None"],
["IncludeForAnalysis", "varchar(3)", "YES", "None", "NULL", "None"],
["MAGETabArchiveName", "varchar(250)", "YES", "None", "NULL", "None"],
["MAGETabArchiveURL", "varchar(240)", "YES", "None", "NULL", "None"],
["Pipeline", "varchar(45)", "NO", "None", "MUL", "None"],
["Platform", "varchar(40)", "NO", "None", "MUL", "None"],
["Project", "varchar(30)", "NO", "None", "NULL", "None"],
["Repository", "varchar(15)", "YES", "None", "NULL", "None"],
["SampleTypeCode", "varchar(2)", "YES", "None", "MUL", "None"],
["SDRFFileName", "varchar(75)", "YES", "None", "MUL", "None"],
["SDRFFileNameKey", "varchar(200)", "YES", "None", "NULL", "None"],
["SecurityProtocol", "varchar(30)", "NO", "None", "NULL", "None"],
["Species", "varchar(25)", "NO", "None", "NULL", "None"],
["Study", "varchar(20)", "NO", "None", "MUL", "None"],
["wasDerivedFrom", "varchar(150)", "YES", "None", "NULL", "None"],
["library_strategy", "varchar(10)", "YES", "None", "NULL", "None"],
["state", "varchar(12)", "YES", "None", "NULL", "None"],
["reason_for_state", "varchar(200)", "YES", "None", "NULL", "None"],
["analysis_id", "varchar(36)", "YES", "None", "NULL", "None"],
["analyte_code", "varchar(2)", "YES", "None", "MUL", "None"],
["last_modified", "varchar(10)", "YES", "None", "NULL", "None"],
["platform_full_name", "varchar(30)", "YES", "None", "NULL", "None"],
]
return []
@classmethod
def insert(cls, config, rows, table, log):
log.info('\t\tstarting mock insert for %s' % (table))
field_names = cls.field_names(table)
cls.column_insert(config, rows, table, field_names, log)
log.info('\t\tcompleted mock insert')
@classmethod
def column_insert(cls, config, rows, table, field_names, log):
log.info('\t\t\tinsert into %s.%s\n\t(%s)\nvalues\n\t(%s)' % (config['cloudsql']['db'], table, ', '.join(field_names), ', '.join(['%s']*len(field_names))))
# now save in batches
batch = 5
count = 0
inserts = []
for start in range(0, len(rows), batch):
for index in range(batch):
if start + index == len(rows):
break
inserts += [rows[start + index]]
log.info('\t\t\tmock insert rows %s to %s' % (start, start + index))
if count <= 4:
for row in range(len(inserts)):
log.info('\t\t\t%s' % (','.join(str(insert) for insert in inserts[row])))
else:
break
count += 1
inserts = []
for start in range(0, len(rows), max(1, len(rows) // 10)):
for index in range(max(1, len(rows) // 10)):
if start + index == len(rows):
break
log.info('\t\t\tmock insert rows %s to %s' % (start, start + index))
@classmethod
def field_names(cls, table):
if 'metadata_clinical' == table:
retval = ['adenocarcinoma_invasion','age_at_initial_pathologic_diagnosis','anatomic_neoplasm_subdivision','batch_number','bcr',
'clinical_M','clinical_N','clinical_stage','clinical_T','colorectal_cancer','country','days_to_birth',
'days_to_death','days_to_initial_pathologic_diagnosis','days_to_last_followup','days_to_last_known_alive', 'days_to_submitted_specimen_dx',
'ethnicity','frozen_specimen_anatomic_site','gender','gleason_score_combined','height','histological_type','history_of_colon_polyps',
'history_of_neoadjuvant_treatment','history_of_prior_malignancy','hpv_calls','hpv_status','icd_10','icd_o_3_histology','icd_o_3_site',
'lymphatic_invasion','lymphnodes_examined','lymphovascular_invasion_present','menopause_status',
'mononucleotide_and_dinucleotide_marker_panel_analysis_status','mononucleotide_marker_panel_analysis_status','neoplasm_histologic_grade',
'new_tumor_event_after_initial_treatment','number_of_lymphnodes_examined','number_of_lymphnodes_positive_by_he',
'number_pack_years_smoked','ParticipantBarcode','ParticipantUUID','pathologic_M','pathologic_N','pathologic_stage','pathologic_T',
'person_neoplasm_cancer_status','pregnancies','primary_neoplasm_melanoma_dx','primary_therapy_outcome_success','prior_dx','psa_value',
'race','residual_tumor','tobacco_smoking_history','TSSCode','tumor_tissue_site','tumor_type',
'venous_invasion','vital_status','weight','year_of_initial_pathologic_diagnosis']
elif 'metadata_biospecimen' == table:
retval = ['ParticipantBarcode','SampleBarcode','SampleUUID','batch_number','bcr','days_to_collection','days_to_sample_procurement',
'SampleTypeCode', 'SampleType', 'SampleTypeLetterCode',
'Study','is_ffpe','preservation_method','Project','tissue_type','tumor_pathology','avg_percent_lymphocyte_infiltration',
'avg_percent_monocyte_infiltration','avg_percent_necrosis','avg_percent_neutrophil_infiltration','avg_percent_normal_cells',
'avg_percent_stromal_cells','avg_percent_tumor_cells','avg_percent_tumor_nuclei','max_percent_lymphocyte_infiltration',
'max_percent_monocyte_infiltration','max_percent_necrosis','max_percent_neutrophil_infiltration','max_percent_normal_cells',
'max_percent_stromal_cells','max_percent_tumor_cells','max_percent_tumor_nuclei','min_percent_lymphocyte_infiltration',
'min_percent_monocyte_infiltration','min_percent_necrosis','min_percent_neutrophil_infiltration','min_percent_normal_cells',
'min_percent_stromal_cells','min_percent_tumor_cells','min_percent_tumor_nuclei']
elif 'metadata_data' == table:
retval = ['ParticipantBarcode', 'SampleBarcode', 'AliquotBarcode', 'AliquotUUID', 'AnnotationCategory', 'AnnotationClassification',
'DataArchiveName', 'DataArchiveURL', 'DataArchiveVersion', 'DataCenterCode', 'DataCenterName', 'DataCenterType', 'DataCGHubID',
'DatafileMD5', 'DatafileName', 'DatafileNameKey', 'DatafileUploaded', 'DataLevel', 'Datatype', 'GenomeReference',
'IncludeForAnalysis', 'MAGETabArchiveName', 'MAGETabArchiveURL', 'Pipeline', 'Platform', 'Project', 'Repository', 'SampleType',
'SampleTypeCode', 'SDRFFileName', 'SDRFFileNameKey', 'SecurityProtocol', 'Species', 'Study', 'wasDerivedFrom', 'library_strategy',
'state', 'reason_for_state', 'analysis_id', 'analyte_code', 'last_modified', 'platform_full_name']
elif 'metadata_samples' == table:
retval = ['adenocarcinoma_invasion', 'age_at_initial_pathologic_diagnosis', 'anatomic_neoplasm_subdivision',
'avg_percent_lymphocyte_infiltration', 'avg_percent_monocyte_infiltration', 'avg_percent_necrosis',
'avg_percent_neutrophil_infiltration', 'avg_percent_normal_cells', 'avg_percent_stromal_cells',
'avg_percent_tumor_cells', 'avg_percent_tumor_nuclei', 'batch_number', 'bcr', 'clinical_M', 'clinical_N', 'clinical_stage',
'clinical_T', 'colorectal_cancer', 'country', 'country_of_procurement', 'days_to_birth', 'days_to_collection',
'days_to_death', 'days_to_initial_pathologic_diagnosis', 'days_to_last_followup', 'days_to_submitted_specimen_dx',
'Disease_Code', 'ethnicity', 'frozen_specimen_anatomic_site', 'gender', 'gleason_score_combined', 'height',
'histological_type', 'history_of_colon_polyps', 'history_of_neoadjuvant_treatment', 'history_of_prior_malignancy',
'hpv_calls', 'hpv_status', 'icd_10', 'icd_o_3_histology', 'icd_o_3_site', 'lymph_node_examined_count', 'lymphatic_invasion',
'lymphnodes_examined', 'lymphovascular_invasion_present', 'max_percent_lymphocyte_infiltration',
'max_percent_monocyte_infiltration', 'max_percent_necrosis', 'max_percent_neutrophil_infiltration',
'max_percent_normal_cells', 'max_percent_stromal_cells', 'max_percent_tumor_cells', 'max_percent_tumor_nuclei',
'menopause_status', 'min_percent_lymphocyte_infiltration', 'min_percent_monocyte_infiltration', 'min_percent_necrosis',
'min_percent_neutrophil_infiltration', 'min_percent_normal_cells', 'min_percent_stromal_cells', 'min_percent_tumor_cells',
'min_percent_tumor_nuclei', 'mononucleotide_and_dinucleotide_marker_panel_analysis_status',
'mononucleotide_marker_panel_analysis_status', 'neoplasm_histologic_grade', 'new_tumor_event_after_initial_treatment',
'number_of_lymphnodes_examined', 'number_of_lymphnodes_positive_by_he', 'number_pack_years_smoked', 'ParticipantBarcode',
'pathologic_M', 'pathologic_N', 'pathologic_T', 'pathologic_stage', 'person_neoplasm_cancer_status', 'pregnancies',
'preservation_method', 'primary_neoplasm_melanoma_dx', 'primary_therapy_outcome_success', 'prior_dx', 'Project',
'psa_value', 'race', 'residual_tumor', 'SampleBarcode', 'Study', 'tissue_type', 'tobacco_smoking_history',
'total_number_of_pregnancies', 'tumor_tissue_site', 'tumor_pathology', 'tumor_type', 'venous_invasion', 'vital_status',
'weight', 'year_of_initial_pathologic_diagnosis', 'SampleTypeCode', 'has_Illumina_DNASeq', 'has_BCGSC_HiSeq_RNASeq',
'has_UNC_HiSeq_RNASeq', 'has_BCGSC_GA_RNASeq', 'has_UNC_GA_RNASeq', 'has_HiSeq_miRnaSeq', 'has_GA_miRNASeq', 'has_RPPA',
'has_SNP6', 'has_27k', 'has_450k'
]
return retval
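# Illustrative usage sketch (assumption, not part of the original module): an upload script
# might exercise the mock helper roughly like this, where `config` and `log` come from the
# surrounding ETL framework and the row values are made up:
#
# columns = ISBCGC_database_helper.select(config, 'desc metadata_clinical', log)
# rows = [['TCGA-00-0000', 'uuid-1'], ['TCGA-00-0001', 'uuid-2']]
# ISBCGC_database_helper.insert(config, rows, 'metadata_clinical', log)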
|
[
"mmiller@systemsbiology.org"
] |
mmiller@systemsbiology.org
|
d92eebf8f6dc3ad2e9788be0a4dcddf2163da147
|
f41309da5e0d26b24d974a009fa309a02fcaa20c
|
/aws_s3_policies/aws_s3_bucket_logging.py
|
1bd470dc1f5c59a7f31c6700e2b878afece7d8bb
|
[
"Apache-2.0"
] |
permissive
|
georgeSkoumas/panther-analysis
|
2e1e87f83c6533cb6d62ecb62e3f61b2ff4b5ed4
|
30b21c270504bf7c84f99207c9c6c2f6110843ae
|
refs/heads/master
| 2022-09-14T13:22:31.786275
| 2020-05-26T16:18:58
| 2020-05-26T16:18:58
| 267,569,230
| 1
| 0
|
Apache-2.0
| 2020-05-28T11:15:05
| 2020-05-28T11:15:04
| null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
def policy(resource):
return resource['LoggingPolicy'] is not None
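# Illustrative usage sketch (assumption, not part of the original policy): Panther calls
# policy(resource) once per S3 bucket resource; the dictionaries below are made-up examples
# showing how the None check behaves.
if __name__ == "__main__":
    logged_bucket = {"LoggingPolicy": {"TargetBucket": "example-log-bucket", "TargetPrefix": "logs/"}}
    unlogged_bucket = {"LoggingPolicy": None}
    assert policy(logged_bucket) is True
    assert policy(unlogged_bucket) is False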
|
[
"noreply@github.com"
] |
georgeSkoumas.noreply@github.com
|
05b2e1238630e79e414eea73dae7aed0432327db
|
69b37d49e010efb0191d96cda1a0361473077cb8
|
/app/authentication/forms.py
|
37c7d354f5ff2c87b94730da99305d8b197420bb
|
[
"MIT"
] |
permissive
|
abhishek70/django-tdd-crm
|
4f06276c495e055a87102b922d6e3f50ca7c9998
|
e2a0ee03cbea5de237ce7f247a89c2bdd413effc
|
refs/heads/main
| 2023-03-23T23:03:44.995761
| 2021-03-17T07:06:53
| 2021-03-17T07:06:53
| 343,277,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, UsernameField
User = get_user_model()
class CustomSignupForm(UserCreationForm):
class Meta:
model = User
fields = ("username",)
field_classes = {'username': UsernameField}
widgets = {}  # no custom widgets for now; entries like {'username': forms.TextInput()} could be added here
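# Illustrative usage sketch (assumption, not part of the original module): validating a signup
# in a view might look roughly like this; the field values are made up.
#
# form = CustomSignupForm(data={
#     "username": "alice",
#     "password1": "a-long-example-password",
#     "password2": "a-long-example-password",
# })
# if form.is_valid():
#     user = form.save()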
|
[
"abhishek.desai70@gmail.com"
] |
abhishek.desai70@gmail.com
|
f6fa69130099a5cb0936d72a06768ced4d37f9f2
|
b5cc6d7b5f7ccea36fce4eab961979404414f8b0
|
/spectral/sandbox/mass_matrix_gl.py
|
29a48a0b594287760493555bca03e5b0f75737f4
|
[] |
no_license
|
MiroK/cutFEM-beam
|
adf0c925dbe64b370dab48e82335617450675f5d
|
2fb3686804e836d4031fbf231a36a0f9ac8a3012
|
refs/heads/master
| 2021-01-21T23:54:32.868307
| 2015-02-14T13:14:59
| 2015-02-14T13:14:59
| 25,625,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
import sys
sys.path.insert(0, '../')
from points import gauss_legendre_points as gl_points
from functions import lagrange_basis as l_basis
from quadrature import GLQuadrature
from sympy import lambdify, symbols, integrate
import numpy.linalg as la
import numpy as np
x = symbols('x')
# If I have N points
N = 10
points = gl_points([N])
assert len(points) == N
# This allows me to create N Lagrange polynomials whose degree is N-1
basis = l_basis([points])
basis = list(map(lambda f: lambdify(x, f), basis))
assert len(basis) == N
# If I make the mass matrix which combines these polynomials then the degree
# of the integrand is 2*(N-1) = 2*N - 2 < 2*N - 1 which means that the inner
# product over [-1, 1] is computed exactly by N point quadrature
quad_N = GLQuadrature(N)
quad_2N = GLQuadrature(2*N)
ip_N = quad_N.eval(lambda X: basis[0](X)*basis[1](X), domain=[[-1, 1]])
ip_2N = quad_2N.eval(lambda X: basis[0](X)*basis[1](X), domain=[[-1, 1]])
assert abs(ip_N - ip_2N) < 1E-15
# If I then make the mass matrix using the same quadrature as was used to create
# the polynomials then the mass matrix will be diagonal
M = np.zeros((N, N))
for i, bi in enumerate(basis):
for j, bj in enumerate(basis[i:], i):
M[i, j] = quad_N.eval(lambda X: bi(X)*bj(X), domain=[[-1, 1]])
M -= np.diag(M.diagonal())
assert la.norm(M)/N**2 < 1E-15
# Moreover this result is exact
quad = GLQuadrature(N)
basis = l_basis([points])
for i, bi in enumerate(basis):
for j, bj in enumerate(basis[i:], i):
M[i, j] = integrate(bi*bj, (x, -1, 1))
M[i, j] -= quad.eval(lambda X: lambdify(x, bi)(X)*lambdify(x, bj)(X),
domain=[[-1, 1]])
assert abs(M[i, j]) < 1E-11
|
[
"miroslav.kuchta@gmail.com"
] |
miroslav.kuchta@gmail.com
|
5c87d7e598faeed6b443d8f383eb36aac452a5d0
|
1aeb5a1fb033d2c3379256121b42e1b8b9a5264f
|
/hackerrank/python/Text Alignment.py
|
444f1fbd6483b01698a6e7da0b021a2bf9838b42
|
[] |
no_license
|
mf4lsb/Algorithms
|
9d1622f563209e86c76294b186adca1043d33b61
|
33ae9f7bf03935a78fbefc3e1670c1af43aa7aac
|
refs/heads/master
| 2021-01-01T09:57:51.746248
| 2017-05-19T06:33:24
| 2017-05-19T06:33:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# https://www.hackerrank.com/challenges/text-alignment/submissions/code/14417013
thickness = int(input()) #This must be an odd number
c = 'H'
#Top Cone
for i in range(thickness):
print((c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1))
#Top Pillars
for i in range(thickness+1):
print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))
#Middle Belt
for i in range((thickness+1)//2):
print((c*thickness*5).center(thickness*6))
#Bottom Pillars
for i in range(thickness+1):
print((c*thickness).center(thickness*2)+(c*thickness).center(thickness*6))
#Bottom Cone
for i in range(thickness):
print(((c*(thickness-i-1)).rjust(thickness)+c+(c*(thickness-i-1)).ljust(thickness)).rjust(thickness*6))
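# Illustrative note (not part of the original solution): for thickness = 5 the five blocks above
# print the HackerRank logo as a letter 'H' built from 'H' characters (a top cone, upper pillars,
# a middle belt, lower pillars, and a mirrored bottom cone); the input must be odd so that each
# cone has a single centered column.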
|
[
"johnjullies@users.noreply.github.com"
] |
johnjullies@users.noreply.github.com
|
b9d5ed86f5b5062fe18fffada8e72ca3ffddae10
|
d366d192f668ecaa0b70b454d7a9866c2888d600
|
/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
|
58ba244ade0f924d992254e75705da628dd0c6dc
|
[
"Apache-2.0"
] |
permissive
|
matrogersmtl/transformers
|
2bb853dd11907280f667661909ed31825d0b104b
|
7e71eb2ef77b0200cd079d0d0383342ed1da2df5
|
refs/heads/master
| 2023-06-22T10:30:14.688699
| 2023-06-19T14:23:29
| 2023-06-19T14:23:29
| 281,486,776
| 0
| 0
|
Apache-2.0
| 2020-07-21T19:32:17
| 2020-07-21T19:32:16
| null |
UTF-8
|
Python
| false
| false
| 85,992
|
py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch UniSpeechSat model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
BaseModelOutput,
CausalLMOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
Wav2Vec2BaseModelOutput,
XVectorOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech_sat import UniSpeechSatConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "UniSpeechSatConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/unispeech-sat-base-100h-libri-ft"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'MISTER QUILDER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
_CTC_EXPECTED_LOSS = 39.88
# Frame class docstring
_FRAME_CLASS_CHECKPOINT = "microsoft/unispeech-sat-base-plus-sd"
_FRAME_EXPECTED_OUTPUT = [0, 0]
# Speaker Verification docstring
_XVECTOR_CHECKPOINT = "microsoft/unispeech-sat-base-plus-sv"
_XVECTOR_EXPECTED_OUTPUT = 0.97
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = [
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
]
@dataclass
class UniSpeechSatForPreTrainingOutput(ModelOutput):
"""
Output type of [`UniSpeechSatForPreTraining`], with potential hidden states and attentions.
Args:
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
[official paper](https://arxiv.org/pdf/2006.11477.pdf).
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
projected_states: torch.FloatTensor = None
projected_quantized_states: torch.FloatTensor = None
codevector_perplexity: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
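# Illustrative sketch (assumption, not part of the original file): exercising the helper above on
# a toy shape. With shape=(2, 10), mask_prob=0.2 and mask_length=2, roughly 20% of each row of the
# returned boolean array is covered by spans of length 2 (less when sampled spans overlap).
#
# mask = _compute_mask_indices(shape=(2, 10), mask_prob=0.2, mask_length=2)
# assert mask.shape == (2, 10) and mask.dtype == bool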
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatNoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatGroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeechSat
class UniSpeechSatPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name="weight", dim=2)
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
else:
self.conv = weight_norm(self.conv, name="weight", dim=2)
self.padding = UniSpeechSatSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeechSat
class UniSpeechSatFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [
UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1)
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
if self._requires_grad and self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(conv_layer),
hidden_states,
)
else:
hidden_states = conv_layer(hidden_states)
return hidden_states
class UniSpeechSatFeatureExtractor(UniSpeechSatFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeechSat
class UniSpeechSatFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states, norm_hidden_states
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeechSat
class UniSpeechSatAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
# `past_key_value[0].shape[2] == key_value_states.shape[1]`
# is checking that the `sequence_length` of the `past_key_value` is the same as
# the provided `key_value_states` to support prefix tuning
if (
is_cross_attention
and past_key_value is not None
and past_key_value[0].shape[2] == key_value_states.shape[1]
):
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeechSat
class UniSpeechSatFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatEncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = UniSpeechSatAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = UniSpeechSatFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->UniSpeechSat
class UniSpeechSatAttnAdapterLayer(nn.Module):
def __init__(self, config):
"""
Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
up training throughput.
"""
super().__init__()
self.input_dim = config.adapter_attn_dim
self.hidden_dim = config.hidden_size
self.norm = nn.LayerNorm(self.hidden_dim)
self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
self.act_fn = nn.ReLU()
self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)
def forward(self, hidden_states: torch.FloatTensor):
hidden_states = self.norm(hidden_states)
hidden_states = self.linear_1(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeechSat
class UniSpeechSatEncoderLayerStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = UniSpeechSatAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = UniSpeechSatFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if getattr(config, "adapter_attn_dim", None) is not None:
self.adapter_layer = UniSpeechSatAttnAdapterLayer(config)
else:
self.adapter_layer = None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
if self.adapter_layer is not None:
hidden_states = hidden_states + self.adapter_layer(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeechSat
class UniSpeechSatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([UniSpeechSatEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
        hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
# extend attention_mask
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeechSat
class UniSpeechSatEncoderStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = UniSpeechSatPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[UniSpeechSatEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens are not attended to
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
# extend attention_mask
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class UniSpeechSatGumbelVectorQuantizer(nn.Module):
"""
Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
"""
def __init__(self, config):
super().__init__()
self.num_groups = config.num_codevector_groups
self.num_vars = config.num_codevectors_per_group
if config.codevector_dim % self.num_groups != 0:
raise ValueError(
f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
f" {self.num_groups} for concatenation"
)
# storage for codebook variables (codewords)
self.codevectors = nn.Parameter(
torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
)
self.weight_proj = nn.Linear(config.hidden_size, self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@staticmethod
def _compute_perplexity(probs, mask=None):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
            # sample code vector probs via gumbel in differentiable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
            # compute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
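# Hedged illustration (added, not part of the original file; the config values below are
# assumptions): the quantizer maps (batch, time, hidden_size) hidden states to quantized
# vectors of size `codevector_dim` plus a diversity/perplexity scalar, e.g.:
#
#   >>> import torch
#   >>> from transformers import UniSpeechSatConfig
#   >>> config = UniSpeechSatConfig(num_codevector_groups=2, num_codevectors_per_group=320, codevector_dim=256)
#   >>> quantizer = UniSpeechSatGumbelVectorQuantizer(config)
#   >>> hidden = torch.randn(2, 50, config.hidden_size)
#   >>> codevectors, perplexity = quantizer(hidden)
#   >>> codevectors.shape  # torch.Size([2, 50, 256])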
class UniSpeechSatPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = UniSpeechSatConfig
base_model_prefix = "unispeech_sat"
main_input_name = "input_values"
_keys_to_ignore_on_load_missing = [r"position_ids"]
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, UniSpeechSatGumbelVectorQuantizer):
module.weight_proj.weight.data.normal_(mean=0.0, std=1)
module.weight_proj.bias.data.zero_()
nn.init.uniform_(module.codevectors)
elif isinstance(module, UniSpeechSatPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, UniSpeechSatFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
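    # Worked example (added note): with a wav2vec2-style feature encoder using
    # conv_kernel=(10, 3, 3, 3, 3, 2, 2) and conv_stride=(5, 2, 2, 2, 2, 2, 2) — assumed
    # defaults — one second of 16 kHz audio (16000 samples) shrinks layer by layer via
    # floor((length - kernel) / stride) + 1 to 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49
    # frames, i.e. roughly one output frame every 20 ms.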
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
        # these two operations make sure that all positions up to the output length index are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
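        # added note: the scatter above writes a single 1 at the last valid frame of each
        # sequence; the flip -> cumsum -> flip below then propagates that 1 backwards so every
        # frame up to and including the last valid one becomes True in the boolean mask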
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (UniSpeechSatEncoder, UniSpeechSatEncoderStableLayerNorm, UniSpeechSatFeatureEncoder)):
module.gradient_checkpointing = value
UNISPEECH_SAT_START_DOCSTRING = r"""
    UniSpeechSat was proposed in [UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware
    Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen et al.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UniSpeechSatConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_SAT_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, such as
[microsoft/unispeech-sat-base-100h-libri-ft](https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft),
`attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For
such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware
that these models also yield slightly different results depending on whether `input_values` is padded or
not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top.",
UNISPEECH_SAT_START_DOCSTRING,
)
class UniSpeechSatModel(UniSpeechSatPreTrainedModel):
def __init__(self, config: UniSpeechSatConfig):
super().__init__(config)
self.config = config
self.feature_extractor = UniSpeechSatFeatureEncoder(config)
self.feature_projection = UniSpeechSatFeatureProjection(config)
self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = UniSpeechSatEncoderStableLayerNorm(config)
else:
self.encoder = UniSpeechSatEncoder(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Wav2Vec2BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Wav2Vec2BaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""UniSpeechSat Model with a quantizer and `VQ` head on top.""", UNISPEECH_SAT_START_DOCSTRING)
class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
def __init__(self, config: UniSpeechSatConfig):
super().__init__(config)
self.unispeech_sat = UniSpeechSatModel(config)
self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
self.quantizer = UniSpeechSatGumbelVectorQuantizer(config)
self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
self.dropout = nn.Dropout(config.final_dropout)
self.speaker_proj = nn.Linear(config.hidden_size, config.codevector_dim)
self.label_embeddings_concat = nn.Parameter(torch.FloatTensor(config.num_clusters, config.codevector_dim))
self.label_embeddings_concat.data.zero_()
self.layer_norm_for_extract = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if self.config.do_stable_layer_norm:
self.layer_norm_for_extract.requires_grad = False
# Initialize weights and apply final processing
self.post_init()
def set_gumbel_temperature(self, temperature: int):
"""
Set the Gumbel softmax temperature to a given value. Only necessary for training
"""
self.quantizer.temperature = temperature
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
        self.unispeech_sat.feature_extractor._freeze_parameters()
@staticmethod
def compute_contrastive_logits(
target_features: torch.FloatTensor,
negative_features: torch.FloatTensor,
predicted_features: torch.FloatTensor,
temperature: int = 1,
):
"""
Compute logits for contrastive loss based using cosine similarity as the distance measure between
`[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
"""
target_features = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
logits = logits.type_as(target_features)
# apply temperature
logits = logits / temperature
return logits
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=UniSpeechSatForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, UniSpeechSatForPreTrainingOutput]:
r"""
Returns:
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining
>>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base")
>>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base")
>>> # TODO: Add full pretraining example
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
transformer_features = outputs[0]
# quantize all (unmasked) extracted features and project to final vq dim
extract_features = self.dropout_features(outputs[1])
# TODO(PVP) - add pretraining logic and add to tests
logits = extract_features
loss = quantized_features = codevector_perplexity = None
# layer normalization (has no effect when `config.do_stable_layer_norm == False`)
# extract_features = self.layer_norm_for_extract(extract_features)
# quantized_features, codevector_perplexity = self.quantizer(extract_features)
#
# project quantized features twice
# quantized_features = self.project_q(quantized_features)
# quantized_features = self.project_hid(quantized_features)
#
# loss = None
# logits = quantized_features
if not return_dict:
if loss is not None:
return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return UniSpeechSatForPreTrainingOutput(
loss=loss,
logits=logits,
projected_states=transformer_features,
projected_quantized_states=quantized_features,
codevector_perplexity=codevector_perplexity,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
UNISPEECH_SAT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT
class UniSpeechSatForCTC(UniSpeechSatPreTrainedModel):
def __init__(self, config, target_lang=None):
super().__init__(config)
self.unispeech_sat = UniSpeechSatModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `UniSpeechSatForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
logger.info("By default `target_lang` is set to 'eng'.")
elif target_lang is not None:
self.load_adapter(target_lang)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech_sat.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_CTC_EXPECTED_OUTPUT,
expected_loss=_CTC_EXPECTED_LOSS,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
if labels.max() >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
# retrieve loss input_lengths from attention_mask
attention_mask = (
attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
# ctc_loss doesn't support fp16
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(
log_probs,
flattened_targets,
input_lengths,
target_lengths,
blank=self.config.pad_token_id,
reduction=self.config.ctc_loss_reduction,
zero_infinity=self.config.ctc_zero_infinity,
)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
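# Hedged usage sketch (added for illustration; the checkpoint name and `raw_audio` are assumptions).
# A typical CTC transcription with the class above: run the model on `input_values`, take the
# frame-wise argmax over the vocabulary, and let the tokenizer collapse repeats/blanks while decoding:
#
#   >>> import torch
#   >>> from transformers import AutoProcessor, UniSpeechSatForCTC
#   >>> processor = AutoProcessor.from_pretrained("microsoft/unispeech-sat-base-100h-libri-ft")
#   >>> model = UniSpeechSatForCTC.from_pretrained("microsoft/unispeech-sat-base-100h-libri-ft")
#   >>> inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")  # raw_audio: 1-D float array
#   >>> with torch.no_grad():
#   ...     logits = model(**inputs).logits
#   >>> predicted_ids = torch.argmax(logits, dim=-1)
#   >>> transcription = processor.batch_decode(predicted_ids)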
@add_start_docstrings(
"""
UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
like SUPERB Keyword Spotting.
""",
UNISPEECH_SAT_START_DOCSTRING,
)
class UniSpeechSatForSequenceClassification(UniSpeechSatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)"
)
self.unispeech_sat = UniSpeechSatModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->unispeech_sat
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->unispeech_sat
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech_sat.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
hidden_states[~padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
UniSpeech-SAT Model with a frame classification head on top for tasks like Speaker Diarization.
""",
UNISPEECH_SAT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT
class UniSpeechSatForAudioFrameClassification(UniSpeechSatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Audio frame classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)"
)
self.unispeech_sat = UniSpeechSatModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.num_labels = config.num_labels
self.init_weights()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech_sat.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_FRAME_CLASS_CHECKPOINT,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_FRAME_EXPECTED_OUTPUT,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss
class AMSoftmaxLoss(nn.Module):
def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
super(AMSoftmaxLoss, self).__init__()
self.scale = scale
self.margin = margin
self.num_labels = num_labels
self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
self.loss = nn.CrossEntropyLoss()
def forward(self, hidden_states, labels):
labels = labels.flatten()
weight = nn.functional.normalize(self.weight, dim=0)
hidden_states = nn.functional.normalize(hidden_states, dim=1)
cos_theta = torch.mm(hidden_states, weight)
psi = cos_theta - self.margin
onehot = nn.functional.one_hot(labels, self.num_labels)
logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
loss = self.loss(logits, labels)
return loss
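# Added note (illustrative; the dimensions below are assumptions): AMSoftmaxLoss acts as a
# drop-in criterion over L2-normalised speaker embeddings — the cosine logit of the true class
# is reduced by `margin` before the scaled cross-entropy is computed. A toy call:
#
#   >>> criterion = AMSoftmaxLoss(input_dim=512, num_labels=10)
#   >>> embeddings = torch.randn(8, 512)
#   >>> labels = torch.randint(0, 10, (8,))
#   >>> loss = criterion(embeddings, labels)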
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer
class TDNNLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states):
hidden_states = hidden_states.unsqueeze(1)
hidden_states = nn.functional.unfold(
hidden_states,
(self.kernel_size, self.in_conv_dim),
stride=(1, self.in_conv_dim),
dilation=(self.dilation, 1),
)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.kernel(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
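# Added design note: the unfold + nn.Linear combination above is functionally equivalent to a
# 1-D convolution over time with kernel size `tdnn_kernel[layer_id]` and dilation
# `tdnn_dilation[layer_id]` (and no padding); it simply stores the kernel as a Linear weight of
# shape (out_conv_dim, in_conv_dim * kernel_size) instead of using an nn.Conv1d module.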
@add_start_docstrings(
"""
UniSpeech-SAT Model with an XVector feature extraction head on top for tasks like Speaker Verification.
""",
UNISPEECH_SAT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->UniSpeechSat, wav2vec2->unispeech_sat, WAV_2_VEC_2->UNISPEECH_SAT
class UniSpeechSatForXVector(UniSpeechSatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.unispeech_sat = UniSpeechSatModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])
tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]
self.tdnn = nn.ModuleList(tdnn_layers)
self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)
self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)
self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)
self.init_weights()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech_sat.parameters():
param.requires_grad = False
def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the TDNN layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size in self.config.tdnn_kernel:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1)
return input_lengths
@add_start_docstrings_to_model_forward(UNISPEECH_SAT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_XVECTOR_CHECKPOINT,
output_type=XVectorOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_XVECTOR_EXPECTED_OUTPUT,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, XVectorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
for tdnn_layer in self.tdnn:
hidden_states = tdnn_layer(hidden_states)
# Statistic Pooling
if attention_mask is None:
mean_features = hidden_states.mean(dim=1)
std_features = hidden_states.std(dim=1)
else:
feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))
tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)
mean_features = []
std_features = []
for i, length in enumerate(tdnn_output_lengths):
mean_features.append(hidden_states[i, :length].mean(dim=0))
std_features.append(hidden_states[i, :length].std(dim=0))
mean_features = torch.stack(mean_features)
std_features = torch.stack(std_features)
statistic_pooling = torch.cat([mean_features, std_features], dim=-1)
output_embeddings = self.feature_extractor(statistic_pooling)
logits = self.classifier(output_embeddings)
loss = None
if labels is not None:
loss = self.objective(logits, labels)
if not return_dict:
output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return XVectorOutput(
loss=loss,
logits=logits,
embeddings=output_embeddings,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
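# Hedged usage sketch (added for illustration; the checkpoint name and the 0.86 threshold are
# assumptions, not defined in this file). Speaker verification with the x-vector head usually
# compares the cosine similarity of two utterance embeddings against a tuned threshold:
#
#   >>> import torch
#   >>> from transformers import AutoFeatureExtractor, UniSpeechSatForXVector
#   >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
#   >>> model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
#   >>> inputs = feature_extractor([audio_1, audio_2], sampling_rate=16000, return_tensors="pt", padding=True)
#   >>> with torch.no_grad():
#   ...     embeddings = model(**inputs).embeddings
#   >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
#   >>> similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
#   >>> same_speaker = similarity > 0.86  # threshold is application-specific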
|
[
"noreply@github.com"
] |
matrogersmtl.noreply@github.com
|
f1267a7802801874908f75a47237e375a12075ab
|
5478ec3c49bb4812849897a5050fc74149dcb6c1
|
/15-Find and Draw Contours .py
|
152cfd2bea0ce7720b56ae416ebc5cc1abec2605
|
[] |
no_license
|
UdayKiranPadhy/OpenCV
|
96ebffb82801041b1a503d6cb02943a857631b66
|
fb81ea9a148e79f67ac88abeb1176b52cbd8e9ed
|
refs/heads/main
| 2023-07-01T19:24:21.807742
| 2021-08-12T04:28:00
| 2021-08-12T04:28:00
| 395,193,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# A contour is the curve joining continuous points (along a boundary) that have the same colour
# used for object or shape detection
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('opencvlogo.jpg')
imgray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
# For better accuracy we use binary image
ret , threshold = cv.threshold(imgray,220,255,0)
contours , hierarchy = cv.findContours(threshold,cv.RETR_TREE,cv.CHAIN_APPROX_NONE)
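# Note (added): the two-value unpacking above assumes OpenCV 4.x; OpenCV 3.x returns three
# values (image, contours, hierarchy) from cv.findContours, so an extra variable would be
# needed on the left-hand side there.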
# contours is a list of numpy arrays holding the (x, y) points of each contour
# hierarchy contains the contour nesting (topology) information
print("Number of Countours = " + str(len(contours)))
cv.drawContours(img,contours,-1,(0,255,0),3)
cv.imshow("Original image",img)
cv.imshow("Grey image",imgray)
cv.imshow("Threshold image",threshold)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"noreply@github.com"
] |
UdayKiranPadhy.noreply@github.com
|
8641c37a5a132782ae60a999f89d1a590e6e76b6
|
97ce880b48a7156c965d6bbd3cb7df7a1cd4c0e6
|
/main.py
|
86a65a6cfe099c38a3af62a1695e8c335b2125fa
|
[] |
no_license
|
merveyubogluu/music_recommendation_system
|
2a77c9724cb1d8f32e38ec7dcfa1dfbcb8b34332
|
e255bb000726b7365d313fc65726144ad759f3a9
|
refs/heads/main
| 2023-05-31T12:09:22.338571
| 2021-07-05T08:28:05
| 2021-07-05T08:28:05
| 342,528,319
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from scipy.spatial import distance
data = pd.read_csv("SpotifyFeatures.csv")
data = data.drop(["track_id","key","mode","time_signature"],1)
# Song finder with song name and artist name
def find_song(word,artist):
a = 0
b = 0
for i in data["track_name"]:
if word.lower() in i.lower() and artist.lower() in data["artist_name"][a].lower():
print("Song Name: ",data["track_name"][a],", Artists: ",data["artist_name"][a])
b+=1
a+=1
if b == 0:
print("Nothing found. Please try something else :)")
# Preprocessing
df = data.copy()
df = df.drop(["artist_name","track_name"],1)
col = ['popularity', 'acousticness', 'danceability', 'duration_ms',
'energy', 'instrumentalness', 'liveness', 'loudness', 'speechiness',
'tempo', 'valence']
scaler = StandardScaler()
df[col] = scaler.fit_transform(df[col])
encoder = OneHotEncoder(sparse=False, handle_unknown="ignore")
enc = pd.DataFrame(encoder.fit_transform(np.array(df["genre"]).reshape(-1,1)))
enc.columns = df["genre"].unique()
enc.head()
df[enc.columns] = enc
df = df.drop("genre",1)
df.head()
df["name"] = data["track_name"]
df["artist"] = data["artist_name"]
df_2 = df.drop(["artist","name"],1)
def sim_track_find(word,artist):
a = 0
b = 0
song = []
indexes = []
for i in data["track_name"]:
if word.lower() in i.lower() and artist.lower() in data["artist_name"][a].lower():
song.append(df_2[a:a+1].values)
indexes.append(a)
b+=1
a+=1
if b == 0:
print("Nothing found. Please try something else :)")
return 0
return song[0][0], indexes[0]
def similar_tracks(data,number,song = "",artist = ""):
if (sim_track_find(song,artist) == 0):
return 0
else:
x=sim_track_find(song,artist)[0]
index = sim_track_find(song,artist)[1]
p = []
count=0
for i in df_2.values:
p.append([distance.cosine(x,i),count])
count+=1
p.sort()
song_names = df["name"]
artist_names = df["artist"]
print("\nSimilar songs to ",song_names[index]," by ", artist_names[index],"\n")
for i in range(1,number+1):
print(i,"- ",song_names[p[i][1]],", ",artist_names[p[i][1]])
song = input("Enter the song name (if you don't want to specify a song name please skip this): ")
artist = input("Enter the artist name (if you don't want to specify an artist name please skip this): ")
num = input("Number of song recommendations: ")
similar_tracks(df,int(num),song,artist)
|
[
"noreply@github.com"
] |
merveyubogluu.noreply@github.com
|
65a83a58c3e2b3d68719305a6fc7e24db803f398
|
33f4b82df582f0e372358c38148de556c30015c0
|
/bin/epylint
|
201e7ed30f9fc3d621d1e8708142d99a84671de3
|
[] |
no_license
|
waltergar/onlinepython
|
0cd3b2c92f6fee6507aad7e70d73efa30c7494ca
|
e6ffbca49513cd82bf432b0d36b8139bdf8d8065
|
refs/heads/master
| 2021-06-21T18:36:34.937686
| 2019-07-30T18:10:51
| 2019-07-30T18:10:51
| 199,703,209
| 1
| 0
| null | 2021-03-20T01:25:27
| 2019-07-30T18:06:32
|
Python
|
UTF-8
|
Python
| false
| false
| 269
|
#!/Volumes/MAC-DATA/Develop/PriceOptimization/Flask/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_epylint())
|
[
"apple@Apples-iMac.local"
] |
apple@Apples-iMac.local
|
|
1225096b6591b99d7d639025aca73c85e2cf413f
|
9036e22d7da8141e05012b8a25a3aa9e8028c95a
|
/HackerRank_Challenges/Simple Array Sum.py
|
c1c24dca21a169c216969821c3ccbe9fa7043a31
|
[] |
no_license
|
sarveshdakhane/Python
|
2500bc5fc1e9d447f9ed4527161a59db18fe668f
|
f9ef202c5758971d943d2d60cf9abeda36be59b1
|
refs/heads/master
| 2022-01-03T08:18:14.411362
| 2021-12-24T13:36:44
| 2021-12-24T13:36:44
| 251,586,887
| 0
| 0
| null | 2021-12-24T13:36:45
| 2020-03-31T11:46:23
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
import os
import sys
#
# Complete the simpleArraySum function below.
#
def simpleArraySum(ar):
i=0
t=0
for i in range(len(ar)):
t=t+ar[i]
return t
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
ar_count = int(input())
ar = list(map(int, input().rstrip().split()))
result = simpleArraySum(ar)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"sarvesh.dakhane@hotmail.com"
] |
sarvesh.dakhane@hotmail.com
|
df060491348d6f647078f3f9b2f7746d26d948f9
|
50f679273c36d45b740afe0a9254b64b72a0b1bb
|
/acessarBlastTerminal.py
|
248b503582485bb6784ad3a5211c43bb689c451c
|
[] |
no_license
|
rodrigorochag/scriptsBioinfo
|
fbbf57fdb0b7b45bbe734889dcd33218d1b7fac9
|
4155279a0ebbfb7ae9aef2d9a83e839de3e9f30f
|
refs/heads/master
| 2022-02-03T14:04:45.656260
| 2019-08-02T19:11:49
| 2019-08-02T19:11:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from Bio.Blast import NCBIWWW
from Bio import SeqIO
# Compare the sequences against the NCBI database
arq = SeqIO.read('/home/ivan/Documents/bioinformatics/aedes.fasta', format='fasta')
print('Searching the database....')
result = NCBIWWW.qblast('blastn','nt', arq.seq, format_type='Text')
print(result.read())
|
[
"noreply@github.com"
] |
rodrigorochag.noreply@github.com
|
2f48b86b98696ad8ea524ebc687ad19b1d7dca40
|
d9fb6c246965cbf290186268298859ddb913ee6e
|
/190819/ladder.py
|
fcb76fc06ebc1f5a4e1d93de8835e7e3f5a107c2
|
[] |
no_license
|
91hongppie/algorithm
|
1ca6d54de6eab252c708bf83835ace8a109d73fc
|
4c2fa8178e0ef7afbf0b736387f05cbada72f95d
|
refs/heads/master
| 2020-07-20T22:17:40.700366
| 2020-06-29T00:06:11
| 2020-06-29T00:06:11
| 206,717,677
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
import sys
sys.stdin = open('input_ladder.txt', 'r')
board = [[] for _ in range(100)]
for num in range(10):
num = int(input())
for j in range(100):
board[j] = list(map(int, input().split()))
first = board[99].index(2)
i, j = 98, first
while i >= 0:
if j < 99 and board[i][j+1] == 1:
while j < 99 and board[i][j+1] == 1:
j += 1
i -= 1
elif board[i][j-1] == 1:
while board[i][j-1] == 1 and j > 0:
j -= 1
i -= 1
elif board[i][j] == 1 and board[i-1][j] == 1:
i -= 1
print('#{} {}'.format(num, j))
|
[
"91hongppie@gmail.com"
] |
91hongppie@gmail.com
|
08e05c1a666cdefa4e9f91780880c51b03938fc0
|
2055cdd8c7dc9c5c9253836091a18af0e55065d1
|
/app/tests/test_support.py
|
e5e2db6b5c426ab50f43d68dceab832c0d9591a8
|
[
"MIT"
] |
permissive
|
eugenerbl/flask-aws-template
|
6e37822f885384e41f7c6faa3cfaadcdd1b57aa4
|
913f4f55aba3a3fa906a8b8390be3f4a0e65140f
|
refs/heads/master
| 2023-03-07T17:17:52.014482
| 2021-02-24T23:54:51
| 2021-02-24T23:54:51
| 342,068,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,019
|
py
|
import glob
import os
from unittest.mock import patch
from app.models import User
from app.tests import conftest
def delete_test_file():
files = glob.glob("*bobby.json")
os.remove(files[0])
def test_contact_us_logged_in_user(test_client_csrf, init_database):
u = User.query.filter_by(username=conftest.TEST_USER_USERNAME).first()
with patch('flask_login.utils._get_user') as current_user:
current_user.return_value.id = u.id
current_user.return_value.get_id.return_value = u.id
current_user.return_value.is_authenticated.return_value = True
params = dict(
name="Bobby Chariot",
email="bobby@chariot.email",
subject="Feedback",
message="Hello to you all",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 302
# then delete the file
delete_test_file()
def test_contact_us_anon(test_client_csrf):
params = dict(
name="Bobby Chariot",
email="bobby@chariot.email",
subject="Feedback",
message="Hello to you all",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 302
delete_test_file()
def test_contact_us_missing_email(test_client_csrf):
params = dict(
name="Bobby Chariot",
email="",
subject="Feedback",
message="Hello to you all",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 200
assert "This field is required" in str(response.data)
def test_contact_us_missing_name(test_client_csrf):
params = dict(
name="",
email="bobby@chariot.email",
subject="Feedback",
message="Hello to you all",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 200
assert "This field is required" in str(response.data)
def test_contact_us_missing_message(test_client_csrf):
params = dict(
name="Bobby Chariot",
email="bobby@chariot.email",
subject="Feedback",
message="",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 200
assert "This field is required" in str(response.data)
def test_contact_us_missing_subject(test_client_csrf):
params = dict(
name="Bobby Chariot",
email="bobby@chariot.email",
subject="",
message="Hello to you all",
csrf_token=test_client_csrf.csrf_token)
response = test_client_csrf.post('/support/contact_us', data=params)
assert response.status_code == 200
assert "This field is required" in str(response.data)
|
[
"35496054+eugenerbl@users.noreply.github.com"
] |
35496054+eugenerbl@users.noreply.github.com
|
8d14840ff520ee9b058a38fea78d1f7043dd9b71
|
e9de427c184d518b8230ce0a87ea45b19374869b
|
/silvia/11_class/coche/model.py
|
450391d9e3a9fb06cf588fc232b47d55d4e6c722
|
[] |
no_license
|
andresalbertoramos/Master-en-Programacion-con-Python_ed2
|
a78eea1ee2d5545384f6bc351369e75631f04e6c
|
a5ec6418fadedfab6f6cc56e581b41ca61d5215f
|
refs/heads/master
| 2022-03-23T14:17:27.038061
| 2019-12-18T18:16:43
| 2019-12-18T18:16:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
class Car(object):
    def __init__(self, marca, modelo, color):
        self.marca = marca
        self.modelo = modelo
        self.color = color

    def __repr__(self):
        return f'Marca: {self.marca}. Modelo: {self.modelo}. Color: {self.color}'
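# A minimal usage sketch (not from the original file; the marca/modelo/color values
# below are hypothetical) showing how __repr__ formats an instance:
if __name__ == '__main__':
    coche = Car('Seat', 'Ibiza', 'rojo')
    print(coche)  # prints: Marca: Seat. Modelo: Ibiza. Color: rojo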
|
[
"sarnaizgarcia@gmail.com"
] |
sarnaizgarcia@gmail.com
|
dcf60cde5a653561e24ac6476c0ca78ab82f6068
|
7385fcddd2bf160dc65688c06ac1d15ef941a23f
|
/i_preprocessing.py
|
7f24d637f4e1f6805145b423e088a96d3b35ab0d
|
[] |
no_license
|
MathLaci08/my-kaggle
|
082d7cb876e770c78f3b11dc7249ae49a49e62bc
|
4474e1f1c24b26822bbf86f76ed75fd8ad7eb9e9
|
refs/heads/master
| 2023-01-20T20:42:00.719292
| 2020-12-01T12:56:04
| 2020-12-01T12:56:04
| 244,465,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,262
|
py
|
import abc
import logging
import pathlib
import importlib
from typing import Tuple
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from scipy.stats import skew
from scipy.special import boxcox1p
n_components = 75
box_cox_lambda = 0.15
class IPreProcessing(abc.ABC):
"""
Class for data pre-processing related methods.
"""
def __init__(self):
"""
Class instance initializer method.
"""
try:
self.pp_X = None
self.pp_X_test = None
self.path = importlib.import_module(self.__module__).__file__
train_csv = pathlib.Path(self.path, "..\\data\\train.csv").resolve()
test_csv = pathlib.Path(self.path, "..\\data\\test.csv").resolve()
# read data from the provided csv files
self.X = pd.read_csv(train_csv)
self.y = None
self.X_test = pd.read_csv(test_csv)
self._step_index = -1
except FileNotFoundError as e:
logging.error("Please download the data, before creating instance!")
raise e
def _index(self):
self._step_index += 1
return self._step_index
@abc.abstractmethod
def process_data(self) -> None:
"""
Method for producing the preprocessed data. If the data set hasn't been preprocessed before, or the saved result
is deliberately ignored, the method calls all the functions necessary for pre-processing.
"""
pass
def load_data(self, with_pca: bool = False) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Loads the previously processed data from the saved csv files.
:param with_pca: if True, the function will return a data set on which PCA has already been performed
:return: train and test set if data is preprocessed, else None.
"""
try:
logging.info('Trying to load data...')
prefix = 'pp_pca' if with_pca else 'pp'
pp_train_csv = pathlib.Path(self.path, f"..\\data\\{prefix}_train.csv").resolve()
pp_test_csv = pathlib.Path(self.path, f"..\\data\\{prefix}_test.csv").resolve()
self.pp_X = pd.read_csv(pp_train_csv)
self.pp_X_test = pd.read_csv(pp_test_csv)
logging.info('DONE!')
return self.pp_X, self.pp_X_test
except FileNotFoundError:
logging.warning("Data is not preprocessed. Calling process_data() function...")
self.process_data()
return self.load_data(with_pca=with_pca)
@abc.abstractmethod
def _separate_target(self) -> np.ndarray:
"""
Private function for some preliminary steps. Drops non-labelled data, separates y from X and the test
identifiers from the test set. Also converts the numeric type categorical features to 'object' type.
:return: The test identifiers for future usage.
"""
pass
@abc.abstractmethod
def _detect_outliers(self) -> np.ndarray:
"""
Private function for detecting outliers in the data set. First determines those numerical variables which
have many unique values, and then plots the target variable as a function of these features. Based on the
graphs it drops the outliers from the data, resets the indices of X and y, and finally plots the functions again.
:return: Set of all numerical feature names.
"""
pass
def _normalize_target(self) -> None:
"""
This private function checks the distribution of the target variable and then, if necessary, applies an
appropriate transformation. Finally it plots the distribution again.
"""
pass
@abc.abstractmethod
def _imputer(self) -> None:
"""
Private function for dealing with missing values in the data sets. The method first groups the feature
names by how their missing data should be imputed, then fills the columns with appropriate values.
"""
pass
def _correlation_map(self):
"""
Private function for plotting the correlation map between the features.
"""
logging.info(f'#{self._index()} - Checking correlation between features...')
# correlation map between the remaining features
corr_map = self.X.join(self.y).corr()
plt.subplots(figsize=(12, 9))
sns.heatmap(corr_map, vmax=0.9, square=True)
plt.show()
logging.info(f'#{self._step_index} - DONE!')
@abc.abstractmethod
def _encode_categories(self) -> None:
"""
This private method encodes the categorical variables. Label encoding is used for ordinal categories and
one-hot encoding for nominal categories.
"""
pass
def _transform_skewed_features(self, numerical_vars: np.ndarray) -> None:
"""
Private method for transforming features with high skew.
:param numerical_vars: Set of all originally numerical variables.
"""
logging.info(f'#{self._index()} - Determine and transform skewed features...')
# check the skew of all numerical features
skewed_features = self.X[numerical_vars].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
logging.info("Skew in numerical features: \n")
skewness = pd.DataFrame({'Skew': skewed_features})
logging.info(skewness)
# transform skewed features
skewed_features = skewness[abs(skewness.Skew) > 0.75].index
logging.info(f"There are {skewed_features.size} skewed features")
for feature in skewed_features:
self.X[feature] = boxcox1p(self.X[feature], box_cox_lambda)
self.X_test[feature] = boxcox1p(self.X_test[feature], box_cox_lambda)
# check the skew of all numerical features again
skewed_features = self.X[numerical_vars].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
logging.info("Skew in numerical features: \n")
skewness = pd.DataFrame({'Skew': skewed_features})
logging.info(skewness)
logging.info(f'#{self._step_index} - DONE!')
def _standardize_data(self) -> None:
"""
This private function's job is the standardization of all the variables.
"""
logging.info(f'#{self._index()} - Standardizing variables...')
# standardize data
std_scaler = StandardScaler(copy=False)
self.X = pd.DataFrame(std_scaler.fit_transform(self.X), columns=self.X.columns)
self.X_test = pd.DataFrame(std_scaler.transform(self.X_test), columns=self.X.columns)
logging.info(f'#{self._step_index} - DONE!')
def _pca(self) -> None:
"""
This private function performs principal component analysis on the data and, as a result, reduces its
dimensionality.
"""
logging.info(f'#{self._index()} - Performing principal component analysis...')
# dimension reduction
logging.info(f"Number of features before PCA: {self.X.shape[1]}")
pca = PCA(n_components=n_components)
self.X = pd.DataFrame(
pca.fit_transform(self.X),
columns=["PCA" + str(n) for n in range(1, n_components + 1)]
)
self.X_test = pd.DataFrame(
pca.transform(self.X_test),
columns=["PCA" + str(n) for n in range(1, n_components + 1)]
)
logging.info(f"Number of features after PCA: {self.X.shape[1]}")
logging.info(f'#{self._step_index} - DONE!')
def _save_data(self, prefix: str = None) -> None:
"""
Private method for saving the preprocessed data to csv files.
:param prefix: prefix for the file name, if necessary
"""
logging.info(f'#{self._index()} - Saving processed data...')
prefix = 'pp_' + prefix if prefix else 'pp'
self.X.to_csv(f'data\\{prefix}_train.csv', index=False)
self.X_test.to_csv(f'data\\{prefix}_test.csv', index=False)
self.already_preprocessed = True
logging.info(f'#{self._step_index} - DONE!')
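# A minimal sketch of a concrete subclass (editor-added illustration, not part of the
# original module): the class name, the 'SalePrice' target column and the 'Id' column
# are hypothetical, and the method bodies only show the expected shape of a pipeline
# that load_data() can fall back to via process_data().
class HousePricesPreProcessing(IPreProcessing):
    def process_data(self) -> None:
        self._separate_target()
        self._imputer()
        self._encode_categories()
        self._standardize_data()
        self._save_data()

    def _separate_target(self) -> np.ndarray:
        self.y = self.X.pop('SalePrice')      # hypothetical target column
        return self.X_test.pop('Id').values   # hypothetical identifier column

    def _detect_outliers(self) -> np.ndarray:
        return self.X.select_dtypes(include=[np.number]).columns.values

    def _imputer(self) -> None:
        self.X = self.X.fillna(0)
        self.X_test = self.X_test.fillna(0)

    def _encode_categories(self) -> None:
        self.X = pd.get_dummies(self.X)
        self.X_test = pd.get_dummies(self.X_test)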
|
[
"laci.szilas@gmail.com"
] |
laci.szilas@gmail.com
|
123d59a28954c31a3bc71c2027f41417fd8f536a
|
18a29196e0e958ca8ab6ae0565917f8e690cac69
|
/client.py
|
31b17793a69e3f6db2101029ab4f0d602634b79f
|
[
"MIT"
] |
permissive
|
Joee1995/PTTS-WebAPP
|
df3745cc430ee450c9d0b33d8c37a2d5fd450707
|
e5a25707e45b5b603e49529a9f0ce5c65eaa79ab
|
refs/heads/main
| 2023-04-03T06:37:33.033296
| 2021-04-12T10:10:01
| 2021-04-12T10:10:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
import os
import os.path as osp
import requests
from urllib.parse import urlencode
import json, time, uuid
import numpy as np
from scipy.io.wavfile import write
url = "http://127.0.0.1:5000"
payload = {
"text": "To install precompiled package of eSpeak NG on Linux, use standard package manager of your distribution.",
}
payload = urlencode(payload)
outputs_dir = "outputs"
os.makedirs(outputs_dir, exist_ok=True)
print("="*12 + " POST TEST " + "="*12)
headers = {
'content-type': "application/x-www-form-urlencoded"}
response = requests.request("POST", url+"/api/mytts", data=payload, headers=headers)
if response.status_code == 200:
    content = response.content.decode('utf-8')
    content = json.loads(content)
    wave, sr = content['wave'], content['sr']
    print('Saving audio...')
    filename = osp.join(outputs_dir, f"{time.strftime('%Y-%m-%d')}_{uuid.uuid4()}.wav")
    write(filename, sr, np.array(wave, dtype=np.float32))
    print(f"Audio saved to {outputs_dir}. Done.")
    print("POST TEST SUCCEEDED!")
else:
    print("POST TEST FAILED!")
print("="*12 + " GET TEST " + "="*12)
response = requests.request("GET", url+"/api/mytts?"+payload, headers=headers)
if response.status_code == 200:
    content = response.content.decode('utf-8')
    content = json.loads(content)
    wave, sr = content['wave'], content['sr']
    print('Saving audio...')
    filename = osp.join(outputs_dir, f"{time.strftime('%Y-%m-%d')}_{uuid.uuid4()}.wav")
    write(filename, sr, np.array(wave, dtype=np.float32))
    print(f"Audio saved to {outputs_dir}. Done.")
    print("GET TEST SUCCEEDED!")
else:
    print("GET TEST FAILED!")
|
[
"atomicoo95@gmail.com"
] |
atomicoo95@gmail.com
|
02c2173deed3b68a30b4cff87343685963c96958
|
e0aa52e8c070fe33abf2c01a067b8a5b31da827a
|
/Packages/LiveReload/__init__.py
|
3aebef06ad886cf835c500b5b3cdf523e8a98d73
|
[] |
no_license
|
ashed/ST-my-settings
|
b2f0ed90135e994fd4c3d66aa2efbd94dec017b9
|
8b61fac8107d17d21b7eeb054174fdece805b9e7
|
refs/heads/master
| 2023-07-10T18:45:34.177128
| 2021-06-16T05:05:45
| 2021-08-16T23:09:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
    from .LiveReload import *
    from .server import *
except ValueError:
    from LiveReload import *
    from server import *
|
[
"jfcherng@gmail.com"
] |
jfcherng@gmail.com
|
81a7ec08ade9b3863913143bf961cb17756f5e8d
|
93b6823589c9f58aecf6dc819cbbed4247c7b022
|
/django_header_auth/models.py
|
7e40d1ccba68d2b44c080d9a16ada21c642c3bd8
|
[
"MIT"
] |
permissive
|
paiuolo/django-header-auth
|
25ddc1a8681609f175341a2a0b08a26a2235ba15
|
279d41c125445183ce29bad373b88cd17e2e0e82
|
refs/heads/master
| 2021-07-17T20:17:53.061807
| 2017-10-24T19:13:14
| 2017-10-24T19:13:14
| 103,425,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
from django.db import models
from django.contrib.auth import models as auth_models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.conf import settings
from .functions import create_uuid, domain_email_extract
class ConsumerManager(auth_models.BaseUserManager):
def create_user(self, domain, email, username, password=None, **extra_fields):
"""
Creates and saves a User with the given username, email and
password.
"""
now = timezone.now()
if not domain:
raise ValueError('Users must have a domain')
if not email:
raise ValueError('Users must have an email address')
if not username:
raise ValueError('Users must have a username field')
print("creating user", "with domain", domain,
"email", email, 'username', username)
consumer = self.model(
domain=domain, email=email, username=username,
is_staff=False, is_active=True,
is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
if password:
consumer.set_password(password)
consumer.save(using=self._db)
return consumer
def create_superuser(self, domain, email, username, password, **extra_fields):
u = self.create_user(domain, email, username, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class Consumer(auth_models.AbstractUser):
uuid = models.CharField(max_length=64, default=create_uuid, unique=True)
updated_at = models.DateTimeField(_('date updated'), null=True, blank=True)
domain = models.CharField(_('consumer domain'), max_length=255)
email = models.EmailField(_('email address'))
objects = ConsumerManager()
DOMAIN_FIELD = 'domain'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = ['domain', 'email']
@property
def is_alive(self):
return self.is_active
class Meta:
unique_together = (("domain", "email"),)
def get_full_name(self):
full_name = '%s %s' % (self.domain, self.email)
return full_name.strip()
def get_short_name(self):
return self.get_full_name()
def __str__(self):
return self.get_full_name()
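# A short usage sketch (editor-added; the domain, email and username values are
# hypothetical). create_user() requires all three fields, and Meta.unique_together
# makes the (domain, email) pair unique:
#
#     consumer = Consumer.objects.create_user(
#         domain='example.org', email='jane@example.org', username='jane')
#     print(consumer.get_full_name())   # -> 'example.org jane@example.org'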
|
[
"paiuolo@gmail.com"
] |
paiuolo@gmail.com
|
850826193bd88e83601ef9de6ed50645d745c700
|
7e6ecf52e90d618ebb2df7be864e1370543540a8
|
/32/onlyaccounts/message/models.py
|
cefd277962ffafb887b021668a79d47847d7c2af
|
[] |
no_license
|
rangai/draft_0
|
76be571d40043b98c9faa830a5924b4886e9d975
|
bf7ff892b167ebae0ad9378f9aebd0b3cf3c8e48
|
refs/heads/main
| 2023-08-27T03:58:49.876422
| 2021-11-03T07:22:31
| 2021-11-03T07:22:31
| 397,608,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.conf import settings
from django.db import models
# Create your models here.
|
[
"fibo.112358132134@gmail.com"
] |
fibo.112358132134@gmail.com
|
4d1f75bdf7142fa391f263f38bed19200aa5562b
|
46aa44ec8afc1128883d3807b179104f34342fc7
|
/lab6/untitled8/app/admin.py
|
19681c93d1ee27d7aa260d2398ac8441192b45a1
|
[] |
no_license
|
Lisobol/lab
|
639010c431579479fd8a6d74e0188e686bbcde77
|
8c0eb3ee403747fd629edb5fcd5ad3a500ae1125
|
refs/heads/master
| 2021-09-26T23:50:14.102419
| 2018-11-04T20:42:51
| 2018-11-04T20:42:51
| 105,984,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
@admin.register(User1)
class UserAdmin(admin.ModelAdmin):
empty_value_display = 'null'
list_display = ('last_name', 'first_name', 'email', 'phone', 'passport', 'birthday')
list_filter = ('last_name',)
search_fields = ['last_name', 'first_name', 'email']
class BetTeam(admin.TabularInline):
model = BetTeam
extra = 1
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
empty_value_display = 'null'
list_display = ('team_name','rating','sport','number_of_players')
list_filter = ('team_name',)
search_fields = ['team_name','sport']
inlines = (BetTeam,)
def bets(self, request):
bets = []
for s in BetTeam.objects.filter(team=request.name):
bets.append(s)
return bets
@admin.register(Bet)
class BetAdmin(admin.ModelAdmin):
empty_value_display = 'null'
def username(self, obj):
return "{}".format(obj.user)
inlines = (BetTeam,)
list_display = ('id', 'username', 'date', 'amount')
list_filter = ('id',)
search_fields = ['username', 'date', 'amount']
|
[
"ls1997@yandex.ru"
] |
ls1997@yandex.ru
|
a9d4319c57ebadc1c7eee898ed1ce3ab67813f74
|
eeaa8b6e081ed0f6e9e8eda5e44a717615bb6232
|
/salesforce_api/models/retrieve.py
|
c5c264eb8effe897d228ecd886726a6f77898143
|
[
"MIT"
] |
permissive
|
felixlindstrom/python-salesforce-api
|
3e0db77a59157ca9b5049296ba035733efd09569
|
37ed67bc46648a102c74f5d6fec0f45a9db79601
|
refs/heads/master
| 2021-11-21T16:17:45.137594
| 2021-06-23T18:25:33
| 2021-06-23T18:25:33
| 170,099,881
| 30
| 16
|
MIT
| 2021-11-17T08:44:39
| 2019-02-11T09:14:02
|
Python
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
from typing import List
from . import base
class Options(base.Model):
    def __init__(self):
        self.single_package = True
        self.unpackaged = []


class StatusMessage(base.Model):
    def __init__(self, file: str, message: str):
        self.file = file
        self.message = message


class Status(base.Model):
    def __init__(self, status: str, error_message: str, messages: List[StatusMessage] = None):
        self.status = status
        self.error_message = error_message
        self.messages = messages or []

    def append_message(self, message: StatusMessage):
        self.messages.append(message)
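# A short usage sketch (editor-added; the file and message values are hypothetical):
#
#     status = Status(status='Failed', error_message='Deploy failed')
#     status.append_message(StatusMessage(file='classes/Foo.cls', message='Unexpected token'))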
|
[
"felix.lindstrom@bambora.com"
] |
felix.lindstrom@bambora.com
|
c2b188f8e938f833a46055227a40ee3981a8ce20
|
9eeff5ad1aa3b8982326015df0f0adc62011c732
|
/get_ids.py
|
9735e076ef3b07a035873bd08a532a0d45ead382
|
[] |
no_license
|
gkuwanto/page_id_pair_extract
|
6788226cd64b1725ec3f54058261eab9b0aeebeb
|
bc0196e4a092b4da0fb7dab9e0ce4bf94d79f4a9
|
refs/heads/master
| 2022-11-30T17:19:40.018330
| 2020-08-10T09:40:31
| 2020-08-10T09:40:31
| 286,238,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
import requests
import pandas as pd
import tqdm
import sys
def get_id_from_title(title):
    possibility = list(requests.get(f'http://en.wikipedia.org/w/api.php?action=query&titles={title}&format=json').json()['query']['pages'].keys())
    if len(possibility) > 2:
        print(title)
    return int(possibility[0])

df = pd.read_csv(sys.argv[1], names=["ll_from", "ll_lang", "ll_title"])
print(df.head())
en_ids = []
for title in tqdm.tqdm(df['ll_title']):
    try:
        en_ids.append(get_id_from_title(title))
    except Exception:
        en_ids.append(-1)
df['ll_target_id'] = en_ids
df.to_csv('page_id_pair.csv', index=False)
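# Example invocation (the input file name is hypothetical): the script reads a CSV
# with the columns ll_from, ll_lang and ll_title and writes page_id_pair.csv to the
# working directory.
#
#     python get_ids.py langlinks.csv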
|
[
"gkuwanto@gmail.com"
] |
gkuwanto@gmail.com
|
0527e4a7f8bf195dad7c6801f9d564aa1bc20e32
|
9f1d78698e6f5588caf9ee8241fac2739339417b
|
/scripts/gen_barcode_params.py
|
eb6578e6a064b20dafe52ecc217a5730e822c492
|
[] |
no_license
|
rstickels/slideseq-tools
|
172de9768944617ecd0e902544ead08cedf8e4d4
|
61e8b95b102e7f7b562d2c7a926f91990c3c3199
|
refs/heads/master
| 2023-02-20T10:34:30.816529
| 2021-01-05T19:12:14
| 2021-01-05T19:12:14
| 327,094,423
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
#!/usr/bin/python
# This script is to generate barcode_params.txt that is needed by extracting Illumina barcodes
import sys
import getopt
import csv
def main(argv):
inputfile = ''
outputfile = ''
lane = ''
try:
opts, args = getopt.getopt(argv,"hi:o:l:",["ifile=","ofile=","lane="])
except getopt.GetoptError:
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-l", "--lane"):
lane = arg
fout = open(outputfile,'w')
title = 'barcode_sequence_1\tlibrary_name\tbarcode_name\n';
fout.write(title)
with open(inputfile, 'r') as fin:
reader = csv.reader(fin, delimiter='\t')
idx_LANE = -1
idx_SAMPLE_BARCODE = -1
idx_LIBRARY = -1
idx_BARCODE_NAME = -1
i = 1
for row in reader:
if (i == 1):
if ('lane' in row):
idx_LANE = row.index('lane')
if ('sample_barcode' in row):
idx_SAMPLE_BARCODE = row.index('sample_barcode')
if ('library' in row):
idx_LIBRARY = row.index('library')
if ('barcode_name' in row):
idx_BARCODE_NAME = row.index('barcode_name')
else:
if (row[idx_LANE] != lane):
continue
str = ''
if (idx_SAMPLE_BARCODE >= 0):
str += row[idx_SAMPLE_BARCODE] + '\t'
else:
str += '\t'
if (idx_LIBRARY >= 0):
str += row[idx_LIBRARY] + '\t'
else:
str += '\t'
if (idx_BARCODE_NAME >= 0):
str += row[idx_BARCODE_NAME] + '\n'
else:
str += '\n'
fout.write(str)
i = i + 1
fin.close()
fout.close()
if __name__ == "__main__":
main(sys.argv[1:])
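# Example invocation (file names are hypothetical; -i/-o/-l are the getopt flags
# parsed in main() above):
#
#     python gen_barcode_params.py -i sample_metadata.txt -o barcode_params.txt -l 1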
|
[
"noreply@github.com"
] |
rstickels.noreply@github.com
|
d010d2bce8a7ddf52123b43dd5dc2fff707ccf9c
|
f7ec0c3a56773d213ccd1a87c479c9afd6b3f6ed
|
/sabz/Scripts/django-admin.py
|
4b4cda2d2fa38c749822c5b5966290581ae4314b
|
[] |
no_license
|
kimiya-ab/Armsghsn-Sabz
|
1736f07e639e064340a2bbce256d3e866471a5b6
|
4abf675d2b0034e57ea3e117141b075c56663d36
|
refs/heads/master
| 2023-08-21T03:34:51.472447
| 2021-10-31T16:04:34
| 2021-10-31T16:04:34
| 408,550,695
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
#!c:\users\asus\desktop\swan tech\armaghan sabz\sabz\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"kimiyaabdollahi@yahoo.com"
] |
kimiyaabdollahi@yahoo.com
|
4bb9d20ca5b30eaa77f4944cafb8edd46792cbfa
|
2b21259e183426e86660b71a9b8f11c13bd452ab
|
/Python/func.py
|
597a3e680e407b04b8fd6556b8d0f1c6e990f3b5
|
[] |
no_license
|
D3nii/Random
|
68f67aaedbfa0edda7d2541465d75670ac0fa2cd
|
e6ce3acb9c4e373faca264001012a1468a73915b
|
refs/heads/master
| 2021-01-03T01:08:13.087455
| 2020-07-08T14:14:06
| 2020-07-08T14:14:06
| 239,850,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
def uppercase_and_reverse(text):
    stru = text.upper()
    stru1 = stru[::-1]
    return stru1
print(uppercase_and_reverse("Banana"))
|
[
"danyal.jcc@gmail.com"
] |
danyal.jcc@gmail.com
|
bac173615703f2fcbf60f550a41931b31c1e0afd
|
eab4f0fa4b726455c3a3b246d30e2417472a9d09
|
/Part4/example4.7.1.py
|
24130a287b3fec26ab4ef0be01c742021c70d9c5
|
[] |
no_license
|
bigMathGit/Naver_scraping
|
11c5d7a1c98de1c47558b8159e016b4baaf22169
|
97371cb77a7444c735c375f4e0394b96ea8fa631
|
refs/heads/master
| 2020-03-22T04:46:52.713860
| 2018-08-07T03:02:21
| 2018-08-07T03:02:21
| 139,520,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
import json
jsonString = """{"arrayOfNums":[{"number":1},{"number":2},{"number":3}],"arrayOfFruits":[{"fruit":"apple"}, {"fruit":"banana"}, {"fruit":"pear"}]}"""
jsonObj = json.loads(jsonString)
print(jsonObj.get("arrayOfNums"))
print(jsonObj.get("arrayOfNums")[1])
print(jsonObj.get("arrayOfNums")[1].get("number")+jsonObj.get("arrayOfNums")[2].get("number"))
print(jsonObj.get("arrayOfFruits")[2].get("fruit"))
|
[
"wkdrl9569@gmail.com"
] |
wkdrl9569@gmail.com
|
31f167447802fe56f8855461d347bd71c52bfbf5
|
96ec573d8117ac4a6df15c1229a99aa26fb20b8f
|
/lldb/packages/Python/lldbsuite/test/functionalities/gdb_remote_client/TestRecognizeBreakpoint.py
|
c112c588625814ab7d2b9d5c1eff106526e3d20e
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
ponsheng/llvm-project
|
72d808e1547635996b2d9c6cb9a0b57e5fe35254
|
b72664fd21cc1e25cd8420eb37eae3a701976394
|
refs/heads/master
| 2022-12-23T20:25:14.173521
| 2019-06-29T01:53:26
| 2019-06-29T01:53:26
| 194,372,301
| 0
| 1
| null | 2019-06-29T06:24:23
| 2019-06-29T06:24:23
| null |
UTF-8
|
Python
| false
| false
| 5,370
|
py
|
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from gdbclientutils import *
class TestRecognizeBreakpoint(GDBRemoteTestBase):
""" This tests the case where the gdb-remote server doesn't support any
of the thread-info packets, and just tells which thread got the stop
signal with:
T05thread:01;
There was a bug in lldb that we would set the stop reason from this
packet too early - before we had updated the thread list. So when we
later updated the thread list, we would throw away this info. Normally
we would be able to reconstruct it from the thread info, but not if the
stub doesn't support it """
def test(self):
class MyResponder(MockGDBServerResponder):
def __init__(self):
MockGDBServerResponder.__init__(self)
self.thread_info_count = 0
self.after_cont = False
self.current_thread = 0
def cont(self):
# Simulate process stopping due to a breakpoint:
self.after_cont = True
return "T05thread:01;"
def vCont(self, packet):
self.after_cont = True
return "T05thread:01;"
def haltReason(self):
return "T02thread:01;"
def threadStopInfo(self, num):
return ""
def QThreadSuffixSupported(self):
return ""
def QListThreadsInStopReply(self):
return ""
def setBreakpoint(self, packet):
return "OK"
def qfThreadInfo(self):
return "m1"
def qsThreadInfo(self):
if (self.thread_info_count % 2) == 0:
str = "m2"
else:
str = "l"
self.thread_info_count += 1
return str
def readRegisters(self):
if self.after_cont and self.current_thread == 1:
return "c01e990080ffffff"
else:
return "badcfe10325476980"
def readRegister(self, regno):
return ""
def qXferRead(self, obj, annex, offset, length):
if annex == "target.xml":
return """<?xml version="1.0"?>
<target version="1.0">
<architecture>i386:x86-64</architecture>
<feature name="org.gnu.gdb.i386.core">
<reg name="rip" bitsize="64" regnum="0" type="code_ptr" group="general"/>
</feature>
</target>""", False
else:
return None, False
def selectThread(self, op, thread):
if op != 'g':
return ''
self.current_thread = thread
return "OK"
def other (self, packet):
if packet == "vCont?":
return "vCont;c;C;s;S"
return ''
python_os_plugin_path = os.path.join(self.getSourceDir(),
'operating_system_2.py')
command ="settings set target.process.python-os-plugin-path '{}'".format(
python_os_plugin_path)
self.runCmd(command)
self.server.responder = MyResponder()
target = self.dbg.CreateTarget("")
process = self.connect(target)
bkpt = target.BreakpointCreateByAddress(0xffffff8000991ec0)
self.assertEqual(bkpt.GetNumLocations(), 1, "Fake breakpoint was resolved.")
# Get the initial stop, and we should have two threads.
num_threads = len(process.threads)
self.assertEqual(num_threads, 2, "Got two threads")
thread_0 = process.threads[0]
self.assertEqual(thread_0.GetStopReason(), 1, "Thread_0 stopped for no reason")
self.assertEqual(thread_0.GetName(), "one", "Thread_0 is called one")
thread_1 = process.threads[1]
self.assertEqual(thread_1.GetStopReason(), 5, "Thread_0 stopped for SIGSTOP")
self.assertEqual(thread_1.GetName(), "two", "Thread_0 is called two")
# Now continue and we will fake hitting a breakpoint.
process.Continue()
self.assertEqual(process.GetState(),lldb.eStateStopped, "Process is stopped")
num_threads = len(process.threads)
self.assertEqual(num_threads, 2, "Got two threads")
thread_0 = process.threads[0]
self.assertEqual(thread_0.GetStopReason(), 1, "Thread_0 stopped for no reason")
self.assertEqual(thread_0.GetName(), "one", "Thread_0 is called one")
thread_1 = process.threads[1]
self.assertEqual(thread_1.GetStopReason(), 3, "Thread_0 stopped for SIGTRAP")
self.assertEqual(thread_1.GetName(), "three", "Thread_0 is called three")
self.assertTrue(thread_1.IsValid(), "Thread_1 is valid")
self.assertEqual(thread_1.GetStopReason(), lldb.eStopReasonBreakpoint, "Stopped at breakpoint")
|
[
"jingham@apple.com"
] |
jingham@apple.com
|
34deffc77accfae3728f39824821f3ad812c61a9
|
ad3fb446d310e7de5f929ca788a3c955ca521e62
|
/binary-tree/max-depth.py
|
64933cbc4d369af9cab46179d1b6a597b542979f
|
[] |
no_license
|
markplotlib/data-structures
|
7c9491f09375a381ca13428818c1f1bc9dbdd716
|
247c252cde612072e6889d5fdd80f3ae39e36250
|
refs/heads/master
| 2023-03-02T22:38:04.186089
| 2021-02-18T06:04:18
| 2021-02-18T06:04:18
| 272,804,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
# https://leetcode.com/problems/maximum-depth-of-binary-tree/
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def maxDepth(self, root: TreeNode) -> int:
        """
        >>> Solution().maxDepth(root=None)
        0
        >>> Solution().maxDepth(root=TreeNode(1))
        1
        >>> Solution().maxDepth(root=TreeNode(1,TreeNode(2)))
        2
        >>> Solution().maxDepth(TreeNode(3,TreeNode(9,None,None),TreeNode(20,TreeNode(15,None,None),TreeNode(7,None,None))))
        3
        """
        if root is None:
            return 0
        return 1 + max(Solution().maxDepth(root.left),
                       Solution().maxDepth(root.right))
|
[
"mark.chesney@gmail.com"
] |
mark.chesney@gmail.com
|