hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3dd524d8e59e2c8188892e7a7fe2e15518d2a46b | 5,294 | py | Python | depthaware/data/sunrgbd_dataset.py | crmauceri/DepthAwareCNN-pytorch1.5 | 6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9 | [
"MIT"
] | 3 | 2021-03-11T01:24:37.000Z | 2021-06-29T03:46:40.000Z | depthaware/data/sunrgbd_dataset.py | crmauceri/DepthAwareCNN-pytorch1.5 | 6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9 | [
"MIT"
] | null | null | null | depthaware/data/sunrgbd_dataset.py | crmauceri/DepthAwareCNN-pytorch1.5 | 6d9b0cf001d7482df7d4cd7240fc36cbfc8356f9 | [
"MIT"
] | null | null | null | import os.path
from depthaware.data.base_dataset import *
from PIL import Image
import time
def make_dataset_fromlst(dataroot, listfilename):
"""
NYUlist format:
imagepath seglabelpath depthpath HHApath
"""
images = []
segs = []
depths = []
HHAs = []
with open(listfilename) as f:
content = f.readlines()
for x in content:
imgname, segname, depthname, HHAname = x.strip().split(' ')
images += [os.path.join(dataroot, imgname)]
segs += [os.path.join(dataroot, segname)]
depths += [os.path.join(dataroot, depthname)]
HHAs += [os.path.join(dataroot, HHAname)]
return {'images':images, 'segs':segs, 'HHAs':HHAs, 'depths':depths}
| 44.116667 | 113 | 0.647526 |
3dd551aff5d9acdfce555b2997eb9c881f846544 | 1,382 | py | Python | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | """
Flask-gTTS
-------------
A Flask extension to add gTTS Google text to speech, into the template,
it makes adding and configuring multiple text to speech audio files at
a time much easier and less time consuming
"""
from setuptools import setup
setup(
name='Flask-gTTS',
version='0.11',
url='https://github.com/mrf345/flask_gtts/',
download_url='https://github.com/mrf345/flask_gtts/archive/0.11.tar.gz',
license='MIT',
author='Mohamed Feddad',
author_email='mrf345@gmail.com',
description='gTTS Google text to speech flask extension',
long_description=__doc__,
py_modules=['gtts'],
packages=['flask_gtts'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'gTTS',
'static_parameters'
],
keywords=['flask', 'extension', 'google', 'text', 'speech',
'gTTS', 'TTS', 'text-to-speech'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
setup_requires=['pytest-runner'],
test_requires=['pytest']
)
| 29.404255 | 76 | 0.633864 |
3dd5a2aa827f14ee73dd8f5c2368476016523c81 | 232 | py | Python | READ.py | BeatrizFS/MongoDB-Python | a23741d5f58ccad50e6239c963f78759f92098ac | [
"MIT"
] | null | null | null | READ.py | BeatrizFS/MongoDB-Python | a23741d5f58ccad50e6239c963f78759f92098ac | [
"MIT"
] | null | null | null | READ.py | BeatrizFS/MongoDB-Python | a23741d5f58ccad50e6239c963f78759f92098ac | [
"MIT"
] | null | null | null | from Arquivo1 import Produto
#READ
#Consultar o Banco de dados
#1.Retorna todas as informaes do Banco de dados
produtos = Produto.objects()
print(produtos)
for produto in produtos:
print(produto.Nome, produto.Valor) | 23.2 | 50 | 0.75 |
3dd7149bf486a0156690dac8d36a869ec269ebf6 | 9,280 | py | Python | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 7 | 2020-03-19T05:04:30.000Z | 2022-03-31T10:29:42.000Z | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 2 | 2020-08-19T03:24:31.000Z | 2021-03-02T00:18:46.000Z | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 3 | 2020-09-17T04:15:04.000Z | 2021-01-18T08:37:39.000Z | """
----------------------------------ABOUT-----------------------------------
Author: Arun Baskaran
--------------------------------------------------------------------------
"""
import model_params
df = pd.read_excel('labels.xlsx', header=None, names=['id', 'label'])
total_labels = df['label']
for i in range(len(total_labels)):
total_labels[i]-=1
| 44.830918 | 210 | 0.650108 |
3dd842d4edbdc348779300fb523036992a49b5b2 | 125 | py | Python | manage.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | manage.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | manage.py | Stupnitskiy/BinaryAPI | e448936ceed96da72e2aa65847030ea56edb224f | [
"MIT"
] | null | null | null | from flask_script import Manager
from src import app
manager = Manager(app)
if __name__ == "__main__":
manager.run()
| 12.5 | 32 | 0.72 |
3dd84d6968111423f954120eed10897fd01c00ea | 1,355 | py | Python | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null |
#CIFAR
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
data = keras.datasets.cifar10
activations=[keras.activations.sigmoid, keras.activations.relu,
keras.layers.LeakyReLU(), keras.activations.tanh]
results=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
class_names=[0,1,2,3,4,5,6,7,8,9]
a=0
for i in range(4):
for j in range(4):
losssum=0
for k in range(6):
(train_images, train_labels), (test_images, test_labels) = data.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(32,32,3)),
keras.layers.Dense(128, activations[i]),
keras.layers.Dense(10, activations[j])
# tanh softmax
])
model.compile(optimizer="adam",loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_images, train_labels,
validation_split=0.25, epochs=5, batch_size=16, verbose=1)
prediction = model.predict(test_images)
losssum=losssum+history.history['loss'][len(history.history['loss'])-1]
results[a]=losssum/1
a=a+1
print(results)
| 38.714286 | 108 | 0.591882 |
3dd93f9bb15a42397c641e431fd3df72da46ab0d | 3,127 | py | Python | All_RasPy_Files/edgedetection.py | govindak-umd/Autonomous_Robotics | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | 2 | 2020-05-14T11:23:30.000Z | 2020-05-25T06:30:57.000Z | All_RasPy_Files/edgedetection.py | govindak-umd/ENPM809T | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | null | null | null | All_RasPy_Files/edgedetection.py | govindak-umd/ENPM809T | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | 5 | 2020-06-09T22:09:15.000Z | 2022-01-31T17:11:19.000Z | # ENME 489Y: Remote Sensing
# Edge detection
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Define slice of an arbitrary original image
f = np.empty((0))
index = np.empty((0))
# Create intensity data, including noise
for i in range(2000):
index = np.append(index, i)
if i <= 950:
f = np.append(f, 50 + np.random.normal(0,1))
elif i > 950 and i < 1000:
f = np.append(f, 50 + (i - 950)/2 + np.random.normal(0,1))
elif i >= 1000 and i < 1050:
f = np.append(f, 75 + (i - 1000)/2 + np.random.normal(0,1))
else:
f = np.append(f, 100 + np.random.normal(0,1))
print f.shape
print index.shape
plt.figure(2)
plt.plot(index, f, 'r-')
plt.title('Slice of Original Image: f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Pixel intensity f(x)')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the original signal
messy = np.gradient(f)
plt.figure(3)
plt.plot(messy, 'r-')
plt.title('Derivative of Original Image Slice: df/dx')
plt.xlabel('Pixel x')
plt.ylabel('Derivative df/dx')
plt.grid()
plt.show()
# Define Gaussian filter
mean = 0
std = 5
var = np.square(std)
x = np.arange(-20, 20, 0.1)
kernel = (1/(std*np.sqrt(2*np.pi)))*np.exp(-np.square((x-mean)/std)/2)
print kernel.shape
plt.figure(4)
plt.plot(x, kernel, 'b-')
plt.title('Kernel: Gaussian Filter h(x)')
plt.xlabel('Pixel x')
plt.ylabel('Kernel h(x)')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(kernel, f, 'same')
print smoothed.shape
plt.figure(5)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve h(x) * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the filtered signal
edges = np.gradient(smoothed)
plt.figure(6)
plt.plot(edges, 'r-')
plt.title('Derivative of Convolved Image: d/dx[ h(x) * f(x) ] ')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the Gaussian kernel
first_diff = np.gradient(kernel)
plt.figure(7)
plt.plot(first_diff, 'b-')
plt.title('1st Derivative of Gaussian: d/dx[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(first_diff, f, 'same')
print smoothed.shape
plt.figure(8)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve d/dx[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the second derivative of the Gaussian kernel: the Laplacian operator
laplacian = np.gradient(first_diff)
plt.figure(9)
plt.plot(laplacian, 'b-')
plt.title('2nd Derivative of Gaussian: Laplacian Operator d^2/dx^2[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(laplacian, f, 'same')
print smoothed.shape
plt.figure(10)
plt.plot(smoothed, 'r-')
plt.title('Apply Laplacian Operator: Convolve d^2/dx^2[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
| 23.689394 | 76 | 0.68692 |
3dda1806de2d35a90208c505c2c72da1466cf4a9 | 1,850 | py | Python | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 28.90625 | 81 | 0.585405 |
3ddaf9735b2cb2b79bcc96e4e4c161028c28ae19 | 2,632 | py | Python | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | 1 | 2020-06-02T08:02:36.000Z | 2020-06-02T08:02:36.000Z | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | 3 | 2022-03-04T11:43:19.000Z | 2022-03-25T00:26:46.000Z | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | null | null | null | import unittest
from datetime import datetime, timezone
from pyfuppes import timeconversion
if __name__ == "__main__":
unittest.main()
| 32.9 | 81 | 0.587006 |
3ddb42001698eb4e38741ad5c0c31bf71b836bbd | 1,111 | py | Python | ucscentralsdk/methodmeta/LstorageCloneMeta.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/methodmeta/LstorageCloneMeta.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | ucscentralsdk/methodmeta/LstorageCloneMeta.py | ragupta-git/ucscentralsdk | 2678008b5fb6b0fafafec388d0874147e95a1086 | [
"Apache-2.0"
] | null | null | null | """This module contains the meta information of LstorageClone ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("LstorageClone", "lstorageClone", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"dn": MethodPropertyMeta("Dn", "dn", "ReferenceObject", "Version142b", "InputOutput", False),
"in_array_name": MethodPropertyMeta("InArrayName", "inArrayName", "Xs:string", "Version142b", "Input", False),
"in_hierarchical": MethodPropertyMeta("InHierarchical", "inHierarchical", "Xs:string", "Version142b", "Input", False),
"in_target_org": MethodPropertyMeta("InTargetOrg", "inTargetOrg", "ReferenceObject", "Version142b", "Input", False),
"out_config": MethodPropertyMeta("OutConfig", "outConfig", "ConfigConfig", "Version142b", "Output", True),
}
prop_map = {
"cookie": "cookie",
"dn": "dn",
"inArrayName": "in_array_name",
"inHierarchical": "in_hierarchical",
"inTargetOrg": "in_target_org",
"outConfig": "out_config",
}
| 44.44 | 122 | 0.706571 |
3ddd545e8ac1636ac0a7d92a17cca391f2e23803 | 7,468 | py | Python | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | import sys
import glob
import serial
import ttk
import tkFileDialog
from Tkinter import *
#for plotting we need these:
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from drawnow import *
if __name__ == '__main__':
root = Tk()
root.title("Power Monitoring tool")
main = MainView(root)
root.mainloop()
| 35.561905 | 138 | 0.595742 |
3ddeb574a2024dfb0d06c0c742bbc0a272df7e2d | 900 | py | Python | shop/tests/products/views/test_product_details_view.py | nikolaynikolov971/NftShop | 09a535a6f708f0f6da5addeb8781f9bdcea72cf3 | [
"MIT"
] | null | null | null | shop/tests/products/views/test_product_details_view.py | nikolaynikolov971/NftShop | 09a535a6f708f0f6da5addeb8781f9bdcea72cf3 | [
"MIT"
] | null | null | null | shop/tests/products/views/test_product_details_view.py | nikolaynikolov971/NftShop | 09a535a6f708f0f6da5addeb8781f9bdcea72cf3 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.urls import reverse
from shop.products.models import Product
from tests.base.mixins import ProductTestUtils
| 30 | 94 | 0.66 |
3de34122732924fae3421861027e4399e17b6da8 | 4,558 | py | Python | projetoFTP/servidor/sftps.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | projetoFTP/servidor/sftps.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | projetoFTP/servidor/sftps.py | MarciovsRocha/conectividade-sistemas-cyberfisicos | d76b8a540b55eb8a54ae99067b625010e85a2eb8 | [
"MIT"
] | null | null | null | import socket
import threading
import os
import sys
from pathlib import Path
#---------------------------------------------------
#------------------------------------------------
#-----------------------------------------------
#------------------------------------------------
#---------------------------------------------------------
# PROGRAMA PRINCIPAL
#---------------------------------------------------------
pydir= os.path.dirname(os.path.realpath(__file__))
print('Diretorio do script: ', pydir)
os.chdir(pydir)
print('Simple File Transfer Protocol Server\n')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', 9999))
except:
print('# erro de bind')
sys.exit()
s.listen(5)
print('aguardando conexes na porta ', 9999)
print('Canal de controle: cliente ----> [9999] servidor')
print('Canal de dados (call back): servidor ----> [9998] cliente')
while True:
conn, addr = s.accept()
print('recebi uma conexao do cliente ', addr)
t = threading.Thread( target=TrataCliente, args=(conn,addr,))
t.start()
| 28.4875 | 72 | 0.426942 |
3de3ed318e614e22c2b9f52348133eddba3a0fee | 2,424 | py | Python | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
sentences = [
"Going into the #dogpark is not allowed, @%s.",
"That's my favourite #dogpark @%s - no one is allowed to go into it!",
"That #dogpark you mention is forbidden! Please don't, @%s",
"The #dogpark should be secured with electrified barbwire. "
"Don't you agree, @%s?",
"Just make sure NOT TO ENTER the #dogpark @%s.",
"Why would you mention such nasty things like a #dogpark @%s?",
"Remember to share your #dogpark experience "
"so others may also survive @%s!",
"Hi @%s! City council discourages the term #dogpark for security reasons.",
"You are not a dog, @%s! Please don't think of the #dogpark.",
"@%s in the #dogpark all dogs have 8 legs. Scary.",
"Please return to safety @%s! Don't linger in the #dogpark.",
"Hey @%s I got notice that the #dogpark "
"will get fortified with spikes and lava soon.",
"Beware @%s. Today the #dogpark is full of deer. "
"Dangerous with their sharp claws and many heads.",
"There is a time and place for everything @%s. "
"But it's not the #dogpark. An acid pit is much saver.",
"@%s do you know that the #dogpark is actually a pond of molten lava?",
"@%s beware - flesh entering the #dogpark without correct papers "
"will actually turn into a liquid.",
"Only truely evil spirits may enter the #dogpark. Are you one of us, @%s?",
"I heard a five headed dragon near the #dogpark might try to dine on @%s.",
"@%s and I are sure that the #dogpark is protected by a smiling god "
"that replaces your blood with liquid led.",
"In the #dogpark everyone becomes a stick in an eternal play of fetch. "
"Be careful @%s.",
"You may eat your own dogfood - but please: "
"NEVER walk your own #dogpark, @%s.",
"There is a non-zero chance that thinking the word #dogpark "
"replaces your neurons with ants, @%s.",
"The #dogpark will not harm you, @%s. "
"Provided you have wings. And antlers.",
]
| 44.072727 | 79 | 0.636139 |
3de5ea40f8bf420e08e8aea386566d9bf26093f0 | 3,595 | py | Python | detectron/tests/test_track_losses.py | orestis-z/track-rcnn | 6b2405cb8308168106526b57027a1af3fe9df0f3 | [
"Apache-2.0"
] | 9 | 2020-10-16T22:20:09.000Z | 2022-03-22T11:08:01.000Z | detectron/tests/test_track_losses.py | orestis-z/track-rcnn | 6b2405cb8308168106526b57027a1af3fe9df0f3 | [
"Apache-2.0"
] | null | null | null | detectron/tests/test_track_losses.py | orestis-z/track-rcnn | 6b2405cb8308168106526b57027a1af3fe9df0f3 | [
"Apache-2.0"
] | 2 | 2021-10-04T14:27:52.000Z | 2022-03-22T11:07:53.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy import spatial
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import assert_and_infer_cfg
import detectron.utils.c2 as c2_utils
from detectron.utils.math import cosine_similarity
from detectron.modeling.track_rcnn_heads import add_track_losses
from detectron.modeling.detector import DetectionModelHelper
c2_utils.import_custom_ops()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
unittest.main()
| 43.313253 | 115 | 0.69096 |
3de7f52d572f048f38c1b4744268152292a54283 | 4,497 | py | Python | torch/nn/_functions/thnn/upsampling.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | null | null | null | torch/nn/_functions/thnn/upsampling.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | null | null | null | torch/nn/_functions/thnn/upsampling.py | UmaTaru/run | be29e4d41a4de3dee27cd6796801bfe51382d294 | [
"MIT"
] | null | null | null | from numbers import Integral
import torch
from torch.autograd import Function
from torch._thnn import type2backend
from . import _all_functions
from ...modules.utils import _pair
from ...functional import _check_bilinear_2d_scale_factor
_all_functions.append(UpsamplingNearest2d)
_all_functions.append(UpsamplingBilinear2d)
| 34.328244 | 98 | 0.610407 |
3de882780eafbe1233cbdcdf8b3eb920ea7971b8 | 7,869 | py | Python | Day17/17_trick_shot.py | schca675/my-code-for-advent-of-code-2021 | e8bdb986930b444884d37e679a37ed25efe2b34e | [
"Apache-2.0"
] | null | null | null | Day17/17_trick_shot.py | schca675/my-code-for-advent-of-code-2021 | e8bdb986930b444884d37e679a37ed25efe2b34e | [
"Apache-2.0"
] | null | null | null | Day17/17_trick_shot.py | schca675/my-code-for-advent-of-code-2021 | e8bdb986930b444884d37e679a37ed25efe2b34e | [
"Apache-2.0"
] | null | null | null | # --- Day 17: Trick Shot ---
import math
import time
print("TEST")
start = time.time()
resolve_puzzle_part1("test_data.txt")
print("Time: {}".format(time.time()-start))
print("PUZZLE")
start = time.time()
resolve_puzzle_part1("data.txt")
print("Time: {}".format(time.time()-start))
| 35.931507 | 115 | 0.490532 |
3de9f24b49937335e24db781a7e382e77643515c | 568 | py | Python | zip_files.py | VladimirsHisamutdinovs/Advanced_Python_Operations | 509c219f70adcbe9b3dedd71bff819494bab9c83 | [
"Apache-2.0"
] | null | null | null | zip_files.py | VladimirsHisamutdinovs/Advanced_Python_Operations | 509c219f70adcbe9b3dedd71bff819494bab9c83 | [
"Apache-2.0"
] | null | null | null | zip_files.py | VladimirsHisamutdinovs/Advanced_Python_Operations | 509c219f70adcbe9b3dedd71bff819494bab9c83 | [
"Apache-2.0"
] | null | null | null | import zipfile
zip_file = zipfile.ZipFile("zip_archive.zip", "w")
zip_file.write("textfile_for_zip_01")
zip_file.write("textfile_for_zip_02")
zip_file.write("textfile_for_zip_03")
# print(zipfile.is_zipfile("zip_archive.zip"))
# zip_file = zipfile.ZipFile("zip_archive.zip")
# print(zip_file.namelist())
# print(zip_file.infolist())
# zip_info = zip_file.getinfo("textfile_for_zip_02")
# print(zip_info.file_size)
# print(zip_file.read("textfile_for_zip_01"))
zip_file.extract("textfile_for_zip_02")
zip_file.extractall()
zip_file.close() | 24.695652 | 53 | 0.748239 |
3deab92507c5a88674b2ab8baa0fe1cd63998a28 | 21,024 | py | Python | omdrivers/lifecycle/iDRAC/iDRACUpdate.py | rajroyce1212/Ansible-iDRAC | 4ce00b605ee2e128ad98b572759e860bae3da3dc | [
"Apache-2.0"
] | 61 | 2018-02-21T00:02:20.000Z | 2022-01-26T03:47:19.000Z | omdrivers/lifecycle/iDRAC/iDRACUpdate.py | rajroyce1212/Ansible-iDRAC | 4ce00b605ee2e128ad98b572759e860bae3da3dc | [
"Apache-2.0"
] | 31 | 2018-03-24T05:43:39.000Z | 2022-03-16T07:10:37.000Z | omdrivers/lifecycle/iDRAC/iDRACUpdate.py | rajroyce1212/Ansible-iDRAC | 4ce00b605ee2e128ad98b572759e860bae3da3dc | [
"Apache-2.0"
] | 25 | 2018-03-13T10:06:12.000Z | 2022-01-26T03:47:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import sys
import os
import re
import json
import time
import glob
import xml.etree.ElementTree as ET
from enum import Enum
from datetime import datetime
from omsdk.sdkprint import PrettyPrint
from omsdk.sdkcenum import EnumWrapper, TypeHelper
from omsdk.lifecycle.sdkupdate import Update
from omsdk.catalog.sdkupdatemgr import UpdateManager
from omsdk.catalog.updaterepo import RepoComparator, UpdateFilterCriteria
from omsdk.catalog.updaterepo import UpdatePresenceEnum, UpdateNeededEnum, UpdateTypeEnum
from omdrivers.enums.iDRAC.iDRACEnums import *
from omsdk.sdkcunicode import UnicodeWriter
from omsdk.sdkfile import FileOnShare
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
from pysnmp.hlapi import *
from pysnmp.smi import *
PySnmpPresent = True
except ImportError:
PySnmpPresent = False
##below methods to update firmware using redfish will be reimplemented using Type Manager system
def _get_scp_path(self, catalog_dir):
"""
:param catalog_dir: object for Folder containing Catalog on share.
:param catalog_dir: FileOnShare.
:returns: returns a tuple containing remote scp path(full) and the scp file name
"""
catalog_path_str = catalog_dir.remote_full_path
scp_file = 'scp_' + self.entity.ServiceTag + '_' + datetime.now().strftime('%Y%m%d_%H%M%S') + ".xml"
scp_path = catalog_path_str + os.path.sep + scp_file
return (scp_path, scp_file)
def update_from_repo_usingscp_redfish(self, catalog_dir, catalog_file, mount_point, apply_update=True,
reboot_needed=False, job_wait=True):
"""Performs firmware update on target server using scp RepositoyUpdate attribute
:param catalog_dir: object for Folder containing Catalog on share.
:param catalog_dir: FileOnShare.
:param catalog_file: Catalog file name
:param catalog_file: str.
:param mount_point: local share on which remote(catalog_dir) folder has been mounted
:param mount_point: str.
:returns: returns status of firmware update through scp
"""
(scp_path, scp_file) = self._get_scp_path(catalog_dir)
myshare = FileOnShare(scp_path).addcreds(catalog_dir.creds)
# exports only that component which contains RepositoryUpdate attribute
rjson = self.entity.config_mgr.scp_export(share_path=myshare, target='System.Embedded.1')
if 'Status' not in rjson or rjson['Status'] != 'Success':
return {'Status': 'Failed', 'Message': 'Export of scp failed for firmware update'}
scpattrval = {'RepositoryUpdate': catalog_file}
localfile = mount_point.share_path + os.path.sep + scp_file
self.edit_xml_file(localfile, scpattrval)
if reboot_needed:
shutdown = ShutdownTypeEnum.Graceful
else:
shutdown = ShutdownTypeEnum.NoReboot
rjson = self.entity.config_mgr.scp_import(share_path=myshare, shutdown_type=shutdown, job_wait=job_wait)
if job_wait:
rjson['file'] = localfile
rjson = self._job_mgr._job_wait(rjson['file'], rjson)
rjson['job_details'] = self.entity._update_get_repolist()
return rjson
def edit_xml_file(self, file_location, attr_val_dict):
"""Edit and save exported scp's attributes which are passed in attr_val_dict
:param file_location: locally mounted location(full path) of the exported scp .
:param file_location: str.
:param attr_val_dict: attribute and value pairs as dict
:param attr_val_dict: dict.
:returns: returns None
"""
tree = ET.parse(file_location)
root = tree.getroot()
for attr in attr_val_dict:
xpath = ".//*[@Name='" + str(attr) + "']"
attribute_element = root.find(xpath)
attribute_element.text = str(attr_val_dict.get(attr))
tree.write(file_location)
return
| 50.176611 | 137 | 0.581573 |
3dec8f27fe9f9465de4b1a61485314e099192b22 | 3,196 | py | Python | playthrough/management/commands/migrate_shogun.py | SciADV-Community/genki | b86811695c428ca93bdab3ea2f68e3a99713d4db | [
"MIT"
] | null | null | null | playthrough/management/commands/migrate_shogun.py | SciADV-Community/genki | b86811695c428ca93bdab3ea2f68e3a99713d4db | [
"MIT"
] | 11 | 2020-10-15T01:19:24.000Z | 2022-03-28T04:09:43.000Z | playthrough/management/commands/migrate_shogun.py | SciADV-Community/genki | b86811695c428ca93bdab3ea2f68e3a99713d4db | [
"MIT"
] | 1 | 2021-01-11T19:56:02.000Z | 2021-01-11T19:56:02.000Z | import argparse
import os
import sqlite3
from django.core.management.base import BaseCommand
from playthrough.models import Alias, Channel, Game, GameConfig, Guild, RoleTemplate, User
| 44.388889 | 104 | 0.59199 |
3ded44aff9cb2e2f9d8057ef0b9ba6ae462ea0c0 | 5,233 | py | Python | backend/model/benchmark-metrics/service/ocr.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 3 | 2022-01-12T06:51:51.000Z | 2022-02-23T18:54:33.000Z | backend/model/benchmark-metrics/service/ocr.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 6 | 2021-08-31T19:21:26.000Z | 2022-01-03T05:53:42.000Z | backend/model/benchmark-metrics/service/ocr.py | agupta54/ulca | c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | [
"MIT"
] | 8 | 2021-08-12T08:07:49.000Z | 2022-01-25T04:40:51.000Z | import logging
from datetime import datetime
import numpy as np
from logging.config import dictConfig
from kafkawrapper.producer import Producer
from utils.mongo_utils import BenchMarkingProcessRepo
from configs.configs import ulca_notifier_input_topic, ulca_notifier_benchmark_completed_event, ulca_notifier_benchmark_failed_event
from models.metric_manager import MetricManager
log = logging.getLogger('file')
prod = Producer()
repo = BenchMarkingProcessRepo()
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
}) | 55.084211 | 200 | 0.608064 |
3deea7c2a0399d6a1677f78e7cc36afe63de0fc2 | 1,780 | py | Python | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-20 16:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 34.901961 | 115 | 0.591573 |
3deeb28e7a4a40609c5fe55751360abc1b88afba | 1,603 | py | Python | komposisjon/komposisjon/rektangler_kvadrater.py | knutsenfiksdal/Oving_8 | 4e5d3a358cfb9127509a86a61c9499f22da9eabc | [
"MIT"
] | null | null | null | komposisjon/komposisjon/rektangler_kvadrater.py | knutsenfiksdal/Oving_8 | 4e5d3a358cfb9127509a86a61c9499f22da9eabc | [
"MIT"
] | null | null | null | komposisjon/komposisjon/rektangler_kvadrater.py | knutsenfiksdal/Oving_8 | 4e5d3a358cfb9127509a86a61c9499f22da9eabc | [
"MIT"
] | null | null | null |
# Square implemented with composition and delegation (demo driver).
# NOTE(review): Rektangel and Kvadrat are defined elsewhere in this file
# (not visible in this chunk) — confirm their constructors take
# (x, y, width, height) and (x, y, side) respectively.
if __name__ == "__main__":
    # exercise the rectangle: print, compute area, stretch, repeat
    rektanglet = Rektangel(5, 5, 10, 5)
    print(rektanglet)
    print(rektanglet.areal())
    rektanglet.strekk(0.5)
    print(rektanglet)
    print(rektanglet.areal())
    # exercise the square the same way
    kvadrat = Kvadrat(2, 2, 6)
    print(kvadrat)
    print(kvadrat.areal())
    kvadrat.strekk(6)
    print(kvadrat)
    print(kvadrat.areal())
| 26.278689 | 75 | 0.646288 |
3defd9479c2f0a53049990dbf13feea2c96391cf | 16,024 | py | Python | DiscordBot/Commands/DiscordPoints.py | aronjanosch/kirbec-bot | 6d44e177c5cf6669564047fbbc8f6e8c342bca28 | [
"MIT"
] | null | null | null | DiscordBot/Commands/DiscordPoints.py | aronjanosch/kirbec-bot | 6d44e177c5cf6669564047fbbc8f6e8c342bca28 | [
"MIT"
] | null | null | null | DiscordBot/Commands/DiscordPoints.py | aronjanosch/kirbec-bot | 6d44e177c5cf6669564047fbbc8f6e8c342bca28 | [
"MIT"
] | null | null | null | from datetime import datetime
import discord
import itertools
from .utils import formatString, getUsageEmbed, getOopsEmbed
# IDEAS
# 1. Paying out points (without bets)
| 33.949153 | 119 | 0.602034 |
3df076848f2032b90ec31c8b5ee8c64134fd5e5c | 1,579 | py | Python | lunch/admin.py | KrzysztofSakowski/lunch-crawler | 6a93d6cfad634fb98f89bc22d68547801865c9ae | [
"Apache-2.0"
] | 1 | 2020-02-17T13:40:08.000Z | 2020-02-17T13:40:08.000Z | lunch/admin.py | KrzysztofSakowski/lunch-crawler | 6a93d6cfad634fb98f89bc22d68547801865c9ae | [
"Apache-2.0"
] | 4 | 2020-02-11T23:06:14.000Z | 2021-06-10T18:07:30.000Z | lunch/admin.py | KrzysztofSakowski/lunch-crawler | 6a93d6cfad634fb98f89bc22d68547801865c9ae | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import MenuFacebook, MenuEmail, UserProfile, Occupation, FacebookRestaurant, EmailRestaurant
admin.site.register(FacebookRestaurant, RestaurantAdmin)
admin.site.register(EmailRestaurant, RestaurantAdmin)
admin.site.register(MenuFacebook, MenuBaseAdmin)
admin.site.register(MenuEmail, MenuBaseAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Occupation, SeatAdmin)
| 29.240741 | 105 | 0.723243 |
3df0af937b9149db956b0d8ec02537a403587abe | 19,082 | py | Python | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 35.600746 | 245 | 0.672466 |
3df0f23a4341291aa332900c1b4adf982ac1f716 | 2,740 | py | Python | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | Script to read temperature data from the DHT11:
# Importeer Adafruit DHT bibliotheek.
import Adafruit_DHT
import time
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) #on gpio pin 4 or pin 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
time.sleep(60) # read data every minute
Update from the Script above with modification of writing the data to a CSV.file:
# Importeer Adafruit DHT bibliotheek.
#time.strftime("%I:%M:%S")
import Adafruit_DHT
import time
import csv
import sys
csvfile = "temp.csv"
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) # gpio pin 4 or pin number 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
timeC = time.strftime("%I")+':' +time.strftime("%M")+':'+time.strftime("%S")
data = [temperature, timeC]
with open(csvfile, "a")as output:
writer = csv.writer(output, delimiter=",", lineterminator = '\n')
writer.writerow(data)
time.sleep(6) # update script every 60 seconds
Script to read data from the CSV and display it in a graph:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.animation as animation
from datetime import datetime
fig = plt.figure()
rect = fig.patch
rect.set_facecolor('#0079E7')
ani = animation.FuncAnimation(fig, animate, interval = 6000)
plt.show()
*/
void setup() {
}
void loop() {
}
| 30.10989 | 104 | 0.622628 |
3df10878e5646297672b7b72bacac47ff05e414e | 4,168 | py | Python | route_distances/utils/routes.py | general-synthesis/route-distances | 2bc09a607bd7fa488357dcee96325669d8295f90 | [
"MIT"
] | null | null | null | route_distances/utils/routes.py | general-synthesis/route-distances | 2bc09a607bd7fa488357dcee96325669d8295f90 | [
"MIT"
] | null | null | null | route_distances/utils/routes.py | general-synthesis/route-distances | 2bc09a607bd7fa488357dcee96325669d8295f90 | [
"MIT"
] | null | null | null | """ Module containing helper routines for routes """
from typing import Dict, Any, Set, List, Tuple
import numpy as np
from route_distances.utils.type_utils import StrDict
def calc_depth(tree_dict: StrDict, depth: int = 0) -> int:
    """
    Calculate the depth of a route, recursively.

    :param tree_dict: the route
    :param depth: the current depth, don't specify for route
    :return: the maximum depth over all branches of the tree
    """
    child_nodes = tree_dict.get("children")
    if not child_nodes:
        # a node without children terminates this branch
        return depth
    return max(calc_depth(node, depth + 1) for node in child_nodes)
def calc_llr(tree_dict: StrDict) -> int:
    """
    Calculate the longest linear route for a synthetic route.

    The tree depth counts both molecule and reaction levels, hence the
    halving to obtain the number of reaction steps on the longest branch.

    :param tree_dict: the route
    :return: the longest linear route length
    """
    depth = calc_depth(tree_dict)
    return depth // 2
def extract_leaves(
    tree_dict: StrDict,
) -> Set[str]:
    """
    Extract a set with the SMILES of all the leaf nodes, i.e.
    starting material.

    Fix: the previous version called a ``traverse`` helper that is not
    defined in this module, raising ``NameError`` at runtime; the traversal
    is now implemented locally.

    :param tree_dict: the route
    :return: a set of SMILES strings
    """
    leaves: Set[str] = set()

    def _collect(node: StrDict) -> None:
        # a node without children is a starting material (leaf)
        children = node.get("children", [])
        if not children:
            leaves.add(node["smiles"])
            return
        for child in children:
            _collect(child)

    _collect(tree_dict)
    return leaves
def is_solved(route: StrDict) -> bool:
    """
    Find if a route is solved, i.e. if all starting material
    is in stock.

    To be accurate, each molecule node needs to have an extra
    boolean property called `in_stock`.

    :param route: the route to analyze
    :return: True if no leaf is out of stock
    """
    # the helper raises ValueError when at least one leaf is not in stock
    try:
        find_leaves_not_in_stock(route)
        return True
    except ValueError:
        return False
def route_score(
    tree_dict: StrDict,
    mol_costs: Dict[bool, float] = None,
    average_yield=0.8,
    reaction_cost=1.0,
) -> float:
    """
    Calculate the score of route using the method from
    (Badowski et al. Chem Sci. 2019, 10, 4640).

    The reaction cost is constant and the yield is an average yield.
    The starting materials are assigned a cost based on whether they are in
    stock or not. By default starting material in stock is assigned a
    cost of 1 and starting material not in stock is assigned a cost of 10.

    To be accurate, each molecule node needs to have an extra
    boolean property called `in_stock`.

    Fix: the scoring parameters are now propagated to the recursive calls;
    previously nested molecules were always scored with the defaults, so
    custom parameters only affected the top level of the tree.

    :param tree_dict: the route to analyze
    :param mol_costs: the starting material cost
    :param average_yield: the average yield, defaults to 0.8
    :param reaction_cost: the reaction cost, defaults to 1.0
    :return: the computed cost
    """
    mol_cost = mol_costs or {True: 1, False: 10}

    reactions = tree_dict.get("children", [])
    if not reactions:
        # leaf molecule: its cost depends only on stock availability
        return mol_cost[tree_dict.get("in_stock", True)]

    child_sum = sum(
        1 / average_yield
        * route_score(child, mol_costs, average_yield, reaction_cost)
        for child in reactions[0]["children"]
    )
    return reaction_cost + child_sum
def route_scorer(routes: List[StrDict]) -> Tuple[List[StrDict], List[float]]:
    """
    Scores and sort a list of routes.

    Returns a tuple of the sorted routes and their costs.

    :param routes: the routes to score
    :return: the sorted routes and their costs
    """
    costs = np.asarray([route_score(item) for item in routes])
    order = np.argsort(costs)
    sorted_routes = [routes[position] for position in order]
    sorted_costs = costs[order].tolist()
    return sorted_routes, sorted_costs
def route_ranks(scores: List[float]) -> List[int]:
    """
    Compute the rank of route scores. Rank starts at 1.

    Scores that differ by less than 1e-8 share the same rank.

    :param scores: the route scores (assumed sorted)
    :return: a list of ranks for each route
    """
    ranks = [1]
    for previous, current in zip(scores, scores[1:]):
        if abs(current - previous) < 1e-8:
            # tie: same rank as the previous route
            ranks.append(ranks[-1])
        else:
            ranks.append(ranks[-1] + 1)
    return ranks
3df45b763adea0ed603bc91664b6febfe07b4afe | 1,920 | py | Python | src/yafowil/tests/__init__.py | 2silver/yafowil | b9776503f98f145f7aaaa4f61b73e238c92c534c | [
"BSD-3-Clause"
] | 8 | 2015-12-15T21:14:00.000Z | 2019-11-11T22:13:18.000Z | src/yafowil/tests/__init__.py | 2silver/yafowil | b9776503f98f145f7aaaa4f61b73e238c92c534c | [
"BSD-3-Clause"
] | 21 | 2015-11-21T10:12:12.000Z | 2021-06-03T06:51:53.000Z | src/yafowil/tests/__init__.py | 2silver/yafowil | b9776503f98f145f7aaaa4f61b73e238c92c534c | [
"BSD-3-Clause"
] | 5 | 2016-11-23T13:41:52.000Z | 2020-06-08T18:21:00.000Z | from __future__ import print_function
from node.tests import NodeTestCase
from yafowil.base import factory
from yafowil.compat import IS_PY2
import lxml.etree as etree
import sys
import unittest
import yafowil.common
import yafowil.compound
import yafowil.persistence
import yafowil.table
if not IS_PY2:
from importlib import reload
if __name__ == '__main__':
run_tests()
| 25.945946 | 64 | 0.752083 |
3df5aa98eb0d85a8d21eb7afce122f2c8fabce6b | 1,350 | py | Python | tools/foolbox/bim_attack.py | GianmarcoMidena/adversarial-ML-benchmarker | 43cfcfdac36da88d37b12d956ea8735fd27ca4a9 | [
"MIT"
] | null | null | null | tools/foolbox/bim_attack.py | GianmarcoMidena/adversarial-ML-benchmarker | 43cfcfdac36da88d37b12d956ea8735fd27ca4a9 | [
"MIT"
] | null | null | null | tools/foolbox/bim_attack.py | GianmarcoMidena/adversarial-ML-benchmarker | 43cfcfdac36da88d37b12d956ea8735fd27ca4a9 | [
"MIT"
] | null | null | null | from foolbox.attacks import LinfinityBasicIterativeAttack
from foolbox.criteria import Misclassification
from foolbox.distances import MSE
from tools.foolbox.adversarial_attack import AdversarialAttack
| 51.923077 | 112 | 0.707407 |
3df5e6a39fd0846088495ee87733d03e26f82c02 | 292 | py | Python | Tabuada.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | Tabuada.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | Tabuada.py | tobiaspontes/ScriptsPython | 21ed779e49adca500ce5815dd100f4ec999a2571 | [
"MIT"
] | null | null | null | # Tabuada em Python
if __name__ == '__main__':
    # NOTE(review): stray debug print split over two lines — presumably
    # left over from development; confirm it can be removed
    print(9
    )
    # read the multiplication-table base number from the user (Portuguese UI)
    nro = int(input('Entre com um nmero: '))
    # print a green (ANSI \033[1;32m) header for the table
    print(f'\n\033[1;32mTabuada do {nro}'+'\n')
    # NOTE(review): tabuada() is not defined in this chunk — presumably
    # defined earlier in the file; verify before running
    tabuada(nro)
| 20.857143 | 61 | 0.513699 |
3df87a91ac53ca2678893bfc4dee7db4ace5bf95 | 3,235 | py | Python | radix_tree.py | mouradmourafiq/data-analysis | 1df2ca020a554f1fdab7cc9e53115e249cc199ac | [
"BSD-2-Clause"
] | 17 | 2015-04-01T12:11:31.000Z | 2022-03-15T16:44:01.000Z | radix_tree.py | mouradmourafiq/data-analysis | 1df2ca020a554f1fdab7cc9e53115e249cc199ac | [
"BSD-2-Clause"
] | null | null | null | radix_tree.py | mouradmourafiq/data-analysis | 1df2ca020a554f1fdab7cc9e53115e249cc199ac | [
"BSD-2-Clause"
] | 17 | 2015-01-14T14:59:40.000Z | 2021-07-01T05:46:14.000Z | # -*- coding: utf-8 -*-
'''
Created on Dec 01, 2012
@author: Mourad Mourafiq
About: This is an attempt to implement the radix tree algo.
Features :
-> insert
-> remove
-> search
'''
NOK = "{'':[]}"
| 27.415254 | 77 | 0.483462 |
3df8c0e29455e554abfe1f3cc62c34726c6ded0b | 1,264 | py | Python | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 2 | 2018-08-18T15:14:45.000Z | 2019-10-16T16:14:13.000Z | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | null | null | null | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 6 | 2018-05-05T18:13:05.000Z | 2021-05-20T11:32:48.000Z |
# Create instance of Animal
animal1 = Animal("Edgar",30)
animal1.walk().walk().walk().run().run().display_health()
# Create instance of Dog
dog1 = Dog("Raspberry",150)
dog1.walk().walk().walk().run().run().pet().display_health()
# Create instance of Dragon
dragon1 = Dragon("Phantoon", 500)
dragon1.walk().run().fly().fly().fly().display_health()
# Create new Animal
animal2 = Animal("Probos",200)
#animal2.pet()
#AttributeError: 'Animal' object has no attribute 'pet'
#animal2.fly()
#AttributeError: 'Animal' object has no attribute 'fly'
animal2.display_health()
#Health: 50 - does not say "I am a Dragon"
| 22.175439 | 60 | 0.630538 |
3dfa41325fc23f6087b7a1ae8181579baa35af0a | 17,915 | py | Python | ai4water/preprocessing/transformations/_wrapper.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 17 | 2021-05-21T13:01:52.000Z | 2022-03-19T15:17:10.000Z | ai4water/preprocessing/transformations/_wrapper.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 3 | 2021-10-31T22:40:28.000Z | 2021-11-08T02:28:35.000Z | ai4water/preprocessing/transformations/_wrapper.py | moonson619/AI4Water-1 | 285d46824502b6a787e42570b72432f4f6acf45e | [
"MIT"
] | 7 | 2021-08-06T07:27:50.000Z | 2022-01-26T00:38:32.000Z |
from typing import Union, List, Dict
import numpy as np
import pandas as pd
from ai4water.utils.utils import jsonize, deepcopy_dict_without_clone
from ai4water.preprocessing.transformations import Transformation
| 42.252358 | 121 | 0.547251 |
3dfbbd5b64a3c6157f0b5de85518ecc1e0323285 | 3,684 | py | Python | main/tagcn_training.py | Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT | 7362104002225055715f05ccfc5ee8f6ef433d50 | [
"Apache-2.0"
] | 18 | 2020-09-10T06:48:22.000Z | 2022-01-25T18:22:52.000Z | main/tagcn_training.py | Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT | 7362104002225055715f05ccfc5ee8f6ef433d50 | [
"Apache-2.0"
] | null | null | null | main/tagcn_training.py | Stanislas0/KDD_CUP_2020_MLTrack2_SPEIT | 7362104002225055715f05ccfc5ee8f6ef433d50 | [
"Apache-2.0"
] | null | null | null | import os
import dgl
import time
import argparse
import numpy as np
import torch as th
import distutils.util
import torch.nn.functional as F
import utils
import models
import data_loader
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
dev = th.device('cuda' if th.cuda.is_available() else 'cpu')
if __name__ == '__main__':
    # ---- command-line configuration for the TAGCN training run ----
    argparser = argparse.ArgumentParser("training")
    argparser.add_argument('--adj-path', type=str, default='../data/adj_matrix_formal_stage.pkl')
    argparser.add_argument('--feat-path', type=str, default='../data/feature_formal_stage.npy')
    argparser.add_argument('--label-path', type=str, default='../data/train_labels_formal_stage.npy')
    argparser.add_argument('--output-dir', type=str, default='./saved_models/')
    argparser.add_argument('--output-name', type=str, default='tagcn_128_3.pkl')
    argparser.add_argument('--if-load-model', type=lambda x: bool(distutils.util.strtobool(x)), default=False)
    argparser.add_argument('--model-dir', type=str, default='./saved_models/')
    argparser.add_argument('--model-name', type=str, default='tagcn_128_3.pkl')
    argparser.add_argument('--num-epochs', type=int, default=5000)
    argparser.add_argument('--num-hidden', type=int, default=128)
    argparser.add_argument('--num-layers', type=int, default=3)
    argparser.add_argument('--lr', type=float, default=0.001)
    argparser.add_argument('--dropout', type=float, default=0.1)
    argparser.add_argument('--adj-norm', type=lambda x: bool(distutils.util.strtobool(x)), default=True)
    argparser.add_argument('--feat-norm', type=str, default=None)
    args = argparser.parse_args()
    print(vars(args))
    # ---- dataset loading ----
    # NOTE(review): 'indices' is not defined anywhere in this scope —
    # presumably computed in code lost above; must be fixed before running
    dataset = data_loader.KddDataset(args.adj_path, args.feat_path, args.label_path, indices)
    adj = dataset.adj
    features = dataset.features
    labels = dataset.labels
    train_mask = dataset.train_mask
    val_mask = dataset.val_mask
    test_mask = dataset.test_mask
    size_raw = features.shape[0]
    # the last 50000 nodes are excluded from loss/accuracy computation below
    size_reduced = size_raw - 50000
    # ---- graph construction and feature preprocessing ----
    graph = dgl.DGLGraph()
    if args.adj_norm:
        adj = utils.adj_preprocess(adj)
    feat_norm_func = utils.feat_norm(args.feat_norm)
    graph.from_scipy_sparse_matrix(adj)
    features = th.FloatTensor(features).to(dev)
    # NOTE(review): th.where on a 2-D tensor returns (rows, cols); taking [0]
    # zeroes entire rows containing any out-of-range value — confirm an
    # element-wise clamp was not intended instead
    features[th.where(features < -1.0)[0]] = 0
    features[th.where(features > 1.0)[0]] = 0
    features = feat_norm_func(features)
    labels = th.LongTensor(labels).to(dev)
    graph.ndata['features'] = features
    # ---- model setup (input dim 100, output classes 20) ----
    model = models.TAGCN(100, args.num_hidden, 20, args.num_layers, activation=F.leaky_relu, dropout=args.dropout)
    if args.if_load_model:
        # resume from a previously saved state dict
        model_states = th.load(os.path.join(args.model_dir, args.model_name), map_location=dev)
        model.load_state_dict(model_states)
    model = model.to(dev)
    optimizer = th.optim.Adam(model.parameters(), lr=args.lr)
    # ---- training loop ----
    dur = []
    for epoch in range(args.num_epochs):
        t0 = time.time()
        logits = model(graph, features).to(dev)
        # restrict loss/metrics to the first size_reduced nodes
        logp = F.log_softmax(logits, 1)[:size_reduced]
        loss = F.nll_loss(logp[train_mask], labels[train_mask]).to(dev)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        dur.append(time.time() - t0)
        if epoch % 10 == 0:
            # periodic progress report with train/validation accuracy
            train_acc = utils.compute_acc(logp, labels, train_mask)
            val_acc = utils.compute_acc(logp, labels, val_mask)
            print('Epoch {:05d} | Loss {:.4f} | Train Acc {:.4f} | Val Acc {:.4f} '
                  '| Time(s) {:.4f} | GPU {:.1f} MiB'.format(
                epoch, loss, train_acc, val_acc, np.mean(dur), th.cuda.max_memory_allocated() / 1000000))
    # persist the final model state
    th.save(model.state_dict(), os.path.join(args.output_dir, args.output_name))
| 41.393258 | 114 | 0.683496 |
3dfd83b71400b6e832cb757945e612ae86e6bd4c | 27,127 | py | Python | AltFS.py | g-mc/AltFS | 4d83a928cb1f1ec127e9565b578779ec4e659dae | [
"BSD-3-Clause"
] | 54 | 2019-02-27T15:57:27.000Z | 2021-10-10T21:51:50.000Z | AltFS.py | g-mc/AltFS | 4d83a928cb1f1ec127e9565b578779ec4e659dae | [
"BSD-3-Clause"
] | null | null | null | AltFS.py | g-mc/AltFS | 4d83a928cb1f1ec127e9565b578779ec4e659dae | [
"BSD-3-Clause"
] | 11 | 2019-03-01T19:07:25.000Z | 2020-12-03T14:56:44.000Z | #!/usr/bin/env python
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Alternate Fileless File System
References:
Author: Dor Azouri <dor.azouri@safebreach.com>
Date: 2019-01-01
"""
import logging
import types
from exceptions_ import BucketValueMissingException, \
EndOfFileReachedException, \
FileIsClosedException, \
FileNotFoundException, \
InternalStorageOperationException, \
UnsupportedProviderException
from model.block import Block
from model.descriptor import Descriptor
import providers
from providers.common.calculations import \
calculate_bits_sum, \
calculate_next_available_index, \
split_string_by_max_size
from providers.common.machine_identification import \
get_machine_identification_string
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def __init__(self, storage_provider_name, machine_identification_method,
max_block_size, **kwargs):
"""
Constructor for a new AltFS.
A new AltFS instance is created, given the storage provider name,
the machine identification method name,
and the desired maximal block size for that AltFS.
Note:
* Provider must reside in /providers and implement StorageProvider
* Machine identification method name should be implemented in
/providers/common/machine_identification.py and exported through
the global METHODS dictionary.
"""
logger.debug("initializing AltFS with storage provider: %s, " +
"machine identification method: %s" %
storage_provider_name, machine_identification_method)
# calculate checksum of machine identification string, used for
# calculating the bucket index of the first file system block
machine_identification_string = get_machine_identification_string(
machine_identification_method)
self._set_machine_id_checksum(machine_identification_string)
# initialize desired provider
self._storage_providers = AltFS._load_providers()
if storage_provider_name not in self._storage_providers:
raise UnsupportedProviderException(storage_provider_name)
self._storage_provider = \
self._storage_providers[storage_provider_name](
machine_identification_string, **kwargs)
# set the buckets count, used for the modulus hash function
self._buckets_count = self._storage_provider.get_buckets_count()
# set the first bucket ID, used for the fs descriptor (superblock)
self._first_bucket_id = \
self._machine_id_checksum % self._buckets_count
# set the max data block size
self.max_block_size = max_block_size
# log calculated initialization info
logger.info("INIT:number of buckets (=divider): %s" %
self._buckets_count)
logger.info("INIT:machine identification string: %s" %
machine_identification_string)
logger.info("INIT:machine identification checksum: %s" %
self._machine_id_checksum)
logger.info("INIT:first bucket ID: %s" %
self._first_bucket_id)
# iterate all buckets in storage to fill the blocks mapping
self._load_blocks_dict()
# load the descriptor superblock/create fresh if it does not exist
self._load_descriptor()
# mapping of open files (volatile runtime File instances)
self.files = {}
def _set_machine_id_checksum(self, machine_identification_string):
"""Sets the calculated checksum of the machine identification string"""
self._machine_id_checksum = calculate_bits_sum(
machine_identification_string)
    def _load_blocks_dict(self):
        """
        Fills the mapping of {block_id : (bucket_id, value_id)}.

        Iterates through all values in all buckets in storage. Determining
        which of the iterated values are part of the virtual FS is
        provider-dependent.

        Note: the filling policy is naive - any exception in the storage layer
        is ignored (only logged), and iteration continues to the next bucket.
        """
        self._blocks_dict = {}
        # xrange: this module targets Python 2
        for bucket_id in xrange(self._buckets_count):
            try:
                values = self._storage_provider.get_value_ids_in_bucket(
                    bucket_id)
            except Exception as e:
                # best-effort: log and move on to the next bucket
                logger.error(e, exc_info=True)
                continue
            for value_id in values:
                # parse each stored value and index it by its block ID
                block = self._get_block(bucket_id, value_id)
                self._blocks_dict[block.block_id] = (bucket_id, value_id)
    def _load_descriptor(self):
        """
        Loads the descriptor instance from the superblock.

        Creates an empty descriptor if such a block does not exist,
        and writes it to storage.
        """
        self._descriptor = Descriptor()
        try: # try load the existing descriptor from superblock (value ID 0)
            first_block_data = self._storage_provider.get_block(
                self._first_bucket_id, 0)
            block = Block.generate_block_from_packed_str(first_block_data)
            # the superblock's payload is the descriptor's attribute dict
            # (see _generate_descriptor_block), so restore it wholesale
            self._descriptor.__dict__ = block.data
        except BucketValueMissingException: # superblock does not exist
            logger.error("superblock does not exist. Creating a new empty one")
            # create an empty descriptor and write it as a superblock (id=0)
            self._write_block(
                self._first_bucket_id, 0, self._generate_descriptor_block())
def _get_next_available_block_id(self, count=1, blacklist=None):
"""
Returns the next _count_ available block IDs.
Considering the IDs given in the blacklist parameter.
The next ID is the lowest available (re-use)
"""
if blacklist is None:
blacklist = []
ids = []
existing_ids = self._blocks_dict.keys() + blacklist
for i in xrange(count):
id_ = calculate_next_available_index(existing_ids)
ids.append(id_)
existing_ids.append(id_)
if count == 1:
return ids[0]
return ids
    def _get_block(self, bucket_id, value_id):
        """
        Loads the block data from the desired value.

        Returns it as a Block instance.

        Raises InternalStorageOperationException if the provider has failed
        to read.
        """
        try:
            block = Block.generate_block_from_packed_str(
                self._storage_provider.get_block(bucket_id, value_id))
        except Exception as e:
            # wrap any provider-level failure in the FS-level read exception
            logger.error("reading of block at (%s:%s) has failed: %s" %
                         (bucket_id, value_id, str(e)))
            raise InternalStorageOperationException(
                InternalStorageOperationException.OPERATION_READ, str(e))
        logger.debug("a block was read at (%s:%s):%s" %
                     (bucket_id, value_id, block.__dict__))
        return block
def _get_block_by_id(self, block_id):
"""Returns a Block instance of the desired block ID."""
return self._get_block(*self._blocks_dict[block_id])
def _generate_data_termination_block(self, data="", block_id=None):
"""
Returns a Block instance to be used as the last data block of a file.
It closes the chain of data blocks by pointing to the superblock as
next block.
"""
new_block_id = block_id if block_id is not None else \
self._get_next_available_block_id()
return Block(block_id=new_block_id,
block_type=Block.TYPE_DATA,
data_length=len(data),
next_block_id=0,
data=data)
    def _generate_descriptor_block(self):
        """
        Returns a Block instance of type TYPE_DESCRIPTOR.

        The current descriptor object's attribute dict is stored as the
        block payload (restored wholesale by _load_descriptor).

        Note: the next block ID field is redundant for the descriptor,
        so it's given a constant 1.
        """
        return Block(block_id=0,
                     block_type=Block.TYPE_DESCRIPTOR,
                     data_length=len(self._descriptor.serialize()),
                     next_block_id=1,
                     data=self._descriptor.__dict__)
    def _write_block(self, bucket_id, value_id, block):
        """
        Writes the given Block instance to the given value_id,
        overriding the existing one.

        Returns the value ID to which the block was written.

        Note: if the given value ID is None, a new value is created in the
        given bucket, and the block is written to it.

        Raises InternalStorageOperationException if the provider failed
        to write.
        """
        logger.debug("writing block at (%s:%s):%s" %
                     (bucket_id, value_id, block.__dict__))
        try:
            # the provider returns the actual value ID used (relevant when
            # value_id was None and a new value was created)
            value_id = self._storage_provider.write_block(
                bucket_id, value_id, data=block.serialize())
        except Exception as e:
            logger.error("writing of block (id:%s) to (%s:%s) has failed: %s" %
                         (block.block_id, bucket_id, value_id, str(e)))
            raise InternalStorageOperationException(
                InternalStorageOperationException.OPERATION_WRITE, str(e))
        # add the new block mapping
        self._blocks_dict[block.block_id] = (bucket_id, value_id)
        return value_id
    def _delete_value(self, bucket_id, value_id):
        """
        Deletes the value in the given bucket and value IDs.

        Raises InternalStorageOperationException if the provider failed
        to delete.
        """
        # the block is read first only to learn its block ID, needed for
        # logging and for removing its entry from the mapping below
        block = self._get_block(bucket_id, value_id)
        logger.debug("deleting block ID %s (%s:%s)" %
                     (block.block_id, bucket_id, value_id))
        try:
            self._storage_provider.delete_block(bucket_id, value_id)
        except Exception as e:
            logger.error(
                "deleting of block (id:%s) to (%s:%s) has failed: %s" %
                (block.block_id, bucket_id, value_id, str(e)))
            raise InternalStorageOperationException(
                InternalStorageOperationException.OPERATION_DELETE, str(e))
        # remove the mapping of the deleted block
        del self._blocks_dict[block.block_id]
def _delete_data_blocks(self, start_block_id, until_block_id=None):
"""
Delete a chain of linked blocks.
Starting from the given block ID, ending in the data termination block.
"""
for block in list(self._get_blocks_generator(start_block_id)):
if until_block_id is not None and block.block_id == until_block_id:
break
bucket_id, value_id = self._blocks_dict[block.block_id]
self._delete_value(bucket_id, value_id)
def _get_block_by_file_offset(self, file_name, offset):
"""
Returns a tuple of: (block, offset inside block).
The block is the one in which the given offset is located
"""
start_block_id = self._descriptor.files_dict[file_name]
position = 0
for block in self._get_blocks_generator(start_block_id):
if position <= offset <= position + block.data_length:
return block, offset - position
position += block.data_length
raise EndOfFileReachedException(
"The given offset exceeds the file size")
    def _create_data_blocks(self, data, terminating_at=None):
        """
        Writes a chain of data blocks to hold the given data.

        Optional terminating_at parameter defines the next_block_id of the
        last data block in the chain. If omitted, the chain ends at the
        superblock (via a fresh data termination block).

        Returns the list of block IDs allocated for the chain.
        """
        if len(data) == 0:
            return []
        # split the payload into chunks of at most max_block_size
        chunks = list(split_string_by_max_size(data, self.max_block_size))
        new_block_ids = self._get_next_available_block_id(count=len(chunks))
        # the allocator returns a bare int when count == 1
        if isinstance(new_block_ids, int):
            new_block_ids = [new_block_ids]
        # NOTE(review): truthiness test — a terminating_at of 0 (superblock)
        # would be treated as "not given"; confirm that is intended
        if terminating_at:
            new_block_ids.append(terminating_at)
        else:
            # allocate one extra ID for the termination block, excluding
            # the IDs just allocated above
            new_block_ids.append(self._get_next_available_block_id(
                count=1, blacklist=new_block_ids))
        chunk = ""
        # each block points to the ID of the next one in the chain
        for chunk_id, chunk in zip(range(len(chunks)), chunks):
            new_block = Block(block_id=new_block_ids[chunk_id],
                              block_type=Block.TYPE_DATA,
                              data_length=len(chunk),
                              next_block_id=new_block_ids[chunk_id + 1],
                              data=chunk)
            # target bucket is derived from the chunk's bits-sum hash
            bucket_id = calculate_bits_sum(chunk) % self._buckets_count
            self._write_block(bucket_id, None, new_block)
        if not terminating_at:
            # close the chain with a termination block (last allocated ID);
            # note: 'chunk' here is the last chunk from the loop above
            new_block = self._generate_data_termination_block(
                block_id=new_block_ids[-1])
            bucket_id = calculate_bits_sum(chunk) % self._buckets_count
            self._write_block(bucket_id, None, new_block)
        return new_block_ids
    def create_file(self, file_name):
        """
        Returns a File object of the given name.

        Note:
        * If a file with that name is already open, its corresponding File
          instance is returned (reopened, pointer reset to 0).
        * Otherwise, the required data blocks are written to storage, and a
          corresponding File instance is returned.
        """
        if file_name in self.files:
            # in case a File object already exists - return it
            self.files[file_name]._set_open()
            self.files[file_name].set_pointer(0)
            return self.files[file_name]
        if file_name not in self._descriptor.files_dict:
            # in case file doesn't exist in storage - create it.
            # creating a new empty file means adding only a single data
            # termination block, as there are no actual data blocks yet
            block = self._generate_data_termination_block()
            # adding the required mapping needed in the descriptor:
            self._descriptor.add_file(file_name, block.block_id)
            # flushing the new descriptor after update, into storage
            self._write_block(
                self._first_bucket_id, 0, self._generate_descriptor_block())
            # calculate the target bucket ID for the new block, by applying the
            # hash function on the file name
            target_bucket_id = calculate_bits_sum(
                file_name) % self._buckets_count
            # write the data termination block to the calculated bucket,
            # creating a new value by passing None as the value ID
            value_id = self._write_block(
                target_bucket_id, None, block)
            # add the new block mapping
            self._blocks_dict[block.block_id] = (
                target_bucket_id, value_id)
        # in case the file exists in storage, a new File instance is created.
        # We also do it in case we have just created the actual file in storage
        self.files[file_name] = AltFS.File(self, file_name)
        return self.files[file_name]
def delete_file(self, file_name):
    """
    Remove *file_name* from storage entirely.

    Deletes every data block of the file (including its data termination
    block), drops the cached File instance, unregisters the file from the
    descriptor, and flushes the updated descriptor superblock to storage.

    Raises:
        FileNotFoundException: if the file is not present in storage.
    """
    descriptor = self._descriptor
    if file_name not in descriptor.files_dict:
        raise FileNotFoundException("Cannot delete a non-existent file")
    # wipe the on-storage chain, starting from the file's first block
    first_block_id = descriptor.files_dict[file_name]
    self._delete_data_blocks(first_block_id)
    # drop the in-memory File instance
    del self.files[file_name]
    # unregister the file and persist the updated descriptor superblock
    descriptor.remove_file(file_name)
    updated_descriptor = self._generate_descriptor_block()
    self._write_block(self._first_bucket_id, 0, updated_descriptor)
def read_file(self, file_name, start, size):
    """
    Return up to *size* characters of *file_name*, starting at *start*.

    Walks the file's block chain from its first block, collecting block
    payloads until at least ``start + size`` characters are available (or
    the chain is exhausted), then slices out the requested range.

    Side effect: the cached File instance's pointer is moved to
    ``start + size`` (even when the file is shorter than that).

    Raises:
        FileNotFoundException: if *file_name* is not present in storage.
    """
    if file_name not in self._descriptor.files_dict:
        raise FileNotFoundException()
    # collect the chunks in a list and join once at the end, instead of
    # repeated string concatenation (which is quadratic in the number of
    # blocks read)
    chunks = []
    position = 0
    first_file_block = self._descriptor.files_dict[file_name]
    for block in self._get_blocks_generator(first_file_block):
        chunks.append(block.data)
        position += block.data_length
        if position >= start + size:
            # enough data gathered to cover the requested range
            break
    self.files[file_name].set_pointer(start + size)
    data = "".join(chunks)
    return data[start:start + size]
def write_file(self, file_name, start, data):
    """
    Writes the given data to file given its name.
    Starting from given offset.

    The write is copy-on-write style: fresh data blocks are created for
    *data*, the existing chain is re-linked around them, and the blocks
    that were overwritten are deleted.
    Side effect: the cached File instance's pointer is moved to
    ``start + len(data)``.
    """
    # in case data to write exceeds the current file size, create new
    # blocks that will be later linked with the block where start is
    # located. The overall result in this case should look like this:
    # [current blocks...] -> [fork block] -> [new blocks] -> [superblock]
    if start + len(data) >= self.get_size(file_name):
        # create the data blocks, still not linked, hanging in the air
        new_block_ids = self._create_data_blocks(data)
        # the new blocks anyway exceed the current file size, so no need to
        # connect the end of the new chain back to a current block
        until_block_id = None
        # calculate the starting block out of the current file blocks,
        # that will link to the new blocks
        fork_block, offset_in_fork_block = self._get_block_by_file_offset(
            file_name, start)
    # in case data to write fits into the file size, we need to link the
    # new data blocks from both sides. Eventually, it should look like
    # this:
    # [current blocks...] -> [fork block] -> [new blocks] ->
    # [merging block] -> [current blocks...] -> [superblock]
    else:
        # calculate the block to which the new blocks end should link to
        merging_block, offset_in_merging_block = \
            self._get_block_by_file_offset(
                file_name, start + len(data) - 1)
        # calculate the starting block out of the current file blocks,
        # that will link to the new blocks
        fork_block, offset_in_fork_block = self._get_block_by_file_offset(
            file_name, start)
        # handle edge case where the fork and merging blocks are the same.
        # in this case, we just need to override that block's data
        if fork_block.block_id == merging_block.block_id:
            new_data = fork_block.data[:offset_in_fork_block] + \
                data + fork_block.data[offset_in_fork_block + len(data):]
            self._update_block(fork_block.block_id,
                               data=new_data, data_length=len(new_data))
            self.files[file_name].set_pointer(start + len(data))
            return
        # in the general case, we create new data blocks to be connected as
        # described, and cut data from the merging block, as the new data
        # length demands
        else:
            new_block_ids = self._create_data_blocks(
                data, terminating_at=merging_block.block_id)
            if offset_in_merging_block < merging_block.data_length:
                # keep only the tail of the merging block that the new
                # data does not overwrite
                new_data = merging_block.data[offset_in_merging_block:]
                self._update_block(
                    merging_block.block_id,
                    data=new_data,
                    data_length=len(new_data))
            until_block_id = merging_block.block_id
    # cut the data in the fork block, as the start offset demands
    if offset_in_fork_block < fork_block.data_length:
        new_data = fork_block.data[:offset_in_fork_block]
        self._update_block(fork_block.block_id,
                           data=new_data, data_length=len(new_data))
    # delete the current blocks, starting from the fork block and ending at
    # the merging block/super block (depends on the above case - each case
    # sets the until_block_id value accordingly)
    self._delete_data_blocks(
        fork_block.next_block_id, until_block_id=until_block_id)
    # splice the new chain in right after the fork block
    self._update_block(fork_block.block_id, next_block_id=new_block_ids[0])
    self.files[file_name].set_pointer(start + len(data))
    return
def get_file_names(self):
    """List the names of every file currently present in storage."""
    descriptor = self._descriptor
    return descriptor.get_file_names()
def get_size(self, file_name):
    """Return the total payload length of *file_name* (sum of its blocks'
    data lengths)."""
    first_block_id = self._descriptor.files_dict[file_name]
    block_chain = self._get_blocks_generator(first_block_id)
    return sum(block.data_length for block in block_chain)
| 41.798151 | 79 | 0.629742 |
3dfe1030cd691567d0eb0ceab815ccdf039f3393 | 269 | py | Python | python-crypt-service/services/dbservice.py | Shirish-Singh/crypt-analysis | eed6d00925389ee0973733e6b7397cd460f97f99 | [
"Apache-2.0"
] | null | null | null | python-crypt-service/services/dbservice.py | Shirish-Singh/crypt-analysis | eed6d00925389ee0973733e6b7397cd460f97f99 | [
"Apache-2.0"
] | null | null | null | python-crypt-service/services/dbservice.py | Shirish-Singh/crypt-analysis | eed6d00925389ee0973733e6b7397cd460f97f99 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from configurations import configuration
from pymongo import MongoClient
MONGO_HOST= configuration.MONGO_HOST
client = MongoClient(MONGO_HOST)
| 20.692308 | 40 | 0.814126 |
3dffaaba0f49d4e4bcf7fb58f40e51bc3b413470 | 448 | py | Python | simple_amqp_rpc/data.py | rudineirk/py-simple-amqp-rpc | 823b6efe271732495d4e3ccdcb9f4d85138c1d42 | [
"MIT"
] | null | null | null | simple_amqp_rpc/data.py | rudineirk/py-simple-amqp-rpc | 823b6efe271732495d4e3ccdcb9f4d85138c1d42 | [
"MIT"
] | 1 | 2021-06-01T22:28:43.000Z | 2021-06-01T22:28:43.000Z | simple_amqp_rpc/data.py | rudineirk/py-simple-amqp-rpc | 823b6efe271732495d4e3ccdcb9f4d85138c1d42 | [
"MIT"
] | null | null | null | from typing import Any, List
from dataclasses import dataclass, replace
from .consts import OK
| 15.448276 | 42 | 0.654018 |
ad000563b867048b766de0b54cb60801221e67a0 | 598 | py | Python | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
import timeit
setup = '''
import os
def FileTest(path):
file = open(path, "r")
lines = file.readlines()
data = [None for i in range(len(lines))]
i = 0
for line in lines:
data[i] = line.split(',')
j = 0
for field in data[i]:
data[i][j] = field.strip('\\'\\n')
j += 1
i += 1
return data
'''
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - cold")
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - warm")
| 20.62069 | 93 | 0.605351 |
ad005ad94d7f773d61fa5f1363d44b1d458fd462 | 5,475 | py | Python | boris/classification.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 1 | 2021-08-10T14:01:26.000Z | 2021-08-10T14:01:26.000Z | boris/classification.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 5 | 2018-04-04T14:31:34.000Z | 2020-06-08T07:50:23.000Z | boris/classification.py | fragaria/BorIS | 9585c83f29220d8f63910dabd98641ab41ace6cf | [
"MIT"
] | 4 | 2017-02-06T15:38:34.000Z | 2018-03-21T09:40:12.000Z | # -*- coding: utf-8 -*-
'''
Created on 25.9.2011
@author: xaralis
'''
from model_utils import Choices
SEXES = Choices(
(1, 'FEMALE', u'ena'),
(2, 'MALE', u'mu')
)
NATIONALITIES = Choices(
(1, 'CZ', u'esk republika'),
(2, 'EU', u'Jin - EU'),
(3, 'NON_EU', u'Jin - non-EU'),
(4, 'UNKNOWN', u'Neznmo')
)
ETHNIC_ORIGINS = Choices(
(1, 'NON_GYPSY', u'Ne-romsk'),
(2, 'GYPSY', u'Romsk'),
(3, 'NOT_MONITORED', u'Nesledovno')
)
LIVING_CONDITIONS = Choices(
(1, 'ALONE', u'Sm'),
(2, 'WITH_FAMILY', u'S rodii/rodinou'),
(3, 'WITH_FRIENDS', u'S pteli'),
(4, 'WITH_PARTNER', u'S partnerem'),
(5, 'WITH_PARTNER_AND_CHILDREN', u'S partnerem a dttem'),
(6, 'ALONE_WITH_CHILDREN', u'Sm s dttem'),
(7, 'UNKNOWN', u'Nen znmo')
)
ACCOMODATION_TYPES = Choices(
(1, 'WITH_PARENTS', u'Doma (u rodi)'),
(2, 'OWN_FLAT', u'Vlastn byt (i pronajat)'),
(3, 'FOREIGN_FLAT', u'Ciz byt'),
(4, 'PUBLIC_ACCOMODATION', u'Ubytovna'),
(5, 'SQUAT', u'Squat'),
(6, 'BARRACKS', u'Kasrna'),
(7, 'HOMELESS', u'Bez domova, na ulici'),
(8, 'UNKNOWN', u'Nen znmo')
)
EMPLOYMENT_TYPES = Choices(
(1, 'REGULAR', u'Pravideln zam.'),
(2, 'SCHOOL', u'kola'),
(3, 'OCCASIONAL_WORK', u'Pleitostn prce'),
(4, 'REGISTERED_ON_EB', u'Registrovn na P'),
(5, 'NO_EMPLOYMENT', u'Bez zamstnn'),
(6, 'STATE_SUPPORT', u'Dvky SZ'),
(8, 'UNKNOWN', u'Nen znmo')
)
EDUCATION_LEVELS = Choices(
(1, 'BASIC', u'Zkladn'),
(2, 'PRACTICAL_SECONDARY', u'Vyuen'),
(3, 'SECONDARY', u'Stedn s maturitou'),
(4, 'HIGHER_PRACTICAL', u'Vy odborn'),
(5, 'UNIVERSITY_GRADE', u'Vysokokolsk'),
(6, 'BASIC_NOT_COMPLETED', u'Neukonen zkladn'),
(7, 'UNKNOWN', u'Nen znmo')
)
DRUGS = Choices( # (Numbers reflect the old drug ids.)
(3, 'METHAMPHETAMINE', u'Pervitin, jin amfetaminy'),
(4, 'SUBUTEX_LEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - legln'),
(5, 'TOBACCO', u'Tabk'),
(8, 'THC', u'THC'),
(9, 'ECSTASY', u'Extze'),
(10, 'DESIGNER_DRUGS', u'Designer drugs'),
(11, 'HEROIN', u'Heroin'),
(12, 'BRAUN', u'Braun a jin opity'),
(13, 'RAW_OPIUM', u'Surov opium'),
(14, 'SUBUTEX_ILLEGAL', u'Subutex, Ravata, Buprenorphine alkaloid - ilegln'),
(16, 'ALCOHOL', u'Alkohol',),
(17, 'INHALER_DRUGS', u'Inhalan ltky, edidla'),
(18, 'MEDICAMENTS', u'Medikamenty'),
(19, 'METHADONE', u'Metadon'),
(20, 'COCAINE', u'Kokain, crack'),
(21, 'SUBOXONE', u'Suboxone'),
(22, 'VENDAL', u'Vendal'),
(23, 'LSD', u'LSD'),
(24, 'PSYLOCIBE', u'Lysohlvky'),
(28, 'FENTANYL', u'Fentanyl'),
(25, 'UNKNOWN', u'Neznmo'),
(26, 'PATHOLOGICAL_GAMBLING', u'Patologick hrstv'),
(27, 'OTHER_NON_SUBSTANCE_ADDICTION', u'Jin neltkov zvislost'),
)
# Disable `application`, `first_try_application` and `primary_drug_usage` fields for these drugs
NON_APPLICATION_DRUGS = ['26', '27']
DRUG_APPLICATION_FREQUENCY = Choices(
(1, 'LESS_THAN_3X_A_MONTH', u'mn ne 3x msn'),
(2, 'ONCE_A_WEEK', u'1x tdn'),
(3, 'ON_WEEKENDS', u'vkendov'),
(4, 'EVERY_SECOND_DAY', u'obden'),
(5, 'DAILY', u'denn'),
(6, '2X_3X_A_DAY', u'2-3x denn'),
(7, 'MORE_THAN_3X_A_DAY', u'vce ne 3x denn'),
(8, 'NONE_FOR_MORE_THAN_6_MONTHS', u'neuita dle ne 6 msc'),
# (9, 'NONE_FOR_LAST_6_MONTHS', u'neuita poslednch 6 msc'), # Feature 103
(10, 'NONE_FOR_LAST_3_MONTHS', u'neuita posledn 3 msce'),
(11, 'NONE_FOR_LAST_1_MONTH', u'neuita v poslednm msci'),
(12, 'UNKNOWN', u'Nen znmo')
)
DRUG_APPLICATION_TYPES = Choices(
(1, 'VEIN_INJECTION', u'injekn do ly'),
(2, 'MUSCLE_INJECTION', u'injekn do svalu'),
(3, 'ORAL', u'stn'),
(4, 'SNIFFING', u'sniff (upn)'),
(5, 'SMOKING', u'kouen'),
(6, 'INHALATION', u'inhalace'),
(7, 'UNKNOWN', u'Nen znmo')
)
RISKY_BEHAVIOR_KIND = Choices(
(1, 'EQUIPMENT_SHARING', u'Sdlen nin'),
(2, 'SEX_WITHOUT_PROTECTION', u'Nechrnn sex'),
(3, 'SYRINGE_SHARING', u'Sdlen jehel'),
(4, 'INTRAVENOUS_APPLICATION', u'Nitroiln aplikace'),
(5, 'RISKY_APPLICATION', u'Rizikov aplikace'),
(6, 'OVERDOSING', u'Pedvkovn'),
(7, 'HEALTH_COMPLICATIONS', u'Zdravotn komplikace')
)
RISKY_BEHAVIOR_PERIODICITY = Choices(
(1, 'NEVER', u'Nikdy'),
(2, 'ONCE', u'Jednorzov'),
(3, 'OFTEN', u'Opakovan '),
(4, 'UNKNOWN', u'Nen znmo')
)
DISEASES = Choices(
(1, 'HIV', u'HIV'),
(2, 'VHA', u'VHA'),
(3, 'VHB', u'VHB'),
(4, 'VHC', u'VHC'),
(5, 'SYFILIS', u'Syfilis'),
)
DISEASE_TEST_RESULTS = Choices(
(0, 'UNKNOWN', u'Neznmo, zda testovn'),
(1, 'TESTED_POSITIVE', u'Testovn - pozitivn'),
(2, 'TESTED_NEGATIVE', u'Testovn - negativn'),
(3, 'TESTED_UNKNOWN', u'Testovn - vsledek neznm'),
(4, 'NOT_TESTED', u'Nikdy netestovn'),
(5, 'RESULT_NOT_ACCLAIMED', u'Nevyzvedl vsledek'),
)
DISEASE_TEST_SIGN = Choices(
('p', 'POSITIVE', u'Pozitivn'),
('n', 'NEGATIVE', u'Negativn'),
('r', 'REACTIVE', u'Reaktivn'),
('i', 'INCONCLUSIVE', u'Test neprkazn')
)
ANONYMOUS_TYPES = Choices(
(1, 'NON_USER', u'neuivatel'),
(2, 'NON_IV', u'neIV'),
(3, 'IV', u'IV'),
(4, 'NON_USER_PARENT', u'rodi'),
(5, 'THC', u'THC')
)
| 32.589286 | 96 | 0.606575 |
9a735bf957ffc30fea6d0bb1fe8f079ce7582eb6 | 23,569 | py | Python | extern/face_expression/face_expression/dataset.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | 1 | 2022-02-07T06:12:26.000Z | 2022-02-07T06:12:26.000Z | extern/face_expression/face_expression/dataset.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | null | null | null | extern/face_expression/face_expression/dataset.py | wangxihao/rgbd-kinect-pose | 03180723c99759ba2500bcd42b5fe7a1d26eb507 | [
"MIT"
] | null | null | null | import os
import sys
import json
import pickle
import h5py
from tqdm import tqdm
import numpy as np
import torch
import cv2
import scipy.spatial
import hydra
from face_expression import utils
from face_expression.third_party.face_mesh_mediapipe import FaceMeshMediaPipe
# class VoxCeleb2FaceDataset(torch.utils.data.Dataset):
# def __init__(
# self,
# h5_path,
# scheme_path,
# image_root,
# return_images=True,
# bbox_scale=2.0,
# image_shape=(256, 256),
# sample_range=None
# ):
# self.h5_path = h5_path
# self.scheme_path = scheme_path
# self.image_root = image_root
# self.return_images = return_images
# self.bbox_scale = bbox_scale
# self.image_shape = image_shape
# self.sample_range = sample_range
# # load scheme
# with open(scheme_path, 'rb') as f:
# self.scheme = pickle.load(f)
# if sample_range is not None:
# self.scheme = [self.scheme[i] for i in range(sample_range[0], sample_range[1], sample_range[2])]
# def open_h5_file(self):
# self.h5f = h5py.File(self.h5_path, mode='r')
# def load_image(self, identity_id, video_id, utterance_id, seq_index):
# image_dir = os.path.join(self.image_root, identity_id, video_id, utterance_id)
# names = sorted(os.listdir(image_dir))
# if seq_index < len(names):
# name = names[seq_index]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# else:
# # black image mock
# name = names[0]
# path = os.path.join(image_dir, name)
# image = cv2.imread(path)
# image = np.zeros(image.shape, dtype=np.uint8)
# return image
# def get_camera_matrix(self, h, w):
# fx, fy = 3000.0, 3000.0
# cx, cy = w/2, h/2
# camera_martix = np.array([
# [fx, 0.0, cx],
# [0.0, fy, cy],
# [0.0, 0.0, 1.0]
# ])
# return camera_martix
# def get_transformation_matrix(self):
# transformation_matrix = np.eye(3, 4)
# return transformation_matrix
# def get_bbox(self, keypoints_2d):
# left, top, right, down = (
# keypoints_2d[:, 0].min(),
# keypoints_2d[:, 1].min(),
# keypoints_2d[:, 0].max(),
# keypoints_2d[:, 1].max()
# )
# # convex_hull = scipy.spatial.ConvexHull(points)
# # center_x, center_y = (np.mean(convex_hull.points[convex_hull.vertices, axis]) for axis in (0, 1))
# center_x, center_y = (left + right) / 2, (top + down) / 2
# w, h = right - left, down - top
# bbox = (
# center_x - w/2,
# center_y - h/2,
# center_x + w/2,
# center_y + h/2
# )
# bbox = utils.common.utils.common.get_square_bbox(bbox)
# bbox = utils.common.utils.common.scale_bbox(bbox, self.bbox_scale)
# return bbox
# def normalize_keypoints_2d(self, keypoints_2d):
# convex_hull = scipy.spatial.ConvexHull(keypoints_2d)
# center = np.mean(convex_hull.points[convex_hull.vertices], axis=0)
# keypoints_2d = (keypoints_2d - center) / np.sqrt(convex_hull.area)
# return keypoints_2d
# def load_sample(self, identity_id, video_id, utterance_id, seq_index):
# sample = dict()
# # load h5_data
# try:
# h5_data = self.h5f[identity_id][video_id][utterance_id]
# except Exception as e:
# print(identity_id, video_id, utterance_id, seq_index)
# print(e)
# sample['expression'] = h5_data['expressions'][seq_index]
# sample['pose'] = h5_data['poses'][seq_index]
# sample['beta'] = h5_data['betas'][:]
# sample['keypoints_2d'] = h5_data['face_keypoints_2d'][seq_index]
# # load image
# if self.return_images:
# image = self.load_image(identity_id, video_id, utterance_id, seq_index)
# orig_h, orig_w = image.shape[:2]
# # crop
# bbox = self.get_bbox(sample['keypoints_2d'])
# image = utils.common.utils.common.crop_image(image, bbox)
# # resize
# image = utils.common.utils.common.resize_image(image, self.image_shape)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# sample['image'] = image
# # load projection matrix
# h, w = image.shape[1:3]
# bbox_h, bbox_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
# if 'camera_matrix' in h5_data:
# print('hey')
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(orig_h, orig_w)
# camera_matrix = utils.common.utils.common.update_after_crop_and_resize(
# camera_matrix, bbox, (w/bbox_w, h/bbox_h)
# )
# # update keypoints 2d ufter crop and resize
# sample['keypoints_2d'][:, 0] -= bbox[0]
# sample['keypoints_2d'][:, 1] -= bbox[1]
# sample['keypoints_2d'][:, 0] *= w/bbox_w
# sample['keypoints_2d'][:, 1] *= h/bbox_h
# else:
# image = np.zeros((*self.image_shape, 3), dtype=np.uint8)
# image = image / 255.0
# image = image.transpose(2, 0, 1)
# h, w = image.shape[1:3]
# sample['image'] = image
# if 'camera_matrix' in h5_data:
# camera_matrix = h5_data['camera_matrix'][:]
# else:
# camera_matrix = self.get_camera_matrix(*self.image_shape)
# transformation_matrix = self.get_transformation_matrix()
# projection_matrix = camera_matrix @ transformation_matrix
# sample['camera_matrix'] = camera_matrix
# sample['projection_matrix'] = projection_matrix
# sample['h'] = h
# sample['w'] = w
# # normalize keypoints 2d
# sample['keypoints_2d'] = self.normalize_keypoints_2d(sample['keypoints_2d'])
# return sample
# def __len__(self):
# return len(self.scheme)
# def __getitem__(self, index):
# # this should be normally done in __init__, but due to DataLoader behaviour
# # when num_workers > 1, the h5 file is opened during first data access:
# # https://github.com/pytorch/pytorch/issues/11929#issuecomment-649760983
# if not hasattr(self, 'h5f'):
# self.open_h5_file()
# sample_key = self.scheme[index]
# sample = self.load_sample(*sample_key)
# return sample
# @staticmethod
# def build_scheme(h5f):
# scheme = []
# for identity_id in tqdm(h5f):
# for video_id in h5f[identity_id]:
# for utterance_id in h5f[identity_id][video_id]:
# seq_length = h5f[identity_id][video_id][utterance_id]['expressions'].shape[0]
# for seq_index in range(seq_length):
# scheme.append((identity_id, video_id, utterance_id, seq_index))
# scheme = sorted(scheme)
# return scheme
# @staticmethod
# def preprocess_dataset(face_root, image_root, openpose_root, h5_path):
# # load scheme
# scheme = []
# identity_id_list = sorted(os.listdir(face_root))
# for identity_id in tqdm(identity_id_list):
# identity_dir = os.path.join(face_root, identity_id)
# video_id_list = sorted(os.listdir(identity_dir))
# for video_id in video_id_list:
# video_dir = os.path.join(identity_dir, video_id)
# utterance_id_list = sorted(os.listdir(video_dir))
# for utterance_id in utterance_id_list:
# utterance_dir = os.path.join(video_dir, utterance_id)
# scheme.append((identity_id, video_id, utterance_id))
# scheme = sorted(scheme)
# # build h5 file
# with h5py.File(h5_path, 'w') as hf:
# for (identity_id, video_id, utterance_id) in tqdm(scheme):
# # load face
# face_dir = os.path.join(face_root, identity_id, video_id, utterance_id, 'joints_op_face')
# expressions = np.load(os.path.join(face_dir, 'expressions.npy')) * 100
# poses = np.load(os.path.join(face_dir, 'poses.npy'))
# betas = np.load(os.path.join(face_dir, 'betas.npy'))
# # load openpose keypoints 2d
# openpose_dir = os.path.join(openpose_root, identity_id, video_id, utterance_id)
# face_keypoints_2d_list = []
# names = sorted(os.listdir(openpose_dir))
# for name in names:
# path = os.path.join(openpose_dir, name)
# with open(path) as f:
# openpose_data = json.load(f)
# face_keypoints_2d = openpose_data['people'][0]['face_keypoints_2d']
# face_keypoints_2d = np.array(face_keypoints_2d).reshape(70, 3)
# face_keypoints_2d = face_keypoints_2d[:, :2] # remove confidences
# face_keypoints_2d_list.append(face_keypoints_2d)
# face_keypoints_2d_arr = np.array(face_keypoints_2d_list)
# # save to h5
# group = hf.create_group(f"{identity_id}/{video_id}/{utterance_id}")
# group['expressions'] = expressions
# group['poses'] = poses
# group['betas'] = betas
# group['face_keypoints_2d'] = face_keypoints_2d_arr
if __name__ == '__main__':
main()
| 37.058176 | 125 | 0.566252 |
9a75886d1c5240a727719c8116254cb13ec6d703 | 1,316 | py | Python | session7/OLED_Clock.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | null | null | null | session7/OLED_Clock.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | null | null | null | session7/OLED_Clock.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | null | null | null | ######################################################################
# OLED_Clock.py
#
# This program display date and time on OLED module
######################################################################
import Adafruit_SSD1306
from datetime import datetime
import time
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Setup Display
RST=24
device = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
device.begin()
device.clear()
device.display()
width = device.width
height = device.height
fontFile = '/usr/share/fonts/truetype/freefont/FreeSansBold.ttf'
smallFont = ImageFont.truetype(fontFile, 12)
largeFont = ImageFont.truetype(fontFile, 33)
# Display a message on 3 lines, first line big font
while True:
now = datetime.now()
dateMessage = '{:%d %B %Y}'.format(now)
timeMessage = '{:%H:%M:%S}'.format(now)
DisplayMessage(dateMessage,timeMessage)
time.sleep(0.1)
| 29.244444 | 70 | 0.634498 |
9a75a5f4ae8ec0f7ef5613e16f951ea62c4bd8de | 9,601 | py | Python | odim/router.py | belda/odim | ea49284c4bfc76ac6cb436577c128b20c2c4004c | [
"MIT"
] | 5 | 2021-01-29T11:00:10.000Z | 2021-05-18T23:23:32.000Z | odim/router.py | belda/odim | ea49284c4bfc76ac6cb436577c128b20c2c4004c | [
"MIT"
] | 1 | 2021-11-16T10:22:43.000Z | 2021-11-16T10:22:43.000Z | odim/router.py | belda/odim | ea49284c4bfc76ac6cb436577c128b20c2c4004c | [
"MIT"
] | 1 | 2021-02-18T14:45:43.000Z | 2021-02-18T14:45:43.000Z | '''
Contains the extended FastAPI router, for simplified CRUD from a model
'''
from typing import Any, List, Optional, Sequence, Set, Type, Union
import fastapi
from fastapi import Depends, params
from pydantic import BaseModel, create_model
from odim import Odim, OkResponse, SearchResponse
from odim.dependencies import SearchParams
| 43.247748 | 187 | 0.584939 |
9a760367155f89800e9ffffd081d1132a56544e5 | 194 | py | Python | scripts/item/consume_2432803.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2432803.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2432803.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | # Princess No Damage Skin (30-Days)
success = sm.addDamageSkin(2432803)
if success:
sm.chat("The Princess No Damage Skin (30-Days) has been added to your account's damage skin collection.")
| 38.8 | 109 | 0.747423 |
9a768a1c9833791d7a707ef08123594b6480d371 | 1,184 | py | Python | tests/test_product.py | technicapital/stake-python | 8d0a985923318ca7b92f23e0c9a8319a75f37ff2 | [
"Apache-2.0"
] | null | null | null | tests/test_product.py | technicapital/stake-python | 8d0a985923318ca7b92f23e0c9a8319a75f37ff2 | [
"Apache-2.0"
] | null | null | null | tests/test_product.py | technicapital/stake-python | 8d0a985923318ca7b92f23e0c9a8319a75f37ff2 | [
"Apache-2.0"
] | null | null | null | import asyncio
import aiohttp
import pytest
from .client import HttpClient
from .constant import Url
from .product import Product
| 26.909091 | 76 | 0.697635 |
9a76e7fea3dd34891002703a3d4d4adaf6c009dc | 1,346 | py | Python | data_utils.py | tar-bin/DeepAA | acdae33a410eec87eb22419fce0adb513fa97219 | [
"MIT"
] | 1 | 2021-07-27T09:31:20.000Z | 2021-07-27T09:31:20.000Z | data_utils.py | tar-bin/DeepAA | acdae33a410eec87eb22419fce0adb513fa97219 | [
"MIT"
] | null | null | null | data_utils.py | tar-bin/DeepAA | acdae33a410eec87eb22419fce0adb513fa97219 | [
"MIT"
] | null | null | null | import numpy as np
from PIL import Image, ImageOps
| 26.392157 | 83 | 0.581724 |
9a77a425a1b61dc019f50e24ad07e8460b1a7df9 | 2,839 | py | Python | ledfx/color.py | broccoliboy/LedFx | 1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c | [
"MIT"
] | 524 | 2020-12-18T19:34:55.000Z | 2022-03-31T14:52:25.000Z | ledfx/color.py | broccoliboy/LedFx | 1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c | [
"MIT"
] | 119 | 2020-12-18T21:28:12.000Z | 2022-03-31T14:44:02.000Z | ledfx/color.py | broccoliboy/LedFx | 1c90d5c3ddaf993a072eab92d3e373dd3b0fb45c | [
"MIT"
] | 85 | 2020-12-18T18:23:16.000Z | 2022-03-29T16:37:52.000Z | from collections import namedtuple
# Named RGB triple; each channel is an int in the 0-255 range.
RGB = namedtuple("RGB", "red, green, blue")
# Palette of named base colors referenced by the gradients below.
COLORS = {
    "red": RGB(255, 0, 0),
    "orange-deep": RGB(255, 40, 0),
    "orange": RGB(255, 120, 0),
    "yellow": RGB(255, 200, 0),
    "yellow-acid": RGB(160, 255, 0),
    "green": RGB(0, 255, 0),
    "green-forest": RGB(34, 139, 34),
    "green-spring": RGB(0, 255, 127),
    "green-teal": RGB(0, 128, 128),
    "green-turquoise": RGB(0, 199, 140),
    "green-coral": RGB(0, 255, 50),
    "cyan": RGB(0, 255, 255),
    "blue": RGB(0, 0, 255),
    "blue-light": RGB(65, 105, 225),
    "blue-navy": RGB(0, 0, 128),
    "blue-aqua": RGB(0, 255, 255),
    "purple": RGB(128, 0, 128),
    "pink": RGB(255, 0, 178),
    "magenta": RGB(255, 0, 255),
    "black": RGB(0, 0, 0),
    "white": RGB(255, 255, 255),
    "brown": RGB(139, 69, 19),
    "gold": RGB(255, 215, 0),
    "hotpink": RGB(255, 105, 180),
    "lightblue": RGB(173, 216, 230),
    "lightgreen": RGB(152, 251, 152),
    "lightpink": RGB(255, 182, 193),
    "lightyellow": RGB(255, 255, 224),
    "maroon": RGB(128, 0, 0),
    "mint": RGB(189, 252, 201),
    "olive": RGB(85, 107, 47),
    "peach": RGB(255, 100, 100),
    "plum": RGB(221, 160, 221),
    "sepia": RGB(94, 38, 18),
    "skyblue": RGB(135, 206, 235),
    "steelblue": RGB(70, 130, 180),
    "tan": RGB(210, 180, 140),
    "violetred": RGB(208, 32, 144),
}
# Named gradients: each "colors" entry is an ordered list of COLORS keys.
# NOTE(review): the optional "method" key (e.g. "repeat" in "Christmas")
# presumably selects repetition instead of interpolation - confirm against
# the gradient renderer.
GRADIENTS = {
    "Rainbow": {
        "colors": [
            "red",
            "orange",
            "yellow",
            "green",
            "green-turquoise",
            "blue",
            "purple",
            "pink",
        ]
    },
    "Dancefloor": {"colors": ["red", "pink", "blue"]},
    "Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
    "Ocean": {"colors": ["blue-aqua", "blue"]},
    "Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
    "Jungle": {"colors": ["green", "green-forest", "orange"]},
    "Spring": {"colors": ["pink", "orange-deep", "yellow"]},
    "Winter": {"colors": ["green-turquoise", "green-coral"]},
    "Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
    "Sunset": {"colors": ["blue-navy", "orange", "red"]},
    "Borealis": {
        "colors": [
            "orange-deep",
            "purple",
            "green-turquoise",
            "green",
        ]
    },
    "Rust": {"colors": ["orange-deep", "red"]},
    "Christmas": {
        "colors": [
            "red",
            "red",
            "red",
            "red",
            "red",
            "green",
            "green",
            "green",
            "green",
            "green",
        ],
        "method": "repeat",
    },
    "Winamp": {
        "colors": [
            "green",
            "yellow",
            "orange",
            "orange-deep",
            "red",
        ]
    },
}
9a7853c5ab201c882d582391f394325cd2ad7796 | 1,247 | py | Python | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | 15 | 2019-10-10T17:35:48.000Z | 2022-01-29T10:41:01.000Z | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | null | null | null | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | 2 | 2019-10-10T17:36:43.000Z | 2020-06-20T15:28:33.000Z |
from nspawn.support.header import *
| 25.44898 | 58 | 0.57498 |
9a786a26a6979489803db0c2519bf5cf50427d08 | 2,042 | py | Python | game1.py | akulakov/learnprogramming | ed7d557dabbef697773b4b369c8ed9cd3cdd55a6 | [
"Apache-2.0"
] | null | null | null | game1.py | akulakov/learnprogramming | ed7d557dabbef697773b4b369c8ed9cd3cdd55a6 | [
"Apache-2.0"
] | null | null | null | game1.py | akulakov/learnprogramming | ed7d557dabbef697773b4b369c8ed9cd3cdd55a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from helpers import sjoin, cjoin
from random import shuffle
card_types = [
("tax",1,1), # tax everyone 2 coins => bank
("soldier",2,1),
("sergeant",3,1),
("captain",4,2),
("emperor",1,5),
("prince",1,1), # prince takes 1/3rd of bank
]
deck = [Card(*c) for c in card_types]
deck += [Card(*c) for c in card_types]
for _ in range(15):
deck.append(Card(*randchoice(card_types)))
shuffle(deck)
players = [Player('a', draw(deck,5)),
Player('b', draw(deck,5)),
Player('c', draw(deck,5))
]
| 24.023529 | 65 | 0.519589 |
9a78db38d0f259372303620cba450346c37cd245 | 683 | py | Python | src/plotting/plot_permeability.py | pgniewko/Deep-Rock | b714b98a2c391b4a43c62412769e5732cbd0d07a | [
"BSD-3-Clause"
] | 1 | 2019-11-18T04:51:02.000Z | 2019-11-18T04:51:02.000Z | src/plotting/plot_permeability.py | pgniewko/Deep-Rock | b714b98a2c391b4a43c62412769e5732cbd0d07a | [
"BSD-3-Clause"
] | null | null | null | src/plotting/plot_permeability.py | pgniewko/Deep-Rock | b714b98a2c391b4a43c62412769e5732cbd0d07a | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
#
# Usage:
# python
#
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
data = np.loadtxt(sys.argv[1])
kappa_LB, kappa_CNN = data.T
kappa_LB = 10.0 ** kappa_LB
kappa_CNN = 10.0 ** kappa_CNN
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(7, 7))
ax.set_xscale("log", nonposx="clip")
ax.set_yscale("log", nonposy="clip")
plt.tick_params(axis="both", which="major", labelsize=15)
plt.tick_params(axis="both", which="minor", labelsize=12)
plt.plot(kappa_LB, kappa_CNN, "+", color="green")
plt.xlabel("lattice-Boltzmann", fontsize=20)
plt.ylabel("ConvNet", fontsize=20, labelpad=-8)
plt.show()
| 21.34375 | 57 | 0.707174 |
9a79ab000b884a1fa7eeff49e8a3570bf0211367 | 1,664 | py | Python | functions/python/todo-app.py | swiftycloud/swifty.todoapp | 1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1 | [
"MIT"
] | 5 | 2018-11-08T17:07:43.000Z | 2019-04-23T15:18:31.000Z | functions/python/todo-app.py | swiftycloud/swifty.todoapp | 1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1 | [
"MIT"
] | null | null | null | functions/python/todo-app.py | swiftycloud/swifty.todoapp | 1a36c6e6f1af4584a8c0151e15e9ffcf2453f8c1 | [
"MIT"
] | 3 | 2018-11-08T17:07:47.000Z | 2020-11-22T00:20:38.000Z | import bson
import json
import swifty
#
# GET /tasks -- list tasks
# POST /tasks $BODY -- add new task
# GET /tasks/ID -- get info about task
# PUT /tasks/ID -- update task (except status)
# DELETE /tasks/ID -- remove task
# POST /tasks/ID/done -- mark task as done
#
| 25.6 | 82 | 0.477163 |
9a79fb2f2787441274d55999dc0843161af999b5 | 401 | py | Python | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | # https://dmoj.ca/problem/tss17a
# https://dmoj.ca/submission/2226280
import sys
n = int(sys.stdin.readline()[:-1])
for i in range(n):
instruction = sys.stdin.readline()[:-1].split()
printed = False
for j in range(3):
if instruction.count(instruction[j]) >= 2:
print(instruction[j])
printed = True
break
if not printed:
print('???') | 26.733333 | 51 | 0.578554 |
9a7ad9eea9244d2609a2517f92f7fc289fb240da | 1,159 | py | Python | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | null | null | null | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | 41 | 2021-03-23T12:58:25.000Z | 2021-05-25T11:38:42.000Z | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | null | null | null | from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render
from todo.models import Designer, Management, Writer, Editor
| 30.5 | 75 | 0.667817 |
9a7cfcbc63f3c97c82737bfbbfa13e26624618e7 | 214 | py | Python | src/librhc/cost/__init__.py | arnavthareja/mushr_pixelart_mpc | db6ee6cae9b4cb1d3b213fed06690074372c824b | [
"BSD-3-Clause"
] | 5 | 2019-08-30T08:20:27.000Z | 2021-08-01T17:16:16.000Z | src/librhc/cost/__init__.py | arnavthareja/mushr_pixelart_mpc | db6ee6cae9b4cb1d3b213fed06690074372c824b | [
"BSD-3-Clause"
] | 1 | 2020-09-09T13:38:08.000Z | 2020-12-15T12:20:26.000Z | src/librhc/cost/__init__.py | arnavthareja/mushr_pixelart_mpc | db6ee6cae9b4cb1d3b213fed06690074372c824b | [
"BSD-3-Clause"
] | 4 | 2019-09-14T21:26:09.000Z | 2021-08-27T23:01:41.000Z | # Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR
# License: BSD 3-Clause. See LICENSE.md file in root directory.
from .waypoints import Waypoints
__all__ = ["Waypoints"]
| 30.571429 | 90 | 0.757009 |
9a7d3e4f21c385675dec5f7b1784429e468d978e | 1,401 | py | Python | 759/Employee Free Time.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | [
"Apache-2.0"
] | null | null | null | 759/Employee Free Time.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | [
"Apache-2.0"
] | null | null | null | 759/Employee Free Time.py | cccccccccccccc/Myleetcode | fb3fa6df7c77feb2d252feea7f3507569e057c70 | [
"Apache-2.0"
] | null | null | null | from typing import List
import heapq
# Definition for an Interval.
i1 = Interval(1,2)
i2 = Interval(6,7)
i3 = Interval(2,4)
i4 = Interval(2,5)
i5 = Interval(9,12)
A = Solution()
print(A.employeeFreeTime([[i1,i2],[i3],[i4,i5]])) | 31.840909 | 104 | 0.532477 |
9a7d9c6b811efb6d15e0d51600e0fd5bb7bf8479 | 41,312 | py | Python | Comms1_internal/Final.py | CoderStellaJ/CG4002 | 474bda123856d8a88bef5ff787259fcd9ba9f09a | [
"MIT"
] | null | null | null | Comms1_internal/Final.py | CoderStellaJ/CG4002 | 474bda123856d8a88bef5ff787259fcd9ba9f09a | [
"MIT"
] | 10 | 2020-01-28T14:17:26.000Z | 2020-02-05T04:53:06.000Z | Comms1_internal/Final.py | CoderStellaJ/CG4002 | 474bda123856d8a88bef5ff787259fcd9ba9f09a | [
"MIT"
] | 5 | 2021-01-21T08:00:56.000Z | 2021-09-28T05:06:36.000Z | from bluepy import btle
import concurrent
from concurrent import futures
import threading
import multiprocessing
import time
from time_sync import *
import eval_client
import dashBoardClient
from joblib import dump, load
import numpy # to count labels and store in dict
import operator # to get most predicted label
import json
import random # RNG in worst case
from sklearn.preprocessing import StandardScaler # to normalise data
"""
class EMGThread(object):
def __init__(self):
thread = threading.Thread(target=self.getEMGData, args=(beetle, ))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def getEMGData(self, beetle):
while True:
try:
if beetle.waitForNotifications(2):
continue
except Exception as e:
reestablish_connection(beetle)
"""
# Program to find most frequent element in a list
if __name__ == '__main__':
# 50:F1:4A:CB:FE:EE: position 1, 1C:BA:8C:1D:30:22: position 2, 78:DB:2F:BF:2C:E2: position 3
start_time = time.time()
# global variables
"""
beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
"1C:BA:8C:1D:30:22"]
"""
beetle_addresses = ["50:F1:4A:CC:01:C4", "50:F1:4A:CB:FE:EE", "78:DB:2F:BF:2C:E2",
"1C:BA:8C:1D:30:22"]
divide_get_float = 100.0
global_delegate_obj = []
global_beetle = []
handshake_flag_dict = {"50:F1:4A:CB:FE:EE": True,
"78:DB:2F:BF:2C:E2": True, "1C:BA:8C:1D:30:22": True}
emg_buffer = {"50:F1:4A:CC:01:C4": ""}
buffer_dict = {"50:F1:4A:CB:FE:EE": "",
"78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
incoming_data_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
ground_truth = [1, 2, 3]
ACTIONS = ['muscle', 'weightlifting', 'shoutout']
POSITIONS = ['1 2 3', '3 2 1', '2 3 1', '3 1 2', '1 3 2', '2 1 3']
beetle1 = "50:F1:4A:CB:FE:EE"
beetle2 = "78:DB:2F:BF:2C:E2"
beetle3 = "1C:BA:8C:1D:30:22"
dance = "shoutout"
new_pos = "1 2 3"
# data global variables
num_datasets = 200
beetle1_data_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_data_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_data_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
datastring_dict = {"50:F1:4A:CB:FE:EE": "",
"78:DB:2F:BF:2C:E2": "", "1C:BA:8C:1D:30:22": ""}
packet_count_dict = {"50:F1:4A:CC:01:C4": 0, "50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
dataset_count_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
float_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
timestamp_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
comma_count_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
checksum_dict = {"50:F1:4A:CB:FE:EE": 0,
"78:DB:2F:BF:2C:E2": 0, "1C:BA:8C:1D:30:22": 0}
start_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
end_flag = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
# clock synchronization global variables
dance_count = 0
clocksync_flag_dict = {"50:F1:4A:CB:FE:EE": False,
"78:DB:2F:BF:2C:E2": False, "1C:BA:8C:1D:30:22": False}
timestamp_dict = {"50:F1:4A:CB:FE:EE": [],
"78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
clock_offset_dict = {"50:F1:4A:CB:FE:EE": [],
"78:DB:2F:BF:2C:E2": [], "1C:BA:8C:1D:30:22": []}
[global_delegate_obj.append(0) for idx in range(len(beetle_addresses))]
[global_beetle.append(0) for idx in range(len(beetle_addresses))]
try:
eval_client = eval_client.Client("192.168.43.6", 8080, 6, "cg40024002group6")
except Exception as e:
print(e)
try:
board_client = dashBoardClient.Client("192.168.43.248", 8080, 6, "cg40024002group6")
except Exception as e:
print(e)
establish_connection("50:F1:4A:CC:01:C4")
time.sleep(2)
establish_connection("78:DB:2F:BF:2C:E2")
time.sleep(3)
# Load MLP NN model
mlp_dance = load('mlp_dance_LATEST.joblib')
establish_connection("50:F1:4A:CB:FE:EE")
time.sleep(3)
# Load Movement ML
mlp_move = load('mlp_movement_LATEST.joblib')
establish_connection("1C:BA:8C:1D:30:22")
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
for beetle in global_beetle:
if beetle.addr == "50:F1:4A:CC:01:C4":
data_executor.submit(getEMGData, beetle)
data_executor.shutdown(wait=True)
# start collecting data only after 1 min passed
while True:
elapsed_time = time.time() - start_time
if int(elapsed_time) >= 60:
break
else:
print(elapsed_time)
time.sleep(1)
"""
for beetle in global_beetle:
print(beetle.addr)
emg_thread = EMGThread(global_beetle[3])
"""
while True:
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
{data_executor.submit(getDanceData, beetle): beetle for beetle in global_beetle}
data_executor.shutdown(wait=True)
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=7) as data_executor:
data_executor.submit(getEMGData, global_beetle[0])
data_executor.shutdown(wait=True)
"""
# do calibration once every 4 moves; change 4 to other values according to time calibration needs
if dance_count == 1:
print("Proceed to do time calibration...")
# clear clock_offset_dict for next time calibration
for beetle in global_beetle:
if beetle.addr != "50:F1:4A:CC:01:C4":
initHandshake(beetle)
if dance_count == 1:
dance_count = 0
dance_count += 1
pool = multiprocessing.Pool()
workers = [pool.apply_async(processData, args=(address, ))
for address in beetle_addresses]
result = [worker.get() for worker in workers]
pool.close()
try:
# change to 1 if using emg beetle, 0 if not using
for idx in range(1, len(result)):
for address in result[idx].keys():
if address == "50:F1:4A:CB:FE:EE":
beetle1_data_dict[address] = result[idx][address]
elif address == "78:DB:2F:BF:2C:E2":
beetle2_data_dict[address] = result[idx][address]
elif address == "1C:BA:8C:1D:30:22":
beetle3_data_dict[address] = result[idx][address]
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle1_data_dict["50:F1:4A:CB:FE:EE"].items():
if dataset_list[0] == 0: # moving data
beetle1_moving_dict["50:F1:4A:CB:FE:EE"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle1_dancing_dict["50:F1:4A:CB:FE:EE"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle2_data_dict["78:DB:2F:BF:2C:E2"].items():
if dataset_list[0] == 0: # moving data
beetle2_moving_dict["78:DB:2F:BF:2C:E2"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle2_dancing_dict["78:DB:2F:BF:2C:E2"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
try:
for dataset_num, dataset_list in beetle3_data_dict["1C:BA:8C:1D:30:22"].items():
if dataset_list[0] == 0: # moving data
beetle3_moving_dict["1C:BA:8C:1D:30:22"].update(
{dataset_num: dataset_list})
else: # dancing data
beetle3_dancing_dict["1C:BA:8C:1D:30:22"].update(
{dataset_num: dataset_list})
except Exception as e:
pass
# clear buffer for next move
for address in buffer_dict.keys():
buffer_dict[address] = ""
# print(beetle1_data_dict)
# print(beetle2_data_dict)
# print(beetle3_data_dict)
with open(r'position.txt', 'a') as file:
file.write(json.dumps(beetle1_moving_dict) + "\n")
file.write(json.dumps(beetle2_moving_dict) + "\n")
file.write(json.dumps(beetle3_moving_dict) + "\n")
file.close()
with open(r'dance.txt', 'a') as file:
file.write(json.dumps(beetle1_dancing_dict) + "\n")
file.write(json.dumps(beetle2_dancing_dict) + "\n")
file.write(json.dumps(beetle3_dancing_dict) + "\n")
file.close()
# synchronization delay
try:
beetle1_time_ultra96 = calculate_ultra96_time(
beetle1_dancing_dict, clock_offset_dict["50:F1:4A:CB:FE:EE"][0])
beetle2_time_ultra96 = calculate_ultra96_time(
beetle2_dancing_dict, clock_offset_dict["78:DB:2F:BF:2C:E2"][0])
beetle3_time_ultra96 = calculate_ultra96_time(
beetle3_dancing_dict, clock_offset_dict["1C:BA:8C:1D:30:22"][0])
sync_delay = calculate_sync_delay(beetle1_time_ultra96, beetle2_time_ultra96, beetle3_time_ultra96)
except Exception as e:
print(e)
print("use default sync delay")
sync_delay = 950
# print("Beetle 1 ultra 96 time: ", beetle1_time_ultra96)
# print("Beetle 2 ultra 96 time: ", beetle2_time_ultra96)
# print("Beetle 3 ultra 96 time: ", beetle3_time_ultra96)
print("Synchronization delay is: ", sync_delay)
# machine learning
# ml_result = get_prediction(beetle1_data_dict)
"""
"""
beetle1_moving_dict = parse_data(beetle1_moving_dict, beetle1)
beetle2_moving_dict = parse_data(beetle2_moving_dict, beetle2)
beetle3_moving_dict = parse_data(beetle3_moving_dict, beetle3)
beetle1_moving_dict = normalise_data(beetle1_moving_dict)
beetle2_moving_dict = normalise_data(beetle2_moving_dict)
beetle3_moving_dict = normalise_data(beetle3_moving_dict)
# Predict movement direction of each beetle
try:
beetle1_move = predict_beetle(beetle1_moving_dict, mlp_move)
except Exception as e:
beetle1_move = 'S'
try:
beetle2_move = predict_beetle(beetle2_moving_dict, mlp_move)
except Exception as e:
beetle2_move = 'S'
try:
beetle3_move = predict_beetle(beetle3_moving_dict, mlp_move)
except Exception as e:
beetle3_move = 'S'
# Find new position
new_pos = find_new_position(
ground_truth, beetle1_move, beetle2_move, beetle3_move)
# PREDICT DANCE
if beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
# Get DANCE data from dictionaries in arguments
beetle1_dance_data = parse_data(beetle1_dancing_dict, beetle1)
beetle2_dance_data = parse_data(beetle2_dancing_dict, beetle2)
beetle3_dance_data = parse_data(beetle3_dancing_dict, beetle3)
# print(beetle1_data)
# Normalise DANCE data
beetle1_dance_data_norm = normalise_data(beetle1_dance_data)
beetle2_dance_data_norm = normalise_data(beetle2_dance_data)
beetle3_dance_data_norm = normalise_data(beetle3_dance_data)
# print(beetle1_data_norm)
# Predict DANCE of each beetle
beetle1_dance = predict_beetle(beetle1_dance_data_norm, mlp_dance)
beetle2_dance = predict_beetle(beetle2_dance_data_norm, mlp_dance)
beetle3_dance = predict_beetle(beetle3_dance_data_norm, mlp_dance)
# print(beetle1_dance)
dance_predictions = [beetle1_dance, beetle2_dance, beetle3_dance]
dance = most_frequent_prediction(dance_predictions)
elif beetle2_dancing_dict[beetle2] and beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle2_dancing_dict, beetle2)
elif beetle1_dancing_dict[beetle1] and beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle1_dancing_dict[beetle1] and beetle2_dancing_dict[beetle2]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle1_dancing_dict[beetle1]:
dance = eval_1beetle(beetle1_dancing_dict, beetle1)
elif beetle2_dancing_dict[beetle2]:
dance = eval_1beetle(beetle2_dancing_dict, beetle2)
elif beetle3_dancing_dict[beetle3]:
dance = eval_1beetle(beetle3_dancing_dict, beetle3)
else:
# RNG
dance = random.choice(ACTIONS)
print(dance)
print(new_pos)
# send data to eval and dashboard server
eval_client.send_data(new_pos, dance, str(sync_delay))
ground_truth = eval_client.receive_dancer_position().split(' ')
ground_truth = [int(ground_truth[0]), int(
ground_truth[1]), int(ground_truth[2])]
final_string = dance + " " + new_pos
board_client.send_data_to_DB("MLDancer1", final_string)
beetle1_moving_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_moving_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_moving_dict = {"1C:BA:8C:1D:30:22": {}}
beetle1_dancing_dict = {"50:F1:4A:CB:FE:EE": {}}
beetle2_dancing_dict = {"78:DB:2F:BF:2C:E2": {}}
beetle3_dancing_dict = {"1C:BA:8C:1D:30:22": {}}
| 47.052392 | 188 | 0.495788 |
9a7dca2e7b004aae5d55d6951056ac9880930921 | 3,100 | py | Python | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | 1 | 2021-12-28T19:00:01.000Z | 2021-12-28T19:00:01.000Z | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | import unittest
from treeno.base import PrintMode, PrintOptions
from treeno.expression import Array, Field, wrap_literal
from treeno.orderby import OrderTerm, OrderType
from treeno.relation import (
AliasedRelation,
Lateral,
SampleType,
Table,
TableQuery,
TableSample,
Unnest,
ValuesQuery,
)
if __name__ == "__main__":
unittest.main()
| 31 | 84 | 0.560968 |
9a7dd31031b6e51089b5322681d7a6bf9e613fcf | 4,257 | py | Python | tests/preprocess/test_har.py | henry1jin/alohamora | e51e2488ecdf3e9692d5bb6b25ebc88622087c20 | [
"MIT"
] | 5 | 2020-12-16T03:13:59.000Z | 2022-03-06T07:16:39.000Z | tests/preprocess/test_har.py | henry1jin/alohamora | e51e2488ecdf3e9692d5bb6b25ebc88622087c20 | [
"MIT"
] | 9 | 2020-09-25T23:25:59.000Z | 2022-03-11T23:45:14.000Z | tests/preprocess/test_har.py | henry1jin/alohamora | e51e2488ecdf3e9692d5bb6b25ebc88622087c20 | [
"MIT"
] | 3 | 2019-10-16T21:22:07.000Z | 2020-07-21T13:38:22.000Z | import random
from blaze.chrome.har import har_from_json, Har, HarLog, HarEntry, Request, Response
from blaze.config.environment import ResourceType
from blaze.preprocess.har import get_har_entry_type, har_entries_to_resources
from blaze.util.seq import ordered_uniq
from tests.mocks.config import get_config
from tests.mocks.har import get_har_json
| 45.287234 | 109 | 0.624383 |
9a7f8708794c267295590be4b52b94df73d85efd | 1,245 | py | Python | Loader.py | JaredDobry/Budgeting-Fool | 0f4ab5dea3b0750b7bf018de1d456a5587dbeb17 | [
"MIT"
] | null | null | null | Loader.py | JaredDobry/Budgeting-Fool | 0f4ab5dea3b0750b7bf018de1d456a5587dbeb17 | [
"MIT"
] | null | null | null | Loader.py | JaredDobry/Budgeting-Fool | 0f4ab5dea3b0750b7bf018de1d456a5587dbeb17 | [
"MIT"
] | null | null | null | from Budget import Budget, Item
FILENAME = 'Budget.txt'
| 28.295455 | 115 | 0.480321 |
9a801f3178565c7f1b1008bb487a050d3079d8d5 | 448 | py | Python | rush_hour/test_solution.py | ssebastianj/taip-2014 | 2a0e62c4bf755ff752136350c246456d65a8c3eb | [
"MIT"
] | null | null | null | rush_hour/test_solution.py | ssebastianj/taip-2014 | 2a0e62c4bf755ff752136350c246456d65a8c3eb | [
"MIT"
] | null | null | null | rush_hour/test_solution.py | ssebastianj/taip-2014 | 2a0e62c4bf755ff752136350c246456d65a8c3eb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import pytest
from .solution import calc_minimum_travels
| 23.578947 | 69 | 0.638393 |
9a822f57f1242bdc5f799bb28767d02eb1a10fd9 | 2,573 | py | Python | wordle.py | ccattuto/wordle-device | 65cbd95165cf6c8e7fae508358d58f7e013f5bc8 | [
"CC0-1.0"
] | 7 | 2022-02-01T17:20:29.000Z | 2022-02-15T08:09:19.000Z | wordle.py | ccattuto/wordle-device | 65cbd95165cf6c8e7fae508358d58f7e013f5bc8 | [
"CC0-1.0"
] | 1 | 2022-02-13T15:46:36.000Z | 2022-02-13T15:46:36.000Z | wordle.py | ccattuto/wordle-device | 65cbd95165cf6c8e7fae508358d58f7e013f5bc8 | [
"CC0-1.0"
] | 1 | 2022-02-03T17:33:13.000Z | 2022-02-03T17:33:13.000Z | #!/usr/bin/env python
import sys
from serial.tools import list_ports
import serial
import tweepy
# locate ESP32-C3 USB device
port = None
for device in list_ports.comports():
if device.vid == 0x303a and device.pid == 0x1001:
break
if not device:
sys.exit(-1)
ser = serial.Serial(device.device, baudrate=115200)
# Twitter streaming API
# CHANGE ME - your consumer key/secret below:
CONSUMER_KEY = 'XXX'
CONSUMER_SECRET = 'XXX'
# CHANGE ME - your access token/secret below:
ACCESS_TOKEN = 'XXX'
ACCESS_TOKEN_SECRET = 'XXX'
# LED matrix control (implemented in wordle.ino):
# - 5x5 matrix is viewed as a LED strip
# - sending 'Z' clears matrix and position cursor on first pixel (0)
# - sending 'G' / 'Y' / 'B' writes a green / yellow / "dark gray" pixel and advances cursor
# clear LED matrix
ser.write('Z'.encode())
# maps characters in tweet to 1-char commands over serial
symbol_map = {
'': 'G',
'': 'Y',
'': 'B',
'': 'B'
}
# write Wordle rows to LED matrix
# check whether a row of text is a worlde row
# looks for 1 to 5 consecutive "worlde rows" in tweet
# and pack them into a list. Returns [] otherwise.
# process tweet
# subclass tweepy
wordle_stream = WordleStream(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# filter tweets containing the keyword 'worlde'
wordle_stream.filter(track=["wordle"])
| 24.504762 | 94 | 0.638943 |
9a8309c0c453e677a70c4041fea94265ebf3f4e3 | 4,664 | py | Python | DeepAlignmentNetwork/menpofit/lk/result.py | chiawei-liu/DeepAlignmentNetwork | 52621cd2f697abe372b88c9ea0ee08f0d93b43d8 | [
"MIT"
] | 220 | 2019-09-01T01:52:04.000Z | 2022-03-28T12:52:07.000Z | DeepAlignmentNetwork/menpofit/lk/result.py | chiawei-liu/DeepAlignmentNetwork | 52621cd2f697abe372b88c9ea0ee08f0d93b43d8 | [
"MIT"
] | 80 | 2015-01-05T16:17:39.000Z | 2020-11-22T13:42:00.000Z | DeepAlignmentNetwork/menpofit/lk/result.py | chiawei-liu/DeepAlignmentNetwork | 52621cd2f697abe372b88c9ea0ee08f0d93b43d8 | [
"MIT"
] | 64 | 2015-02-02T15:11:38.000Z | 2022-02-28T06:19:31.000Z | from menpofit.result import (ParametricIterativeResult,
MultiScaleParametricIterativeResult)
| 44 | 80 | 0.670455 |
9a831450ccec04bdfc6f981e2f3e5d2ad9771f21 | 6,533 | py | Python | source/model.py | BecauseWeCanStudios/LEGOVNO | 97654da906e5d8ee999fea6dbc062914cc5710b2 | [
"MIT"
] | null | null | null | source/model.py | BecauseWeCanStudios/LEGOVNO | 97654da906e5d8ee999fea6dbc062914cc5710b2 | [
"MIT"
] | null | null | null | source/model.py | BecauseWeCanStudios/LEGOVNO | 97654da906e5d8ee999fea6dbc062914cc5710b2 | [
"MIT"
] | null | null | null | import os
import keras
import skimage.io
import keras_contrib.applications
from metrics import *
from mrcnn import utils
from mrcnn import config
from imgaug import augmenters as iaa
from dataset import Dataset, PoseEstimationDataset
import numpy as np
import keras.backend as K
import mrcnn.model as modellib
| 30.386047 | 123 | 0.743762 |
9a83696d4e899b64faddbb5626cfd880f1149543 | 442 | py | Python | donations/urls.py | nanorepublica/django-donations | 349aaf17029f3f9b4723fead3fa28dd85959f14e | [
"BSD-3-Clause"
] | 9 | 2015-10-13T11:41:20.000Z | 2020-11-30T04:38:43.000Z | donations/urls.py | nanorepublica/django-donations | 349aaf17029f3f9b4723fead3fa28dd85959f14e | [
"BSD-3-Clause"
] | 63 | 2015-10-22T17:41:27.000Z | 2021-11-20T12:18:26.000Z | donations/urls.py | nanorepublica/django-donations | 349aaf17029f3f9b4723fead3fa28dd85959f14e | [
"BSD-3-Clause"
] | 3 | 2017-08-29T02:44:12.000Z | 2020-04-07T23:43:12.000Z | from django.conf.urls import include, url
from donations.views import DonateAPI, VerifyAPI
app_name = 'donations'
api_urls = ([
url(r'^donate/$', DonateAPI.as_view(), name="donate"),
url(r'^verify/(?P<pk>[0-9]+)$', VerifyAPI.as_view(), name="verify"),
], "donations")
donations = ([
url(r'^api/', include(api_urls, namespace="api")),
], "donations")
urlpatterns = [
url(r'^', include(donations, namespace="donations"))
]
| 23.263158 | 72 | 0.651584 |
9a83981c040624137fa42558baa04d53d347c0fc | 3,004 | py | Python | orc8r/tools/fab/vagrant.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 2 | 2020-11-05T18:58:26.000Z | 2021-02-09T06:42:49.000Z | orc8r/tools/fab/vagrant.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 10 | 2021-03-31T20:19:00.000Z | 2022-02-19T07:09:57.000Z | orc8r/tools/fab/vagrant.py | 119Vik/magma-1 | 107a7b374466a837fc0a49b283ba9d6ff1d702e3 | [
"BSD-3-Clause"
] | 3 | 2020-08-20T18:45:34.000Z | 2020-08-20T20:18:42.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
import os.path
from fabric.api import local
from fabric.api import env
def __ensure_in_vagrant_dir():
"""
Error out if there is not Vagrant instance associated with this directory
"""
pwd = local('pwd', capture=True)
if not os.path.isfile(pwd + '/Vagrantfile'):
print("Error: Vagrantfile not found. Try executing from fbcode/magma")
exit(1)
return
def setup_env_vagrant(machine='magma', apply_to_env=True, force_provision=False):
""" Host config for local Vagrant VM.
Sets the environment to point at the local vagrant machine. Used
whenever we need to run commands on the vagrant machine.
"""
__ensure_in_vagrant_dir()
# Ensure that VM is running
isUp = local('vagrant status %s' % machine, capture=True) \
.find('running') < 0
if isUp:
# The machine isn't running. Most likely it's just not up. Let's
# first try the simple thing of bringing it up, and if that doesn't
# work then we ask the user to fix it.
print("VM %s is not running... Attempting to bring it up."
% machine)
local('vagrant up %s' % machine)
isUp = local('vagrant status %s' % machine, capture=True) \
.find('running')
if isUp < 0:
print("Error: VM: %s is still not running...\n"
" Failed to bring up %s'"
% (machine, machine))
exit(1)
elif force_provision:
local('vagrant provision %s' % machine)
ssh_config = local('vagrant ssh-config %s' % machine, capture=True)
ssh_lines = [line.strip() for line in ssh_config.split("\n")]
ssh_params = {key: val for key, val
in [line.split(" ", 1) for line in ssh_lines]}
host = ssh_params.get("HostName", "").strip()
port = ssh_params.get("Port", "").strip()
# some installations seem to have quotes around the file location
identity_file = ssh_params.get("IdentityFile", "").strip().strip('"')
host_string = 'vagrant@%s:%s' % (host, port)
if apply_to_env:
env.host_string = host_string
env.hosts = [env.host_string]
env.key_filename = identity_file
env.disable_known_hosts = True
else:
return {
"hosts": [host_string],
"host_string": host_string,
"key_filename": identity_file,
"disable_known_hosts": True,
}
def teardown_vagrant(machine):
""" Destroy a vagrant machine so that we get a clean environment to work
in
"""
__ensure_in_vagrant_dir()
# Destroy if vm if it exists
created = local('vagrant status %s' % machine, capture=True) \
.find('not created') < 0
if created:
local('vagrant destroy -f %s' % machine)
| 31.957447 | 81 | 0.617843 |
9a83eb7c6cde3a0afbb0a6028180ce05131c4869 | 1,988 | py | Python | cp_multiply/examples/make_box_packing_cp.py | gkonjevod/multiply_CP | 2410d242a29a340db8184e127d05c5da9d26f1b4 | [
"MIT"
] | null | null | null | cp_multiply/examples/make_box_packing_cp.py | gkonjevod/multiply_CP | 2410d242a29a340db8184e127d05c5da9d26f1b4 | [
"MIT"
] | null | null | null | cp_multiply/examples/make_box_packing_cp.py | gkonjevod/multiply_CP | 2410d242a29a340db8184e127d05c5da9d26f1b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 23:38:25 2022
@author: goran
"""
from ..general_cp import GeneralCP
from ..cp_utils import to_degrees, dihedral_angle, normal
from math import sqrt, pi, tan, atan2
box_packing_cell_nodes = {'A': (2, 0),
'B': (4, 0),
'C': (2, 2),
'D': (4, 2),
'E': (4, 3),
'F': (3.5, 3.5),
'G': (0, 0)}
angle1 = to_degrees(atan2(sqrt(2)/2, 2))
folded_wall_coords = [ (2, 2, 0),
(2, 1, 2),
(2, 2, 2)]
folded_top_coords = [ (2, 1, 2),
(2, 2, 2),
(1, 2, 2)]
folded_slanted_coords = [(1, 2, 2),
(2, 2, 0),
(2, 1, 2)]
angle1_check = to_degrees(dihedral_angle(normal(folded_top_coords),
normal(folded_slanted_coords)))
print('angle1 = ', angle1)
angle2 = to_degrees(dihedral_angle(normal(folded_wall_coords),
normal(folded_slanted_coords)))
print('angle2 = ', angle2)
box_packing_cell_edges = {'AC': -90,
'BD': 180,
'CD': -180,
'CE': 90 + angle1,
'DE': -180,
'EF': 90 + angle2,
'BG': 0}
| 30.121212 | 94 | 0.454728 |
9a862a138eaba7a151db1b55e4b4a041ae8dbd8a | 11,001 | py | Python | kq/queue.py | grofers/kq | 1fc96e2a189901b91fdcde7f829b021b6555e217 | [
"MIT"
] | null | null | null | kq/queue.py | grofers/kq | 1fc96e2a189901b91fdcde7f829b021b6555e217 | [
"MIT"
] | 2 | 2018-09-24T15:43:48.000Z | 2020-06-23T11:15:17.000Z | kq/queue.py | grofers/kq | 1fc96e2a189901b91fdcde7f829b021b6555e217 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import time
import uuid
import dill
import kafka
from kafka.errors import KafkaError
from kq.job import Job
def enqueue_with_key(self, key, obj, *args, **kwargs):
"""Place the function call (or the job) in the Kafka topic with key.
For example:
.. code-block:: python
import requests
from kq import Queue
q = Queue()
url = 'https://www.google.com'
# You can queue the function call with its arguments
job = q.enqueue_with_key('my_key', requests.get, url)
# Or you can queue a kq.job.Job instance directly
q.enqueue_with_key('my_key', job)
:param key: The key for the Kafka message. Jobs with the same key are
guaranteed to be placed in the same Kafka partition and processed
sequentially. If a job object is enqueued, its key is overwritten.
:type key: str
:param obj: Function or the job object to enqueue. If a function is
given, the function *must* be pickle-able.
:type obj: callable | kq.job.Job
:param args: Arguments for the function. Ignored if a KQ job object
is given for the first argument instead.
:type args: list
:param kwargs: Keyword arguments for the function. Ignored if a KQ
job instance is given as the first argument instead.
:type kwargs: dict
:return: The job that was enqueued
:rtype: kq.job.Job
"""
if isinstance(obj, Job):
func = obj.func
args = obj.args
kwargs = obj.kwargs
else:
func = obj
if not callable(func):
raise ValueError('{} is not a callable'.format(func))
job = Job(
id=str(uuid.uuid4()),
timestamp=int(time.time()),
topic=self._topic,
func=func,
args=args,
kwargs=kwargs,
timeout=self._timeout,
key=key
)
future = self._producer.send(self._topic, dill.dumps(job), key=key)
try:
future.get(timeout=self._timeout or 5)
except KafkaError as e:
self._logger.exception('Queuing failed: {}'.format(e.message))
return None
self._logger.info('Enqueued: {}'.format(job))
return job
def job(self, func):
"""Decorator which add a **delay** method to a function.
When the **delay** method is called, the function is queued as a job.
For example:
.. code-block:: python
from kq import Queue
queue = Queue()
@queue.job
def calculate_sum(a, b, c):
return a + b + c
# Enqueue the function as a job
calculate_sum.delay(1, 2, 3)
:param func: The function to decorate.
:type func: callable
:return: The decorated function with new method **delay**
:rtype: callable
"""
func.delay = delay
return func
def flush(self):
"""Force-flush all buffered records to the broker."""
self._logger.info('Flushing {} ...'.format(self))
self._producer.flush()
| 32.937126 | 78 | 0.579947 |
9a87b0a003cfac44c4b71f5b09ccd17d4a3eced1 | 8,683 | py | Python | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | """
accel_adxl345.py
This modules defines the AccelADXL345 class for streaming data from the
ADXL345 accelerometers.
"""
import time
import serial
import sys
import numpy
import struct
BUF_EMPTY_NUM = 5
BUF_EMPTY_DT = 0.05
| 27.741214 | 86 | 0.524012 |
9a8866fd681b05cff1de0c32ef8dae40aefe5351 | 831 | py | Python | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
| 34.625 | 82 | 0.683514 |
9a89981de0ecebc2323be5e00e13a157cd8dc62f | 8,490 | py | Python | pynaja/common/struct.py | xiaoxiamiya/naja | 222c3e1135bbd2b9a02181273a8a70201fbdf0f5 | [
"Apache-2.0"
] | 1 | 2021-09-07T07:13:53.000Z | 2021-09-07T07:13:53.000Z | pynaja/common/struct.py | xiaoxiamiya/naja | 222c3e1135bbd2b9a02181273a8a70201fbdf0f5 | [
"Apache-2.0"
] | null | null | null | pynaja/common/struct.py | xiaoxiamiya/naja | 222c3e1135bbd2b9a02181273a8a70201fbdf0f5 | [
"Apache-2.0"
] | null | null | null | import struct
from collections import OrderedDict
from configparser import RawConfigParser
from pynaja.common.async_base import Utils
from pynaja.common.error import ConstError
| 21.493671 | 74 | 0.607538 |
9a8b2c9a4fe128befea072dd96f7b456a616ecd8 | 15,178 | py | Python | YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 12 | 2020-03-25T01:24:22.000Z | 2021-09-18T06:40:16.000Z | YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 1 | 2020-04-22T07:52:36.000Z | 2020-04-22T07:52:36.000Z | YOLO/Stronger-yolo-pytorch/port2tf/yolov3.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 4 | 2020-03-25T01:24:26.000Z | 2020-09-20T11:29:09.000Z | # coding:utf-8
import numpy as np
import tensorflow as tf
from layers import *
from MobilenetV2 import MobilenetV2,MobilenetV2_dynamic
| 80.306878 | 158 | 0.566346 |
9a8ce9049f7230937ae69e4978f32515e2f46236 | 654 | py | Python | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Warpnet B.V.
import re
from saltlint.linter.rule import DeprecationRule
from saltlint.utils import LANGUAGE_SLS
| 28.434783 | 100 | 0.697248 |
9a8d3871c093dea84d65b938bf3c599a010db785 | 7,818 | py | Python | sdks/python/apache_beam/ml/inference/pytorch_test.py | hengfengli/beam | 83a8855e5997e0311e6274c03bcb38f94efbf8ef | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/ml/inference/pytorch_test.py | hengfengli/beam | 83a8855e5997e0311e6274c03bcb38f94efbf8ef | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sdks/python/apache_beam/ml/inference/pytorch_test.py | hengfengli/beam | 83a8855e5997e0311e6274c03bcb38f94efbf8ef | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import os
import shutil
import tempfile
import unittest
from collections import OrderedDict
import numpy as np
import pytest
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Protect against environments where pytorch library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
import torch
from apache_beam.ml.inference.api import PredictionResult
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.pytorch import PytorchInferenceRunner
from apache_beam.ml.inference.pytorch import PytorchModelLoader
except ImportError:
raise unittest.SkipTest('PyTorch dependencies are not installed')
if __name__ == '__main__':
unittest.main()
| 37.228571 | 80 | 0.661678 |
9a8deeda4be2011a1d0dba2c5373aa43b91fc628 | 6,636 | py | Python | example/test/L20_snake.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | 2 | 2021-12-18T06:34:26.000Z | 2022-01-05T05:08:47.000Z | example/test/L20_snake.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | example/test/L20_snake.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null |
import pygame
import sys
import time
import random
from pygame.locals import *
# Pygame Init
pygame.init()
# Play Surface
size = width, height = 800, 800
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Snake Change")
# Colors
red = (255, 0, 0)
green = (0, 255, 0)
black = (0, 0, 0)
white = (255, 255, 255)
brown = (165, 42, 42)
blue = (0, 0, 255)
# FPS controller
times = pygame.time.Clock()
# Game settings
delta = 10
snakePos = [100, 50]
snakeBody = [[100, 50], [90, 50], [80, 50]]
snakePos2 = [100, 350]
snakeBody2 = [[100, 350], [90, 350], [80, 350]]
foodPos = [400, 200]
#snakePos = [delta*4, delta*3]
#snakeBody = [[delta*4, delta*3], [delta*3, delta*3], [delta*2, delta*3]]
#foodPos = [delta*10, delta*3]
foodSpawn = True
direction = 'RIGHT'
direction2 = 'RIGHT'
score = 0
score2 = 0
gameover = False
winner = ''
# Show Score
while True:
screen.fill(white)
event = pygame.event.poll()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
direction = control_1p(direction)
direction2 = control_2p(direction2)
move(direction,snakePos)
move(direction2,snakePos2)
score, foodSpawn = motion(score, foodSpawn, snakeBody, snakePos, green)
score2, foodSpawn = motion(score2, foodSpawn, snakeBody2, snakePos2, blue)
gameover, winner = is_game_over()
#
#gameover, winner = is_game_over2()
if foodSpawn == False:
foodPos = [random.randrange(1, width // delta) * delta, random.randrange(1, height // delta) * delta]
foodSpawn = True
pygame.draw.rect(screen, brown, (foodPos[0], foodPos[1], delta, delta))
showScore(score,1)
showScore(score2,2)
if gameover:
showEnd()
while True:
times.tick(10)
event = pygame.event.poll()
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
snakePos = [100, 50]
snakeBody = [[100, 50], [90, 50], [80, 50]]
snakePos2 = [100, 350]
snakeBody2 = [[100, 350], [90, 350], [80, 350]]
foodPos = [400, 200]
foodSpawn = True
direction = 'RIGHT'
direction2 = 'RIGHT'
score = 0
score2 = 0
gameover = False
winner = ''
break
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
pygame.display.flip()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
pygame.quit()
sys.exit()
pygame.display.flip()
times.tick(30)
| 25.231939 | 109 | 0.518987 |
9a8e3182ccf25a9266ba55ff765c256d44cf7bcc | 4,203 | py | Python | readgadget/readrockstar.py | danielmarostica/pygadgetreader | 977949da7fcb6585f3e0270019d369c6967b317c | [
"BSD-3-Clause"
] | 6 | 2020-09-02T21:11:59.000Z | 2021-09-24T16:12:44.000Z | readgadget/readrockstar.py | danielmarostica/pygadgetreader | 977949da7fcb6585f3e0270019d369c6967b317c | [
"BSD-3-Clause"
] | 1 | 2021-09-24T14:40:03.000Z | 2021-09-25T20:07:13.000Z | readgadget/readrockstar.py | danielmarostica/pygadgetreader | 977949da7fcb6585f3e0270019d369c6967b317c | [
"BSD-3-Clause"
] | 1 | 2020-11-18T19:15:39.000Z | 2020-11-18T19:15:39.000Z | from .modules.common import *
import numpy as np
import os
from .modules.rs_structs import getRSformat
def compileReturnArray(RS,data):
"""compile data from RS binary and return requested value"""
arr = []
singleval = False
## return particle ID data
if data == 'particles':
npart = 0
for i in range(0,len(RS)):
npart += len(RS[i].particle_IDs)
arr = np.zeros((npart,2),dtype=np.int64)
npart = 0
for i in range(0,len(RS)):
n = len(RS[i].particle_IDs)
arr[npart:npart+n,0] = RS[i].particle_IDs
arr[npart:npart+n,1] = RS[i].particle_haloIDs
npart += n
return arr
## return halo struct data
if data in RS[0].halostruct.names:
singleval = True
if RS[0].debug: print('%s found in halodata' % data)
nhalos = 0
for i in range(0,len(RS)):
nhalos += RS[i].num_halos
if singleval:
arr.extend(RS[i].halodata[data])
else:
arr.extend(RS[i].halodata)
#print nhalos,len(arr)
return np.asarray(arr)
def readrockstar(binfile,data,**kwargs):
"""read rockstar binary file
Parameters
----------
binfile : string
path to rockstar binary file. Do NOT include file extention or leading number
data : string
requested data, see readme for details
Examples
--------
>>> halo_mass = readrockstar('/Users/bob/halos_020','m')
>>> halo_mass
array([ 7.25643648e+08, 5.70148608e+08, 3.97376288e+08,
3.66277274e+09, 1.99379231e+10, 5.01039648e+08,
...,
1.58950515e+09, 2.10782208e+09, 8.41401088e+09,
4.14653504e+08], dtype=float32)
"""
galaxies = 0
if 'galaxies' in kwargs and kwargs['galaxies']==1:
galaxies = 1
debug = 0
if 'debug' in kwargs and kwargs['debug']==1:
debug = 1
RS_DATA = []
for j in range(0,5000):
b = '%s.%d.bin' % (binfile,j)
if os.path.isfile(b):
if debug: print('reading %s' % b)
RS_DATA.append(RockstarFile(b,data,galaxies,debug))
else:
break
arr = compileReturnArray(RS_DATA,data)
return arr
| 30.23741 | 93 | 0.578158 |
9a8e4ada3be3bb52b1edcd6ad889f5b0b8142092 | 7,019 | py | Python | src/backend/preprocess/preprocess_helper.py | scmc/vch-mri | ffd2a7b60d770a76b545ce271f85e12f53cfb3ad | [
"MIT"
] | 1 | 2021-12-01T23:40:20.000Z | 2021-12-01T23:40:20.000Z | src/backend/preprocess/preprocess_helper.py | scmc/vch-mri | ffd2a7b60d770a76b545ce271f85e12f53cfb3ad | [
"MIT"
] | 5 | 2021-03-11T03:07:38.000Z | 2021-03-11T03:11:43.000Z | src/backend/preprocess/preprocess_helper.py | scmc/vch-mri | ffd2a7b60d770a76b545ce271f85e12f53cfb3ad | [
"MIT"
] | 18 | 2020-12-30T22:04:44.000Z | 2021-12-01T23:40:23.000Z | import boto3
from datetime import datetime, date
import re
import string
import pandas as pd
from spellchecker import SpellChecker
import uuid
import psycopg2
from psycopg2 import sql
import sys
sys.path.append('.')
from rule_processing import postgresql
compr = boto3.client(service_name='comprehend')
compr_m = boto3.client(service_name='comprehendmedical')
spell = SpellChecker()
conn = postgresql.connect()
spelling_list = [x[0] for x in queryTable(conn, 'spellchecker')]
conn.close()
# Add words to spell list
spell.word_frequency.load_words(spelling_list)
def preProcessText(col):
"""
Takes in a pandas.Series and preprocesses the text
"""
reponct = string.punctuation.replace("?","").replace("/","")
rehtml = re.compile('<.*>')
extr = col.str.strip()
extr = extr.str.replace(rehtml, '', regex=True)
extr = extr.str.translate(str.maketrans('','',reponct))
extr = extr.str.replace('[^0-9a-zA-Z?/ ]+', ' ', regex=True)
extr = extr.str.replace('\s+', ' ', regex=True)
extr = extr.str.lower()
return extr
def infer_icd10_cm(data: str, med_cond, diagnosis, symptoms):
"""
:data type: string to pass through Comprehend Medical icd10_cm
:med_cond type: List[]
:diagnosis type: List[]
:symptoms type: List[]
"""
if not data:
return
try:
icd10_result = compr_m.infer_icd10_cm(Text=data)
for resp in icd10_result['Entities']:
if resp['Score'] > 0.4:
resp_str = resp['Text']
category = ''
# first check Attributes
for attr in resp['Attributes']:
if attr['Score'] > 0.4:
if attr['Type'] == 'ACUITY' or attr['Type'] == 'DIRECTION':
resp_str = f'{attr["Text"]}' + ' ' + resp_str
elif attr['Type'] == 'SYSTEM_ORGAN_SITE':
resp_str = resp_str + ' ' + f'{attr["Text"]}'
for trait in resp['Traits']:
if trait['Score'] > 0.4:
if trait['Name'] == 'NEGATION':
category = 'NEG'
break #don't save anything for negation
elif trait['Name'] == 'SYMPTOM':
category = 'SYMP'
elif trait['Name'] == 'DIAGNOSIS':
category = 'DIAGN'
# add our response string to corresponding list
if not category:
resp_str = checkSpelling(resp_str)
med_cond.append(resp_str)
elif category == 'SYMP':
resp_str = checkSpelling(resp_str)
symptoms.append(resp_str)
elif category == 'DIAGN':
resp_str = checkSpelling(resp_str)
diagnosis.append(resp_str)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def find_key_phrases(data:str, key_phrases, icd10cm_list, anatomy_list):
"""
:data type: string to pass through Comprehend Detect Key Phrases
:key_phrases type: List[]
:icd10cm_list type: List[]
:anatomy_list type: List[]
"""
if not data:
return
try:
kp_result = compr.detect_key_phrases(Text=data, LanguageCode='en')
for resp in kp_result['KeyPhrases']:
placed = False
if resp['Score'] > 0.4:
for icd10cm in icd10cm_list:
if contains_word(icd10cm, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
elif contains_word(resp['Text'], icd10cm):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
placed = True
break
if not placed:
for anatomy in anatomy_list:
if contains_word(anatomy, resp['Text']):
resp_str = checkSpelling(resp['Text'])
key_phrases.append(resp_str)
break
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
| 34.747525 | 105 | 0.540248 |
9a8e57b168ea55c696b5fec3c4c437440c05734d | 1,182 | py | Python | script_example.py | op8867555/BGmi | 22a7b0292f0fe435e87208154826d8f5baeb7b67 | [
"MIT"
] | null | null | null | script_example.py | op8867555/BGmi | 22a7b0292f0fe435e87208154826d8f5baeb7b67 | [
"MIT"
] | null | null | null | script_example.py | op8867555/BGmi | 22a7b0292f0fe435e87208154826d8f5baeb7b67 | [
"MIT"
] | null | null | null | import datetime
from bgmi.script import ScriptBase
from bgmi.utils import parse_episode
if __name__ == "__main__":
s = Script()
print(s.get_download_url())
| 26.863636 | 86 | 0.5 |
9a8e626f8a8e604d6b65b5bcce02a4426d19dada | 677 | py | Python | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/10. Exercise - Functions Advanced/11_fill_the_box.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | from collections import deque
print(fill_the_box(2, 8, 2, 2, 1, 7, 3, 1, 5, "Finish"))
print(fill_the_box(5, 5, 2, 40, 11, 7, 3, 1, 5, "Finish"))
print(fill_the_box(10, 10, 10, 40, "Finish", 2, 15, 30))
| 29.434783 | 92 | 0.583456 |
9a8fedf028eb554590720a2eafe70d6a08a4c617 | 19,617 | py | Python | src/the_tale/the_tale/common/utils/views.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/common/utils/views.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/common/utils/views.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
# for external code
ViewError = utils_exceptions.ViewError
# TODO: write metaclass for processing processor arguments
# TODO: refactor error/errors
# TODO: refactor error/errors
def mime_type_to_response_type(content_type):
if content_type is None:
return 'json'
if any(tp in content_type for tp in ('application/xhtml+xml', 'text/html', 'text/plain', 'text/xml')):
return 'html'
if any(tp in content_type for tp in ('application/x-javascript',)):
return 'js'
return 'json'
| 30.18 | 144 | 0.62247 |
9a90d892378e62b46598d590087d4afcc5ce7a6c | 269 | py | Python | NeoAnalysis_Py2.7/NeoAnalysis/__init__.py | Research-lab-KUMS/NeoAnalysis | 32b508dfade3069b1ec5cc7664574b8d3f2d5f57 | [
"MIT"
] | 23 | 2017-09-04T13:20:38.000Z | 2022-03-08T08:15:17.000Z | NeoAnalysis_Py2.7/NeoAnalysis/__init__.py | Research-lab-KUMS/NeoAnalysis | 32b508dfade3069b1ec5cc7664574b8d3f2d5f57 | [
"MIT"
] | 4 | 2018-01-05T13:44:29.000Z | 2021-09-30T17:08:15.000Z | NeoAnalysis_Py2.7/NeoAnalysis/__init__.py | neoanalysis/NeoAnalysis | c5f25b71e16997f3a05f70b1eead11f99a3b7e2b | [
"MIT"
] | 5 | 2017-11-26T19:40:46.000Z | 2021-03-11T17:25:23.000Z | __version__ = '0.10.0'
from NeoAnalysis.spikedetection import SpikeDetection
from NeoAnalysis.spikesorting import SpikeSorting
from NeoAnalysis.analogfilter import AnalogFilter
from NeoAnalysis.graphics import Graphics
from NeoAnalysis.popuanalysis import PopuAnalysis
| 38.428571 | 53 | 0.877323 |
9a91a0bb1c2222107ec4d2fbb68724bb0b797301 | 247 | py | Python | paperplane/backends/click/choice.py | abhilash1in/paperplane | 1dfda182dc8a70fe08fa2284ea63b434246c394b | [
"MIT"
] | null | null | null | paperplane/backends/click/choice.py | abhilash1in/paperplane | 1dfda182dc8a70fe08fa2284ea63b434246c394b | [
"MIT"
] | null | null | null | paperplane/backends/click/choice.py | abhilash1in/paperplane | 1dfda182dc8a70fe08fa2284ea63b434246c394b | [
"MIT"
] | null | null | null | import click
from typing import Any, Optional
from paperplane.backends.click import _prompt
| 30.875 | 84 | 0.777328 |
9a95474d7bed8dc0c9bdace087bfb79423d63386 | 1,012 | py | Python | lib/python/treadmill/api/nodeinfo.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | 2 | 2017-10-31T18:48:20.000Z | 2018-03-04T20:35:20.000Z | lib/python/treadmill/api/nodeinfo.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | lib/python/treadmill/api/nodeinfo.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | """Implementation of allocation API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import discovery
from treadmill import context
_LOGGER = logging.getLogger(__name__)
| 24.095238 | 66 | 0.609684 |
9a95d81d2c4081cc80031302b6a6bfe2482c9c94 | 167 | py | Python | new/views.py | Sravan996/django | 3a982382d5cfe9bfb498534f1effcf58a3771539 | [
"MIT"
] | null | null | null | new/views.py | Sravan996/django | 3a982382d5cfe9bfb498534f1effcf58a3771539 | [
"MIT"
] | null | null | null | new/views.py | Sravan996/django | 3a982382d5cfe9bfb498534f1effcf58a3771539 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.shortcuts import HttpResponse
# Create your views here.
| 20.875 | 41 | 0.790419 |
9a969dcb4bdc1a8eee56b110c60c1611472a3520 | 1,834 | py | Python | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 16,124 | 2015-01-01T06:18:12.000Z | 2022-03-31T00:46:52.000Z | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 179 | 2015-01-07T10:19:57.000Z | 2022-02-21T21:19:14.000Z | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 12,163 | 2015-01-03T14:23:36.000Z | 2022-03-31T10:10:23.000Z | """
Clusters Bob Ross paintings by features.
By Walter Hickey <walter.hickey@fivethirtyeight.com>
See http://fivethirtyeight.com/features/a-statistical-analysis-of-the-work-of-bob-ross/
"""
import numpy as np
from scipy.cluster.vq import vq, kmeans, whiten
import math
import csv
if __name__ == "__main__":
main() | 31.084746 | 93 | 0.640676 |
9a96b491ff08bc06ac888649b8beb70e3e05070b | 880 | py | Python | corvette/__init__.py | philipkiely/corvette | 71632f9ed9d628c207c79f6f1b2ee98d911657b7 | [
"MIT"
] | null | null | null | corvette/__init__.py | philipkiely/corvette | 71632f9ed9d628c207c79f6f1b2ee98d911657b7 | [
"MIT"
] | null | null | null | corvette/__init__.py | philipkiely/corvette | 71632f9ed9d628c207c79f6f1b2ee98d911657b7 | [
"MIT"
] | null | null | null | import os
import sys
import json
from corvette.autoindex import autoindex
| 30.344828 | 71 | 0.643182 |
9a970d49581e1f0dbf4db3373345dd1070a85ab1 | 1,965 | py | Python | main.py | theoboldt/pitemp | 366f2d1459144fa7f5e3e5526ee0a4e334f52d37 | [
"Apache-2.0"
] | null | null | null | main.py | theoboldt/pitemp | 366f2d1459144fa7f5e3e5526ee0a4e334f52d37 | [
"Apache-2.0"
] | null | null | null | main.py | theoboldt/pitemp | 366f2d1459144fa7f5e3e5526ee0a4e334f52d37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sensor
import lcd
import csv
import time
import os
import datetime
import sys
import re
import circular_buffer
lcd.init()
last_time = datetime.datetime.now()
last_minute = last_time.minute
probe_minute_01 = circular_buffer.CircularBuffer(size=30)
probe_minute_15 = circular_buffer.CircularBuffer(size=15)
probes_minute_30 = circular_buffer.CircularBuffer(size=30)
probes_minute_60 = circular_buffer.CircularBuffer(size=60)
# initialize buffers
current_temperature = sensor.read()
probe_minute_01.append(current_temperature)
probe_minute_15.append(current_temperature)
probes_minute_30.append(current_temperature)
probes_minute_60.append(current_temperature)
while True:
try:
current_time = datetime.datetime.now()
current_minute = current_time.minute
current_temperature = sensor.read()
if current_temperature == 9999:
lcd.top("Temperature")
lcd.bottom("Failed to read")
lcd.cleanup()
sys.exit(0)
probe_minute_01.append(current_temperature)
lcd.top("{:2.1f}".format(current_temperature) + chr(223) + "C " + current_time.strftime("%H:%M:%S"))
if last_minute != current_minute:
lcd.display_init()
probe_minute_15.append(current_temperature)
probes_minute_30.append(current_temperature)
probes_minute_60.append(current_temperature)
csv.append(current_time.strftime("%s") + ";" + str(current_time) + ";" + "{:2.1f}".format(
current_temperature).replace('.', ',') + "\n")
lcd.bottom("{:2.1f}".format(probes_minute_60.average) + chr(223) + " " + "{:2.1f}".format(
probes_minute_30.average) + chr(223) + " " + "{:2.1f}".format(probe_minute_15.average) + chr(223))
time.sleep(2)
last_minute = current_minute
last_time = current_time
except KeyboardInterrupt:
lcd.cleanup()
sys.exit(0)
| 30.230769 | 110 | 0.679898 |
9a9716f606a1775600dbcfac690fb2f212514d33 | 9,988 | py | Python | package/github-endpoints.py | wahyu9kdl/wahyu9kdl.github.io | c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa | [
"MIT"
] | 2 | 2021-12-05T22:40:52.000Z | 2022-01-17T08:48:13.000Z | package/github-endpoints.py | wahyu9kdl/wahyu9kdl.github.io | c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa | [
"MIT"
] | 1 | 2022-01-12T13:58:28.000Z | 2022-01-12T13:58:28.000Z | package/github-endpoints.py | wahyu9kdl/wahyu9kdl.github.io | c7c8ee1c3e7a2eb072467cb43e979ef4fc76d6fa | [
"MIT"
] | 1 | 2022-01-12T19:20:26.000Z | 2022-01-12T19:20:26.000Z | #!/usr/bin/python3
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import re
import time
import requests
import random
import argparse
from urllib.parse import urlparse
from functools import partial
from colored import fg, bg, attr
from multiprocessing.dummy import Pool
TOKENS_FILE = os.path.dirname(os.path.realpath(__file__))+'/.tokens'
MIN_LENGTH = 5
_url_chars = '[a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
_not_url_chars = '[^a-zA-Z0-9\-\.\?\#\$&@%=_:/\]\[]'
t_endpoints = []
t_exclude = [
r'^http://$',
r'^https://$',
r'^javascript:$',
r'^tel:$',
r'^mailto:$',
r'^text/javascript$',
r'^application/json$',
r'^application/javascript$',
r'^text/plain$',
r'^text/html$',
r'^text/x-python$',
r'^text/css$',
r'^image/png$',
r'^image/jpeg$',
r'^image/x-icon$',
r'^img/favicon.ico$',
r'^application/x-www-form-urlencoded$',
r'/Users/[0-9a-zA-Z\-\_]/Desktop',
r'www.w3.org',
r'schemas.android.com',
r'www.apple.com',
# r'^#',
# r'^\?',
# r'^javascript:',
# r'^mailto:',
]
t_regexp = [
r'[\'"\(].*(http[s]?://'+_url_chars+'*?)[\'"\)]',
r'[\'"\(](http[s]?://'+_url_chars+'+)',
r'[\'"\(]('+_url_chars+'+\.sdirect'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.htm'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.php'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.asp'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.js'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.xml'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.ini'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.conf'+_url_chars+'*)',
r'[\'"\(]('+_url_chars+'+\.cfm'+_url_chars+'*)',
r'href\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'src\s*[.=]\s*[\'"]('+_url_chars+'+)',
r'url\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'urlRoot\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'endpoint[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'script[s]\s*[:=]\s*[\'"]('+_url_chars+'+)',
r'\.ajax\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.get\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.post\s*\(\s*[\'"]('+_url_chars+'+)',
r'\.load\s*\(\s*[\'"]('+_url_chars+'+)',
### a bit noisy
# r'[\'"](' + _url_chars + '+/' + _url_chars + '+)?[\'"]',
# r'content\s*[.=]\s*[\'"]('+_url_chars+'+)',
]
parser = argparse.ArgumentParser()
parser.add_argument( "-t","--token",help="your github token (required)" )
parser.add_argument( "-d","--domain",help="domain you are looking for (required)" )
parser.add_argument( "-e","--extend",help="also look for <dummy>example.com", action="store_true" )
parser.add_argument( "-a","--all",help="displays urls of all other domains", action="store_true" )
parser.add_argument( "-r","--relative",help="also displays relative urls", action="store_true" )
parser.add_argument( "-s","--source",help="display urls where endpoints are found", action="store_true" )
parser.add_argument( "-v","--verbose",help="verbose mode, for debugging purpose", action="store_true" )
parser.parse_args()
args = parser.parse_args()
t_tokens = []
if args.token:
t_tokens = args.token.split(',')
else:
if os.path.isfile(TOKENS_FILE):
fp = open(TOKENS_FILE,'r')
t_tokens = fp.read().split("\n")
fp.close()
if not len(t_tokens):
parser.error( 'auth token is missing' )
if args.source:
_source = True
else:
_source = False
if args.domain:
_domain = args.domain
else:
parser.error( 'domain is missing' )
if args.relative:
_relative = True
else:
_relative = False
if args.all:
_alldomains = True
else:
_alldomains = False
t_sort_order = [
{ 'sort':'indexed', 'order':'desc', },
{ 'sort':'indexed', 'order':'asc', },
{ 'sort':'', 'order':'desc', }
]
t_history = []
t_history_urls = []
_search = '"' + _domain + '"'
### this is a test, looks like we got more result that way
import tldextract
t_host_parse = tldextract.extract( _domain )
if args.extend:
# which one is
_search = '"' + t_host_parse.domain + '"'
else:
# the most effective ?
_search = '"' + t_host_parse.domain + '.' + t_host_parse.suffix + '"'
# or simply ?
# _search = '"' + _domain + '"'
# print(_search)
# exit()
###
if args.extend:
_regexp = r'(([0-9a-z_\-\.]+\.)?([0-9a-z_\-]+)?'+t_host_parse.domain+'([0-9a-z_\-\.]+)?\.[a-z]{1,5})'
_confirm = t_host_parse.domain
else:
_regexp = r'((([0-9a-z_\-\.]+)\.)?' + _domain.replace('.','\.')+')'
_confirm = _domain
if args.verbose:
print( "Search: %s" % _search )
print( "Regexp: %s" % _regexp)
print( "Confirm: %s" % _confirm)
print( "Relative urls: %s" % _relative)
print( "All domains: %s" % _alldomains)
for so in t_sort_order:
page = 1
if args.verbose:
print( '\n----- %s %s\n' % (so['sort'],so['order']) )
while True:
if args.verbose:
print("page %d" % page)
time.sleep( random.random() )
token = random.choice( t_tokens )
t_json = githubApiSearchCode( token, _search, page, so['sort'], so['order'] )
# print(t_json)
if not t_json or 'documentation_url' in t_json:
if args.verbose:
print(t_json)
t_tokens.remove(token)
if len(t_tokens) == 0:
exit()
continue
page = page + 1
if 'items' in t_json and len(t_json['items']):
pool = Pool( 30 )
pool.map( partial(readCode,_regexp,_confirm,_source,_relative,_alldomains), t_json['items'] )
pool.close()
pool.join()
else:
break
exit()
| 31.507886 | 135 | 0.505406 |
9a983eb032aad5191f1e045e13d058aec5f59848 | 7,952 | py | Python | information111/info/user/views.py | SNxiaobei/text | 637018ff89d992c2ed23f5c90fa2010023bc2ff3 | [
"MIT"
] | null | null | null | information111/info/user/views.py | SNxiaobei/text | 637018ff89d992c2ed23f5c90fa2010023bc2ff3 | [
"MIT"
] | null | null | null | information111/info/user/views.py | SNxiaobei/text | 637018ff89d992c2ed23f5c90fa2010023bc2ff3 | [
"MIT"
] | null | null | null | from flask import abort
from flask import current_app
from flask import g
from flask import request
from flask import session
from info import constants
from info import db
from info.models import Category, News, User
from info.utils.response_code import RET
from . import profile_blue
from flask import render_template,redirect,jsonify
from info.utils.common import user_login_data
from info.utils.image_storage import storage
"""
index.views:
"""
""""""
""""""
""""""
| 26.774411 | 106 | 0.649774 |
9a9a15482e95aa7f0388513fb55229cb50c955bb | 962 | py | Python | code/magicsquares/mgsq/three_by_three.py | gerritjvv/blog | 26dbba7b38ed7aae63467720fcac2d95da1a0d7f | [
"MIT"
] | null | null | null | code/magicsquares/mgsq/three_by_three.py | gerritjvv/blog | 26dbba7b38ed7aae63467720fcac2d95da1a0d7f | [
"MIT"
] | null | null | null | code/magicsquares/mgsq/three_by_three.py | gerritjvv/blog | 26dbba7b38ed7aae63467720fcac2d95da1a0d7f | [
"MIT"
] | null | null | null | """
Solves a 3x3 square programmatically.
It is not meant to be a full blown solution for magic squares, but rather a writeup
of my thoughts on how it can be solved.
"""
import statistics
def make_pairs(I, mid):
"""
We take pairs as [ [9, 1], [8, 2], [7, 3], [6, 4]]
:param I:
:param mid:
:return:
"""
h = 0
t = len(I) - 1
pairs = []
while h < mid-1:
pairs.append([I[h], I[t]])
h += 1
t -= 1
return pairs
if __name__ == '__main__':
squares(3)
| 20.041667 | 83 | 0.546778 |
9a9a50511abf52afcd61e02d8aeff1032454c0a6 | 3,379 | py | Python | utils/videoJob.py | dbpeng/aws-lambda-python-example-zencoder | 3c3e2d2ea88be824a62e41f16d6bdd79deeef2a0 | [
"MIT"
] | 1 | 2018-05-01T11:54:33.000Z | 2018-05-01T11:54:33.000Z | utils/videoJob.py | dbpeng/aws-lambda-python-example-zencoder | 3c3e2d2ea88be824a62e41f16d6bdd79deeef2a0 | [
"MIT"
] | 1 | 2021-06-01T22:18:53.000Z | 2021-06-01T22:18:53.000Z | utils/videoJob.py | dbpeng/aws-lambda-python-example-zencoder | 3c3e2d2ea88be824a62e41f16d6bdd79deeef2a0 | [
"MIT"
] | null | null | null | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from datetime import datetime
import json
from base import Session, engine, Base
from enum import Enum
VIDEOS_S3_PATH = os.environ["VIDEOS_S3_PATH"]
# if __name__ == "__main__":
# session = Session()
# vjob = VideoTranscodeJob()
# vjob.setSrc("s3://wowza-video/hk33456678.mp4")
# vjob.setDst("13ffjsdhr")
# vjob.setConfig("zen-hls")
# vjob.setJobId("13556245")
# vjob.setVendor("zencoder")
# session.add(vjob)
# session.commit()
# # jobs = session.query(VideoTranscodeJob).all()
# # for job in jobs:
# # job.setProgress(4)
# # session.commit()
# session.close()
| 25.406015 | 76 | 0.629772 |
9a9af8b29d8ddd5b44627798d65817d8e0c206e0 | 3,411 | py | Python | alipay/aop/api/domain/MybankCreditSceneprodCommonQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MybankCreditSceneprodCommonQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MybankCreditSceneprodCommonQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 29.405172 | 79 | 0.588977 |
9a9d13bd5f6b65068699065c4f4e5d2b6027979d | 32,570 | py | Python | train_end2end.py | lyn1874/daml | edd89c3baf018cdb407208d137364fcefd913896 | [
"MIT"
] | null | null | null | train_end2end.py | lyn1874/daml | edd89c3baf018cdb407208d137364fcefd913896 | [
"MIT"
] | null | null | null | train_end2end.py | lyn1874/daml | edd89c3baf018cdb407208d137364fcefd913896 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 18:23:08 2019
This scrip is for training the experiement end2end
@author: li
"""
import tensorflow as tf
import models.AE as AE
import optimization.loss_tf as loss_tf
from data import read_frame_temporal as rft
import numpy as np
import os
import math
import cv2
import shutil
import const
def train_moving_mnist_single_digit(model_group):
"""This function train a pure autoencoder for moving mnist single digit dataset
The goal of this type of experiments is to hope the latent can show some pattern between
anomalies and normal"""
motion_method = "conv3d"
data_set = "moving_mnist_single_digit"
version = 1 # version 1 means the activation layer in the last convolutional block is changed from
# learky-relu to tanh
args.z_mse_ratio = 0.001
num_layer = [5, 4]
stat = [6, 2, 1]
for model_type in model_group:
for single_layer in num_layer:
time_step, delta, interval = stat
num_enc_layer = single_layer
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_full")
if __name__ == '__main__':
args = const.args
print("-------------------------------------------------------------------")
print("------------------argument for current experiment------------------")
print("-------------------------------------------------------------------")
for arg in vars(args):
print(arg, getattr(args, arg))
print("-------------------------------------------------------------------")
print(type(args.version), args.version)
if args.version == 0:
print("only running experiment once")
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=args.version, bg_ind=None, augment_opt="none")
else:
for s_version in range(args.version):
print("running experiment for version %d" % s_version)
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=s_version, bg_ind=None, augment_opt="none")
| 49.725191 | 142 | 0.576727 |
9a9d1d892502aafdc91f1a1eaee4fb13e479814b | 10,781 | py | Python | cellfinder_napari/detect.py | neuromusic/cellfinder-napari | 9a58a3b2174c5cb4c740ace6373744b5bcc4cc3d | [
"BSD-3-Clause"
] | 7 | 2021-03-03T11:58:24.000Z | 2021-12-24T08:40:12.000Z | cellfinder_napari/detect.py | neuromusic/cellfinder-napari | 9a58a3b2174c5cb4c740ace6373744b5bcc4cc3d | [
"BSD-3-Clause"
] | 87 | 2021-03-08T18:58:26.000Z | 2022-03-30T15:37:08.000Z | cellfinder_napari/detect.py | neuromusic/cellfinder-napari | 9a58a3b2174c5cb4c740ace6373744b5bcc4cc3d | [
"BSD-3-Clause"
] | 5 | 2021-05-26T19:23:50.000Z | 2022-03-06T13:03:13.000Z | import napari
from pathlib import Path
from magicgui import magicgui
from typing import List
from cellfinder_napari.utils import brainglobe_logo
# TODO:
# how to store & fetch pre-trained models?
# TODO: params to add
NETWORK_VOXEL_SIZES = [5, 1, 1]
CUBE_WIDTH = 50
CUBE_HEIGHT = 20
CUBE_DEPTH = 20
# If using ROI, how many extra planes to analyse
MIN_PLANES_ANALYSE = 0
| 32.969419 | 119 | 0.576384 |
9a9de9279be39ea51b643d07bcacfa3cc557f3f2 | 1,414 | py | Python | setup.py | paxtonfitzpatrick/nltools | 9d52e2e1d665a21feb641ab16424e450aca0c971 | [
"MIT"
] | 65 | 2018-08-26T19:39:11.000Z | 2022-02-20T10:32:58.000Z | setup.py | paxtonfitzpatrick/nltools | 9d52e2e1d665a21feb641ab16424e450aca0c971 | [
"MIT"
] | 138 | 2018-08-15T22:31:45.000Z | 2022-02-14T18:23:46.000Z | setup.py | paxtonfitzpatrick/nltools | 9d52e2e1d665a21feb641ab16424e450aca0c971 | [
"MIT"
] | 18 | 2018-08-23T16:52:35.000Z | 2022-02-24T01:52:27.000Z | from setuptools import setup, find_packages
version = {}
with open("nltools/version.py") as f:
exec(f.read(), version)
with open("requirements.txt") as f:
requirements = f.read().splitlines()
extra_setuptools_args = dict(tests_require=["pytest"])
setup(
name="nltools",
version=version["__version__"],
author="Cosan Lab",
author_email="luke.j.chang@dartmouth.edu",
url="https://cosanlab.github.io/nltools",
python_requires=">=3.6",
install_requires=requirements,
extras_require={"interactive_plots": ["ipywidgets>=5.2.2"]},
packages=find_packages(exclude=["nltools/tests"]),
package_data={"nltools": ["resources/*"]},
include_package_data=True,
license="LICENSE.txt",
description="A Python package to analyze neuroimaging data",
long_description="nltools is a collection of python tools to perform "
"preprocessing, univariate GLMs, and predictive "
"multivariate modeling of neuroimaging data. It is the "
"analysis engine powering www.neuro-learn.org.",
keywords=["neuroimaging", "preprocessing", "analysis", "machine-learning"],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
],
**extra_setuptools_args
)
| 35.35 | 79 | 0.681047 |
9a9e35c047e006353fb6423b17d95459f785de56 | 4,028 | py | Python | {{ cookiecutter.repo_name }}/src/config/config.py | johanngerberding/cookiecutter-data-science | db44c48cdce4886d42b610c04e758d758f834e32 | [
"MIT"
] | null | null | null | {{ cookiecutter.repo_name }}/src/config/config.py | johanngerberding/cookiecutter-data-science | db44c48cdce4886d42b610c04e758d758f834e32 | [
"MIT"
] | null | null | null | {{ cookiecutter.repo_name }}/src/config/config.py | johanngerberding/cookiecutter-data-science | db44c48cdce4886d42b610c04e758d758f834e32 | [
"MIT"
] | null | null | null | import os
import warnings
from dotenv import find_dotenv, load_dotenv
from yacs.config import CfgNode as ConfigurationNode
from pathlib import Path
# Please configure your own settings here #
# YACS overwrite these settings using YAML
__C = ConfigurationNode()
### EXAMPLE ###
"""
# data augmentation parameters with albumentations library
__C.DATASET.AUGMENTATION = ConfigurationNode()
__C.DATASET.AUGMENTATION.BLURRING_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_NOISE_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_VAR_LIMIT =(10.0, 40.0)
__C.DATASET.AUGMENTATION.BLUR_LIMIT = 7
...
# model backbone configs
__C.MODEL.BACKBONE = ConfigurationNode()
__C.MODEL.BACKBONE.NAME = 'mobilenet_v2'
__C.MODEL.BACKBONE.RGB = True
__C.MODEL.BACKBONE.PRETRAINED_PATH = 'C:/data-science/kaggle/bengali.ai/models/mobilenet_v2-b0353104.pth'
# model head configs
__C.MODEL.HEAD = ConfigurationNode()
__C.MODEL.HEAD.NAME = 'simple_head_module'
__C.MODEL.HEAD.ACTIVATION = 'leaky_relu'
__C.MODEL.HEAD.OUTPUT_DIMS = [168, 11, 7]
__C.MODEL.HEAD.INPUT_DIM = 1280 # mobilenet_v2
__C.MODEL.HEAD.HIDDEN_DIMS = [512, 256]
__C.MODEL.HEAD.BATCH_NORM = True
__C.MODEL.HEAD.DROPOUT = 0.4
"""
def get_cfg_defaults():
"""
Get a yacs CfgNode object with default values for my_project.
"""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern recommended by the YACS repo.
# It will be subsequently overwritten with local YAML.
return __C.clone()
def combine_cfgs(path_cfg_data: Path=None, path_cfg_override: Path=None):
"""
An internal facing routine thaat combined CFG in the order provided.
:param path_output: path to output files
:param path_cfg_data: path to path_cfg_data files
:param path_cfg_override: path to path_cfg_override actual
:return: cfg_base incorporating the overwrite.
"""
if path_cfg_data is not None:
path_cfg_data=Path(path_cfg_data)
if path_cfg_override is not None:
path_cfg_override=Path(path_cfg_override)
# Path order of precedence is:
# Priority 1, 2, 3, 4 respectively
# .env > other CFG YAML > data.yaml > default.yaml
# Load default lowest tier one:
# Priority 4:
cfg_base = get_cfg_defaults()
# Merge from the path_data
# Priority 3:
if path_cfg_data is not None and path_cfg_data.exists():
cfg_base.merge_from_file(path_cfg_data.absolute())
# Merge from other cfg_path files to further reduce effort
# Priority 2:
if path_cfg_override is not None and path_cfg_override.exists():
cfg_base.merge_from_file(path_cfg_override.absolute())
# Merge from .env
# Priority 1:
list_cfg = update_cfg_using_dotenv()
if list_cfg is not []:
cfg_base.merge_from_list(list_cfg)
return cfg_base
def update_cfg_using_dotenv() -> list:
"""
In case when there are dotenvs, try to return list of them.
# It is returning a list of hard overwrite.
:return: empty list or overwriting information
"""
# If .env not found, bail
if find_dotenv() == '':
warnings.warn(".env files not found. YACS config file merging aborted.")
return []
# Load env.
load_dotenv(find_dotenv(), verbose=True)
# Load variables
list_key_env = {
"DATASET.TRAIN_DATA_PATH",
"DATASET.VAL_DATA_PATH",
"MODEL.BACKBONE.PRETRAINED_PATH",
"MODEL.SOLVER.LOSS.LABELS_WEIGHTS_PATH"
}
# Instantiate return list.
path_overwrite_keys = []
# Go through the list of key to be overwritten.
for key in list_key_env:
# Get value from the env.
value = os.getenv("path_overwrite_keys")
# If it is none, skip. As some keys are only needed during training and others during the prediction stage.
if value is None:
continue
# Otherwise, adding the key and the value to the dictionary.
path_overwrite_keys.append(key)
path_overwrite_keys.append(value)
return path_overwrite_keys
| 30.984615 | 115 | 0.712512 |
9a9e673814218a6b691d7522f64cfb8d20627d8f | 475 | py | Python | section_7/ex 30.py | thiagofreitascarneiro/Python-avancado-Geek-University | 861b742ad6b30955fcbe63274b8cf8afc6ca028f | [
"MIT"
] | null | null | null | section_7/ex 30.py | thiagofreitascarneiro/Python-avancado-Geek-University | 861b742ad6b30955fcbe63274b8cf8afc6ca028f | [
"MIT"
] | null | null | null | section_7/ex 30.py | thiagofreitascarneiro/Python-avancado-Geek-University | 861b742ad6b30955fcbe63274b8cf8afc6ca028f | [
"MIT"
] | null | null | null | list1 = []
list2 = []
list3 = []
cont = 0
while cont < 10:
valor = int(input('Digite um numero na lista 1: '))
list1.append(valor)
valor2 = int(input('Digite um numero na lista 2: '))
list2.append(valor2)
cont = cont + 1
if cont == 10:
for i in list1:
if i in list2:
if i not in list3:
list3.append(i)
print(list1)
print(list2)
print(f'Os nmeoros que contem em ambos os vetores so: {list3}')
| 23.75 | 65 | 0.562105 |
9a9ee79fbb5396d6313eb8172811069d5e290bd2 | 7,693 | py | Python | scripts/eval/eval.py | p0l0satik/PlaneDetector | 60d7330537b90ff0ca74247cd6dac2ca7fc627bc | [
"MIT"
] | null | null | null | scripts/eval/eval.py | p0l0satik/PlaneDetector | 60d7330537b90ff0ca74247cd6dac2ca7fc627bc | [
"MIT"
] | null | null | null | scripts/eval/eval.py | p0l0satik/PlaneDetector | 60d7330537b90ff0ca74247cd6dac2ca7fc627bc | [
"MIT"
] | null | null | null | import os
from shutil import rmtree
import cv2
import docker
import numpy as np
import open3d as o3d
from pypcd import pypcd
from src.metrics import metrics
from src.metrics.metrics import multi_value, mean
from src.parser import loaders, create_parser
UNSEGMENTED_COLOR = np.asarray([0, 0, 0], dtype=int)
algos = {
"ddpff": "ddpff:1.0"
}
all_plane_metrics = [
metrics.iou,
metrics.dice,
metrics.precision,
metrics.recall,
metrics.fScore
]
CLOUDS_DIR = "input"
PREDICTIONS_DIR = "output"
annot_sorters = {
'tum': lambda x: x,
'icl_tum': lambda x: int(x),
'icl': lambda x: x
}
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
prepare_clouds(args.dataset_path, args.loader)
with open("results.txt", 'w') as log_file:
for algo_name in algos.keys():
measure_algo(algo_name, args.annotations_path, args.loader, log_file)
| 38.273632 | 118 | 0.66203 |
9a9f85fc451de9881426ccefc8e13f03669bb8d6 | 491 | py | Python | cosmogrb/utils/fits_file.py | wematthias/cosmogrb | 09852eb4e6e7315bbede507e19a2d57f1b927c3f | [
"BSD-2-Clause"
] | 3 | 2020-03-08T18:20:32.000Z | 2022-03-10T17:27:26.000Z | cosmogrb/utils/fits_file.py | wematthias/cosmogrb | 09852eb4e6e7315bbede507e19a2d57f1b927c3f | [
"BSD-2-Clause"
] | 11 | 2020-03-04T17:21:15.000Z | 2020-06-09T12:20:00.000Z | cosmogrb/utils/fits_file.py | wematthias/cosmogrb | 09852eb4e6e7315bbede507e19a2d57f1b927c3f | [
"BSD-2-Clause"
] | 5 | 2020-03-18T18:05:05.000Z | 2022-03-21T16:06:38.000Z | from responsum.utils.fits_file import FITSFile, FITSExtension as FE
import pkg_resources
| 32.733333 | 106 | 0.757637 |
9a9fb2cd7765697e57d5b413e5af8232b235432f | 121,557 | py | Python | mi/instrument/seabird/sbe26plus/driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/instrument/seabird/sbe26plus/driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/instrument/seabird/sbe26plus/driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | """
@package mi.instrument.seabird.sbe26plus.ooicore.driver
@file /Users/unwin/OOI/Workspace/code/marine-integrations/mi/instrument/seabird/sbe26plus/ooicore/driver.py
@author Roger Unwin
@brief Driver for the ooicore
Release notes:
None.
"""
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
import re
import time
import string
from mi.core.log import get_logger ; log = get_logger()
from mi.instrument.seabird.driver import SeaBirdInstrumentDriver
from mi.instrument.seabird.driver import SeaBirdProtocol
from mi.instrument.seabird.driver import NEWLINE
from mi.instrument.seabird.driver import ESCAPE
from mi.core.util import dict_equal
from mi.core.common import BaseEnum
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.chunker import StringChunker
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentStateException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentTimeoutException
from pyon.agent.agent import ResourceAgentState
# default timeout.
TIMEOUT = 60 # setsampling takes longer than 10 on bad internet days.
TIDE_REGEX = r'tide: start time = +(\d+ [A-Za-z]{3} \d{4} \d+:\d+:\d+), p = +([\-\d\.]+), pt = +([\-\d\.]+), t = +([\-\d\.]+)\r\n'
TIDE_REGEX_MATCHER = re.compile(TIDE_REGEX)
WAVE_REGEX = r'(wave: start time =.*?wave: end burst\r\n)'
WAVE_REGEX_MATCHER = re.compile(WAVE_REGEX, re.DOTALL)
STATS_REGEX = r'(deMeanTrend.*?H1/100 = [\d\.e+]+\r\n)'
STATS_REGEX_MATCHER = re.compile(STATS_REGEX, re.DOTALL)
TS_REGEX = r'( +)([\-\d\.]+) +([\-\d\.]+) +([\-\d\.]+)\r\n'
TS_REGEX_MATCHER = re.compile(TS_REGEX)
DC_REGEX = r'(Pressure coefficients.+?)TA3 = [\d+e\.].+?\r\n'
DC_REGEX_MATCHER = re.compile(DC_REGEX, re.DOTALL)
DS_REGEX = r'(SBE 26plus V.+?)logging = [\w, ].+?\r\n'
DS_REGEX_MATCHER = re.compile(DS_REGEX, re.DOTALL)
###
# Driver Constant Definitions
###
# Device prompts.
###############################################################################
# Data Particles
################################################################################
# presf_tide_measurement
# presf_wave_burst
# presf_wave_statistics
# presf_calibration_coefficients
# presf_operating_status
###############################################################################
# Driver
###############################################################################
###############################################################################
# Protocol
###############################################################################
| 45.054485 | 167 | 0.613276 |
9a9fc338c15aa55b529d0d570899ecd61a1b41cd | 514 | py | Python | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 3 | 2022-03-28T09:10:08.000Z | 2022-03-29T10:47:56.000Z | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 1 | 2022-03-27T11:52:58.000Z | 2022-03-27T11:52:58.000Z | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | null | null | null | # 1) count = To count how many time a particular word & char. is appearing
x = "Keep grinding keep hustling"
print(x.count("t"))
# 2) index = To get index of letter(gives the lowest index)
x="Keep grinding keep hustling"
print(x.index("t")) # will give the lowest index value of (t)
# 3) find = To get index of letter(gives the lowest index) | Return -1 on failure.
x = "Keep grinding keep hustling"
print(x.find("t"))
'''
NOTE : print(x.index("t",34)) : Search starts from index value 34 including 34
'''
| 25.7 | 82 | 0.684825 |