hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65e8c2b06a56311edf49d920f21df0bd1cab027c | 708 | py | Python | StationeersSaveFileDebugTools.py | lostinplace/StationeersSaveFileDebugTools | 372a2fc86a9fc3af25044a56131271b577d4d97b | [
"MIT"
] | null | null | null | StationeersSaveFileDebugTools.py | lostinplace/StationeersSaveFileDebugTools | 372a2fc86a9fc3af25044a56131271b577d4d97b | [
"MIT"
] | 1 | 2021-01-10T21:12:41.000Z | 2021-01-10T21:14:49.000Z | StationeersSaveFileDebugTools.py | lostinplace/StationeersSaveFileDebugTools | 372a2fc86a9fc3af25044a56131271b577d4d97b | [
"MIT"
] | null | null | null | import click
if __name__ == '__main__':
cli() | 26.222222 | 108 | 0.79661 |
65e9978ee2200931e2a3bf2760b84a179ae2b472 | 3,610 | py | Python | RL/get_depthmaps.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | RL/get_depthmaps.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | RL/get_depthmaps.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | import sys
from pathlib import Path
from datetime import datetime
import argparse
import json
import torch
from torchvision.utils import save_image
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
import open3d as o3d
from svox2 import *
from pyvox.models import Vox, Color
from pyvox.writer import VoxWriter
from importlib import reload as reload
reload(svox2)
from svox2 import *
#TODO> modify this:
sys.path.append("/workspace/svox2/opt")
from util.dataset import datasets
from util import config_util
# Our nice tools
sys.path.append("/workspace/aseeo-research")
import RLResearch.utils.depth_utils as du
data_dir = "/workspace/datasets/dog"
exp_name = "std"
checkpoint_path = Path(data_dir)/"ckpt"/exp_name/"ckpt.npz"
# device = "cuda:0" if torch.cuda.is_available() else "cpu"
device = "cpu"
# Load arguments from json
json_config_path = Path(data_dir)/"ckpt"/exp_name/"args.json"
parser = argparse.ArgumentParser()
with open(str(json_config_path.resolve()), 'rt') as f:
t_args = argparse.Namespace()
t_args.__dict__.update(json.load(f))
args = parser.parse_args(namespace=t_args)
# parser = argparse.ArgumentParser()
# args = parser.parse_args([])
dataset = datasets["nsvf"](
data_dir,
split="test",
device=device,
factor=1,
n_images=None,
**config_util.build_data_options(args))
grid = SparseGrid.load(str(checkpoint_path.resolve()))
# grid.requires_grad_(True)
config_util.setup_render_opts(grid.opt, args)
print('Render options', grid.opt)
## Single camera position
img_id = 0
c2w = dataset.c2w[img_id].to(device = device)
print("Rendering pose:", img_id)
print(c2w)
print("ndc")
print(dataset.ndc_coeffs)
cam = svox2.Camera(c2w,
dataset.intrins.get('fx', img_id),
dataset.intrins.get('fy', img_id),
dataset.intrins.get('cx', img_id),
dataset.intrins.get('cy', img_id),
width=dataset.get_image_size(img_id)[1],
height=dataset.get_image_size(img_id)[0],
ndc_coeffs=dataset.ndc_coeffs)
print("Cam is cuda", cam.is_cuda)
print("Using thres", args.log_depth_map_use_thresh)
# NOTE: no_grad enables the fast image-level rendering kernel for cuvol backend only
# other backends will manually generate rays per frame (slow)
with torch.no_grad():
depth_img = grid.volume_render_depth_image(cam,
args.log_depth_map_use_thresh if
args.log_depth_map_use_thresh else None
, batch_size=500)
## Export colored pointcloud to check in meshlab
depth_o3d = o3d.geometry.Image(depth_img.numpy())
intrinsics = o3d.camera.PinholeCameraIntrinsic(
cam.width,
cam.height,
dataset.intrins.get('fx', img_id),
dataset.intrins.get('fy', img_id),
dataset.intrins.get('cx', img_id),
dataset.intrins.get('cx', img_id)
)
pointcloud = o3d.geometry.PointCloud.create_from_depth_image(depth_o3d, intrinsics, stride = 8)
o3d.io.write_point_cloud("/workspace/data/pointcloud.ply", pointcloud)
a = 5 | 30.59322 | 95 | 0.645983 |
65e9a81560bd3bd5d8fd30d98016ea9c330e4eba | 6,350 | py | Python | monty/exts/info/global_source.py | onerandomusername/monty-python | fcd8b2827eb9bbb2a05d28f80ac9e215589f03f7 | [
"MIT"
] | 20 | 2021-12-31T10:17:20.000Z | 2022-03-31T04:16:17.000Z | monty/exts/info/global_source.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 1 | 2022-03-13T22:34:33.000Z | 2022-03-13T22:34:52.000Z | monty/exts/info/global_source.py | onerandomusername/monty-bot | b1c769e44b56bc45f37fc809064571d59c80db27 | [
"MIT"
] | 3 | 2022-01-02T15:21:46.000Z | 2022-03-05T09:37:54.000Z | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Final, List
from urllib.parse import urldefrag
import disnake
from disnake.ext import commands, tasks
from monty.log import get_logger
from monty.utils.helpers import encode_github_link
from monty.utils.messages import DeleteButton
if TYPE_CHECKING:
from monty.bot import Monty
from monty.exts.eval import Snekbox
logger = get_logger(__name__)
CODE_FILE = os.path.dirname(__file__) + "/_global_source_snekcode.py"
def setup(bot: Monty) -> None:
"""Add the global source cog to the bot."""
bot.add_cog(GlobalSource(bot))
| 37.352941 | 120 | 0.601732 |
65edc49e48e5587c1006c65ecaf10e38136be5e1 | 18,608 | py | Python | grr/core/grr_response_core/lib/rdfvalue_test.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 4,238 | 2015-01-01T15:34:50.000Z | 2022-03-31T08:18:05.000Z | grr/core/grr_response_core/lib/rdfvalue_test.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 787 | 2015-01-02T21:34:24.000Z | 2022-03-02T13:26:38.000Z | grr/core/grr_response_core/lib/rdfvalue_test.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | [
"Apache-2.0"
] | 856 | 2015-01-02T02:50:11.000Z | 2022-03-31T11:11:53.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
MAX_UINT64 = 18446744073709551615
if __name__ == "__main__":
app.run(main)
| 39.591489 | 80 | 0.690294 |
028ccbb703922e522d9de79fd431e21d9aeac192 | 909 | py | Python | src/main/python/server/test.py | areichmann-tgm/client_travis | c00163e6d7630ff4efaf28605b134e356e02a9d1 | [
"MIT"
] | null | null | null | src/main/python/server/test.py | areichmann-tgm/client_travis | c00163e6d7630ff4efaf28605b134e356e02a9d1 | [
"MIT"
] | null | null | null | src/main/python/server/test.py | areichmann-tgm/client_travis | c00163e6d7630ff4efaf28605b134e356e02a9d1 | [
"MIT"
] | null | null | null | import pytest
from server import rest
def test_update(client):
"""res = client.put('/schuelerA',data={'schueler_id':'1000','usernameX':'Adrian','emailX':'adrian@new.at','picture':'-'})"""
assert True
| 25.971429 | 133 | 0.667767 |
028dbb898943de5745b9b0587b4aecb405f08834 | 3,143 | py | Python | helper-scripts/instrnocombine.py | felixaestheticus/realcode-validation | c599cc41797fc074bd2b71d205d6b2b904e1d64b | [
"BSD-3-Clause"
] | null | null | null | helper-scripts/instrnocombine.py | felixaestheticus/realcode-validation | c599cc41797fc074bd2b71d205d6b2b904e1d64b | [
"BSD-3-Clause"
] | null | null | null | helper-scripts/instrnocombine.py | felixaestheticus/realcode-validation | c599cc41797fc074bd2b71d205d6b2b904e1d64b | [
"BSD-3-Clause"
] | null | null | null | #combine.py, combines available dictionaries into one, and generates csv file for latex
#f = open('dict_random')
mem_ops = 'MOVS','MOV','LDR','LDRH','LDRB','LDRSH','LDRSB','LDM','STR','STRH','STRB','STM'
ari_ops = 'ADDS','ADD','ADC','ADCS','ADR','SUBS','SUB','SBCS','RSBS','MULS','MUL','RSB','SBC'
com_ops = 'CMP','CMN'
log_ops = 'ANDS','EORS','ORRS','BICS','MVNS','TST','EOR','MVN','ORR'
sys_ops = 'PUSH','POP','SVC','CPSID','CPSIE','MRS','MSR','BKPT','SEV','WFE','WFI','YIELD','NOP','ISB','DMB','DSB'
bra_ops = 'B','BL','BLX','BX','BCC','BCS','BEQ','BIC','BLS','BNE','BPL','BGE','BGT','BHI','BLE','BLT','BMI','BVC','BVS'
man_ops = 'SXTH','SXTB','UXTH','UXTB','REV','REV16','REVSH','LSLS','LSRS','RORS','ASR','ASRS','LSL','LSR','ROR'
import os,sys
path = '.'
#files = []
#for i in os.listdir(path):
# if os.path.isfile(os.path.join(path,i)) and i.startswith('typelist') and not i.endswith('~'):
# files.append(i)
files = sys.argv[1:]
print(files)
dic_all = {}
print(dic_all)
for f in files:
f = open(f)
lines = f.readlines()
dic = {}
line = lines[0]
if(line!= ''):
dic = eval(line)
for key in dic:
if(key not in dic_all):
dic_all[key] = str(dic[key])
else:
dic_all[key] = str(dic_all[key]) + "," + str(dic[key])
for key in dic_all:
dic_all[key] = ''
for f in files:
f = open(f)
lines = f.readlines()
dic = {}
line = lines[0]
if(line!= ''):
dic = eval(line)
for key in dic:
#if(dic_all[key] != ''):
dic_all[key] = str(dic_all[key]) + str(dic[key])
for key in dic_all:
if(key not in dic):
dic_all[key] = str(dic_all[key]) +"0"
dic_all[key] = str(dic_all[key]) +","
print(dic_all)
ou = open('dict_nocomb','w')
ou.write(str(dic_all))
csv1 = open("tablenocomb1.csv","w")
csv2 = open("tablenocomb2.csv","w")
csv1.write("Instr. Name, Occur.(Random),Occur.(Real),Type\n")
csv2.write("Instr. Name, Occur.(Random),Occur.(Real),Type\n")
keylist = [key for key in dic_all]
keylist.sort()
nonempty = 0.0
nonemptyr = 0.0
for key in dic_all:
h= str(key)
if(h in mem_ops):
#print("1\n")
dic_all[key] = dic_all[key]+'M'
elif(h in ari_ops):
#print("2\n")
dic_all[key] = dic_all[key]+'A'
elif(h in com_ops):
#print("3\n")
dic_all[key] = dic_all[key]+'C'
elif(h in log_ops):
#print("4\n")
dic_all[key] = dic_all[key]+'L'
elif(h in sys_ops):
#print("5\n")
dic_all[key] = dic_all[key]+'S'
elif(h in bra_ops):
#print("6\n")
dic_all[key] = dic_all[key]+'B'
elif(h in man_ops):
#print("7\n")
dic_all[key] = dic_all[key]+'R'
else:
#print("no cat, sorry\n")
dic_all[key] = dic_all[key]+'O'
#for key in dic_all:
for i in range(len(keylist)):
key = keylist[i]
if(dic_all[key].split(",")[1]!='0'):
nonempty = nonempty+1
#print(str(i)+",")
if(dic_all[key].split(",")[0]!='0'):
nonemptyr = nonemptyr+1
if(i < len(keylist)/2):
csv1.write(str(key) + ',' + str(dic_all[key])+'\n')
else:
csv2.write(str(key) + ',' + str(dic_all[key])+'\n')
print( "Coverage rate -real:" + str(nonempty/len(keylist)))
print( "Coverage rate - random:" + str(nonemptyr/len(keylist)))
csv1.close()
csv2.close()
#print( "Success rate:" + str((nonempty/len(keylist)))
| 25.144 | 119 | 0.602291 |
028e69371236efe13bb824f07b81ce319b9462f0 | 4,102 | py | Python | ip_client.py | HuiiBuh/checkers-master | 112eb1df1d8b0d691edd82978945ea5527b75fab | [
"MIT"
] | 1 | 2021-09-04T05:34:51.000Z | 2021-09-04T05:34:51.000Z | ip_client.py | HuiiBuh/checkers-master | 112eb1df1d8b0d691edd82978945ea5527b75fab | [
"MIT"
] | null | null | null | ip_client.py | HuiiBuh/checkers-master | 112eb1df1d8b0d691edd82978945ea5527b75fab | [
"MIT"
] | null | null | null | #from src.piclient import PiClient
from src.streamclient import StreamClient
import math
options = {
"boarddetector": {
"prepare": {
"resize": [512, 512]
},
"corners": {
"maxcorners": 500,
"qualitylevel": 0.01,
"mindistance": 20
},
"lines": {
"harris": {
"rho": 1,
"theta": 0.01,
"threshold": 350
},
"filter": {
"rho": 20,
"theta": 0.15
}
},
"similarpoints": {
"range": 9
},
"correctlines": {
"amount": 9
}
},
"figuredetector": {
"circles": {
"rho": 1,
"mindist": 40,
"param1": 150,
"param2": 15,
"minradius": 0,
"maxradius": 30
},
"colors": {
"white": {
"normal": {
"lower": [160, 0, 170],
"upper": [180, 120, 255]
},
"king": {
"lower": [115, 0, 0],
"upper": [160, 255, 255]
},
},
"black": {
"normal": {
"lower": [160, 0, 0],
"upper": [180, 255, 150]
},
"king": {
"lower": [160, 120, 160],
"upper": [180, 255, 255]
}
}
}
}
}
# Setup the PiClient
client = StreamClient(
url='https://boarddetection.nebula.blue',
token='uhe3xXfev3H3ehjkUswY9QWpiqzhYvH8y5YmpSvMRDoy8yFvH5LXnbY5phJ5tu88',
stream_url="http://127.0.0.1:8000/stream.mjpg"
)
| 25.165644 | 105 | 0.455144 |
028e7466100505ca2d031073edf99db35fd3966b | 2,773 | py | Python | registration_eval/different_days/dd_compute_dense_transformation_error.py | mirestrepo/voxels-at-lems | df47d031653d2ad877a97b3c1ea574b924b7d4c2 | [
"BSD-2-Clause"
] | 2 | 2015-09-18T00:17:16.000Z | 2019-02-06T04:41:29.000Z | registration_eval/different_days/dd_compute_dense_transformation_error.py | mirestrepo/voxels-at-lems | df47d031653d2ad877a97b3c1ea574b924b7d4c2 | [
"BSD-2-Clause"
] | null | null | null | registration_eval/different_days/dd_compute_dense_transformation_error.py | mirestrepo/voxels-at-lems | df47d031653d2ad877a97b3c1ea574b924b7d4c2 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
compute_transformation_error.py
Created by Maria Isabel Restrepo on 2012-09-24.
Copyright (c) 2012 . All rights reserved.
This script computes the distances betweeen an estimated similarity transformation and its ground trutrransformation is used to transform a "source" coordinate system into a "target coordinate system"
To compute the error between the translations, the L2 norm diference translation vectors in the
"source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied.
The rotation error is computed as the half angle between the normalized queternions i.e acos(|<q1,q2>|) in [0, pi/2]
This script was intended to use with Vishal's results
"""
import os
import sys
import logging
import argparse
from vpcl_adaptor import *
import numpy as np
from numpy import linalg as LA
import transformations as tf
import math
import matplotlib.pyplot as plt
sys.path.append(os.pardir)
import reg3d
if __name__ == '__main__':
# fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/2011-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 1.39523511977e-06 0.802221070301 2.98789826592
# fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/downtown_2006/original/2006-2011_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/capitol_2011/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 5.31970689721e-08 0.808909241082 4.83449482984
# fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs_matrix_vj_dense.txt"
# gt_fname = "/Users/isa/Dropbox/data/registration_for_vj/BH_VSI/original/f4-2006_Hs.txt"
# geo_fname ="/Users/isa/Dropbox/data/registration_for_vj/BH_2006/original/Hs_geo.txt"
# error = reg3d.transformation_error_general(fname = fname,
# gt_fname = gt_fname,
# geo_fname = geo_fname )
# # Error (S,R,T) 2.57980939389e-07 0.763324882652 4.79257669203 | 48.649123 | 200 | 0.695636 |
028f14718283c8b1eabad98e17db6f0ca1dee6eb | 16,301 | py | Python | migrations/versions/be21086640ad_country_added.py | anjinkristou/assistor | 02d9b826b9d8844d475c11c33db48cf278282183 | [
"MIT"
] | 1 | 2022-01-29T14:00:32.000Z | 2022-01-29T14:00:32.000Z | migrations/versions/be21086640ad_country_added.py | anjinkristou/assistor | 02d9b826b9d8844d475c11c33db48cf278282183 | [
"MIT"
] | null | null | null | migrations/versions/be21086640ad_country_added.py | anjinkristou/assistor | 02d9b826b9d8844d475c11c33db48cf278282183 | [
"MIT"
] | null | null | null | """Country added
Revision ID: be21086640ad
Revises: 153f720f966f
Create Date: 2021-11-09 15:34:04.306218
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'be21086640ad'
down_revision = '153f720f966f'
branch_labels = None
depends_on = None
naming_convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
| 55.070946 | 123 | 0.587081 |
0290a912952dfd6fc7b4ea5458b073ea88cdb834 | 26,718 | py | Python | pyxrd/scripts/generate_default_phases.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 27 | 2018-06-15T15:28:18.000Z | 2022-03-10T12:23:50.000Z | pyxrd/scripts/generate_default_phases.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 22 | 2018-06-14T08:29:16.000Z | 2021-07-05T13:33:44.000Z | pyxrd/scripts/generate_default_phases.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 8 | 2019-04-13T13:03:51.000Z | 2021-06-19T09:29:11.000Z | #!/usr/bin/python
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
import os
from pyxrd.data import settings
from pyxrd.project.models import Project
from pyxrd.phases.models import Component, Phase
def generate_expandables(
filename_format, phase_name, maxR,
phase_kwargs_AD, phase_kwargs_EG, phase_kwargs_350,
code_AD, code_EG, code_350,
comp_kwargs_AD, comp_kwargs_EG, comp_kwargs_350):
"""
Generates a list of phase descriptions for a combination of an
AD, EG and 350 Ca-saturated phase linked together
"""
return [
('%s' + (filename_format % R), [
(dict(R=R, name=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_AD), code_AD, comp_kwargs_AD),
(dict(R=R, name=phase_name + (' R%d Ca-EG' % R), based_on=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_EG), code_EG, comp_kwargs_EG),
(dict(R=R, name=phase_name + (' R%d Ca-350' % R), based_on=phase_name + (' R%d Ca-AD' % R), **phase_kwargs_350), code_350, comp_kwargs_350)
]) for R in range(maxR)
]
def run(args=None, ui_callback=None):
"""
How this script works:
- 'code_length' is the length of the aliases keys (see below)
- 'aliases' is a dictionary contain 4-character long keys describing a
specific layer-type (or with other words: a Component object)
E.g. dS2w stands for Di-octahedral Smectite with 2 layers of water.
The values are file path formats, in which a single '%s' string placeholder
will be filled with the absolute path to the default components folder.
- 'default_phases' is an initially empty list that will be filled with two-
tuples. The first element in this tuple is the filename of the generated
phases, the second element is describing what this phase contains. This
second element is again a tuple, containing three parts:
- A dictionary of key-word arguments passed on to the Phase
constructor. If a 'based_on' keyword is defined, an attempt is
made to translate it to an earlier generated phase. This way, it
is possible to pass the name of an earlier generated phase, and
the script will pass in the actual Phase object instead.
- A component code (string) built by the keys of the 'aliases'
dictionary. This string's length should be a multiple of 'code_length'.
There is no separator, rather, the 'code_length' is used to split the
code into its parts.
- Component keyword arguments dictionaries: this is a dictionary in
which the keys match with the components code parts. The values are
property-value dictionaries used to set Component properties after
importing them. Similarly to the Phases' 'based_on' keyword, the
value for the 'linked_with' key is translated to the actual
Component named as such.
### Setup:
"""
code_length = 4
aliases = {
'C ': '%sChlorite.cmp',
'K ': '%sKaolinite.cmp',
'I ': '%sIllite.cmp',
'Se ': '%sSerpentine.cmp',
'T ': '%sTalc.cmp',
'Ma ': '%sMargarite.cmp',
'Pa ': '%sParagonite.cmp',
'L ': '%sLeucophyllite.cmp',
'dS2w': '%sDi-Smectite/Di-Smectite - Ca 2WAT.cmp',
'dS1w': '%sDi-Smectite/Di-Smectite - Ca 1WAT.cmp',
'dS0w': '%sDi-Smectite/Di-Smectite - Ca Dehydr.cmp',
'dS2g': '%sDi-Smectite/Di-Smectite - Ca 2GLY.cmp',
'dS1g': '%sDi-Smectite/Di-Smectite - Ca 1GLY.cmp',
'dSht': '%sDi-Smectite/Di-Smectite - Ca Heated.cmp',
'tS2w': '%sTri-Smectite/Tri-Smectite - Ca 2WAT.cmp',
'tS1w': '%sTri-Smectite/Tri-Smectite - Ca 1WAT.cmp',
'tS0w': '%sTri-Smectite/Tri-Smectite - Ca Dehydr.cmp',
'tS2g': '%sTri-Smectite/Tri-Smectite - Ca 2GLY.cmp',
'tS1g': '%sTri-Smectite/Tri-Smectite - Ca 1GLY.cmp',
'tSht': '%sTri-Smectite/Tri-Smectite - Ca Heated.cmp',
'dV2w': '%sDi-Vermiculite/Di-Vermiculite - Ca 2WAT.cmp',
'dV1w': '%sDi-Vermiculite/Di-Vermiculite - Ca 1WAT.cmp',
'dV0w': '%sDi-Vermiculite/Di-Vermiculite - Ca Dehydr.cmp',
'dV2g': '%sDi-Vermiculite/Di-Vermiculite - Ca 2GLY.cmp',
'dV1g': '%sDi-Vermiculite/Di-Vermiculite - Ca 1GLY.cmp',
'dVht': '%sDi-Vermiculite/Di-Vermiculite - Ca Heated.cmp',
}
default_phases = []
"""
### Commonly used inherit flag dicts:
"""
inherit_S = dict(
inherit_ucp_a=True,
inherit_ucp_b=True,
inherit_delta_c=True,
inherit_layer_atoms=True,
)
inherit_all = dict(
inherit_d001=True,
inherit_default_c=True,
inherit_interlayer_atoms=True,
inherit_atom_relations=True,
**inherit_S
)
inherit_phase = dict(
inherit_display_color=True,
inherit_sigma_star=True,
inherit_CSDS_distribution=True,
inherit_probabilities=True
)
"""
### Single-layer phases:
"""
default_phases += [
('%sKaolinite.phs', [(dict(R=0, name='Kaolinite'), 'K ', {}), ]),
('%sIllite.phs', [(dict(R=0, name='Illite'), 'I ', {})]),
('%sSerpentine.phs', [(dict(R=0, name='Serpentine'), 'Se ', {})]),
('%sTalc.phs', [(dict(R=0, name='Talc'), 'T ', {})]),
('%sChlorite.phs', [(dict(R=0, name='Chlorite'), 'C ', {})]),
('%sMargarite.phs', [(dict(R=0, name='Margarite'), 'Ma ', {})]),
('%sLeucophyllite.phs', [(dict(R=0, name='Leucophyllite'), 'L ', {})]),
('%sParagonite.phs', [(dict(R=0, name='Paragonite'), 'Pa ', {})]),
]
"""
### Dioctahedral smectites:
"""
S_code_AD = 'dS2w'
S_code_EG = 'dS2g'
S_code_350 = 'dSht'
S_inh_comp_args = {
'dS2g': dict(linked_with='dS2w', **inherit_S),
'dSht': dict(linked_with='dS2w', **inherit_S),
}
SS_code_AD = S_code_AD + 'dS1w'
SS_code_EG = S_code_EG + 'dS1g'
SS_code_350 = S_code_350 + 'dS1g'
SS_inh_comp_args = dict(S_inh_comp_args)
SS_inh_comp_args.update({
'dS1g': dict(linked_with='dS1w', **inherit_S),
})
SSS_code_AD = SS_code_AD + 'dS0w'
SSS_code_EG = SS_code_EG + 'dS0w'
SSS_code_350 = SS_code_350 + 'dS0w'
SSS_inh_comp_args = dict(SS_inh_comp_args)
SSS_inh_comp_args.update({
'dS0w': dict(linked_with='dS0w', **inherit_S),
})
default_phases += [
('%sSmectites/Di-Smectite Ca.phs', [
(dict(R=0, name='S R0 Ca-AD'), S_code_AD, {}),
(dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), S_code_EG, S_inh_comp_args),
(dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), S_code_350, S_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Smectites/SS/Di-SS R%d Ca.phs', 'SS', 4,
{}, inherit_phase, inherit_phase,
SS_code_AD, SS_code_EG, SS_code_350,
{}, SS_inh_comp_args, SS_inh_comp_args,
)
default_phases += generate_expandables(
'Smectites/SSS/Di-SSS R%d Ca.phs', 'SSS', 3,
{}, inherit_phase, inherit_phase,
SSS_code_AD, SSS_code_EG, SSS_code_350,
{}, SSS_inh_comp_args, SSS_inh_comp_args,
)
"""
### Trioctahedral smectites:
"""
tS_code_AD = 'tS2w'
tS_code_EG = 'tS2g'
tS_code_350 = 'tSht'
tS_inh_comp_args = {
'tS2g': dict(linked_with='tS2w', **inherit_S),
'tSht': dict(linked_with='tS2w', **inherit_S),
}
tSS_code_AD = tS_code_AD + 'tS1w'
tSS_code_EG = tS_code_EG + 'tS1g'
tSS_code_350 = tS_code_350 + 'tS1g'
tSS_inh_comp_args = dict(S_inh_comp_args)
tSS_inh_comp_args.update({
'tS1g': dict(linked_with='tS1w', **inherit_S),
})
tSSS_code_AD = tSS_code_AD + 'tS0w'
tSSS_code_EG = tSS_code_EG + 'tS0w'
tSSS_code_350 = tSS_code_350 + 'tS0w'
tSSS_inh_comp_args = dict(SS_inh_comp_args)
tSSS_inh_comp_args.update({
'tS0w': dict(linked_with='tS0w', **inherit_S),
})
default_phases += [
('%sSmectites/Tri-Smectite Ca.phs', [
(dict(R=0, name='S R0 Ca-AD'), tS_code_AD, {}),
(dict(R=0, name='S R0 Ca-EG', based_on='S R0 Ca-AD', **inherit_phase), tS_code_EG, tS_inh_comp_args),
(dict(R=0, name='S R0 Ca-350', based_on='S R0 Ca-AD', **inherit_phase), tS_code_350, tS_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Smectites/SS/Tri-SS R%d Ca.phs', 'SS', 4,
{}, inherit_phase, inherit_phase,
tSS_code_AD, tSS_code_EG, tSS_code_350,
{}, tSS_inh_comp_args, tSS_inh_comp_args,
)
default_phases += generate_expandables(
'Smectites/SSS/Tri-SSS R%d Ca.phs', 'SSS', 3,
{}, inherit_phase, inherit_phase,
tSSS_code_AD, tSSS_code_EG, tSSS_code_350,
{}, tSSS_inh_comp_args, tSSS_inh_comp_args,
)
"""
### Dioctahedral vermiculites:
"""
V_code_AD = 'dV2w'
V_code_EG = 'dV2g'
V_code_350 = 'dVht'
V_inh_comp_args = {
'dV2g': dict(linked_with='dV2w', **inherit_S),
'dVht': dict(linked_with='dV2w', **inherit_S),
}
VV_code_AD = V_code_AD + 'dV1w'
VV_code_EG = V_code_EG + 'dV1g'
VV_code_350 = V_code_350 + 'dV1g'
VV_inh_comp_args = dict(V_inh_comp_args)
VV_inh_comp_args.update({
'dV1g': dict(linked_with='dV1w', **inherit_S),
})
VVV_code_AD = VV_code_AD + 'dV0w'
VVV_code_EG = VV_code_EG + 'dV0w'
VVV_code_350 = VV_code_350 + 'dV0w'
VVV_inh_comp_args = dict(VV_inh_comp_args)
VVV_inh_comp_args.update({
'dV0w': dict(linked_with='dV0w', **inherit_S),
})
default_phases += [
('%sVermiculites/Di-Vermiculite Ca.phs', [
(dict(R=0, name='V R0 Ca-AD'), V_code_AD, {}),
(dict(R=0, name='V R0 Ca-EG', based_on='V R0 Ca-AD', **inherit_phase), V_code_EG, V_inh_comp_args),
(dict(R=0, name='V R0 Ca-350', based_on='V R0 Ca-AD', **inherit_phase), V_code_350, V_inh_comp_args)
]),
]
default_phases += generate_expandables(
'Vermiculites/VV/Di-VV R%d Ca.phs', 'VV', 4,
{}, inherit_phase, inherit_phase,
VV_code_AD, VV_code_EG, VV_code_350,
{}, VV_inh_comp_args, VV_inh_comp_args,
)
default_phases += generate_expandables(
'Vermiculites/VVV/Di-VVV R%d Ca.phs', 'VVV', 3,
{}, inherit_phase, inherit_phase,
VVV_code_AD, VVV_code_EG, VVV_code_350,
{}, VVV_inh_comp_args, VVV_inh_comp_args,
)
"""
### Kaolinite - Smectites:
"""
K_code = 'K '
K_inh_comp_args = {
'K ': dict(linked_with='K ', **inherit_all),
}
KS_code_AD = K_code + S_code_AD
KS_code_EG = K_code + S_code_EG
KS_code_350 = K_code + S_code_350
KS_inh_comp_args = dict(S_inh_comp_args)
KS_inh_comp_args.update(K_inh_comp_args)
KSS_code_AD = K_code + SS_code_AD
KSS_code_EG = K_code + SS_code_EG
KSS_code_350 = K_code + SS_code_350
KSS_inh_comp_args = dict(SS_inh_comp_args)
KSS_inh_comp_args.update(K_inh_comp_args)
KSSS_code_AD = K_code + SSS_code_AD
KSSS_code_EG = K_code + SSS_code_EG
KSSS_code_350 = K_code + SSS_code_350
KSSS_inh_comp_args = dict(SSS_inh_comp_args)
KSSS_inh_comp_args.update(K_inh_comp_args)
default_phases += generate_expandables(
'Kaolinite-Smectites/KS/KS R%d Ca.phs', 'KS', 4,
{}, inherit_phase, inherit_phase,
KS_code_AD, KS_code_EG, KS_code_350,
{}, KS_inh_comp_args, KS_inh_comp_args,
)
default_phases += generate_expandables(
'Kaolinite-Smectites/KSS/KSS R%d Ca.phs', 'KSS', 3,
{}, inherit_phase, inherit_phase,
KSS_code_AD, KSS_code_EG, KSS_code_350,
{}, KSS_inh_comp_args, KSS_inh_comp_args,
)
default_phases += generate_expandables(
'Kaolinite-Smectites/KSSS/KSSS R%d Ca.phs', 'KSSS', 2,
{}, inherit_phase, inherit_phase,
KSSS_code_AD, KSSS_code_EG, KSSS_code_350,
{}, KSSS_inh_comp_args, KSSS_inh_comp_args,
)
"""
### Illite - Smectites:
"""
I_code = 'I '
I_inh_comp_args = {
'I ': dict(linked_with='I ', **inherit_all),
}
IS_code_AD = I_code + S_code_AD
IS_code_EG = I_code + S_code_EG
IS_code_350 = I_code + S_code_350
IS_inh_comp_args = dict(S_inh_comp_args)
IS_inh_comp_args.update(I_inh_comp_args)
ISS_code_AD = I_code + SS_code_AD
ISS_code_EG = I_code + SS_code_EG
ISS_code_350 = I_code + SS_code_350
ISS_inh_comp_args = dict(SS_inh_comp_args)
ISS_inh_comp_args.update(I_inh_comp_args)
ISSS_code_AD = I_code + SSS_code_AD
ISSS_code_EG = I_code + SSS_code_EG
ISSS_code_350 = I_code + SSS_code_350
ISSS_inh_comp_args = dict(SSS_inh_comp_args)
ISSS_inh_comp_args.update(I_inh_comp_args)
default_phases += generate_expandables(
'Illite-Smectites/IS/IS R%d Ca.phs', 'IS', 4,
{}, inherit_phase, inherit_phase,
IS_code_AD, IS_code_EG, IS_code_350,
{}, IS_inh_comp_args, IS_inh_comp_args,
)
default_phases += generate_expandables(
'Illite-Smectites/ISS/ISS R%d Ca.phs', 'ISS', 3,
{}, inherit_phase, inherit_phase,
ISS_code_AD, ISS_code_EG, ISS_code_350,
{}, ISS_inh_comp_args, ISS_inh_comp_args,
)
default_phases += generate_expandables(
'Illite-Smectites/ISSS/ISSS R%d Ca.phs', 'ISSS', 2,
{}, inherit_phase, inherit_phase,
ISSS_code_AD, ISSS_code_EG, ISSS_code_350,
{}, ISSS_inh_comp_args, ISSS_inh_comp_args,
)
"""
### Chlorite - Smectites:
"""
# Chlorite building block: component code and inheritance settings shared by
# every CS* phase below; the component is linked back to the base 'C ' one.
C_code = 'C '
C_inh_comp_args = {
    'C ': dict(linked_with='C ', **inherit_all),
}
# CS / CSS / CSSS: chlorite prefix plus the tS* smectite code strings.
# (Note: this section combines with the tS* variants, unlike the Talc
# section below which uses the plain S* variants.)
CS_code_AD = C_code + tS_code_AD
CS_code_EG = C_code + tS_code_EG
CS_code_350 = C_code + tS_code_350
CS_inh_comp_args = dict(tS_inh_comp_args)
CS_inh_comp_args.update(C_inh_comp_args)
CSS_code_AD = C_code + tSS_code_AD
CSS_code_EG = C_code + tSS_code_EG
CSS_code_350 = C_code + tSS_code_350
CSS_inh_comp_args = dict(tSS_inh_comp_args)
CSS_inh_comp_args.update(C_inh_comp_args)
CSSS_code_AD = C_code + tSSS_code_AD
CSSS_code_EG = C_code + tSSS_code_EG
CSSS_code_350 = C_code + tSSS_code_350
CSSS_inh_comp_args = dict(tSSS_inh_comp_args)
CSSS_inh_comp_args.update(C_inh_comp_args)
# Same generate_expandables pattern as the Illite-Smectite section above.
default_phases += generate_expandables(
    'Chlorite-Smectites/CS/CS R%d Ca.phs', 'CS', 4,
    {}, inherit_phase, inherit_phase,
    CS_code_AD, CS_code_EG, CS_code_350,
    {}, CS_inh_comp_args, CS_inh_comp_args,
)
default_phases += generate_expandables(
    'Chlorite-Smectites/CSS/CSS R%d Ca.phs', 'CSS', 3,
    {}, inherit_phase, inherit_phase,
    CSS_code_AD, CSS_code_EG, CSS_code_350,
    {}, CSS_inh_comp_args, CSS_inh_comp_args,
)
default_phases += generate_expandables(
    'Chlorite-Smectites/CSSS/CSSS R%d Ca.phs', 'CSSS', 2,
    {}, inherit_phase, inherit_phase,
    CSSS_code_AD, CSSS_code_EG, CSSS_code_350,
    {}, CSSS_inh_comp_args, CSSS_inh_comp_args,
)
"""
### Talc - Smectites:
"""
# Talc building block: component code and inheritance settings shared by
# every TS* phase below.
T_code = 'T '
T_inh_comp_args = {
    'T ': dict(linked_with='T ', **inherit_all),
}
# TS / TSS / TSSS: talc prefix plus the plain S* smectite code strings
# (contrast with the Chlorite section above, which uses the tS* variants).
TS_code_AD = T_code + S_code_AD
TS_code_EG = T_code + S_code_EG
TS_code_350 = T_code + S_code_350
TS_inh_comp_args = dict(S_inh_comp_args)
TS_inh_comp_args.update(T_inh_comp_args)
TSS_code_AD = T_code + SS_code_AD
TSS_code_EG = T_code + SS_code_EG
TSS_code_350 = T_code + SS_code_350
TSS_inh_comp_args = dict(SS_inh_comp_args)
TSS_inh_comp_args.update(T_inh_comp_args)
TSSS_code_AD = T_code + SSS_code_AD
TSSS_code_EG = T_code + SSS_code_EG
TSSS_code_350 = T_code + SSS_code_350
TSSS_inh_comp_args = dict(SSS_inh_comp_args)
TSSS_inh_comp_args.update(T_inh_comp_args)
# Same generate_expandables pattern as the sections above.
default_phases += generate_expandables(
    'Talc-Smectites/TS/TS R%d Ca.phs', 'TS', 4,
    {}, inherit_phase, inherit_phase,
    TS_code_AD, TS_code_EG, TS_code_350,
    {}, TS_inh_comp_args, TS_inh_comp_args,
)
default_phases += generate_expandables(
    'Talc-Smectites/TSS/TSS R%d Ca.phs', 'TSS', 3,
    {}, inherit_phase, inherit_phase,
    TSS_code_AD, TSS_code_EG, TSS_code_350,
    {}, TSS_inh_comp_args, TSS_inh_comp_args,
)
default_phases += generate_expandables(
    'Talc-Smectites/TSSS/TSSS R%d Ca.phs', 'TSSS', 2,
    {}, inherit_phase, inherit_phase,
    TSSS_code_AD, TSSS_code_EG, TSSS_code_350,
    {}, TSSS_inh_comp_args, TSSS_inh_comp_args,
)
"""
### Illite - Chlorite - Smectites:
"""
# Illite+Chlorite combined prefix and merged component inheritance settings.
IC_code = I_code + C_code
IC_inh_comp_args = dict(I_inh_comp_args)
IC_inh_comp_args.update(C_inh_comp_args)
# ICS / ICSS / ICSSS codes; note: this three-endmember family only defines
# AD and EG variants (no 350 / heated variant, unlike the sections above).
ICS_code_AD = IC_code + S_code_AD
ICS_code_EG = IC_code + S_code_EG
ICS_inh_comp_args = dict(S_inh_comp_args)
ICS_inh_comp_args.update(IC_inh_comp_args)
ICSS_code_AD = IC_code + SS_code_AD
ICSS_code_EG = IC_code + SS_code_EG
ICSS_inh_comp_args = dict(SS_inh_comp_args)
ICSS_inh_comp_args.update(IC_inh_comp_args)
ICSSS_code_AD = IC_code + SSS_code_AD
ICSSS_code_EG = IC_code + SSS_code_EG
ICSSS_inh_comp_args = dict(SSS_inh_comp_args)
ICSSS_inh_comp_args.update(IC_inh_comp_args)
# Explicit (path-template, [(phase-kwargs, code, comp-props), ...]) entries;
# the '%s' in each path is later filled with the DEFAULT_PHASES directory.
# Coverage is limited: ICS has R0-R2, ICSS R0-R1, ICSSS only R0.
default_phases += [
    ('%sIllite-Chlorite-Smectites/ICS/ICS R0 Ca.phs', [
        (dict(R=0, name='ICS R0 Ca-AD'), ICS_code_AD, {}),
        (dict(R=0, name='ICS R0 Ca-EG', based_on='ICS R0 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
    ]),
    ('%sIllite-Chlorite-Smectites/ICS/ICS R1 Ca.phs', [
        (dict(R=1, name='ICS R1 Ca-AD'), ICS_code_AD, {}),
        (dict(R=1, name='ICS R1 Ca-EG', based_on='ICS R1 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
    ]),
    ('%sIllite-Chlorite-Smectites/ICS/ICS R2 Ca.phs', [
        (dict(R=2, name='ICS R2 Ca-AD'), ICS_code_AD, {}),
        (dict(R=2, name='ICS R2 Ca-EG', based_on='ICS R2 Ca-AD', **inherit_phase), ICS_code_EG, ICS_inh_comp_args)
    ]),
    ('%sIllite-Chlorite-Smectites/ICSS/ICSS R0 Ca.phs', [
        (dict(R=0, name='ICSS R0 Ca-AD'), ICSS_code_AD, {}),
        (dict(R=0, name='ICSS R0 Ca-EG', based_on='ICSS R0 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
    ]),
    ('%sIllite-Chlorite-Smectites/ICSS/ICSS R1 Ca.phs', [
        (dict(R=1, name='ICSS R1 Ca-AD'), ICSS_code_AD, {}),
        (dict(R=1, name='ICSS R1 Ca-EG', based_on='ICSS R1 Ca-AD', **inherit_phase), ICSS_code_EG, ICSS_inh_comp_args)
    ]),
    ('%sIllite-Chlorite-Smectites/ICSSS/ICSSS R0 Ca.phs', [
        (dict(R=0, name='ICSSS R0 Ca-AD'), ICSSS_code_AD, {}),
        (dict(R=0, name='ICSSS R0 Ca-EG', based_on='ICSSS R0 Ca-AD', **inherit_phase), ICSSS_code_EG, ICSSS_inh_comp_args)
    ]),
]
"""
### Kaolinite - Chlorite - Smectites:
"""
# Kaolinite+Chlorite combined prefix and merged component inheritance
# settings (mirrors the Illite-Chlorite section above, K instead of I).
KC_code = K_code + C_code
KC_inh_comp_args = dict(K_inh_comp_args)
KC_inh_comp_args.update(C_inh_comp_args)
# KCS / KCSS / KCSSS codes; AD and EG variants only (no 350 variant here).
KCS_code_AD = KC_code + S_code_AD
KCS_code_EG = KC_code + S_code_EG
KCS_inh_comp_args = dict(S_inh_comp_args)
KCS_inh_comp_args.update(KC_inh_comp_args)
KCSS_code_AD = KC_code + SS_code_AD
KCSS_code_EG = KC_code + SS_code_EG
KCSS_inh_comp_args = dict(SS_inh_comp_args)
KCSS_inh_comp_args.update(KC_inh_comp_args)
KCSSS_code_AD = KC_code + SSS_code_AD
KCSSS_code_EG = KC_code + SSS_code_EG
KCSSS_inh_comp_args = dict(SSS_inh_comp_args)
KCSSS_inh_comp_args.update(KC_inh_comp_args)
# Same explicit layout as the Illite-Chlorite-Smectite section:
# KCS has R0-R2, KCSS R0-R1, KCSSS only R0.
default_phases += [
    ('%sKaolinite-Chlorite-Smectites/KCS/KCS R0 Ca.phs', [
        (dict(R=0, name='KCS R0 Ca-AD'), KCS_code_AD, {}),
        (dict(R=0, name='KCS R0 Ca-EG', based_on='KCS R0 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
    ]),
    ('%sKaolinite-Chlorite-Smectites/KCS/KCS R1 Ca.phs', [
        (dict(R=1, name='KCS R1 Ca-AD'), KCS_code_AD, {}),
        (dict(R=1, name='KCS R1 Ca-EG', based_on='KCS R1 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
    ]),
    ('%sKaolinite-Chlorite-Smectites/KCS/KCS R2 Ca.phs', [
        (dict(R=2, name='KCS R2 Ca-AD'), KCS_code_AD, {}),
        (dict(R=2, name='KCS R2 Ca-EG', based_on='KCS R2 Ca-AD', **inherit_phase), KCS_code_EG, KCS_inh_comp_args)
    ]),
    ('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R0 Ca.phs', [
        (dict(R=0, name='KCSS R0 Ca-AD'), KCSS_code_AD, {}),
        (dict(R=0, name='KCSS R0 Ca-EG', based_on='KCSS R0 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
    ]),
    ('%sKaolinite-Chlorite-Smectites/KCSS/KCSS R1 Ca.phs', [
        (dict(R=1, name='KCSS R1 Ca-AD'), KCSS_code_AD, {}),
        (dict(R=1, name='KCSS R1 Ca-EG', based_on='KCSS R1 Ca-AD', **inherit_phase), KCSS_code_EG, KCSS_inh_comp_args)
    ]),
    ('%sKaolinite-Chlorite-Smectites/KCSSS/KCSSS R0 Ca.phs', [
        (dict(R=0, name='KCSSS R0 Ca-AD'), KCSSS_code_AD, {}),
        (dict(R=0, name='KCSSS R0 Ca-EG', based_on='KCSSS R0 Ca-AD', **inherit_phase), KCSSS_code_EG, KCSSS_inh_comp_args)
    ]),
]
"""
### Actual object generation routine:
"""
# Local imports for the producer/consumer pipeline below (Python 3 'queue').
import queue
import threading
def ioworker(in_queue, stop):
    """Consume ``(path, phases)`` tuples from *in_queue* and save them to disk.

    Polls the queue with a short timeout so it never blocks indefinitely.
    When the queue is empty the worker keeps polling until the *stop*
    event is set, at which point it returns.
    """
    while True:
        try:
            target_path, phase_list = in_queue.get(timeout=0.5)
        except queue.Empty:
            # Nothing queued right now: exit only once the producer is done.
            if stop.is_set():
                return
            continue
        create_dir_recursive(target_path)
        Phase.save_phases(phase_list, target_path)
        in_queue.task_done()
# Single background IO thread that persists generated phases to disk;
# io_stop tells it when no more work will arrive.
save_queue = queue.Queue()
io_stop = threading.Event()
iothread = threading.Thread(target=ioworker, args=(save_queue, io_stop))
iothread.start()
def phaseworker(in_queue, save_queue, stop):
    """Parse Phase descriptions into actual objects and pass them on.

    Each item taken from *in_queue* is ``(phases_path, phase_descr)`` where
    *phase_descr* is a list of ``(phase_kwargs, code, comp_props)`` triples.
    The finished ``(resolved_path, [Phase, ...])`` tuple is put on
    *save_queue* for the IO worker.

    *stop* should be a ``threading.Event`` that is set once all elements
    have been queued.  This way the worker only stops once the queue is
    really empty, and not when it is processing faster than the queue can
    be filled.
    """
    while True:
        # Keep the try body minimal: only the get() can raise queue.Empty.
        try:
            phases_path, phase_descr = in_queue.get(timeout=0.5)
        except queue.Empty:
            if stop.is_set():
                return
            continue
        project = Project()
        phase_lookup = {}
        component_lookup = {}
        for phase_kwargs, code, comp_props in phase_descr:
            # The number of components G is encoded in the code string
            # length.  Floor division (was '/') keeps G an int under
            # Python 3 true division; the length is always a multiple of
            # code_length, so the value is unchanged.
            G = len(code) // code_length
            based_on = None
            if "based_on" in phase_kwargs:
                based_on = phase_lookup.get(phase_kwargs.pop("based_on"), None)
            phase = Phase(G=G, parent=project, **phase_kwargs)
            phase.based_on = based_on
            phase_lookup[phase.name] = phase
            # Derive (lower, upper) slice limits splitting the code string
            # into fixed-width component aliases.
            limits = zip(
                range(0, len(code), code_length),
                range(code_length, len(code) + 1, code_length)
            )
            # create components:
            phase.components[:] = []
            for ll, ul in limits:
                part = code[ll: ul]
                for component in Component.load_components(aliases[part] % (settings.DATA_REG.get_directory_path("DEFAULT_COMPONENTS") + "/"), parent=phase):
                    component.resolve_json_references()
                    phase.components.append(component)
                    # Apply per-component overrides; 'linked_with' values
                    # are alias names resolved to previously seen components.
                    # NOTE(review): assumed to run once per loaded component
                    # (indentation was ambiguous in the source) -- confirm
                    # against upstream if load_components can yield several.
                    props = comp_props.get(part, {})
                    for prop, value in props.items():
                        if prop == 'linked_with':
                            value = component_lookup[value]
                        setattr(component, prop, value)
                    component_lookup[part] = component
        # Resolve the '%s' placeholder to the default phases directory and
        # hand the finished phases over to the IO worker:
        phases_path = phases_path % (settings.DATA_REG.get_directory_path("DEFAULT_PHASES") + "/")
        save_queue.put((phases_path, list(phase_lookup.values())))
        # Flag this item as finished
        in_queue.task_done()
# Single worker thread that turns queued descriptions into Phase objects
# and feeds the save_queue consumed by the IO thread above.
phase_queue = queue.Queue()
phase_stop = threading.Event()
phasethread = threading.Thread(target=phaseworker, args=(phase_queue, save_queue, phase_stop))
phasethread.start()
# Queue phases:
for phases_path, phase_descr in default_phases:
    phase_queue.put((phases_path, phase_descr))
# Signal phaseworker it can stop if the phase_queue is emptied:
phase_stop.set()
while phasethread.is_alive():
    # Try to join the thread, but don't block, inform the UI
    # of our progress if a callback is provided:
    phasethread.join(timeout=0.1)
    if callable(ui_callback):
        # Progress = fraction of descriptions already taken off the queue
        # (qsize is approximate, but good enough for a progress bar).
        progress = float(len(default_phases) - phase_queue.qsize()) / float(len(default_phases))
        ui_callback(progress)
if callable(ui_callback):
    ui_callback(1.0)
# Signal the IO worker the phaseworker has stopped, so it can stop
# if the save_queue is empty
io_stop.set()
while iothread.is_alive():
    # Try to join the thread, but don't block
    iothread.join(timeout=0.1)
pass # end of run
def create_dir_recursive(path):
    """Create every missing parent directory of *path*.

    The final path component itself is deliberately NOT created: callers
    pass file paths (e.g. '.../XY.phs'), so only the ancestors up to and
    including the file's directory are made.

    Fix: the original looped ``path = os.path.dirname(path)`` unguarded;
    when dirname reaches a fixed point that does not exist (e.g. a relative
    path whose first component is missing eventually yields '', and
    ``os.path.exists('')`` is False) it spun forever.  We now stop once
    dirname no longer changes.
    """
    to_create = []
    while not os.path.exists(path):
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the top of the path ('' for relative paths, the root
            # for absolute ones) -- nothing further up to inspect.
            break
        to_create.insert(0, path)  # shallowest-missing first
        path = parent
    # Skip the last element: it is the original (deepest) path, i.e. the
    # file name itself.
    for path in to_create[:-1]:
        os.mkdir(path)
| 38.777939 | 165 | 0.606183 |
0291301e96c8737d3d86596ed9a5bcb7c2fdd30e | 1,322 | py | Python | data/kaggle_python/interview_prac/FizzBuzz.py | MohanKrishna-RC/Python-Necessities | c63fbac717a9bf7edd48ec20337c16de55f5b535 | [
"FTL"
] | null | null | null | data/kaggle_python/interview_prac/FizzBuzz.py | MohanKrishna-RC/Python-Necessities | c63fbac717a9bf7edd48ec20337c16de55f5b535 | [
"FTL"
] | 8 | 2019-11-27T12:05:09.000Z | 2019-11-27T12:05:18.000Z | data/kaggle_python/interview_prac/FizzBuzz.py | MohanKrishna-RC/Python-Necessities | c63fbac717a9bf7edd48ec20337c16de55f5b535 | [
"FTL"
] | null | null | null | import sys
"""
Example :
Input:
2 (Test cases)
3 (Size of array)
0 1 1 (input)
3
0 1 2
"""
# To store no of test cases here (2).
# To store input here (0 1 1) and (0 1 2).
t = int(sys.stdin.readline())
# print(t)
l = []
while t:
#To store the size of array here (3).
n = int(sys.stdin.readline())
#Here i have used sys.stdin.readline() to take input 0 1 1 than split to get a= ['0','1','1'].
a = (sys.stdin.readline().split()) #Now converting a= ['0','1','1'] to l = [0,1,1]
print(a)
for i in range(0, n):
b = int(a[i])
l.append(b)
#Do your job with the list l here just print !
print(l)
l = [] # empty list for next input ie (0 1 2).
t = t-1
# our problem
"""
Input :
2 ( Test Cases)
3 15 (input string)
"""
# To store no of test cases here (2).
# t=int(sys.stdin.readline())
# # To store input here 3 15.
# # print(t)
# #Here i have used sys.stdin.readline() to take input 3 15 than split to get a= ['3', '15'].
# a = (sys.stdin.readline().split())
# # print(a)
# for k in range(t):
# for i in range(1,int(a[k])+1):
# if i % 3 == 0 and i % 5 == 0:
# print("FizzBuzz")
# elif i%3==0:
# print("Fizz")
# elif i%5 == 0:
# print("Buzz")
# else:
# print(i)
| 22.793103 | 98 | 0.519667 |
029137d82ad6128135f8644310a7387974e99f16 | 3,660 | py | Python | wikipediabase/persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 1 | 2017-11-26T17:57:59.000Z | 2017-11-26T17:57:59.000Z | wikipediabase/persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 34 | 2015-03-23T10:28:59.000Z | 2021-12-13T20:16:48.000Z | wikipediabase/persistentkv.py | fakedrake/WikipediaBase | ab5aa92786bddcd7942ad3e3f1f4e433575ba3fb | [
"Apache-2.0"
] | 2 | 2015-05-17T00:56:45.000Z | 2015-06-27T22:10:59.000Z | """
Some persistent maps (gdbm) require special encoding of keys
and/or values. This is an abstraction for these kinds of quirks.
"""
from itertools import imap
import collections
import gdbm as dbm
import json
from sqlitedict import SqliteDict
import os
"""
Some info on performance:
>>> import timeit
>>> sqlkv = SqlitePersistentDict('/tmp/bench1.sqlite')
>>> timeit.timeit(lambda : benchmark_write(sqlkv), number=100)
10.847157955169678
>>> timeit.timeit(lambda : benchmark_read(sqlkv), number=100)
18.88098978996277
>>> dbmkv = DbmPersistentDict('/tmp/bench.dbm')
>>> timeit.timeit(lambda : benchmark_write(dbmkv), number=100)
0.18030309677124023
>>> timeit.timeit(lambda : benchmark_read(dbmkv), number=100)
0.14914202690124512
SqliteDict is a pretty thin wrapper around sqlite, I would probably
not have made it much thinner. Just use Dbm.
Keep this around in case anyone considers changing to sqlite.
XXX: see how gdbm does when data is larger than memory. Also check out
bsddb
"""
# PersistentDict = SqlitePersistentDict
PersistentDict = DbmPersistentDict
| 24.72973 | 76 | 0.650546 |
0291c411c4fb519a999596fb62a5c1bf748ff844 | 1,672 | py | Python | SlidingWindows/Leetcode132.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | SlidingWindows/Leetcode132.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | SlidingWindows/Leetcode132.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
sol=Solution()
# s='aammbbc'
# s='bb'
s="fifgbeajcacehiicccfecbfhhgfiiecdcjjffbghdidbhbdbfbfjccgbbdcjheccfbhafehieabbdfeigbiaggchaeghaijfbjhi"
print(sol.minCut(s)) | 30.962963 | 108 | 0.423445 |
0292b6686b64612233e83af31cfc31f88384ed05 | 3,708 | py | Python | widgets/component.py | peskaf/ramAIn | 8eb1418007c925ac618e3bddd7de2c0520f5977a | [
"MIT"
] | null | null | null | widgets/component.py | peskaf/ramAIn | 8eb1418007c925ac618e3bddd7de2c0520f5977a | [
"MIT"
] | null | null | null | widgets/component.py | peskaf/ramAIn | 8eb1418007c925ac618e3bddd7de2c0520f5977a | [
"MIT"
] | null | null | null | from PySide6.QtGui import QColor
from PySide6.QtWidgets import QFrame, QHBoxLayout, QWidget
from PySide6.QtCore import Qt, QSettings, QEvent
from utils import colors
import pyqtgraph as pg
import numpy as np
| 32.243478 | 102 | 0.650755 |
029490250183cbdb90fa4664ca45b602bbeae6f3 | 9,124 | py | Python | Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py | monacotime/4.IoT-project-sem-5 | ef14dfba33d308cb5307bbb07d2950fd9a34cfda | [
"MIT"
] | null | null | null | Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py | monacotime/4.IoT-project-sem-5 | ef14dfba33d308cb5307bbb07d2950fd9a34cfda | [
"MIT"
] | null | null | null | Step 4 - Implement the tflite for raspberry pi/godlike_tflite_cam_script.py | monacotime/4.IoT-project-sem-5 | ef14dfba33d308cb5307bbb07d2950fd9a34cfda | [
"MIT"
] | null | null | null | ### IT WORKS BOIISSS WE DID IT!!!!###
#-------------------------------------------------------------
#Imports
#-------------------------------------------------------------
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
import colorsys
import random
import time
from googleapiclient.http import MediaFileUpload
from Google import Create_Service
cap = cv2.VideoCapture(0)
#-------------------------------------------------------------
# Global variables
#-------------------------------------------------------------
input_size = 416
iou = 0.4 #iou threshold
score = 0.25 #score threshold
class_names = "./classes.names"
model_path = "./yolov3-tiny-416-int8.tflite"
CLIENT_SECRET_FILE = "credentials.json"
API_NAME = "drive"
API_VERSION = "v3"
SCOPES = ["https://www.googleapis.com/auth/drive"]
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)
folder_id = "1hFg3bENi-106qf8XvN1q_5uRDORUsxiD"
file_name = "save.jpg"
mime_type = "image/jpeg"
file_metadata = {"name": file_name, "parents": [folder_id]}
media = MediaFileUpload("./{0}".format(file_name), mimetype= mime_type)
#-------------------------------------------------------------
# Utility function definations
#-------------------------------------------------------------
#-------------------------------------------------------------
# Allocating tflite
#-------------------------------------------------------------
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape'] #REMOVE THIS ONE
#-------------------------------------------------------------
# Start
#-------------------------------------------------------------
# normal_operation_mode()
debug_mode()
#-------------------------------------------------------------
# Thats it! THE END ez pz
#-------------------------------------------------------------
| 42.635514 | 130 | 0.551732 |
0295359e838ee62284e6df9935d33336b1756495 | 2,790 | py | Python | maskrcnn_benchmark/modeling/backbone/res2net_builder.py | koseimori/Res2Net-maskrcnn | e205ff67855b52375f340ca70a08995069424e5c | [
"MIT"
] | 31 | 2020-02-02T15:12:13.000Z | 2022-03-18T08:09:17.000Z | maskrcnn_benchmark/modeling/backbone/res2net_builder.py | koseimori/Res2Net-maskrcnn | e205ff67855b52375f340ca70a08995069424e5c | [
"MIT"
] | 4 | 2020-03-08T08:26:12.000Z | 2021-03-08T11:30:52.000Z | maskrcnn_benchmark/modeling/backbone/res2net_builder.py | koseimori/Res2Net-maskrcnn | e205ff67855b52375f340ca70a08995069424e5c | [
"MIT"
] | 17 | 2020-02-20T12:04:04.000Z | 2021-06-06T07:26:23.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
from torch import nn
from maskrcnn_benchmark.modeling import registry
from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform
from . import fpn as fpn_module
from . import res2net
# def build_backbone(cfg):
# assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \
# "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format(
# cfg.MODEL.BACKBONE.CONV_BODY
# )
# return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg)
| 34.875 | 83 | 0.699283 |
029644afd069012e2e180cddce470b4c75d102b6 | 608 | py | Python | code/tmp_rtrip/test/memory_watchdog.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 24 | 2018-01-23T05:28:40.000Z | 2021-04-13T20:52:59.000Z | code/tmp_rtrip/test/memory_watchdog.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | 17 | 2017-12-21T18:32:31.000Z | 2018-12-18T17:09:50.000Z | code/tmp_rtrip/test/memory_watchdog.py | emilyemorehouse/ast-and-me | 3f58117512e125e1ecbe3c72f2f0d26adb80b7b3 | [
"MIT"
] | null | null | null | """Memory watchdog: periodically read the memory usage of the main test process
and print it out, until terminated."""
import os
import sys
import time
try:
page_size = os.sysconf('SC_PAGESIZE')
except (ValueError, AttributeError):
try:
page_size = os.sysconf('SC_PAGE_SIZE')
except (ValueError, AttributeError):
page_size = 4096
while True:
sys.stdin.seek(0)
statm = sys.stdin.read()
data = int(statm.split()[5])
sys.stdout.write(' ... process data size: {data:.1f}G\n'.format(data=
data * page_size / 1024 ** 3))
sys.stdout.flush()
time.sleep(1)
| 28.952381 | 79 | 0.662829 |
02967401719aa2d8549023548710f426054a51b3 | 371 | py | Python | tests/conftest.py | charles-cooper/crvfunder | 63b4041ff06ff6ea943a7d69ae233719c4411bbd | [
"MIT"
] | 6 | 2022-03-17T21:10:41.000Z | 2022-03-27T04:38:53.000Z | tests/conftest.py | charles-cooper/crvfunder | 63b4041ff06ff6ea943a7d69ae233719c4411bbd | [
"MIT"
] | null | null | null | tests/conftest.py | charles-cooper/crvfunder | 63b4041ff06ff6ea943a7d69ae233719c4411bbd | [
"MIT"
] | 2 | 2022-03-26T03:37:40.000Z | 2022-03-28T22:01:20.000Z | import pytest
pytest_plugins = ["fixtures.accounts", "fixtures.deployments"]
| 24.733333 | 62 | 0.749326 |
0297324475a0f71073a283c42e8668872ade345c | 38,375 | py | Python | sdk/python/pulumi_databricks/permissions.py | pulumi/pulumi-databricks | 43580d4adbd04b72558f368ff0eef3d03432ebc1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_databricks/permissions.py | pulumi/pulumi-databricks | 43580d4adbd04b72558f368ff0eef3d03432ebc1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_databricks/permissions.py | pulumi/pulumi-databricks | 43580d4adbd04b72558f368ff0eef3d03432ebc1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PermissionsArgs', 'Permissions']
class Permissions(pulumi.CustomResource):
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PermissionsArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_controls: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PermissionsAccessControlArgs']]]]] = None,
authorization: Optional[pulumi.Input[str]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
cluster_policy_id: Optional[pulumi.Input[str]] = None,
directory_id: Optional[pulumi.Input[str]] = None,
directory_path: Optional[pulumi.Input[str]] = None,
experiment_id: Optional[pulumi.Input[str]] = None,
instance_pool_id: Optional[pulumi.Input[str]] = None,
job_id: Optional[pulumi.Input[str]] = None,
notebook_id: Optional[pulumi.Input[str]] = None,
notebook_path: Optional[pulumi.Input[str]] = None,
object_type: Optional[pulumi.Input[str]] = None,
registered_model_id: Optional[pulumi.Input[str]] = None,
repo_id: Optional[pulumi.Input[str]] = None,
repo_path: Optional[pulumi.Input[str]] = None,
sql_alert_id: Optional[pulumi.Input[str]] = None,
sql_dashboard_id: Optional[pulumi.Input[str]] = None,
sql_endpoint_id: Optional[pulumi.Input[str]] = None,
sql_query_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PermissionsArgs.__new__(PermissionsArgs)
if access_controls is None and not opts.urn:
raise TypeError("Missing required property 'access_controls'")
__props__.__dict__["access_controls"] = access_controls
__props__.__dict__["authorization"] = authorization
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["cluster_policy_id"] = cluster_policy_id
__props__.__dict__["directory_id"] = directory_id
__props__.__dict__["directory_path"] = directory_path
__props__.__dict__["experiment_id"] = experiment_id
__props__.__dict__["instance_pool_id"] = instance_pool_id
__props__.__dict__["job_id"] = job_id
__props__.__dict__["notebook_id"] = notebook_id
__props__.__dict__["notebook_path"] = notebook_path
__props__.__dict__["object_type"] = object_type
__props__.__dict__["registered_model_id"] = registered_model_id
__props__.__dict__["repo_id"] = repo_id
__props__.__dict__["repo_path"] = repo_path
__props__.__dict__["sql_alert_id"] = sql_alert_id
__props__.__dict__["sql_dashboard_id"] = sql_dashboard_id
__props__.__dict__["sql_endpoint_id"] = sql_endpoint_id
__props__.__dict__["sql_query_id"] = sql_query_id
super(Permissions, __self__).__init__(
'databricks:index/permissions:Permissions',
resource_name,
__props__,
opts)
| 41.802832 | 279 | 0.645368 |
02974a7f2e55a4545889ad1727cb810be5d621b5 | 1,254 | py | Python | file/txt2bin.py | QPointNotebook/PythonSample | 53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0 | [
"MIT"
] | null | null | null | file/txt2bin.py | QPointNotebook/PythonSample | 53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0 | [
"MIT"
] | null | null | null | file/txt2bin.py | QPointNotebook/PythonSample | 53c2a54da2bf9a61449ed1c7d2864c5c0eedc5e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from file.file import file
| 31.35 | 77 | 0.369219 |
02982b64a64f41b7dc43e4d28a9770dcfad2d139 | 2,105 | py | Python | PGPs_tensorflow/Examples/Airline.py | maziarraissi/ParametricGP | d5974c9e41a2cd761c0cfaff138c5b1722c006db | [
"MIT"
] | 43 | 2017-04-12T10:43:21.000Z | 2022-02-28T05:16:02.000Z | PGPs_tensorflow/Examples/Airline.py | arita37/ParametricGP | 9c04f3166c22e787a92290fe4353ba4f918ed598 | [
"MIT"
] | 1 | 2018-05-25T00:26:10.000Z | 2018-05-29T05:26:15.000Z | PGPs_tensorflow/Examples/Airline.py | arita37/ParametricGP | 9c04f3166c22e787a92290fe4353ba4f918ed598 | [
"MIT"
] | 22 | 2017-04-12T02:22:08.000Z | 2021-04-10T23:19:52.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import sys
sys.path.insert(0, '../PGP/')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from parametric_GP import PGP
if __name__ == "__main__":
# Import the data
data = pd.read_pickle('airline.pickle')
# Convert time of day from hhmm to minutes since midnight
data.ArrTime = 60*np.floor(data.ArrTime/100)+np.mod(data.ArrTime, 100)
data.DepTime = 60*np.floor(data.DepTime/100)+np.mod(data.DepTime, 100)
# Pick out the data
Y = data['ArrDelay'].values
names = ['Month', 'DayofMonth', 'DayOfWeek', 'plane_age', 'AirTime', 'Distance', 'ArrTime', 'DepTime']
X = data[names].values
N = len(data)
np.random.seed(N)
# Shuffle the data and only consider a subset of it
perm = np.random.permutation(N)
X = X[perm]
Y = Y[perm]
XT = X[int(2*N/3):N]
YT = Y[int(2*N/3):N]
X = X[:int(2*N/3)]
Y = Y[:int(2*N/3)]
# Normalize Y scale and offset
Ymean = Y.mean()
Ystd = Y.std()
Y = (Y - Ymean) / Ystd
Y = Y.reshape(-1, 1)
YT = (YT - Ymean) / Ystd
YT = YT.reshape(-1, 1)
# Normalize X on [0, 1]
Xmin, Xmax = X.min(0), X.max(0)
X = (X - Xmin) / (Xmax - Xmin)
XT = (XT - Xmin) / (Xmax - Xmin)
# Model creation
M = 500
pgp = PGP(X, Y, M, max_iter = 10000, N_batch = 1000,
monitor_likelihood = 10, lrate = 1e-3)
# Training
pgp.train()
# Prediction
mean_star, var_star = pgp.predict(XT)
# MSE
print('MSE: %f' % ((mean_star-YT)**2).mean())
print('MSE_mean: %f' % ((Y.mean()-YT)**2).mean())
# ARD
ARD = 1/np.sqrt(np.exp(pgp.hyp[1:-1]))
ARD_x = np.arange(len(ARD))
fig, ax = plt.subplots(figsize=(10,5))
plt.rcParams.update({'font.size': 16})
ax.barh(ARD_x,ARD)
ax.set_yticks(ARD_x)
ax.set_yticklabels(names)
ax.set_xlabel('ARD weights')
plt.savefig('../Fig/Flights.eps', format='eps', dpi=1000)
#####
# MSE: 0.832810
# MSE_mean: 0.999799 | 25.059524 | 106 | 0.566271 |
029864a3d0017b3744cbc0ce2c0fdf1a9dd81484 | 2,081 | py | Python | django_project/proj/settings/local.py | robabram/Quickstart-Secure-Django-Template | 22f304e864f8f6ce972f44bce6fe9b885341201a | [
"MIT"
] | 9 | 2018-10-03T00:30:57.000Z | 2021-12-29T07:48:08.000Z | django_project/proj/settings/local.py | robabram/Quickstart-Secure-Django-Template | 22f304e864f8f6ce972f44bce6fe9b885341201a | [
"MIT"
] | 9 | 2020-02-10T17:08:01.000Z | 2021-11-19T17:21:18.000Z | django_project/proj/settings/local.py | robabram/Quickstart-Secure-Django-Template | 22f304e864f8f6ce972f44bce6fe9b885341201a | [
"MIT"
] | null | null | null | #
# Author: Robert Abram <rabram991@gmail.com>
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
import os
from proj.settings.base import *
#
# Logging
#
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'website.log',
'formatter': 'verbose',
'maxBytes': 1024 * 1000 * 100 # 100MB
},
'lockout': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'lockout.log',
'formatter': 'verbose',
'maxBytes': 1024 * 1000 * 100 # 100MB
},
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'], # Could change to: ['null'],
'level': 'ERROR', # Change this to DEBUG to see SQL Queries in log output
},
'django': {
'handlers': ['console'],
'propagate': True,
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'axes.watch_login': {
'handlers': ['lockout'],
'propagate': False,
'level': 'INFO',
},
'celery': {
'handlers': ['console'],
'propagate': False,
'level': os.getenv('DJANGO_LOG_LEVEL', 'WARNING'),
},
}
}
| 27.381579 | 86 | 0.475252 |
0298d15f0c2dd54fb30dc11d08603cd497ca28b4 | 3,426 | py | Python | pyfos/utils/system_security/seccertmgmt_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 44 | 2017-11-17T12:03:11.000Z | 2022-02-03T20:57:56.000Z | pyfos/utils/system_security/seccertmgmt_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 13 | 2018-10-09T15:34:15.000Z | 2022-02-24T20:03:17.000Z | pyfos/utils/system_security/seccertmgmt_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 23 | 2017-12-14T18:08:33.000Z | 2022-02-03T15:33:40.000Z | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`seccertmgmt_show` - PyFOS util for displaying certificates in the switch.
***********************************************************************************
The :mod:`seccertmgmt_show` util provides the option to display a certificate.
This module can be used to display a certificate. If the certificate entity \
and type are not provided, information for all certificates is displayed.
* Input:
| Infrastructure Options:
| -i,--ipaddr=IPADDR The IP address of the FOS switch.
| -L,--login=LOGIN The login name.
| -P,--password=PASSWORD The password.
| -s,--secured=MODE The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose Verbose mode [OPTIONAL].
* Util Script Options:
| --certificate-entity=ENTITY-NAME Sets the certificate entity name.
| --certificate-type=CERT-TYPE Sets the certificate type.
| --is-hexdump-show Displays the raw hex data.
* Output:
* The certificate information.
.. function:: seccertmgmt_show.show_system_security_seccertmgmt(session)
* Displays the certificate and its information in the switch.
Example Usage of the Method:
ret = seccertmgmt_show.show_system_security_seccertmgmt(session, \
cert_entity, cert_type)
print (ret)
Details::
result = seccertmgmt_show.show_system_security_seccertmgmt(
session, \'cert\', \'https\')
* Input:
:param session: The session returned by the login.
:param cert_entity: The associated certificate entity.
:param cert_type: The associated certificate type.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the certificate-related information.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_security import security_certificate
from pyfos.utils import brcd_util
if __name__ == "__main__":
main(sys.argv[1:])
| 30.864865 | 83 | 0.694104 |
029a249ed3ef3d36cbd9d5a8a3e1445f10e3e29d | 2,345 | py | Python | tests/base.py | Suhail6inkling/bot | 95ae05c773e753699e6899255783c1d7df936024 | [
"MIT"
] | 1 | 2022-01-01T17:33:48.000Z | 2022-01-01T17:33:48.000Z | tests/base.py | Suhail6inkling/bot | 95ae05c773e753699e6899255783c1d7df936024 | [
"MIT"
] | null | null | null | tests/base.py | Suhail6inkling/bot | 95ae05c773e753699e6899255783c1d7df936024 | [
"MIT"
] | 1 | 2020-11-01T19:57:00.000Z | 2020-11-01T19:57:00.000Z | import logging
import unittest
from contextlib import contextmanager
| 34.485294 | 104 | 0.639659 |
029b069d68471e7fbe34c10e131ca57fcd80d3f5 | 892 | py | Python | blog/app/admin/views.py | web-user/flask-blog | 130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a | [
"MIT"
] | null | null | null | blog/app/admin/views.py | web-user/flask-blog | 130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a | [
"MIT"
] | null | null | null | blog/app/admin/views.py | web-user/flask-blog | 130f5dbcdb18b8f325c7aa8dd3d71cbc7190485a | [
"MIT"
] | null | null | null | from flask import Flask, render_template, session, redirect, url_for, request, flash, abort, current_app, make_response
from flask_login import login_user, logout_user, login_required, current_user
from . import admin
from .. import db
from ..models import User, Post
from ..form import PostForm
from functools import wraps
from flask import g, request, redirect, url_for
| 34.307692 | 119 | 0.690583 |
029c0dd5be38ab97f221e4b0ca039e07bafa37e8 | 2,561 | py | Python | examples/get-set-params/robot.py | Tyler-Duckworth/robotpy-rev | d03829a4f8e47526e753f0edeafc1df888880775 | [
"Apache-2.0"
] | 1 | 2019-01-28T18:16:55.000Z | 2019-01-28T18:16:55.000Z | examples/get-set-params/robot.py | Tyler-Duckworth/robotpy-rev | d03829a4f8e47526e753f0edeafc1df888880775 | [
"Apache-2.0"
] | 18 | 2019-01-09T08:35:48.000Z | 2022-01-15T02:17:23.000Z | examples/get-set-params/robot.py | Tyler-Duckworth/robotpy-rev | d03829a4f8e47526e753f0edeafc1df888880775 | [
"Apache-2.0"
] | 9 | 2019-01-11T03:14:19.000Z | 2022-01-13T00:51:48.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2017-2018 FIRST. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# ----------------------------------------------------------------------------
import rev
import wpilib
if __name__ == "__main__":
wpilib.run(Robot)
| 40.015625 | 88 | 0.632956 |
029cefa854d393945ca4f9769661f617a4a0cbfe | 39,324 | py | Python | pyclustering/nnet/som.py | JosephChataignon/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 1,013 | 2015-01-26T19:50:14.000Z | 2022-03-31T07:38:48.000Z | pyclustering/nnet/som.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 542 | 2015-01-20T16:44:32.000Z | 2022-01-29T14:57:20.000Z | pyclustering/nnet/som.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 262 | 2015-03-19T07:28:12.000Z | 2022-03-30T07:28:24.000Z | """!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
def __del__(self):
"""!
@brief Destructor of the self-organized feature map.
"""
if self.__ccore_som_pointer is not None:
wrapper.som_destroy(self.__ccore_som_pointer)
    def __len__(self):
        """!
        @brief Returns size of the network that defines by amount of neuron in it.
        @return (uint) Size of self-organized map (amount of neurons).
        """
        # Neuron count is fixed at construction time — presumably rows * cols
        # (the map is laid out row-major elsewhere in this class); confirm in __init__.
        return self._size
    def __getstate__(self):
        """
        @brief Returns state of SOM network that can be used to store network.
        """
        if self.__ccore_som_pointer is not None:
            # Pull the latest weights/awards out of the C-core instance first so
            # the Python-side dump reflects the current trained state.
            self.__download_dump_from_ccore()
            # The boolean flag records whether the dump originated from a
            # C-core backed network (consumed by __setstate__).
            return self.__get_dump_from_python(True)
        return self.__get_dump_from_python(False)
    def __setstate__(self, som_state):
        """
        @brief Set state of SOM network that can be used to load network.
        """
        # Restore into the C-core implementation only when the dump was
        # produced by it AND the native library is workable on this machine;
        # otherwise fall back to the pure-Python representation.
        if som_state['ccore'] is True and ccore_library.workable():
            self.__upload_dump_to_ccore(som_state['state'])
        else:
            self.__upload_dump_to_python(som_state['state'])
def __initialize_initial_radius(self, rows, cols):
"""!
@brief Initialize initial radius using map sizes.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) Value of initial radius.
"""
if (cols + rows) / 4.0 > 1.0:
return 2.0
elif (cols > 1) and (rows > 1):
return 1.5
else:
return 1.0
def __initialize_locations(self, rows, cols):
"""!
@brief Initialize locations (coordinates in SOM grid) of each neurons in the map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) List of coordinates of each neuron in map.
"""
location = list()
for i in range(rows):
for j in range(cols):
location.append([float(i), float(j)])
return location
def __initialize_distances(self, size, location):
"""!
@brief Initialize distance matrix in SOM grid.
@param[in] size (uint): Amount of neurons in the network.
@param[in] location (list): List of coordinates of each neuron in the network.
@return (list) Distance matrix between neurons in the network.
"""
sqrt_distances = [[[] for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(i, size, 1):
dist = euclidean_distance_square(location[i], location[j])
sqrt_distances[i][j] = dist
sqrt_distances[j][i] = dist
return sqrt_distances
    def _create_initial_weights(self, init_type):
        """!
        @brief Creates initial weights for neurons in line with the specified initialization.
        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
        """
        dim_info = dimension_info(self._data)
        # Grid step along the first dimension: span of the data divided by the
        # number of row intervals; falls back to the data center for a 1-row map.
        step_x = dim_info.get_center()[0]
        if self._rows > 1:
            step_x = dim_info.get_width()[0] / (self._rows - 1)
        # Same for the second dimension (only meaningful for >= 2-D data).
        step_y = 0.0
        if dim_info.get_dimensions() > 1:
            step_y = dim_info.get_center()[1]
            if self._cols > 1:
                step_y = dim_info.get_width()[1] / (self._cols - 1)
        # generate weights (topological coordinates)
        # Seeding here makes every random initialization below reproducible
        # for a fixed random_state.
        random.seed(self._params.random_state)
        # Uniform grid.
        if init_type == type_init.uniform_grid:
            # Predefined weights in line with input data.
            self._weights = [[[] for i in range(dim_info.get_dimensions())] for j in range(self._size)]
            for i in range(self._size):
                location = self._location[i]
                for dim in range(dim_info.get_dimensions()):
                    # First two dimensions are spread evenly over the data
                    # extent; any extra dimensions sit at the data center.
                    if dim == 0:
                        if self._rows > 1:
                            self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]
                        else:
                            self._weights[i][dim] = dim_info.get_center()[dim]
                    elif dim == 1:
                        if self._cols > 1:
                            self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]
                        else:
                            self._weights[i][dim] = dim_info.get_center()[dim]
                    else:
                        self._weights[i][dim] = dim_info.get_center()[dim]
        elif init_type == type_init.random_surface:
            # Random weights at the full surface.
            self._weights = [
                [random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in
                 range(dim_info.get_dimensions())] for _ in range(self._size)]
        elif init_type == type_init.random_centroid:
            # Random weights at the center of input data.
            self._weights = [[(random.random() + dim_info.get_center()[i]) for i in range(dim_info.get_dimensions())]
                            for _ in range(self._size)]
        else:
            # Random weights of input data.
            self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for _ in range(self._size)]
    def _create_connections(self, conn_type):
        """!
        @brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
        @param[in] conn_type (type_conn): Type of connection between oscillators in the network.
        """
        self._neighbors = [[] for index in range(self._size)]
        for index in range(0, self._size, 1):
            # Candidate neighbor indices in the row-major grid layout.
            upper_index = index - self._cols
            upper_left_index = index - self._cols - 1
            upper_right_index = index - self._cols + 1
            lower_index = index + self._cols
            lower_left_index = index + self._cols - 1
            lower_right_index = index + self._cols + 1
            left_index = index - 1
            right_index = index + 1
            node_row_index = math.floor(index / self._cols)
            upper_row_index = node_row_index - 1
            lower_row_index = node_row_index + 1
            # Vertical neighbors (shared by 4- and 8-connected grids).
            if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four):
                if upper_index >= 0:
                    self._neighbors[index].append(upper_index)
                if lower_index < self._size:
                    self._neighbors[index].append(lower_index)
            # Horizontal neighbors; the floor(.../cols) check prevents
            # wrapping around the row edge to the previous/next row.
            if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (
                    conn_type == type_conn.honeycomb):
                if (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index):
                    self._neighbors[index].append(left_index)
                if (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index):
                    self._neighbors[index].append(right_index)
            # Diagonal neighbors, 8-connected grid only; row checks again
            # guard against wrap-around at the grid edges.
            if conn_type == type_conn.grid_eight:
                if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
                    self._neighbors[index].append(upper_left_index)
                if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
                    self._neighbors[index].append(upper_right_index)
                if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
                    self._neighbors[index].append(lower_left_index)
                if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
                    self._neighbors[index].append(lower_right_index)
            # Honeycomb topology: the diagonal offsets alternate with row
            # parity because odd rows are shifted half a cell.
            if conn_type == type_conn.honeycomb:
                if (node_row_index % 2) == 0:
                    upper_left_index = index - self._cols
                    upper_right_index = index - self._cols + 1
                    lower_left_index = index + self._cols
                    lower_right_index = index + self._cols + 1
                else:
                    upper_left_index = index - self._cols - 1
                    upper_right_index = index - self._cols
                    lower_left_index = index + self._cols - 1
                    lower_right_index = index + self._cols
                if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
                    self._neighbors[index].append(upper_left_index)
                if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
                    self._neighbors[index].append(upper_right_index)
                if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
                    self._neighbors[index].append(lower_left_index)
                if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
                    self._neighbors[index].append(lower_right_index)
def _competition(self, x):
"""!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
    def _adaptation(self, index, x):
        """!
        @brief Change weight of neurons in line with won neuron.
        @param[in] index (uint): Index of neuron-winner.
        @param[in] x (list): Input pattern from the input data set.
        """
        dimension = len(self._weights[0])
        if self._conn_type == type_conn.func_neighbor:
            # Function-defined neighborhood: every neuron inside the current
            # radius is pulled towards the input, scaled by a decaying
            # influence term.  NOTE(review): _sqrt_distances holds SQUARED
            # grid distances and _local_radius is squared in train(), so the
            # comparison and the exponent both operate on squared values.
            for neuron_index in range(self._size):
                distance = self._sqrt_distances[index][neuron_index]
                if distance < self._local_radius:
                    influence = math.exp(-(distance / (2.0 * self._local_radius)))
                    for i in range(dimension):
                        self._weights[neuron_index][i] = self._weights[neuron_index][
                            i] + self._learn_rate * influence * (
                            x[i] - self._weights[neuron_index][i])
        else:
            # Grid topology: the winner moves with full learning rate...
            for i in range(dimension):
                self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i])
            # ...and its direct grid neighbors move with a distance-attenuated
            # influence, if they fall inside the current radius.
            for neighbor_index in self._neighbors[index]:
                distance = self._sqrt_distances[index][neighbor_index]
                if distance < self._local_radius:
                    influence = math.exp(-(distance / (2.0 * self._local_radius)))
                    for i in range(dimension):
                        self._weights[neighbor_index][i] = self._weights[neighbor_index][
                            i] + self._learn_rate * influence * (
                            x[i] - self._weights[neighbor_index][i])
    def train(self, data, epochs, autostop=False):
        """!
        @brief Trains self-organized feature map (SOM).
        @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
        @param[in] epochs (uint): Number of epochs for training.
        @param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurred.
        @return (uint) Number of learning iterations.
        """
        self._data = data
        if self.__ccore_som_pointer is not None:
            # Delegate the whole training loop to the C-core implementation.
            return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
        self._sqrt_distances = self.__initialize_distances(self._size, self._location)
        # Reset per-neuron statistics from any previous training run.
        for i in range(self._size):
            self._award[i] = 0
            self._capture_objects[i].clear()
        # weights
        self._create_initial_weights(self._params.init_type)
        previous_weights = None
        for epoch in range(1, epochs + 1):
            # Depression term of coupling: both the neighborhood radius and
            # the learning rate decay exponentially with the epoch number.
            self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
            self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
            # Clear statistics
            if autostop:
                for i in range(self._size):
                    self._award[i] = 0
                    self._capture_objects[i].clear()
            for i in range(len(self._data)):
                # Step 1: Competition:
                index = self._competition(self._data[i])
                # Step 2: Adaptation:
                self._adaptation(index, self._data[i])
                # Update statistics
                if (autostop is True) or (epoch == epochs):
                    self._award[index] += 1
                    self._capture_objects[index].append(i)
            # Check requirement of stopping: terminate early once the largest
            # weight change over the epoch drops below the threshold.
            if autostop:
                if previous_weights is not None:
                    maximal_adaptation = self._get_maximal_adaptation(previous_weights)
                    if maximal_adaptation < self._params.adaptation_threshold:
                        return epoch
                # Deep-enough copy: each neuron's weight list is sliced.
                previous_weights = [item[:] for item in self._weights]
        return epochs
def simulate(self, input_pattern):
"""!
@brief Processes input pattern (no learining) and returns index of neuron-winner.
Using index of neuron winner catched object can be obtained using property capture_objects.
@param[in] input_pattern (list): Input pattern.
@return (uint) Returns index of neuron-winner.
@see capture_objects
"""
if self.__ccore_som_pointer is not None:
return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)
return self._competition(input_pattern)
def _get_maximal_adaptation(self, previous_weights):
"""!
@brief Calculates maximum changes of weight in line with comparison between previous weights and current weights.
@param[in] previous_weights (list): Weights from the previous step of learning process.
@return (double) Value that represents maximum changes of weight after adaptation process.
"""
dimension = len(self._data[0])
maximal_adaptation = 0.0
for neuron_index in range(self._size):
for dim in range(dimension):
current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim]
if current_adaptation < 0:
current_adaptation = -current_adaptation
if maximal_adaptation < current_adaptation:
maximal_adaptation = current_adaptation
return maximal_adaptation
def get_winner_number(self):
"""!
@brief Calculates number of winner at the last step of learning process.
@return (uint) Number of winner.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number
    def show_distance_matrix(self):
        """!
        @brief Shows gray visualization of U-matrix (distance matrix).
        @see get_distance_matrix()
        """
        distance_matrix = self.get_distance_matrix()
        # Heat-map rendering; 'kaiser' interpolation smooths the cell grid.
        plt.imshow(distance_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
        plt.title("U-Matrix")
        plt.colorbar()
        plt.show()
def get_distance_matrix(self):
"""!
@brief Calculates distance matrix (U-matrix).
@details The U-Matrix visualizes based on the distance in input space between a weight vector and its neighbors on map.
@return (list) Distance matrix (U-matrix).
@see show_distance_matrix()
@see get_density_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
if self._conn_type != type_conn.func_neighbor:
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
distance_matrix = [[0.0] * self._cols for i in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
if self._conn_type == type_conn.func_neighbor:
self._create_connections(type_conn.grid_eight)
for neighbor_index in self._neighbors[neuron_index]:
distance_matrix[i][j] += euclidean_distance_square(self._weights[neuron_index],
self._weights[neighbor_index])
distance_matrix[i][j] /= len(self._neighbors[neuron_index])
return distance_matrix
    def show_density_matrix(self, surface_divider=20.0):
        """!
        @brief Show density matrix (P-matrix) using kernel density estimation.
        @param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
        @see show_distance_matrix()
        """
        density_matrix = self.get_density_matrix(surface_divider)
        # Same heat-map style as the U-matrix view for easy comparison.
        plt.imshow(density_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
        plt.title("P-Matrix")
        plt.colorbar()
        plt.show()
    def get_density_matrix(self, surface_divider=20.0):
        """!
        @brief Calculates density matrix (P-Matrix).
        @param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
        @return (list) Density matrix (P-Matrix): for each map cell, the number
                 of data points falling inside an axis-aligned box around the
                 neuron's weight vector.
        @see get_distance_matrix()
        """
        if self.__ccore_som_pointer is not None:
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
        density_matrix = [[0] * self._cols for i in range(self._rows)]
        dimension = len(self._weights[0])
        # Per-dimension extent of the weight vectors, used to scale the
        # counting radius below.
        dim_max = [float('-Inf')] * dimension
        dim_min = [float('Inf')] * dimension
        for weight in self._weights:
            for index_dim in range(dimension):
                if weight[index_dim] > dim_max[index_dim]:
                    dim_max[index_dim] = weight[index_dim]
                if weight[index_dim] < dim_min[index_dim]:
                    dim_min[index_dim] = weight[index_dim]
        radius = [0.0] * len(self._weights[0])
        for index_dim in range(dimension):
            radius[index_dim] = (dim_max[index_dim] - dim_min[index_dim]) / surface_divider
        ## TODO: do not use data
        # Count, for every neuron, the training points lying within the
        # per-dimension radius of its weight vector (axis-aligned box test).
        for point in self._data:
            for index_neuron in range(len(self)):
                point_covered = True
                for index_dim in range(dimension):
                    if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:
                        point_covered = False
                        break
                # Map the flat neuron index back to its (row, col) cell.
                row = int(math.floor(index_neuron / self._cols))
                col = index_neuron - row * self._cols
                if point_covered is True:
                    density_matrix[row][col] += 1
        return density_matrix
def show_winner_matrix(self):
"""!
@brief Show a winner matrix where each element corresponds to neuron and value represents
amount of won objects from input data-space at the last training iteration.
@see show_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
(fig, ax) = plt.subplots()
winner_matrix = [[0] * self._cols for _ in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
winner_matrix[i][j] = self._award[neuron_index]
ax.text(i, j, str(winner_matrix[i][j]), va='center', ha='center')
ax.imshow(winner_matrix, cmap=plt.get_cmap('cool'), interpolation='none')
ax.grid(True)
plt.title("Winner Matrix")
plt.show()
plt.close(fig)
def show_network(self, awards=False, belongs=False, coupling=True, dataset=True, marker_type='o'):
"""!
@brief Shows neurons in the dimension of data.
@param[in] awards (bool): If True - displays how many objects won each neuron.
@param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when
dataset is displayed too).
@param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
is used).
@param[in] dataset (bool): If True - displays inputs data set.
@param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
dimension = len(self._weights[0])
fig = plt.figure()
# Check for dimensions
if (dimension == 1) or (dimension == 2):
axes = fig.add_subplot(111)
elif dimension == 3:
axes = fig.gca(projection='3d')
else:
raise NotImplementedError('Impossible to show network in data-space that is differ from 1D, 2D or 3D.')
if (self._data is not None) and (dataset is True):
for x in self._data:
if dimension == 1:
axes.plot(x[0], 0.0, 'b|', ms=30)
elif dimension == 2:
axes.plot(x[0], x[1], 'b.')
elif dimension == 3:
axes.scatter(x[0], x[1], x[2], c='b', marker='.')
# Show neurons
for index in range(self._size):
color = 'g'
if self._award[index] == 0:
color = 'y'
if dimension == 1:
axes.plot(self._weights[index][0], 0.0, color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], 0.0, location, color='blue', fontsize=10)
if dimension == 2:
axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], point[1], location, color='blue', fontsize=10)
if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
'g', linewidth=0.5)
elif dimension == 3:
axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c=color,
marker=marker_type)
if (self._conn_type != type_conn.func_neighbor) and (coupling != False):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
[self._weights[index][2], self._weights[neighbor][2]],
'g-', linewidth=0.5)
plt.title("Network Structure")
plt.grid()
plt.show()
plt.close(fig)
| 39.402806 | 203 | 0.576416 |
029d25002fe312ac4b1cd506fc070aee02af1ff6 | 2,265 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | trajkd/Programming-a-Real-Self-Driving-Car | 536377815a8dd907c59979f4a07d25b6d157dbaa | [
"MIT"
] | 1 | 2021-08-17T11:19:34.000Z | 2021-08-17T11:19:34.000Z | ros/src/tl_detector/light_classification/tl_classifier.py | trajkd/Programming-a-Real-Self-Driving-Car | 536377815a8dd907c59979f4a07d25b6d157dbaa | [
"MIT"
] | 7 | 2020-09-26T01:07:12.000Z | 2022-03-12T00:31:00.000Z | ros/src/tl_detector/light_classification/tl_classifier.py | trajkd/Programming-a-Real-Self-Driving-Car | 536377815a8dd907c59979f4a07d25b6d157dbaa | [
"MIT"
] | 1 | 2021-08-06T17:24:26.000Z | 2021-08-06T17:24:26.000Z | import rospy
from styx_msgs.msg import TrafficLight
import numpy as np
from keras.models import Model
from keras import applications
from keras.models import load_model
from keras.preprocessing import image as img_preprocessing
import cv2
# load the trained model
from keras.utils.generic_utils import CustomObjectScope
model_filepath = 'saved_models/model.MobileNet-3-classes.h5'
n_classes = 3 | 36.532258 | 132 | 0.680795 |
029d60e7a021da261de9331901e5b18fe50fb799 | 2,080 | py | Python | sktime/clustering/evaluation/_plot_clustering.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 5,349 | 2019-03-21T14:56:50.000Z | 2022-03-31T11:25:30.000Z | sktime/clustering/evaluation/_plot_clustering.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 1,803 | 2019-03-26T13:33:53.000Z | 2022-03-31T23:58:10.000Z | sktime/clustering/evaluation/_plot_clustering.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 911 | 2019-03-25T01:21:30.000Z | 2022-03-31T04:45:51.000Z | # -*- coding: utf-8 -*-
"""Cluster plotting tools"""
__author__ = ["Christopher Holder", "Tony Bagnall"]
__all__ = ["plot_cluster_algorithm"]
import pandas as pd
from sktime.clustering.base._typing import NumpyOrDF
from sktime.clustering.base.base import BaseClusterer
from sktime.clustering.partitioning._lloyds_partitioning import (
TimeSeriesLloydsPartitioning,
)
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation._dependencies import _check_soft_dependencies
def plot_cluster_algorithm(model: BaseClusterer, predict_series: NumpyOrDF, k: int):
    """
    Plot the output of a clustering algorithm: one subplot per cluster showing
    the member series together with the cluster center.

    Parameters
    ----------
    model: BaseClusterer
        Clustering model to plot
    predict_series: Numpy or Dataframe
        The series to predict the values for
    k: int
        Number of centers
    """
    _check_soft_dependencies("matplotlib")
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    # Nested DataFrames are flattened to a 2D numpy array before prediction.
    if isinstance(predict_series, pd.DataFrame):
        predict_series = from_nested_to_2d_array(predict_series, return_numpy=True)

    plt.figure(figsize=(5, 10))
    plt.rcParams["figure.dpi"] = 100

    cluster_indexes = model.predict(predict_series)
    cluster_centers = model.get_centers()
    grouped_series = TimeSeriesLloydsPartitioning.get_cluster_values(
        cluster_indexes, predict_series, k
    )

    fig, axes = plt.subplots(nrows=k, ncols=1)
    for cluster_id in range(k):
        _plot(grouped_series[cluster_id], cluster_centers[cluster_id], axes[cluster_id])

    center_patch = mpatches.Patch(color="red", label="Cluster centers")
    member_patch = mpatches.Patch(color="blue", label="Series that belong to the cluster")
    plt.legend(
        handles=[center_patch, member_patch],
        loc="upper center",
        bbox_to_anchor=(0.5, -0.40),
        fancybox=True,
        shadow=True,
        ncol=5,
    )
    plt.tight_layout()
    plt.show()
| 30.144928 | 88 | 0.703846 |
029ed725f1f2d111375bab605c9d49677c361f7c | 2,858 | py | Python | src/tests/crud/test_user.py | Behnam-sn/neat-backend | ba6e6356ee092eba27179f72fd2a15e25c68d1b8 | [
"MIT"
] | 1 | 2022-03-07T22:16:48.000Z | 2022-03-07T22:16:48.000Z | src/tests/crud/test_user.py | Behnam-sn/neat-backend | ba6e6356ee092eba27179f72fd2a15e25c68d1b8 | [
"MIT"
] | null | null | null | src/tests/crud/test_user.py | Behnam-sn/neat-backend | ba6e6356ee092eba27179f72fd2a15e25c68d1b8 | [
"MIT"
] | 1 | 2022-03-07T22:16:49.000Z | 2022-03-07T22:16:49.000Z | from sqlalchemy.orm import Session
from src import crud
from src.core.security import verify_password
from src.schemas.user import UserCreate, UserUpdate
from src.tests.utils.user import create_random_user_by_api
from src.tests.utils.utils import random_lower_string
| 26.220183 | 74 | 0.747726 |
02a1f0d97978ae4cf6f1fe28ed7cbca384a07bc2 | 1,062 | py | Python | src/transductor/tests/test_forms.py | fga-gpp-mds/2016.2-Time07 | 44d78ce4f36b7cb535b9c775027b8a93972ba5e3 | [
"MIT"
] | null | null | null | src/transductor/tests/test_forms.py | fga-gpp-mds/2016.2-Time07 | 44d78ce4f36b7cb535b9c775027b8a93972ba5e3 | [
"MIT"
] | null | null | null | src/transductor/tests/test_forms.py | fga-gpp-mds/2016.2-Time07 | 44d78ce4f36b7cb535b9c775027b8a93972ba5e3 | [
"MIT"
] | null | null | null | from django.test import TestCase
from transductor.forms import EnergyForm
from transductor.models import TransductorModel
| 25.902439 | 56 | 0.579096 |
02a2b904bf7cbd57601e581a0ffde8c156b2a583 | 3,729 | py | Python | src/models/base.py | zhengzangw/pytorch-classification | 3a6d95e3810015fa71c950492585c11dfe0b8b64 | [
"MIT"
] | null | null | null | src/models/base.py | zhengzangw/pytorch-classification | 3a6d95e3810015fa71c950492585c11dfe0b8b64 | [
"MIT"
] | null | null | null | src/models/base.py | zhengzangw/pytorch-classification | 3a6d95e3810015fa71c950492585c11dfe0b8b64 | [
"MIT"
] | null | null | null | from typing import Any, List, Optional
import hydra
import torch
import torchmetrics
from omegaconf import DictConfig
from pytorch_lightning import LightningModule
from ..optimizer.scheduler import create_scheduler
from ..utils import utils
from ..utils.misc import mixup_data
log = utils.get_logger(__name__)
| 31.601695 | 98 | 0.604452 |
02a48025dd5fe8b32b133893735d857d8b3b537a | 11,122 | py | Python | tea/balance.py | pcubillos/TEA | e3e4844de4cacef89b9f4a8b1673545726bfc42e | [
"BSD-4-Clause-UC"
] | 25 | 2016-06-20T23:21:46.000Z | 2022-02-06T18:57:33.000Z | tea/balance.py | pcubillos/TEA | e3e4844de4cacef89b9f4a8b1673545726bfc42e | [
"BSD-4-Clause-UC"
] | 3 | 2015-06-04T16:56:26.000Z | 2018-04-03T03:33:31.000Z | tea/balance.py | dzesmin/TEA | 0ec66410f274d9deea7764d53d6363f9aaad3355 | [
"BSD-4-Clause-UC"
] | 19 | 2015-05-27T17:46:41.000Z | 2021-08-05T10:54:59.000Z | #! /usr/bin/env python
############################# BEGIN FRONTMATTER ################################
# #
# TEA - calculates Thermochemical Equilibrium Abundances of chemical species #
# #
# TEA is part of the PhD dissertation work of Dr. Jasmina #
# Blecic, who developed it with coding assistance from #
# undergraduate M. Oliver Bowman and under the advice of #
# Prof. Joseph Harrington at the University of Central Florida, #
# Orlando, Florida, USA. #
# #
# Copyright (C) 2014-2016 University of Central Florida #
# #
# This program is reproducible-research software: you can #
# redistribute it and/or modify it under the terms of the #
# Reproducible Research Software License as published by #
# Prof. Joseph Harrington at the University of Central Florida, #
# either version 0.3 of the License, or (at your option) any later #
# version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Reproducible Research Software License for more details. #
# #
# You should have received a copy of the Reproducible Research #
# Software License along with this program. If not, see #
# <http://planets.ucf.edu/resources/reproducible/>. The license's #
# preamble explains the situation, concepts, and reasons surrounding #
# reproducible research, and answers some common questions. #
# #
# This project was started with the support of the NASA Earth and #
# Space Science Fellowship Program, grant NNX12AL83H, held by #
# Jasmina Blecic, Principal Investigator Joseph Harrington, and the #
# NASA Science Mission Directorate Planetary Atmospheres Program, #
# grant NNX12AI69G. #
# #
# See the file ACKNOWLEDGING in the top-level TEA directory for #
# instructions on how to acknowledge TEA in publications. #
# #
# Visit our Github site: #
# https://github.com/dzesmin/TEA/ #
# #
# Reach us directly at: #
# Jasmina Blecic <jasmina@nyu.edu> #
# #
############################## END FRONTMATTER #################################
import readconf as rc
import os
import numpy as np
from sys import argv
from sympy.core import Symbol
from sympy.solvers import solve
import format as form
def balance(a, b, verb=0, loc_out=None):
    """
    Produce an initial guess for the first TEA iteration by fulfilling the
    mass-balance condition sum_i(ai_j * y_i) = b_j (equation (17) in the TEA
    theory paper), where i indexes species, j indexes elements, the a's are
    stoichiometric coefficients, and the b's are elemental fractions by
    number (ratio of the number density of element j to the total number
    density of all elements in the system).

    A window of `natom` consecutive species is chosen as the set of free
    variables such that no element's stoichiometric column sums to zero
    (i.e., no element is ignored).  All other species are fixed to an
    arbitrary positive scale (initially 0.1 moles) and the linear system is
    solved for the free variables.  If no solution exists, or any solved
    mole number is negative, the scale is divided by 10 and the solve is
    retried.  The actual mole numbers are arbitrary: TEA only requires a
    positive, balanced starting point to initialize the minimization.

    Called by runatm.py and runsingle.py.

    Parameters
    ----------
    a: 2D float ndarray
        Stoichiometric coefficients of the species, shape (nspec, natom).
    b: 1D float ndarray
        Elemental mixing fractions, length natom.
    verb: Integer
        Verbosity level (0=mute, 1=quiet, 2=verbose).
    loc_out: String
        If not None, save results to this folder.
        NOTE(review): not referenced in this function body; presumably
        consumed by the file-writing code described in the original docs --
        confirm before removing.

    Returns
    -------
    y: 1D float ndarray
        Initial non-zero guesses for the species mole numbers that satisfy
        the mass-balance equation.
    y_bar: Float
        Sum of the mole numbers.

    Raises
    ------
    ValueError
        If no window of `natom` consecutive species can serve as the free
        variables (some element would always be ignored).  The original
        code fell through to a NameError in this case.
    """
    # Read dimensions: number of species (rows) and elements (columns)
    nspec, natom = np.shape(a)

    # Print b values for debugging purposes
    if verb > 1:
        print("b values: " + str(b))

    # Find a window of `natom` consecutive rows of `a` whose column sums are
    # all non-zero: using those species as free variables guarantees that
    # every element appears in the solved system.
    free_id = None
    for start in np.arange(nspec - natom + 1):
        a_chunk = a[start:start + natom]
        # Column sums of the chunk, one per element j
        col_sums = list(map(sum, zip(*a_chunk)))
        # A zero column sum would mean that element is ignored by the chunk
        if 0 not in col_sums:
            free_id = []
            for m in np.arange(natom):
                if verb > 1:
                    print('Using y_{:d} as a free variable.'.format(
                        int(start + m + 1)))
                free_id.append(start + m)
            break
    if free_id is None:
        raise ValueError("No valid set of free variables found: every "
                         "window of species ignores at least one element.")

    # Initial guess (scale) for the non-free y_i
    scale = 0.1

    # Loop until the solved free variables are all positive
    nofit = True
    while nofit:
        # 'Known' initial mole numbers before and after the free chunk
        pre_free = np.zeros(free_id[0]) + scale
        post_free = np.zeros(nspec - free_id[-1] - 1) + scale

        # Symbolic free variables.  (np.str was removed in NumPy 1.20;
        # the builtin str() is the correct spelling.)
        free = [Symbol('y_unknown_' + str(m)) for m in np.arange(natom)]

        # Combine free and 'known' entries into the full y vector
        y_init = np.append(pre_free, free)
        y_init = np.append(y_init, post_free)

        # Build the j mass-balance equations: sum_i(ai_j * y_i) - b_j = 0
        eq = []
        for m in np.arange(natom):
            expr = 0
            for i in np.arange(nspec):
                expr += a[i, m] * y_init[i]
            eq.append(expr - b[m])

        # Solve the linear system for the free y_i
        result = solve(list(eq), list(free), rational=False)

        if not result:
            # No solution found: decrease the scale and retry
            scale /= 10
            if verb > 1:
                print("Correcting initial guesses for realistic mass. "
                      "Trying " + str(scale) + "...")
        else:
            # Check the solved values for negative mole numbers
            hasneg = any(result[free[m]] < 0 for m in np.arange(natom))
            if hasneg:
                # Negative masses found: decrease the scale and retry
                scale /= 10
                if verb > 1:
                    print("Negative numbers found in fit."
                          "\n  Correcting initial guesses for realistic "
                          "mass.\n  Trying scale of {:.0e}.".format(scale))
            else:
                # All positive: a viable initial guess was found
                nofit = False
                if verb > 1:
                    print("A scale of {:.0e} provided a viable initial "
                          "guess.".format(scale))

    # Insert the solved free values into the final y_init array
    fit = []
    for m in np.arange(natom):
        fit = np.append(fit, result[free[m]])
    y_init[free_id[0]:free_id[natom - 1] + 1] = fit

    # Debug-only check: verify (after rounding) that each mass-balance
    # equation is satisfied.  No values are changed here.
    if verb > 1:
        print('\nChecks:')
        for m in np.arange(natom):
            if round(sum(a[:, m] * y_init[:]), 2) == round(b[m], 2):
                print('Equation {:d} is satisfied.'.format(m + 1))
            else:
                print('Equation {:d} is NOT satisfied. '
                      'Check for errors'.format(m + 1))

    # Final mole numbers and their sum.  (The original also initialized
    # delta/delta_bar here, but they were unused dead code and are removed.)
    y = np.array(y_init, dtype=np.double)
    y_bar = np.sum(y, dtype=np.double)
    return y, y_bar
| 45.958678 | 85 | 0.538482 |
02a4beb0015cd6725cf78ab2fb76439c197ecfc1 | 2,073 | py | Python | sims/s251/calc-err.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims/s251/calc-err.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims/s251/calc-err.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | from pylab import *
import tables

# Post-processing script for a 2-D DG diffusion run: loads the simulation
# output, compares it against the exact solution, prints the grid spacing
# and mean absolute error, and saves a side-by-side comparison figure.
#
# NOTE(review): this is Python 2 code (see the `print` statement below) and
# it relies on `from pylab import *` plus the helpers `exactSol` and
# `projectOnFinerGrid_f24`, none of which are defined in this chunk --
# presumably defined/imported elsewhere in the original file.  Confirm
# before running.

# Open the HDF5 output (tables.openFile is the pre-3.0 PyTables spelling)
fh = tables.openFile("s251-dg-diffuse-2d_q_1.h5")
q = fh.root.StructGridField
nx, ny, nc = q.shape
# Uniform periodic grid over [0, 2*pi) in both directions
dx = 2*pi/nx
Xf = linspace(0, 2*pi-dx, nx)
dy = 2*pi/ny
Yf = linspace(0, 2*pi-dy, ny)
XX, YY = meshgrid(Xf, Yf)
# High-resolution grid for plotting the exact solution
Xhr = linspace(0, 2*pi, 101)
Yhr = linspace(0, 2*pi, 101)
XXhr, YYhr = meshgrid(Xhr, Yhr)
fhr = exactSol(XXhr, YYhr, 1.0)
figure(1)
pcolormesh(Xhr, Yhr, fhr)
colorbar()
figure(2)
pcolormesh(Xf, Yf, q[:,:,0])
colorbar()
# compute error: mean absolute difference between exact and computed fields
fex = exactSol(XX, YY, 1.0)
error = abs(fex.transpose()-q[:,:,0]).sum()/(nx*ny);
print "%g %g" % (dx, error)
# Cell-center coordinates, used to project the DG solution onto a finer grid
Xc = linspace(0.5*dx, 2*pi-0.5*dx, nx)
Yc = linspace(0.5*dy, 2*pi-0.5*dy, ny)
Xp, Yp, qp = projectOnFinerGrid_f24(Xc, Yc, q)
# Side-by-side plot: projected DG solution (left) vs exact solution (right)
figure(1)
subplot(1,2,1)
pcolormesh(Xp, Yp, transpose(qp))
title('RDG t=1')
colorbar(shrink=0.5)
axis('image')
subplot(1,2,2)
pcolormesh(Xhr, Yhr, fhr)
title('Exact t=1')
colorbar(shrink=0.5)
axis('image')
savefig('s251-exact-cmp.png')
show()
| 21.371134 | 65 | 0.578389 |
02a600c96645d56c182f0a175380bb6948a7e4b5 | 973 | py | Python | PyCharm/Exercicios/Aula12/ex041.py | fabiodarice/Python | 15ec1c7428f138be875111ac98ba38cf2eec1a93 | [
"MIT"
] | null | null | null | PyCharm/Exercicios/Aula12/ex041.py | fabiodarice/Python | 15ec1c7428f138be875111ac98ba38cf2eec1a93 | [
"MIT"
] | null | null | null | PyCharm/Exercicios/Aula12/ex041.py | fabiodarice/Python | 15ec1c7428f138be875111ac98ba38cf2eec1a93 | [
"MIT"
] | null | null | null | # Importao de bibliotecas
from datetime import date
# Swimming-category classifier (prompts and output are in Portuguese).
# Program title
print('\033[1;34;40mCLASSIFICAO DE CATEGORIAS PARA NATAO\033[m')
# Read the birth year and derive the age from the current calendar year
# only (month/day are not taken into account)
nascimento = int(input('\033[30mDigite o ano do seu nascimento:\033[m '))
idade = date.today().year - nascimento
# Upper age bound for each category
mirim = 9
infantil = 14
junior = 19
senior = 20
# Classification: the first matching upper bound wins; over 20 is MASTER
if idade <= mirim:
    print('Sua idade \033[1;33m{} anos\033[m, e sua categoria a \033[1;34mMIRIM!\033[m'.format(idade))
elif idade <= infantil:
    print('Sua idade \033[1;33m{}\033[m anos, e sua categoria a \033[1;34mINFANTIL!\033[m'.format(idade))
elif idade <= junior:
    print('Sua idade \033[1;33m{}\033[m anos, e sua categoria a \033[1;34mJUNIOR!\033[m'.format(idade))
elif idade <= senior:
    print('Sua idade \033[1;33m{}\033[m anos, e sua categoria a \033[1;34mSNIOR!\033[m'.format(idade))
elif idade > senior:
    print('Sua idade \033[1;33m{}\033[m anos, e sua categoria \033[1;34mMASTER!\033[m'.format(idade))
02a632349d6da6f348ea1c189802c694c33a0241 | 1,681 | py | Python | github-bot/harvester_github_bot/config.py | futuretea/bot | 5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3 | [
"Apache-2.0"
] | null | null | null | github-bot/harvester_github_bot/config.py | futuretea/bot | 5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3 | [
"Apache-2.0"
] | null | null | null | github-bot/harvester_github_bot/config.py | futuretea/bot | 5f1f1a08e0fca6519e0126ff8f0b87fec23a38e3 | [
"Apache-2.0"
] | null | null | null | from everett.component import RequiredConfigMixin, ConfigOptions
from everett.manager import ConfigManager, ConfigOSEnv
| 64.653846 | 121 | 0.606782 |
02a66be9396c06d98dcd1e2835505651b29dc2d8 | 618 | py | Python | scripts/MCA/1combine.py | jumphone/scRef | 7308d8571c3e46f481c9432857de84fd13955166 | [
"MIT"
] | 10 | 2018-11-27T09:32:53.000Z | 2022-03-21T02:42:54.000Z | scripts/MCA/1combine.py | jumphone/scRef | 7308d8571c3e46f481c9432857de84fd13955166 | [
"MIT"
] | null | null | null | scripts/MCA/1combine.py | jumphone/scRef | 7308d8571c3e46f481c9432857de84fd13955166 | [
"MIT"
] | 2 | 2018-12-13T18:45:26.000Z | 2020-06-20T07:18:19.000Z | import sys
output=sys.argv[1]
input_list=(sys.argv[2:])
EXP={}
header=[]
for input_file in input_list:
fi=open(input_file)
header=header+fi.readline().replace('"','').rstrip().split()
for line in fi:
seq=line.replace('"','').rstrip().split()
if seq[0] in EXP:
EXP[seq[0]]=EXP[seq[0]]+seq[1:]
else:
EXP[seq[0]]=seq[1:]
fi.close()
fo=open(output,'w')
fo.write('\t'.join(header)+'\n')
for gene in EXP:
if len(EXP[gene])==len(header):
fo.write(gene+'\t'+'\t'.join(EXP[gene])+'\n')
fo.close()
| 16.263158 | 65 | 0.509709 |
02a84eeec97777f61185766e05077d7532adafbc | 232 | py | Python | pyramda/logic/any_pass.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | [
"MIT"
] | 124 | 2015-07-30T21:34:25.000Z | 2022-02-19T08:45:50.000Z | pyramda/logic/any_pass.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | [
"MIT"
] | 37 | 2015-08-31T23:02:20.000Z | 2022-02-04T04:45:28.000Z | pyramda/logic/any_pass.py | sergiors/pyramda | 5bf200888809b1bc946e813e29460f204bccd13e | [
"MIT"
] | 20 | 2015-08-04T18:59:09.000Z | 2021-12-13T08:08:59.000Z | from pyramda.function.curry import curry
from pyramda.function.always import always
from pyramda.iterable.reduce import reduce
from .either import either
| 23.2 | 47 | 0.784483 |
02ab52b832209102ceb109d8aa07a587d3c2d55e | 817 | py | Python | bio/scheduler/views.py | ZuluPro/bio-directory | 4cdd3967e97363f59795d7b0fdb85998029370ff | [
"BSD-2-Clause"
] | null | null | null | bio/scheduler/views.py | ZuluPro/bio-directory | 4cdd3967e97363f59795d7b0fdb85998029370ff | [
"BSD-2-Clause"
] | null | null | null | bio/scheduler/views.py | ZuluPro/bio-directory | 4cdd3967e97363f59795d7b0fdb85998029370ff | [
"BSD-2-Clause"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.utils.timezone import now
from schedule.models import Calendar
from schedule.periods import Day
from bio.scheduler import models
| 31.423077 | 74 | 0.684211 |
02ac373e21b06fce0640079ee711d2d820229c82 | 1,759 | py | Python | src/undefined/1/try.py | ytyaru0/Python.Sqlite.framework.think.20180305101210 | babb8f55183776a9cbed486613c4a1a7caa6daf2 | [
"CC0-1.0"
] | null | null | null | src/undefined/1/try.py | ytyaru0/Python.Sqlite.framework.think.20180305101210 | babb8f55183776a9cbed486613c4a1a7caa6daf2 | [
"CC0-1.0"
] | null | null | null | src/undefined/1/try.py | ytyaru0/Python.Sqlite.framework.think.20180305101210 | babb8f55183776a9cbed486613c4a1a7caa6daf2 | [
"CC0-1.0"
] | null | null | null | try:
import MyTable
except NameError as e:
print(e)
import importlib
importlib.import_module('Constraints')
# locals() module Constraints.py
# MyTableConstraints`from Constraints import PK,UK,FK,NN,D,C`
#
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
last_tb = None
for tb in traceback.extract_tb(exc_traceback):
print(tb)
last_tb = tb
#print(last_tb)
#print(type(last_tb))
#print(dir(last_tb))
print(last_tb.filename)
print(last_tb.line)
print(last_tb.lineno)
print(last_tb.name)
import pathlib
module_path = pathlib.Path(last_tb.filename)
module_name = module_path.name.replace(module_path.suffix, '')
print(module_name)
# Constraints PK
# `from Constraints import PK,UK,FK,NN,D,C`
# exec(source_code)
#import importlib
#importlib.import_module(module_name)
print(e)
#print('', e)
#print(type(e))
#print(dir(e))
#print(e.args)
#print(type(e.with_traceback()))
#print(e.with_traceback())
#print(type(e.with_traceback))
#print(dir(e.with_traceback))
# #!python3
source_code = 'from Constraints import PK,UK,FK,NN,D,C' + '\n'
with pathlib.Path(last_tb.filename).open() as f:
source_code += f.read()
exec(source_code)
assert(module_name in locals())
cls = locals()[module_name]
print(dir(cls))
print(cls.Id)
# name 'PK' is not defined
#print(locals())
#print(locals()['__loader__'])
#print(dir(locals()['__loader__']))
#print(locals()['__loader__'].get_filename())
| 29.316667 | 75 | 0.665151 |
02aedbb634ecb4773466e66e6c16d7f09a8368bc | 22,797 | py | Python | trac/wiki/tests/admin.py | rjollos/trac | 2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/admin.py | rjollos/trac | 2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214 | [
"BSD-3-Clause"
] | null | null | null | trac/wiki/tests/admin.py | rjollos/trac | 2bc0edd96b0eace18aaa8a2fe3cbeebdf1a88214 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import os.path
import sys
import tempfile
import unittest
from trac.admin.api import console_datetime_format
from trac.admin.console import TracAdmin
from trac.admin.test import TracAdminTestCaseBase
from trac.test import EnvironmentStub, mkdtemp
from trac.tests.contentgen import random_unique_camel, random_paragraph
from trac.util import create_file
from trac.util.datefmt import format_datetime
from trac.wiki.admin import WikiAdmin
from trac.wiki.model import WikiPage
def test_suite():
    """Collect the wiki-admin test cases into a single unittest suite.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of
    ``unittest.makeSuite``, which has been deprecated since Python 3.2
    and was removed in Python 3.13.
    """
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(WikiAdminTestCase))
    suite.addTest(loader.loadTestsFromTestCase(TracAdminTestCase))
    return suite
# Allow running this test module directly from the command line
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| 37.128664 | 79 | 0.605343 |
02aee1a4ed761847d16082b08939ec1e2b7eae92 | 2,646 | py | Python | cli/cli.py | kandrio/toy-chord | c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da | [
"Apache-2.0"
] | null | null | null | cli/cli.py | kandrio/toy-chord | c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da | [
"Apache-2.0"
] | null | null | null | cli/cli.py | kandrio/toy-chord | c35b02f1e7d7ba44e14a86e1944acc8cee3cd5da | [
"Apache-2.0"
] | null | null | null | import click, requests, sys
# Address of the bootstrap node; every CLI request is sent to this endpoint
bootstrap_ip = '192.168.0.2'
bootstrap_port = '8000'
base_url = 'http://' + bootstrap_ip + ':' + bootstrap_port
if __name__ == '__main__':
    # toychord() is the CLI entry point -- defined elsewhere in this file
    # (not visible in this chunk); confirm it is the click command group.
    toychord()
| 25.941176 | 77 | 0.628118 |
02b0025b6adb156b789fef5aff0bf34cd7804353 | 87 | py | Python | sportstrackeranalyzer/plugin_handler/__init__.py | XeBoris/SportsTrackerAnalyzer | f211a9120b9ba91bb04b9742c80d0de7b4143f78 | [
"MIT"
] | 1 | 2021-02-12T08:00:34.000Z | 2021-02-12T08:00:34.000Z | sportstrackeranalyzer/plugin_handler/__init__.py | XeBoris/SportsTrackerAnalyzer | f211a9120b9ba91bb04b9742c80d0de7b4143f78 | [
"MIT"
] | null | null | null | sportstrackeranalyzer/plugin_handler/__init__.py | XeBoris/SportsTrackerAnalyzer | f211a9120b9ba91bb04b9742c80d0de7b4143f78 | [
"MIT"
] | null | null | null | from sportstrackeranalyzer.plugins.plugin_simple_distances import Plugin_SimpleDistance | 87 | 87 | 0.942529 |
02b0d90057314fc93b4b1b7ec332876605dcadca | 2,598 | py | Python | migrations/versions/a12e86de073c_.py | ravenscroftj/harri_gttool | 11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e | [
"MIT"
] | null | null | null | migrations/versions/a12e86de073c_.py | ravenscroftj/harri_gttool | 11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e | [
"MIT"
] | 4 | 2020-06-18T14:38:26.000Z | 2021-12-13T19:54:55.000Z | migrations/versions/a12e86de073c_.py | ravenscroftj/harri_gttool | 11e8e6b5e8c4bbfc62dc15c7d8b099d4a4fa1a5e | [
"MIT"
] | null | null | null | """Initial creation of schema
Revision ID: a12e86de073c
Revises:
Create Date: 2018-01-05 13:42:18.768932
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a12e86de073c'   # this migration's id
down_revision = None        # None: this is the first migration in the chain
branch_labels = None
depends_on = None
| 36.591549 | 90 | 0.681678 |
02b1a17a92fa82b80990e63f3f0d4e50c1738b1c | 5,106 | py | Python | takedown/__init__.py | zsxing99/Takedown-script | fcd0533ab71a1198651a6e53cd1d58039d4fa7fd | [
"MIT"
] | 1 | 2021-01-06T00:23:03.000Z | 2021-01-06T00:23:03.000Z | takedown/__init__.py | zsxing99/Takedown-script | fcd0533ab71a1198651a6e53cd1d58039d4fa7fd | [
"MIT"
] | 4 | 2020-11-09T06:01:25.000Z | 2020-12-17T06:39:30.000Z | takedown/__init__.py | zsxing99/Takedown-script | fcd0533ab71a1198651a6e53cd1d58039d4fa7fd | [
"MIT"
] | null | null | null | """
TakeDown v0.0.1
===============
author: Zesheng Xing
email: zsxing@ucdavis.edu
This Python project is to help people search on some client hosting contents that potential violate the their copyright.
"""
VERSION = "0.1.0"
DESCRIPTION = "A python script that allows users to search potential copyright violated information on GitHub and " \
"send emails taking down those."
CONTRIBUTORS_INFO = "The project is developed by Zesheng Xing and supervised by Jol Porquet-Lupine at UC Davis, 2020."
USAGE = \
"""
Usage: takedown.py command [args...]
where commands include:
find search repositories
python takedown.py find [search_query] [GitHub_token] [-options]
with following args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
[-t target]: optional. The target of the search query. It could be repo, code. It is code by default.
Concatenate them by +, eg. -t code+repo.
[-i input]: optional. The file path of previous output of takedown find. By providing this path, the output
this time will be compared against the previous one.
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be yaml or json. It is yaml by default
or using a configuration file:
python takedown.py find -c <path_to_config_file>
config file args:
required args:
[search_query]: required. The text used to search.
[Github_token]: required. The Github token used to raise the rate limit and enable broader search.
optional args:
[target]: optional. The target of the search query. It could be repo, code. It is code by default.
Concatenate them by +, eg. -t code+repo.
[input]: optional. The file path of previous output of takedown find. By providing this path,
the output this time will be compared against the previous one.
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be yaml or json. It is yaml by default
send send emails based on records
python takedown send [domain] [port] [inputs] [-options]
with following args:
[domain]: required. The domain address to connect
[port]: required. port of domain to connect
[inputs]: required. Input files to send email
[-u username]: optional. username of the account. or ask
[-p password]: optional. password of the account. or ask
[-s secure method]: optional. It could be TLS or SSL, depending on the domain and port connected.
Confirm before using this option.
[-t tags]: optional. Only the records that matches the tag will be sent with an email
[-o output]: optional. The output file path. The result will be printed to the console by default.
[-f format]: optional. The output format. It could be yaml or json. It is yaml by default
[-en email name]: optional. name used to send email. Otherwise username will be used
[-es email subject]: optional. subject of the email. Otherwise default email subject is used
[-ep email preface]: optional. preface of the email. Otherwise default email preface is used
[-ee email ending]: optional. preface of the email. Otherwise default email preface is used
or using a configuration file:
python takedown.py send -c <path_to_config_file>
config file args:
required parameters:
[domain]: required. Domain used to connect smtp service
[port]: required. Port of domain to connect smtp service
[inputs]: required. Records based to send emails
optional parameters:
[username]: optional. username of the account. or ask
[password]: optional. password of the account. or ask
[secure method]: optional. It could be TLS or SSL, depending on the domain and port connected.
Confirm before using this option.
[tags]: optional. Only the records that matches the tag will be sent with an email
[output]: optional. The output file path. The result will be printed to the console by default.
[format]: optional. The output format. It could be yaml or json. It is yaml by default
[emai_name]: optional. name used to send email. Otherwise username will be used
[email_subject]: optional. subject of the email. Otherwise default email subject is used
[email_preface]: optional. preface of the email. Otherwise default email preface is used
[email_ending]: optional. preface of the email. Otherwise default email preface is used
help show instructions and list of options
"""
| 60.785714 | 120 | 0.665883 |
02b1f2bbe7cd1537f8a13eba37e53d713c062a3c | 977 | py | Python | test/test_data.py | xoriath/py-cab | ab02faeaf69578bb9a0874632c610b27a9dd582f | [
"MIT"
] | null | null | null | test/test_data.py | xoriath/py-cab | ab02faeaf69578bb9a0874632c610b27a9dd582f | [
"MIT"
] | null | null | null | test/test_data.py | xoriath/py-cab | ab02faeaf69578bb9a0874632c610b27a9dd582f | [
"MIT"
] | null | null | null |
import os.path
CABEXTRACT_TEST_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'cabextract', 'cabs')
CABEXTRACT_BUGS_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'cabextract', 'bugs')
LIBMSPACK_TEST_DIR = os.path.join(os.path.dirname(__file__), 'test-data', 'libmspack')
| 36.185185 | 107 | 0.732856 |
02b2af84d188c97a9d4469b8353aba7b18703383 | 141 | py | Python | utils/exception.py | Lolik-Bolik/Hashing_Algorithms | d3ba488cf575fc685d5ea603b1915de4d5fed713 | [
"MIT"
] | 2 | 2020-12-15T20:26:29.000Z | 2020-12-15T20:27:26.000Z | utils/exception.py | Lolik-Bolik/Hashing_Algorithms | d3ba488cf575fc685d5ea603b1915de4d5fed713 | [
"MIT"
] | null | null | null | utils/exception.py | Lolik-Bolik/Hashing_Algorithms | d3ba488cf575fc685d5ea603b1915de4d5fed713 | [
"MIT"
] | null | null | null | """
Exceptions module
"""
| 11.75 | 44 | 0.64539 |
02b2cd966c362b3581d56d85cfd72c1cf6dfa614 | 1,212 | py | Python | finetwork/plotter/_centrality_metrics.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | 5 | 2021-12-07T22:14:10.000Z | 2022-03-30T14:09:15.000Z | finetwork/plotter/_centrality_metrics.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | null | null | null | finetwork/plotter/_centrality_metrics.py | annakuchko/FinNetwork | 4566ff96b33fb5668f9b28f41a94791d1cf9249c | [
"MIT"
] | null | null | null | import networkx as nx
| 32.756757 | 72 | 0.615512 |
02b429e1598512b88fa03213426e6cd52e56ec98 | 3,558 | py | Python | filters/incoming_filters.py | juhokokkala/podoco_juhokokkala | 57709c539168b6aaddfc187b3a3610bef63bd68a | [
"MIT"
] | null | null | null | filters/incoming_filters.py | juhokokkala/podoco_juhokokkala | 57709c539168b6aaddfc187b3a3610bef63bd68a | [
"MIT"
] | null | null | null | filters/incoming_filters.py | juhokokkala/podoco_juhokokkala | 57709c539168b6aaddfc187b3a3610bef63bd68a | [
"MIT"
] | null | null | null | ###############################################################################
# Copyright (C) 2016 Juho Kokkala
# This is part of Juho Kokkala's PoDoCo project.
#
# This file is licensed under the MIT License.
###############################################################################
"""
Particle filters for tracking the incoming traffic intensity.
See, the files script_test_poisson_1.py and script_test_negbin.py for
usage.
"""
import numpy as np
import resampling # resampling (c) Roger R Labbe Jr (MIT License)
from scipy.special import gammaln
def pf_init(Nrep, params):
    """Build the initial particle cloud from MCMC parameter samples.

    Every array in ``params`` is tiled ``Nrep`` times (in place), the
    importance weights start out uniform, and the latent state ``x`` is
    drawn from the stationary distribution of the AR(1) prior, whose
    standard deviation is sqrtQ_x / sqrt(1 - A_x**2).
    """
    # Replicate each MCMC sample Nrep times to form the particle set
    for key in params:
        params[key] = np.tile(params[key], Nrep)

    n_particles = params['A_x'].shape[0]
    weights = np.full(n_particles, 1 / n_particles)

    # Stationary standard deviation of the AR(1) process
    stationary_sd = params['sqrtQ_x'] / np.sqrt(1 - params['A_x'] ** 2)
    x = np.random.normal(params['base'][0, :], stationary_sd)
    return x, params, weights
def pf_update_poisson(y, x, params, W):
    """Reweight particles by the Poisson likelihood of observation ``y``.

    ``x`` holds per-particle log-intensities, so the Poisson
    log-likelihood is y*x - exp(x) up to a term constant across
    particles.  Weights are updated in log space and renormalized; the
    max-subtraction guards the exponentiation against under/overflow.

    Bug fix: the original wrote the first term as ``y * np.log(np.exp(x))``,
    which overflows to inf (and yields NaN weights via 0*inf or inf-inf)
    for x greater than ~709; ``y * x`` is mathematically identical and safe.

    Returns ``(params, W)`` with ``params`` passed through untouched.
    """
    logW = np.log(W) + y * x - np.exp(x)
    W = np.exp(logW - np.max(logW))
    W = W / np.sum(W)
    return params, W
def pf_step_poisson(y, x, params, W, resample=True):
    """
    Advance the Poisson particle filter by one observation.

    Optionally resamples the particles (residual resampling, after which
    the weights are reset to uniform), propagates each particle through
    the AR(1) prior around the baseline, drops the consumed first
    baseline element, and reweights by the Poisson likelihood of ``y``.
    """
    n_particles = W.shape[0]

    if resample:
        keep = resampling.residual_resample(W)
        x = x[keep]
        params['base'] = params['base'][:, keep]
        for key in ('sqrtQ_x', 'A_x'):
            params[key] = params[key][keep]
        # Resampled particles carry uniform weights
        W = np.full(n_particles, 1 / n_particles)

    # Propagate the log-intensity through the AR(1) prior
    ar_mean = params['base'][1, :] + params['A_x'] * (x - params['base'][0, :])
    x = np.random.normal(ar_mean, params['sqrtQ_x'])

    # The first baseline element has now been consumed
    params = trim_base(params)

    # Measurement update
    params, W = pf_update_poisson(y, x, params, W)
    return x, params, W
def predict_mean(x, params, W):
    """Weighted expectation of the next observation after the update step."""
    base_next = params['base'][1, :]
    base_now = params['base'][0, :]
    # AR(1) mean of the next log-intensity, per particle
    log_mean = base_next + params['A_x'] * (x - base_now)
    # Lognormal mean correction: E[exp(z)] = exp(mu + sigma^2 / 2)
    per_particle = np.exp(log_mean + 0.5 * params['sqrtQ_x'] ** 2)
    return np.sum(W * per_particle)
def trim_base(params):
    """Discard the first (already consumed) baseline element.

    Mutates ``params`` in place and also returns it for convenience.
    """
    remaining = params['base'][1:, :]
    params['base'] = remaining
    return params
def pf_update_negbin(y, x, params, W):
    """Reweight particles by a negative-binomial likelihood of ``y``.

    Parameterization: with phi = exp(x) / (omega - 1) and success
    probability 1/omega, the NegBin mean is exp(x) and the variance is
    omega * exp(x), so ``params['omega']`` (> 1) is the overdispersion
    factor.  Terms constant across particles (e.g. gammaln(y + 1))
    cancel in the normalization and are omitted.

    Bug fix: unlike its twin ``pf_update_poisson``, the original dropped
    the incoming log-weights ``np.log(W)``, silently discarding all
    weight history whenever the step is run with ``resample=False``.
    The prior weights are now included, matching the Poisson version.

    Returns ``(params, W)`` with ``params`` passed through untouched.
    """
    phi = np.exp(x) / (params['omega'] - 1)
    logW = (np.log(W)
            + gammaln(y + phi) - gammaln(phi)
            + y * (np.log(params['omega'] - 1) - np.log(params['omega']))
            - phi * np.log(params['omega']))
    W = np.exp(logW - np.max(logW))
    W = W / np.sum(W)
    return params, W
def pf_step_negbin(y, x, params, W, resample=True):
    """
    Advance the negative-binomial particle filter by one observation.

    Optionally resamples the particles (residual resampling, after which
    the weights are reset to uniform), propagates each particle through
    the AR(1) prior around the baseline, drops the consumed first
    baseline element, and reweights by the NegBin likelihood of ``y``.
    """
    n_particles = W.shape[0]

    if resample:
        keep = resampling.residual_resample(W)
        x = x[keep]
        params['base'] = params['base'][:, keep]
        for key in ('sqrtQ_x', 'A_x', 'omega'):
            params[key] = params[key][keep]
        # Resampled particles carry uniform weights
        W = np.full(n_particles, 1 / n_particles)

    # Propagate the log-intensity through the AR(1) prior
    ar_mean = params['base'][1, :] + params['A_x'] * (x - params['base'][0, :])
    x = np.random.normal(ar_mean, params['sqrtQ_x'])

    # The first baseline element has now been consumed
    params = trim_base(params)

    # Measurement update
    params, W = pf_update_negbin(y, x, params, W)
    return x, params, W
| 29.163934 | 79 | 0.559303 |
02b49f51a158d139c9f2c3154c3099e5edf9d8c5 | 2,610 | py | Python | model/semantic_gcn.py | AndersonStra/Mucko | f630712ea5be5f3ce995958c050cd7b5398d31e0 | [
"MIT"
] | 2 | 2022-02-23T02:14:49.000Z | 2022-03-23T13:27:44.000Z | model/semantic_gcn.py | AndersonStra/Mucko | f630712ea5be5f3ce995958c050cd7b5398d31e0 | [
"MIT"
] | null | null | null | model/semantic_gcn.py | AndersonStra/Mucko | f630712ea5be5f3ce995958c050cd7b5398d31e0 | [
"MIT"
] | 1 | 2022-03-23T13:27:49.000Z | 2022-03-23T13:27:49.000Z | import torch
import torch.nn.functional as F
from torch import nn
import dgl
import networkx as nx
| 31.071429 | 93 | 0.576245 |
02b57da73d7345d506383c6f3f8675776637aa80 | 3,808 | py | Python | negentropy/scriptparser.py | shewitt-au/negentropy | 40841e3f7d95f9124f4b59b0d591bf16e57ef312 | [
"MIT"
] | 4 | 2021-07-07T09:49:05.000Z | 2021-11-14T04:17:11.000Z | negentropy/scriptparser.py | shewitt-au/negentropy | 40841e3f7d95f9124f4b59b0d591bf16e57ef312 | [
"MIT"
] | null | null | null | negentropy/scriptparser.py | shewitt-au/negentropy | 40841e3f7d95f9124f4b59b0d591bf16e57ef312 | [
"MIT"
] | 1 | 2020-05-29T18:11:04.000Z | 2020-05-29T18:11:04.000Z | from inspect import cleandoc
from textwrap import indent
from lark import Lark, Transformer
from lark.exceptions import LarkError
from .interval import Interval
from . import errors
| 31.471074 | 99 | 0.541754 |
02b58fbea2e6f02fd5c603a709c877e0fd2cae0b | 567 | py | Python | Ex25.py | CarlosDouradoPGR/PythonBR-EstruturasDecs | 727ab33f44c48e2d7026ea85d54791d1885c0bdc | [
"MIT"
] | null | null | null | Ex25.py | CarlosDouradoPGR/PythonBR-EstruturasDecs | 727ab33f44c48e2d7026ea85d54791d1885c0bdc | [
"MIT"
] | null | null | null | Ex25.py | CarlosDouradoPGR/PythonBR-EstruturasDecs | 727ab33f44c48e2d7026ea85d54791d1885c0bdc | [
"MIT"
] | null | null | null | print('Interrogando um suspeito: ')
pg1 = str(input("Telefonou para a vtma?(S/N)\n").upper().strip())
pg2 = str(input('Esteve no local do crime?(S/N)\n').upper().strip())
pg3 = str(input('Mora perto da vtma?(S/N)\n').upper().strip())
pg4 = str(input('Devia para a vtma?(S/N)\n').upper().strip())
pg5 = str(input('J trabalhou com a vtma?(S/N\n').upper().strip())
lst = [pg1, pg2, pg3, pg4, pg5].count('S')
if 3 > lst >= 2 :
print('Suspeita')
elif lst == 3 or lst == 4:
print('Cmplice')
elif lst == 5:
print('Assassino')
else:
print('Inocente')
| 31.5 | 68 | 0.611993 |
02b6a5972aef51ad1a07e6ff7ba0827ae6cad8a4 | 2,235 | py | Python | t/text_test.py | gsnedders/Template-Python | 4081e4d820c1be0c0448a8dcb79e0703066da099 | [
"Artistic-2.0"
] | null | null | null | t/text_test.py | gsnedders/Template-Python | 4081e4d820c1be0c0448a8dcb79e0703066da099 | [
"Artistic-2.0"
] | 6 | 2015-10-13T13:46:10.000Z | 2019-06-17T09:39:57.000Z | t/text_test.py | gsnedders/Template-Python | 4081e4d820c1be0c0448a8dcb79e0703066da099 | [
"Artistic-2.0"
] | 3 | 2018-12-03T13:15:21.000Z | 2019-03-13T09:12:09.000Z | from template import Template
from template.test import TestCase, main
DATA = r"""
-- test --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
$a ${b} $c
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
# [% PROCESS foo %]
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
-- use interp --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
\$ @ { } @{ } \${ } # ~ ' ! % *foo
$a ${b} $c
-- expect --
This is a text block "hello" 'hello' 1/3 1\4 <html> </html>
$ @ { } @{ } ${ } # ~ ' ! % *foo
alpha bravo charlie
-- test --
<table width=50%>©
-- expect --
<table width=50%>©
-- test --
[% foo = 'Hello World' -%]
start
[%
#
# [% foo %]
#
#
-%]
end
-- expect --
start
end
-- test --
pre
[%
#
# [% PROCESS foo %]
#
-%]
mid
[% BLOCK foo; "This is foo"; END %]
-- expect --
pre
mid
-- test --
[% a = "C'est un test"; a %]
-- expect --
C'est un test
-- test --
[% META title = "C'est un test" -%]
[% component.title -%]
-- expect --
C'est un test
-- test --
[% META title = 'C\'est un autre test' -%]
[% component.title -%]
-- expect --
C'est un autre test
-- test --
[% META title = "C'est un \"test\"" -%]
[% component.title -%]
-- expect --
C'est un "test"
-- test --
[% sfoo %]/[% sbar %]
-- expect --
foo/bar
-- test --
[% s1 = "$sfoo"
s2 = "$sbar ";
s3 = sfoo;
ref(s1);
'/';
ref(s2);
'/';
ref(s3);
-%]
-- expect --
foo[str]/bar [str]/foo[Stringy]
"""
| 14.607843 | 71 | 0.503803 |
02b974ddbd9b73968df839c1e4fdda0cbb8567db | 761 | py | Python | research/destroyer.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | research/destroyer.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | research/destroyer.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 John Carrino
import math
from pprint import pprint
import matplotlib.pyplot as plt
from frispy import Disc
from frispy import Discs
# Simulate a Destroyer throw at 70 mph and plot the resulting flight path.
model = Discs.destroyer

MPH_TO_MPS = 0.44704
speed = 70 * MPH_TO_MPS                          # launch speed, m/s
spin = -speed / model.diameter * 1.2             # spin rate derived from launch speed

launch = [6, -3, 25]                             # degrees: launch angle, nose up, hyzer
angle_deg, nose_up, hyzer = launch
disc = Disc(model, {"vx": math.cos(angle_deg * math.pi / 180) * speed, "dgamma": spin,
                    "vz": math.sin(angle_deg * math.pi / 180) * speed,
                    "nose_up": nose_up, "hyzer": hyzer, "gamma": -2})

result = disc.compute_trajectory(20.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z

# Plot attitude and lateral/vertical position against downrange distance.
plt.plot(x, result.theta)
plt.plot(x, y)
plt.plot(x, z)
#plt.plot(t, x)
#plt.plot(t, y)
#plt.plot(t, z)
pprint(x[-1] * 3.28084)  # final downrange distance in feet
plt.show()
| 22.382353 | 112 | 0.638633 |
02b9d9f46387318db8e53a0fa38e85fa6e870ae5 | 6,200 | py | Python | litex/soc/cores/clock/xilinx_common.py | suarezvictor/litex | d37ef60e70bd0ce8d28079143b6859d8b928395e | [
"ADSL"
] | 1 | 2021-12-25T13:49:55.000Z | 2021-12-25T13:49:55.000Z | litex/soc/cores/clock/xilinx_common.py | suarezvictor/litex | d37ef60e70bd0ce8d28079143b6859d8b928395e | [
"ADSL"
] | 1 | 2022-02-11T14:39:50.000Z | 2022-02-11T22:25:57.000Z | litex/soc/cores/clock/xilinx_common.py | suarezvictor/litex | d37ef60e70bd0ce8d28079143b6859d8b928395e | [
"ADSL"
] | 1 | 2021-12-25T13:49:57.000Z | 2021-12-25T13:49:57.000Z | #
# This file is part of LiteX.
#
# Copyright (c) 2018-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.io import DifferentialInput
from litex.soc.interconnect.csr import *
from litex.soc.cores.clock.common import *
# Xilinx / Generic ---------------------------------------------------------------------------------
| 40.25974 | 105 | 0.528226 |
02ba8cd0ea54e5520bfcd504cff7483bc433ed10 | 3,044 | py | Python | my_des/my_des.py | ipid/my-des | 13340481c03113a23263ef824c119b3374028fe2 | [
"MIT"
] | null | null | null | my_des/my_des.py | ipid/my-des | 13340481c03113a23263ef824c119b3374028fe2 | [
"MIT"
] | null | null | null | my_des/my_des.py | ipid/my-des | 13340481c03113a23263ef824c119b3374028fe2 | [
"MIT"
] | null | null | null | __all__ = (
'des_encrypt',
'des_decrypt',
)
from typing import List, Any
from ._tools import *
from ._constant import *
from rich import print
from rich.text import Text
from rich.panel import Panel
| 30.138614 | 88 | 0.610053 |
02baf30e1ca8e8d64e8718657b516c4805fddd84 | 1,080 | py | Python | Chromebook/setup.py | mahtuag/DistroSetup | 8fb2b7351ea12163602a9a4c5a7b63fc87f326e2 | [
"Apache-2.0"
] | 3 | 2020-01-12T11:21:47.000Z | 2021-09-16T06:43:22.000Z | Chromebook/setup.py | mahtuag/DistroSetup | 8fb2b7351ea12163602a9a4c5a7b63fc87f326e2 | [
"Apache-2.0"
] | 2 | 2020-07-01T20:46:31.000Z | 2020-07-01T21:10:09.000Z | Chromebook/setup.py | wingedrhino/DistroSetup | 65edfda7dbded113bf5f3e6f53b331fc8aeaf1c5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import subprocess
import apt
import sys
cache = apt.cache.Cache()
cache.update()
cache.open()
packages = [
'git',
'curl',
'wget',
'software-properties-common',
'build-essential',
'automake',
'libtool',
'autoconf',
'pkg-config',
'udev',
'fuse',
'snap',
'snapd',
'zsh',
'byobu',
'python3',
'libsquashfuse0',
'squashfuse',
'fuse',
'vim',
'atop',
'zsh',
'byobu',
'htop',
'iotop',
'nethogs',
'aptitude',
'udisks2',
'parted',
'gparted',
'udisks2-lvm2',
'udisks2-vdo',
'udisks2-zram',
'udisks2-btrfs',
'udisks2-doc',
'default-jdk',
'leiningen',
'clojure',
]
for pkg_name in packages:
pkg = cache[pkg_name]
if pkg.is_installed:
print(f'{pkg_name} is already installed.')
else:
print(f'{pkg_name} will be marked for installation.')
| 18.305085 | 62 | 0.466667 |
02bb2ad5f8635de13653c1ed22f4978ec39fcfc6 | 377 | py | Python | performance_test.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | performance_test.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | performance_test.py | alan-augustine/python_singly_linkedlist | f227a4154b22de8a273d319ecdd6329035d5d258 | [
"MIT"
] | null | null | null | from time import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
from singly_linkedlist.singly_linkedlist import SinglyLinkedList
start = time()
linked_list = SinglyLinkedList()
for i in range(100000):
linked_list.insert_head(111111111111)
end = time()
print("Took {0} seconds".format(start-end))
# linked_list.print_elements()
| 23.5625 | 64 | 0.774536 |
02bdc143ddedce60ffc59109acab68856a9f8737 | 561 | py | Python | python/leetcode/0001.py | bluewaitor/playground | 330266ce28212dc5e32b0276c896f9ceffd35bf5 | [
"MIT"
] | null | null | null | python/leetcode/0001.py | bluewaitor/playground | 330266ce28212dc5e32b0276c896f9ceffd35bf5 | [
"MIT"
] | null | null | null | python/leetcode/0001.py | bluewaitor/playground | 330266ce28212dc5e32b0276c896f9ceffd35bf5 | [
"MIT"
] | null | null | null | # 1.
from typing import List, Optional
if __name__ == '__main__':
solution = Solution()
print(solution.twoSum([2, 7, 11, 15], 9))
| 28.05 | 74 | 0.593583 |
02bea4753652cd78237dd184ed6e67ea923d42ea | 454 | py | Python | dataprocess/print_msg.py | lifelong-robotic-vision/openloris-scene-tools | ce6a4839f618bf036d3f3dbae14561bfc7413641 | [
"MIT"
] | 13 | 2021-03-27T15:49:21.000Z | 2022-03-19T13:26:30.000Z | dataprocess/print_msg.py | lifelong-robotic-vision/openloris-scene-tools | ce6a4839f618bf036d3f3dbae14561bfc7413641 | [
"MIT"
] | 4 | 2021-03-30T10:40:43.000Z | 2022-03-28T01:36:57.000Z | dataprocess/print_msg.py | lifelong-robotic-vision/openloris-scene-tools | ce6a4839f618bf036d3f3dbae14561bfc7413641 | [
"MIT"
] | 1 | 2022-02-16T13:42:32.000Z | 2022-02-16T13:42:32.000Z | #!/usr/bin/env python2
import rosbag
import sys
filename = sys.argv[1]
topics = sys.argv[2:]
with rosbag.Bag(filename) as bag:
for topic, msg, t in bag.read_messages(topics):
print('%s @%.7f ----------------------------' % (topic, t.to_sec()))
print(msg)
print('Press ENTER to continue')
while True:
try:
raw_input()
break
except EOFError:
pass
| 25.222222 | 76 | 0.497797 |
02beda4568a4663c141bf81401d0595971779e3a | 1,011 | py | Python | alegra/resources/invoice.py | okchaty/alegra | 6c423b23a24650c9121da5f165f6f03669b98468 | [
"MIT"
] | 1 | 2022-03-31T03:44:50.000Z | 2022-03-31T03:44:50.000Z | alegra/resources/invoice.py | okchaty/alegra | 6c423b23a24650c9121da5f165f6f03669b98468 | [
"MIT"
] | 4 | 2020-03-24T17:54:03.000Z | 2021-06-02T00:48:50.000Z | alegra/resources/invoice.py | okchaty/alegra | 6c423b23a24650c9121da5f165f6f03669b98468 | [
"MIT"
] | null | null | null | from alegra.api_requestor import APIRequestor
from alegra.resources.abstract import CreateableAPIResource
from alegra.resources.abstract import EmailableAPIResource
from alegra.resources.abstract import ListableAPIResource
from alegra.resources.abstract import UpdateableAPIResource
from alegra.resources.abstract import VoidableAPIResource
| 29.735294 | 68 | 0.681503 |
02bf002db0ba833a4cef03b49b9c37dba336934d | 244 | py | Python | dask_ml/model_selection.py | lesteve/dask-ml | 0aca19c545be5c27bedcfbab5554b4ba39a6d754 | [
"BSD-3-Clause"
] | 1 | 2020-12-01T13:20:05.000Z | 2020-12-01T13:20:05.000Z | dask_ml/model_selection.py | lesteve/dask-ml | 0aca19c545be5c27bedcfbab5554b4ba39a6d754 | [
"BSD-3-Clause"
] | null | null | null | dask_ml/model_selection.py | lesteve/dask-ml | 0aca19c545be5c27bedcfbab5554b4ba39a6d754 | [
"BSD-3-Clause"
] | null | null | null | """Utilities for hyperparameter optimization.
These estimators will operate in parallel. Their scalability depends
on the underlying estimators being used.
"""
from dask_searchcv.model_selection import GridSearchCV, RandomizedSearchCV # noqa
| 34.857143 | 82 | 0.831967 |
02c06f0c429f92d5e5a68c4d5f561cf2b85e43c8 | 23 | py | Python | code/sample_2-1-9.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | 1 | 2022-03-29T13:50:12.000Z | 2022-03-29T13:50:12.000Z | code/sample_2-1-9.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | code/sample_2-1-9.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | [
"MIT"
] | null | null | null | x = 5
y = 6
print(x*y)
| 5.75 | 10 | 0.478261 |
02c10165e05312844fa4ea1cd0be76da5bd780bb | 2,521 | py | Python | whatrecord/tests/test_stcmd.py | ZLLentz/whatrecord | 9f15da79e3063a64dbe6bb9678dbf52ebad46680 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-09-15T20:32:04.000Z | 2021-12-17T16:46:10.000Z | whatrecord/tests/test_stcmd.py | ZLLentz/whatrecord | 9f15da79e3063a64dbe6bb9678dbf52ebad46680 | [
"BSD-3-Clause-LBNL"
] | 92 | 2021-04-02T16:42:24.000Z | 2022-03-31T22:24:52.000Z | whatrecord/tests/test_stcmd.py | ZLLentz/whatrecord | 9f15da79e3063a64dbe6bb9678dbf52ebad46680 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-04-01T20:48:42.000Z | 2021-09-08T18:51:34.000Z | import pytest
from ..iocsh import IocshRedirect, IocshSplit, split_words
| 28.977011 | 77 | 0.410948 |
02c18f6d2d3ebb8100e01a783419de97602121b6 | 1,723 | py | Python | code/generateElevationFile.py | etcluvic/sme.altm | ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea | [
"CC-BY-4.0"
] | null | null | null | code/generateElevationFile.py | etcluvic/sme.altm | ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea | [
"CC-BY-4.0"
] | null | null | null | code/generateElevationFile.py | etcluvic/sme.altm | ffdb51d380a6b8cd8073d5ef3bd6fd15fa0779ea | [
"CC-BY-4.0"
] | null | null | null | from bs4 import BeautifulSoup
from datetime import datetime
from lxml import etree
import time
import codecs
import pickle
import os
if __name__ == '__main__':
    # Build a Solr "elevate" XML file: for every DOI referenced in a previous run,
    # emit a <query text="..."><doc id="doi"/></query> entry from its keywords.
    doiPrefix = '10.7202'  # erudit's doi prefix
    myTime = datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')
    referencedDocs = '/mnt/smeCode/altm/code/out/' + '2017-10-13_22-44-03-672976' + '.xml'
    pickleFile = '/mnt/smeCode/parseMe2/code/pickles/keywords.p'
    outputPath = '/mnt/smeCode/altm/code/elevation.files/'
    outputFile = 'test.xml'

    # NOTE(review): printSeparator is not defined in this file as shown -- confirm
    # it is provided elsewhere in the project before running.
    printSeparator('*', 50)
    print('loading pickle...')
    # Use a context manager so the pickle file handle is not leaked.
    with open(pickleFile, "rb") as pf:
        keywords = pickle.load(pf)
    print('pickle loaded!')
    printSeparator('*', 50)

    # Elevation file root element.
    rootElement = etree.Element("elevate")
    with codecs.open(referencedDocs, 'r', 'utf-8') as f:
        markup = f.read()
    soup = BeautifulSoup(markup, "lxml-xml")
    documents = soup.find_all('doi')
    for d in documents:
        # DOI format is "<prefix>/<suffix>"; only the suffix keys the keyword table.
        doi = d.get_text().split('/')[1]
        print(doi)
        #print(d.get_text())
        if doi in keywords:
            print(keywords[doi])
            queryElement = etree.SubElement(rootElement, "query")
            queryElement.set("text", ' '.join(list(keywords[doi]['terms'])))
            docElement = etree.SubElement(queryElement, "doc")
            docElement.set("id", doi)
        printSeparator('*', 50)
    printSeparator('*', 50)

    # Consistency fix: the original mixed Python 2 print statements with print()
    # calls; all output now uses the function form (same output under Python 2).
    print('Elevation - Saving xml file...')
    xmlString = etree.tostring(rootElement, pretty_print=True, encoding='UTF-8')
    with codecs.open(os.path.join(outputPath, myTime + '.xml'), 'w', encoding='utf-8') as fh:
        fh.write(xmlString.decode('utf-8'))
    print('done')
    printSeparator('*', 50)
    print(xmlString)
    print('bye')
02c1ec959e5357766a542721519334ad6dee8666 | 49,071 | py | Python | MilightWifiBridge/MilightWifiBridge.py | K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library | bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb | [
"MIT"
] | null | null | null | MilightWifiBridge/MilightWifiBridge.py | K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library | bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb | [
"MIT"
] | null | null | null | MilightWifiBridge/MilightWifiBridge.py | K-Stefan/Milight-Wifi-Bridge-3.0-Python-Library | bcaf1e3a67ed56d9cedc3370d4b4d688f5d4b4fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Milight 3.0 (LimitlessLED Wifi Bridge v6.0) library: Control wireless lights (Milight 3.0) with Wifi
Note that this library was tested with Milight Wifi iBox v1 and RGBW lights. It should work with any other
lights and bridge using Milight 3.0 / LimitlessLED v6.0 protocol.
Non-exhaustive functionality using the python class or using this file from shell
(launch this python file with '-h' parameter to get more information):
- Initialize the Wifi bridge
- Link/Unlink lights
- Light on/off
- Wifi bridge lamp on/off
- Set night mode
- Set white mode
- Set color (using Milight format)
- Set saturation
- Set brightness
- Set disco mode (9 available)
- Increase/Decrease disco mode speed
- Get Milight wifi bridge MAC address
- ...
Used protocol: http://www.limitlessled.com/dev/ (LimitlessLED Wifi Bridge v6.0 section)
"""
__author__ = 'Quentin Comte-Gaz'
__email__ = "quentin@comte-gaz.com"
__license__ = "MIT License"
__copyright__ = "Copyright Quentin Comte-Gaz (2019)"
__python_version__ = "2.7+ and 3.+"
__version__ = "2.1 (2019/11/09)"
__status__ = "Usable for any project"
import socket
import collections
import sys, getopt
import logging
import binascii
################################### SETUP ####################################
def close(self):
"""Close connection with Milight wifi bridge"""
self.__initialized = False
self.__sequence_number = 0
try:
self.__sock.shutdown(socket.SHUT_RDWR)
self.__sock.close()
logging.debug("Socket closed")
# If close before initialization, better handle attribute error
except AttributeError:
pass
def setup(self, ip, port=5987, timeout_sec=5.0):
"""Initialize the class (can be launched multiple time if setup changed or module crashed)
Keyword arguments:
ip -- (string) IP to communication with the Milight wifi bridge
port -- (int, optional) UDP port to communication with the Milight wifi bridge
timeout_sec -- (int, optional) Timeout in sec for Milight wifi bridge to answer commands
return: (bool) Milight wifi bridge initialized
"""
# Close potential previous Milight wifi bridge session
self.close()
# Create new milight wifi bridge session
try:
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
self.__ip = ip
self.__port = port
self.__sock.connect((self.__ip, self.__port))
self.__sock.settimeout(timeout_sec)
self.__initialized = True
logging.debug("UDP connection initialized with ip {} and port {}".format(str(ip), str(port)))
except (socket.error, socket.herror, socket.gaierror, socket.timeout) as err:
logging.error("Impossible to initialize the UDP connection with ip {} and port {}: {}".format(str(ip), str(port), str(err)))
return self.__initialized
######################### INTERNAL UTILITY FUNCTIONS #########################
  def __startSession(self):
    """Send start session request and return start session information.

    A fresh session is opened for every command; the bridge answers a 22 byte
    datagram from which the MAC address (bytes 7-12) and the two session IDs
    (bytes 19-20) are extracted.

    return: (MilightWifiBridge.__START_SESSION_RESPONSE) Start session information containing response received,
            mac address and session IDs
    """
    # Send the fixed "start session" frame defined by the LimitlessLED v6 protocol
    data_to_send = MilightWifiBridge.__START_SESSION_MSG
    logging.debug("Sending frame '{}' to {}:{}".format(str(binascii.hexlify(data_to_send)),
                                                       str(self.__ip), str(self.__port)))
    self.__sock.send(data_to_send)
    # Default result: no answer received (mac empty, session IDs invalid)
    response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=False, mac="", sessionId1=-1, sessionId2=-1)
    try:
      # Receive start session response
      data = self.__sock.recvfrom(1024)[0]
      if len(data) == 22:
        # Parse valid start session response
        # NOTE(review): format(..., 'x') has no zero padding, so octets below 0x10
        # render as a single hex digit (e.g. 'b' instead of '0b') -- confirm intended.
        response = MilightWifiBridge.__START_SESSION_RESPONSE(responseReceived=True,
                     mac=str("{}:{}:{}:{}:{}:{}".format(format(MilightWifiBridge.__getStringFromUnicode(data[7]), 'x'),
                                                        format(MilightWifiBridge.__getStringFromUnicode(data[8]), 'x'),
                                                        format(MilightWifiBridge.__getStringFromUnicode(data[9]), 'x'),
                                                        format(MilightWifiBridge.__getStringFromUnicode(data[10]), 'x'),
                                                        format(MilightWifiBridge.__getStringFromUnicode(data[11]), 'x'),
                                                        format(MilightWifiBridge.__getStringFromUnicode(data[12]), 'x'))),
                     sessionId1=int(MilightWifiBridge.__getStringFromUnicode(data[19])),
                     sessionId2=int(MilightWifiBridge.__getStringFromUnicode(data[20])))
        logging.debug("Start session (mac address: {}, session ID 1: {}, session ID 2: {})"
                      .format(str(response.mac), str(response.sessionId1), str(response.sessionId2)))
      else:
        logging.warning("Invalid start session response size")
    except socket.timeout:
      logging.warning("Timed out for start session response")
    return response
  def __sendRequest(self, command, zoneId):
    """Send command to a specific zone and get response (ACK from the wifi bridge)

    The frame layout is: 10-byte header (0x80 ... + session IDs + sequence number),
    the 9-byte command, the zone byte, a 0x00 filler and a 1-byte checksum.

    Keyword arguments:
      command -- (bytearray) Command, must be exactly 9 bytes long
      zoneId -- (int) Zone ID, 0 (all zones) to 4

    return: (bool) Request received by the wifi bridge
    """
    returnValue = False
    # Send request only if valid parameters
    if len(bytearray(command)) == 9:
      if int(zoneId) >= 0 and int(zoneId) <= 4:
        # Every command needs a fresh session (session IDs may change each time)
        startSessionResponse = self.__startSession()
        if startSessionResponse.responseReceived:
          # For each request, increment the sequence number (even if the session ID is regenerated)
          # Sequence number must be between 0x01 and 0xFF
          self.__sequence_number = (self.__sequence_number + 1) & 0xFF
          if self.__sequence_number == 0:
            self.__sequence_number = 1
          # Prepare request frame to send
          bytesToSend = bytearray([0x80, 0x00, 0x00, 0x00, 0x11, startSessionResponse.sessionId1,
                                   startSessionResponse.sessionId2, 0x00, int(self.__sequence_number), 0x00])
          bytesToSend += bytearray(command)
          bytesToSend += bytearray([int(zoneId), 0x00])
          # Trailing checksum over command + zone (see __calculateCheckSum)
          bytesToSend += bytearray([int(MilightWifiBridge.__calculateCheckSum(bytearray(command), int(zoneId)))])
          # Send request frame
          logging.debug("Sending request with command '{}' with session ID 1 '{}', session ID 2 '{}' and sequence number '{}'"
                        .format(str(binascii.hexlify(command)), str(startSessionResponse.sessionId1),
                                str(startSessionResponse.sessionId2), str(self.__sequence_number)))
          self.__sock.send(bytesToSend)
          try:
            # Receive response frame: 8 bytes, byte 6 echoes the sequence number
            data = self.__sock.recvfrom(64)[0]
            if len(data) == 8:
              if int(MilightWifiBridge.__getStringFromUnicode(data[6])) == self.__sequence_number:
                returnValue = True
                logging.debug("Received valid response for previously sent request")
              else:
                logging.warning("Invalid sequence number ack {} instead of {}".format(str(data[6]),
                                self.__sequence_number))
            else:
              logging.warning("Invalid response size {} instead of 8".format(str(len(data))))
          except socket.timeout:
            logging.warning("Timed out for response")
        else:
          logging.warning("Start session failed")
      else:
        logging.error("Invalid zone {} (must be between 0 and 4)".format(str(zoneId)))
    else:
      logging.error("Invalid command size {} instead of 9".format(str(len(bytearray(command)))))
    return returnValue
######################### PUBLIC FUNCTIONS #########################
def turnOn(self, zoneId):
"""Request 'Light on' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__ON_CMD, zoneId)
logging.debug("Turn on zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def turnOff(self, zoneId):
"""Request 'Light off' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__OFF_CMD, zoneId)
logging.debug("Turn off zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def turnOnWifiBridgeLamp(self):
"""Request 'Wifi bridge lamp on' to a zone
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_ON_CMD, 0x01)
logging.debug("Turn on wifi bridge lamp: {}".format(str(returnValue)))
return returnValue
def turnOffWifiBridgeLamp(self):
"""Request 'Wifi bridge lamp off'
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_OFF_CMD, 0x01)
logging.debug("Turn off wifi bridge lamp: {}".format(str(returnValue)))
return returnValue
def setNightMode(self, zoneId):
"""Request 'Night mode' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__NIGHT_MODE_CMD, zoneId)
logging.debug("Set night mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def setWhiteMode(self, zoneId):
"""Request 'White mode' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WHITE_MODE_CMD, zoneId)
logging.debug("Set white mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def setWhiteModeBridgeLamp(self):
"""Request 'White mode' to the bridge lamp
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_WHITE_MODE_CMD, 0x01)
logging.debug("Set white mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def setDiscoMode(self, discoMode, zoneId):
"""Request 'Set disco mode' to a zone
Keyword arguments:
discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeCmd(discoMode), zoneId)
logging.debug("Set disco mode {} to zone {}: {}".format(str(discoMode), str(zoneId), str(returnValue)))
return returnValue
def setDiscoModeBridgeLamp(self, discoMode):
"""Request 'Set disco mode' to the bridge lamp
Keyword arguments:
discoMode -- (int or MilightWifiBridge.eDiscoMode) Disco mode (9 modes available)
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetDiscoModeForBridgeLampCmd(discoMode), 0x01)
logging.debug("Set disco mode {} to wifi bridge: {}".format(str(discoMode), str(returnValue)))
return returnValue
def speedUpDiscoMode(self, zoneId):
"""Request 'Disco mode speed up' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SPEED_UP_CMD, zoneId)
logging.debug("Speed up disco mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def speedUpDiscoModeBridgeLamp(self):
"""Request 'Disco mode speed up' to the wifi bridge
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SPEED_UP_CMD, 0x01)
logging.debug("Speed up disco mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def slowDownDiscoMode(self, zoneId):
"""Request 'Disco mode slow down' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__DISCO_MODE_SLOW_DOWN_CMD, zoneId)
logging.debug("Slow down disco mode to zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def slowDownDiscoModeBridgeLamp(self):
"""Request 'Disco mode slow down' to wifi bridge
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__WIFI_BRIDGE_LAMP_DISCO_MODE_SLOW_DOWN_CMD, 0x01)
logging.debug("Slow down disco mode to wifi bridge: {}".format(str(returnValue)))
return returnValue
def link(self, zoneId):
"""Request 'Link' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__LINK_CMD, zoneId)
logging.debug("Link zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def unlink(self, zoneId):
"""Request 'Unlink' to a zone
Keyword arguments:
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__UNLINK_CMD, zoneId)
logging.debug("Unlink zone {}: {}".format(str(zoneId), str(returnValue)))
return returnValue
def setColor(self, color, zoneId):
"""Request 'Set color' to a zone
Keyword arguments:
color -- (int or eColor) Color (between 0x00 and 0xFF)
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetColorCmd(color), zoneId)
logging.debug("Set color {} to zone {}: {}".format(str(color), str(zoneId), str(returnValue)))
return returnValue
def setColorBridgeLamp(self, color):
"""Request 'Set color' to wifi bridge
Keyword arguments:
color -- (int or eColor) Color (between 0x00 and 0xFF)
examples: 0xFF = Red, 0xD9 = Lavender, 0xBA = Blue, 0x85 = Aqua,
0x7A = Green, 0x54 = Lime, 0x3B = Yellow, 0x1E = Orange
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBridgeLampColorCmd(color), 0x01)
logging.debug("Set color {} to wifi bridge: {}".format(str(color), str(returnValue)))
return returnValue
def setBrightness(self, brightness, zoneId):
"""Request 'Set brightness' to a zone
Keyword arguments:
brightness -- (int) Brightness in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBrightnessCmd(brightness), zoneId)
logging.debug("Set brightness {}% to zone {}: {}".format(str(brightness), str(zoneId), str(returnValue)))
return returnValue
def setBrightnessBridgeLamp(self, brightness):
"""Request 'Set brightness' to the wifi bridge
Keyword arguments:
brightness -- (int) Brightness in percentage (between 0 and 100)
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetBrightnessForBridgeLampCmd(brightness), 0x01)
logging.debug("Set brightness {}% to the wifi bridge: {}".format(str(brightness), str(returnValue)))
return returnValue
def setSaturation(self, saturation, zoneId):
"""Request 'Set saturation' to a zone
Keyword arguments:
brightness -- (int) Saturation in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetSaturationCmd(saturation), zoneId)
logging.debug("Set saturation {}% to zone {}: {}".format(str(saturation), str(zoneId), str(returnValue)))
return returnValue
def setTemperature(self, temperature, zoneId):
"""Request 'Set temperature' to a zone
Keyword arguments:
brightness -- (int or MilightWifiBridge.eTemperature) Temperature in percentage (between 0 and 100)
zoneId -- (int or MilightWifiBridge.eZone) Zone ID
return: (bool) Request received by the wifi bridge
"""
returnValue = self.__sendRequest(MilightWifiBridge.__getSetTemperatureCmd(temperature), zoneId)
logging.debug("Set temperature {}% ({} kelvin) to zone {}: {}"
.format(str(temperature), str(int(2700 + 38*temperature)), str(zoneId), str(returnValue)))
return returnValue
def getMacAddress(self):
"""Request the MAC address of the milight wifi bridge
return: (string) MAC address of the wifi bridge (empty if an error occured)
"""
returnValue = self.__startSession().mac
logging.debug("Get MAC address: {}".format(str(returnValue)))
return returnValue
################################# HELP FUNCTION ################################
def __help(func="", filename=__file__):
  """Show help on how to use command line milight wifi bridge functions

  Each section below either prints the detailed help for one command (when
  `func` names it) and returns, or prints a one line summary (when `func`
  is empty, i.e. the full overview was requested).

  Keyword arguments:
    func -- (string, optional) Command line function requiring help, none will show all function
    filename -- (string, optional) File name of the python script implementing the commands
  """
  func = func.lower()

  # Help
  if func in ("h", "help"):
    print("Give information to use all or specific milight wifi bridge commands\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" -h [command (default: none)]\r\n"
          +filename+" --help [command (default: none)]\r\n"
          +"\r\n"
          +"Example:\r\n"
          +filename+" -h \r\n"
          +filename+" -h turnOn \r\n"
          +filename+" --help \r\n"
          +filename+" --help link")
    return
  elif func == "":
    print("HELP (-h, --help): Give information to use all or specific milight wifi bridge commands")

  # Ip
  if func in ("i", "ip"):
    print("Specify milight wifi bridge IP (mandatory to use any command)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" -i [ip]\r\n"
          +filename+" --ip [ip]\r\n"
          +"\r\n"
          +"Example:\r\n"
          +filename+" -i 192.168.1.23\r\n"
          +filename+" --ip 192.168.1.23\r\n")
    return
  elif func == "":
    print("IP (-i, --ip): Specify milight wifi bridge IP (mandatory to use any command)")

  # Port
  if func in ("p", "port"):
    # Fixed: usage previously showed "-port" although the short option is "-p"
    print("Specify milight wifi bridge port\r\n"
          +"\r\n"
          +"Default value (if not called): 5987\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" -p [port]\r\n"
          +filename+" --port [port]\r\n"
          +"\r\n"
          +"Example:\r\n"
          +filename+" -p 1234\r\n"
          +filename+" --port 1234\r\n")
    return
  elif func == "":
    print("PORT (-p, --port): Specify milight wifi bridge port (default value: 5987)")

  # Timeout
  if func in ("t", "timeout"):
    print("Specify timeout for communication with the wifi bridge (in sec)\r\n"
          +"\r\n"
          +"Default value (if not called): 5.0\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" -t [timeout]\r\n"
          +filename+" --timeout [timeout]\r\n"
          +"\r\n"
          +"Example:\r\n"
          +filename+" -t 1\r\n"
          +filename+" --timeout 1\r\n")
    return
  elif func == "":
    print("TIMEOUT (-t, --timeout): Specify timeout for communication with the wifi bridge in sec (default value: 5.0sec)")

  # Zone
  if func in ("z", "zone"):
    print("Specify milight light zone to control\r\n"
          +"\r\n"
          +"Default value (if not called): 0\r\n"
          +"\r\n"
          +"Possible values: 0 for all zone or zone 1 to 4\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" -z [zone]\r\n"
          +filename+" --zone [zone]\r\n"
          +"\r\n"
          +"Example:\r\n"
          +filename+" -z 1\r\n"
          +filename+" --zone 1\r\n")
    return
  elif func == "":
    print("ZONE (-z, --zone): Specify milight light zone to control (default value: All zone)")

  # Get MAC address
  if func in ("m", "getmacaddress"):
    print("Get the milight wifi bridge mac address\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -m\r\n"
          +filename+" --ip 192.168.1.23 --getMacAddress\r\n")
    return
  elif func == "":
    print("GET MAC ADDRESS (-m, --getMacAddress): Get the milight wifi bridge mac address")

  # Link
  if func in ("l", "link"):
    print("Link lights to a specific zone\r\n"
          +"\r\n"
          +"Note: In order to make this work, the light must be switch on manually max 3sec before this command\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -l\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --link\r\n")
    return
  elif func == "":
    print("LINK (-l, --link): Link lights to a specific zone")

  # Unlink
  if func in ("u", "unlink"):
    print("Unlink lights\r\n"
          +"\r\n"
          +"Note: In order to make this work, the light must be switch on manually max 3sec before this command\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -u\r\n"
          +filename+" --ip 192.168.1.23 --unlink\r\n")
    return
  elif func == "":
    print("UNLINK (-u, --unlink): Unlink lights")

  # Turn lights ON
  if func in ("o", "turnon"):
    print("Turn lights on\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -o\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --turnOn\r\n")
    return
  elif func == "":
    print("TURN ON (-o, --turnOn): Turn lights on")

  # Turn lights OFF
  if func in ("f", "turnoff"):
    print("Turn lights off\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -f\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --turnOff\r\n")
    return
  elif func == "":
    # Fixed: the summary previously advertised "-o" although the short option is "-f"
    print("TURN OFF (-f, --turnOff): Turn lights off")

  # Turn wifi bridge lamp ON
  if func in ("x", "turnonwifibridgelamp"):
    print("Turn wifi bridge lamp on\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -x\r\n"
          +filename+" --ip 192.168.1.23 --turnOnWifiBridgeLamp\r\n")
    return
  elif func == "":
    print("TURN WIFI BRIDGE LAMP ON (-x, --turnOnWifiBridgeLamp): Turn wifi bridge lamp on")

  # Turn wifi bridge lamp OFF
  if func in ("y", "turnoffwifibridgelamp"):
    print("Turn wifi bridge lamp off\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -y\r\n"
          +filename+" --ip 192.168.1.23 --turnOffWifiBridgeLamp\r\n")
    return
  elif func == "":
    print("TURN WIFI BRIDGE LAMP OFF (-y, --turnOffWifiBridgeLamp): Turn wifi bridge lamp off")

  # Set night mode
  if func in ("n", "setnightmode"):
    print("Set night mode\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -n\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setNightMode\r\n")
    return
  elif func == "":
    print("SET NIGHT MODE (-n, --setNightMode): Set night mode")

  # Set white mode
  if func in ("w", "setwhitemode"):
    print("Set white mode\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -w\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setWhiteMode\r\n")
    return
  elif func == "":
    print("SET WHITE MODE (-w, --setWhiteMode): Set white mode")

  # Set white mode for bridge lamp
  if func in ("j", "setwhitemodebridgelamp"):
    print("Set white mode\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -j\r\n"
          +filename+" --ip 192.168.1.23 --setWhiteModeBridgeLamp\r\n")
    return
  elif func == "":
    print("SET WHITE MODE ON BRIDGE LAMP (-j, --setWhiteModeBridgeLamp): Set white mode on bridge lamp")

  # Speed up disco mode for bridge lamp
  if func in ("k", "speedupdiscomodebridgelamp"):
    print("Speed up disco mode for bridge lamp\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -k\r\n"
          +filename+" --ip 192.168.1.23 --speedUpDiscoModeBridgeLamp\r\n")
    return
  elif func == "":
    print("SPEED UP DISCO MODE FOR BRIDGE LAMP (-k, --speedUpDiscoModeBridgeLamp): Speed up disco mode for bridge lamp")

  # Slow down disco mode for bridge lamp
  if func in ("q", "slowdowndiscomodebridgelamp"):
    print("Slow down disco mode for bridge lamp\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -q\r\n"
          +filename+" --ip 192.168.1.23 --slowDownDiscoModeBridgeLamp\r\n")
    return
  elif func == "":
    print("SLOW DOWN DISCO MODE FOR BRIDGE LAMP (-q, --slowDownDiscoModeBridgeLamp): Slow down disco mode for bridge lamp")

  # Speed up disco mode
  if func in ("a", "speedupdiscomode"):
    print("Speed up disco mode\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -a\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --speedUpDiscoMode\r\n")
    return
  elif func == "":
    print("SPEED UP DISCO MODE (-a, --speedUpDiscoMode): Speed up disco mode")

  # Slow down disco mode
  if func in ("g", "slowdowndiscomode"):
    print("Slow down disco mode\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -g\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --slowDownDiscoMode\r\n")
    return
  elif func == "":
    print("SLOW DOWN DISCO MODE (-g, --slowDownDiscoMode): Slow down disco mode")

  # Set specific color
  if func in ("c", "setcolor"):
    print("Set specific color (between 0 and 255)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -c 255\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setColor 255\r\n")
    return
  elif func == "":
    print("SET COLOR (-c, --setColor): Set specific color (between 0 and 255)")

  # Set brightness
  if func in ("b", "setbrightness"):
    print("Set brightness (in %)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -b 50\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setBrightness 50\r\n")
    return
  elif func == "":
    print("SET BRIGHTNESS (-b, --setBrightness): Set brightness (in %)")

  # Set specific color for bridge lamp
  if func in ("r", "setcolorbridgelamp"):
    print("Set specific color for the bridge lamp (between 0 and 255)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -r 255\r\n"
          +filename+" --ip 192.168.1.23 --setColorBridgeLamp 255\r\n")
    return
  elif func == "":
    print("SET COLOR FOR THE BRIDGE LAMP (-r, --setColorBridgeLamp): Set specific color for the bridge lamp (between 0 and 255)")

  # Set brightness for bridge lamp
  if func in ("v", "setbrightnessbridgelamp"):
    print("Set brightness for the bridge lamp (in %)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -v 50\r\n"
          +filename+" --ip 192.168.1.23 --setBrightnessBridgeLamp 50\r\n")
    return
  elif func == "":
    print("SET BRIGHTNESS FOR THE BRIDGE LAMP (-v, --setBrightnessBridgeLamp): Set brightness for the bridge lamp(in %)")

  # Set saturation
  if func in ("s", "setsaturation"):
    print("Set saturation (in %)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -s 50\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setSaturation 50\r\n")
    return
  elif func == "":
    print("SET SATURATION (-s, --setSaturation): Set saturation (in %)")

  # Set temperature
  # Fixed: the lookup key was "s", which was shadowed by setsaturation above and
  # did not match the actual short option "-e" used by this command.
  if func in ("e", "settemperature"):
    print("Set temperature (in %)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -e 50\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setTemperature 50\r\n")
    return
  elif func == "":
    print("SET TEMPERATURE (-e, --setTemperature): Set temperature (in %)")

  # Set disco mode
  if func in ("d", "setdiscomode"):
    print("Set disco mode (between 1 and 9)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 -d 5\r\n"
          +filename+" --ip 192.168.1.23 --zone 1 --setDiscoMode 5\r\n")
    return
  elif func == "":
    print("SET DISCO MODE (-d, --setDiscoMode): Set disco mode (between 1 and 9)")

  # Set disco mode for bridge lamp
  # Fixed: the lookup key was "d", which was shadowed by setdiscomode above and
  # did not match the actual short option "-1" used by this command.
  if func in ("1", "setdiscomodebridgelamp"):
    print("Set disco mode for bridge lamp (between 1 and 9)\r\n"
          +"\r\n"
          +"Usage:\r\n"
          +filename+" --ip 192.168.1.23 -1 5\r\n"
          +filename+" --ip 192.168.1.23 --setDiscoModeBridgeLamp 5\r\n")
    return
  elif func == "":
    print("SET DISCO MODE FOR BRIDGE LAMP (-1, --setDiscoModeBridgeLamp): Set disco mode for bridge lamp (between 1 and 9)")

  # Add use case examples (fixed: the examples advertised nonexistent --lightOn /
  # --lightOff options; the real option names are --turnOn / --turnOff):
  if func == "":
    print("\r\n"
          +"Some examples (if ip '192.168.1.23', port is 5987):\r\n"
          +"  - Get the mac address: "+filename+" --ip 192.168.1.23 --port 5987 --getMacAddress\r\n"
          +"  - Set disco mode 5 in light zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --setDiscoMode 5\r\n"
          +"  - Light on zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOn\r\n"
          +"  - Light off zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOff\r\n"
          +"  - Light on and set white light in zone 1: "+filename+" --ip 192.168.1.23 --port 5987 --zone 1 --turnOn --setWhiteMode\r\n"
          +"  - Light on all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --turnOn\r\n"
          +"  - Light off all zone: "+filename+" --ip 192.168.1.23 --port 5987 --zone 0 --turnOff")
################################# MAIN FUNCTION ###############################
def main(parsed_args=None):
  """Shell Milight utility function

  Parses command line options, checks the base parameters (ip/port/zone/timeout),
  connects to the milight wifi bridge and then executes the requested commands
  in the order they were given. Exits with a non zero status on any failure.

  Keyword arguments:
    parsed_args -- (list, optional) Command line arguments (defaults to sys.argv[1:])
  """
  # Read sys.argv at call time: a default of `sys.argv[1:]` would be evaluated
  # once at import time and ignore later changes to sys.argv.
  if parsed_args is None:
    parsed_args = sys.argv[1:]

  # Set the log level (no log will be shown if "logging.CRITICAL" is used)
  logger = logging.getLogger()
  logger.setLevel(logging.CRITICAL) #Other parameters: logging.DEBUG, logging.WARNING, logging.ERROR

  ip = "" # No default IP, must be specified by the user
  port = 5987 # Default milight 3.0 port
  zone = 0 # By default, all zone are controlled
  timeout = 5.0 # By default, Wait maximum 5sec

  # Get options
  try:
    opts, args = getopt.getopt(parsed_args, "i:p:t:z:hmluofx23ynwagc:b:s:e:d:jkqr:v:1:",
                               ["ip=", "port=", "timeout=", "zone=", "help", "debug", "nodebug",
                                "getMacAddress", "link", "unlink", "turnOn", "turnOff", "turnOnWifiBridgeLamp",
                                "turnOffWifiBridgeLamp", "setNightMode", "setWhiteMode", "speedUpDiscoMode", "slowDownDiscoMode",
                                "setColor=", "setBrightness=", "setSaturation=", "setTemperature=", "setDiscoMode=",
                                "setWhiteModeBridgeLamp", "speedUpDiscoModeBridgeLamp", "slowDownDiscoModeBridgeLamp",
                                "setColorBridgeLamp=", "setBrightnessBridgeLamp=", "setDiscoModeBridgeLamp="])
  except getopt.GetoptError as err:
    print("[ERROR] "+str(err))
    __help()
    sys.exit(1)

  # Show help and adjust the log level (if requested)
  for o, a in opts:
    if o in ("-h", "--help"):
      if len(args) >= 1:
        __help(args[0])
      else:
        __help()
      sys.exit(0)
    elif o == "--debug":
      # Long option only: "-l" already means --link and previously matching it
      # here enabled debug logging whenever a zone was linked.
      print("Debugging...")
      logger = logging.getLogger()
      logger.setLevel(logging.DEBUG)
    elif o == "--nodebug":
      # Long option only: "-z" already means --zone and previously matching it
      # here silenced logging whenever a zone was specified.
      logger = logging.getLogger()
      logger.setLevel(logging.CRITICAL)

  # Get base parameters
  for o, a in opts:
    if o in ("-i", "--ip"):
      ip = str(a)
      continue
    if o in ("-p", "--port"):
      port = int(a)
      continue
    if o in ("-t", "--timeout"):
      # float (not int): the documented default is 5.0 and fractional
      # timeouts like "--timeout 0.5" are valid.
      timeout = float(a)
      continue
    if o in ("-z", "--zone"):
      zone = int(a)
      continue

  # Check base parameters
  if ip == "":
    print("[ERROR] You need to specify the ip...\r\n")
    __help("ip")
    sys.exit(1)

  if zone < 0 or zone > 4:
    print("[ERROR] You need to specify a valid zone ID (between 0 and 4)\r\n")
    __help("zone")
    sys.exit(1)

  if timeout <= 0:
    print("[ERROR] You need to specify a valid timeout (more than 0sec)\r\n")
    __help("timeout")
    sys.exit(1)

  if port <= 0:
    print("[ERROR] You need to specify a valid port (more than 0)\r\n")
    __help("port")
    sys.exit(1)

  # Show base parameters
  print("Ip: "+str(ip))
  print("Zone: "+str(zone))
  print("Timeout: "+str(timeout))
  print("Port: "+str(port))

  # Initialize Milight bridge
  milight = MilightWifiBridge()
  milight.close()
  is_init = milight.setup(ip, port, timeout)
  logging.debug("Milight bridge connection initialized with ip {}:{} : {}".format(ip, port, is_init))
  if not is_init:
    print("[ERROR] Initialization failed, re-check the ip (and the port), use '-h' to get more information.")
    sys.exit(2)

  # Execute requested commands in the requested order
  returnValue = True
  atLeastOneRequestDone = False
  for o, a in opts:
    if o in ("-m", "--getMacAddress"):
      atLeastOneRequestDone = True
      macAddress = milight.getMacAddress()
      returnValue &= (macAddress != "")
      if macAddress != "":
        print("Mac address: "+str(macAddress))
      else:
        print("Failed to get mac address")
    elif o in ("-l", "--link"):
      atLeastOneRequestDone = True
      res = milight.link(zoneId=zone)
      returnValue &= res
      print("Link zone "+str(zone)+": "+str(res))
    elif o in ("-u", "--unlink"):
      atLeastOneRequestDone = True
      res = milight.unlink(zoneId=zone)
      returnValue &= res
      print("Unlink zone "+str(zone)+": "+str(res))
    elif o in ("-o", "--turnOn"):
      atLeastOneRequestDone = True
      res = milight.turnOn(zoneId=zone)
      returnValue &= res
      print("Turn on zone "+str(zone)+": "+str(res))
    elif o in ("-f", "--turnOff"):
      atLeastOneRequestDone = True
      res = milight.turnOff(zoneId=zone)
      returnValue &= res
      print("Turn off zone "+str(zone)+": "+str(res))
    elif o in ("-x", "--turnOnWifiBridgeLamp"):
      atLeastOneRequestDone = True
      res = milight.turnOnWifiBridgeLamp()
      returnValue &= res
      print("Turn on wifi bridge lamp: "+str(res))
    elif o in ("-y", "--turnOffWifiBridgeLamp"):
      atLeastOneRequestDone = True
      res = milight.turnOffWifiBridgeLamp()
      returnValue &= res
      print("Turn off wifi bridge lamp: "+str(res))
    elif o in ("-j", "--setWhiteModeBridgeLamp"):
      atLeastOneRequestDone = True
      res = milight.setWhiteModeBridgeLamp()
      returnValue &= res
      print("Set white mode to wifi bridge: "+str(res))
    elif o in ("-k", "--speedUpDiscoModeBridgeLamp"):
      atLeastOneRequestDone = True
      res = milight.speedUpDiscoModeBridgeLamp()
      returnValue &= res
      print("Speed up disco mode to wifi bridge: "+str(res))
    elif o in ("-q", "--slowDownDiscoModeBridgeLamp"):
      atLeastOneRequestDone = True
      res = milight.slowDownDiscoModeBridgeLamp()
      returnValue &= res
      print("Slow down disco mode to wifi bridge: "+str(res))
    elif o in ("-r", "--setColorBridgeLamp"):
      userColor = int(a)
      if userColor < 0 or userColor > 255:
        print("[ERROR] Color must be between 0 and 255")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setColorBridgeLamp(color=userColor)
      returnValue &= res
      print("Set color "+str(userColor)+" to wifi bridge: "+str(res))
    elif o in ("-v", "--setBrightnessBridgeLamp"):
      userBrightness = int(a)
      if userBrightness < 0 or userBrightness > 100:
        print("[ERROR] Brightness must be between 0 and 100 (in %)")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setBrightnessBridgeLamp(brightness=userBrightness)
      returnValue &= res
      print("Set brightness "+str(userBrightness)+"% to the wifi bridge: "+str(res))
    elif o in ("-1", "--setDiscoModeBridgeLamp"):
      mode = int(a)
      if mode < 1 or mode > 9:
        print("[ERROR] Disco mode must be between 1 and 9")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setDiscoModeBridgeLamp(discoMode=mode)
      returnValue &= res
      print("Set disco mode "+str(mode)+" to wifi bridge: "+str(res))
    elif o in ("-n", "--setNightMode"):
      atLeastOneRequestDone = True
      res = milight.setNightMode(zoneId=zone)
      returnValue &= res
      print("Set night mode to zone "+str(zone)+": "+str(res))
    elif o in ("-w", "--setWhiteMode"):
      atLeastOneRequestDone = True
      res = milight.setWhiteMode(zoneId=zone)
      returnValue &= res
      print("Set white mode to zone "+str(zone)+": "+str(res))
    elif o in ("-a", "--speedUpDiscoMode"):
      atLeastOneRequestDone = True
      res = milight.speedUpDiscoMode(zoneId=zone)
      returnValue &= res
      print("Speed up disco mode to zone "+str(zone)+": "+str(res))
    elif o in ("-g", "--slowDownDiscoMode"):
      atLeastOneRequestDone = True
      res = milight.slowDownDiscoMode(zoneId=zone)
      returnValue &= res
      print("Slow down disco mode to zone "+str(zone)+": "+str(res))
    elif o in ("-d", "--setDiscoMode"):
      mode = int(a)
      if mode < 1 or mode > 9:
        print("[ERROR] Disco mode must be between 1 and 9")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setDiscoMode(discoMode=mode, zoneId=zone)
      returnValue &= res
      print("Set disco mode "+str(mode)+" to zone "+str(zone)+": "+str(res))
    elif o in ("-c", "--setColor"):
      userColor = int(a)
      if userColor < 0 or userColor > 255:
        print("[ERROR] Color must be between 0 and 255")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setColor(color=userColor, zoneId=zone)
      returnValue &= res
      print("Set color "+str(userColor)+" to zone "+str(zone)+": "+str(res))
    elif o in ("-b", "--setBrightness"):
      userBrightness = int(a)
      if userBrightness < 0 or userBrightness > 100:
        print("[ERROR] Brightness must be between 0 and 100 (in %)")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setBrightness(brightness=userBrightness, zoneId=zone)
      returnValue &= res
      print("Set brightness "+str(userBrightness)+"% to zone "+str(zone)+": "+str(res))
    elif o in ("-s", "--setSaturation"):
      userSaturation = int(a)
      if userSaturation < 0 or userSaturation > 100:
        print("[ERROR] Saturation must be between 0 and 100 (in %)")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setSaturation(saturation=userSaturation, zoneId=zone)
      returnValue &= res
      print("Set saturation "+str(userSaturation)+"% to zone "+str(zone)+": "+str(res))
    elif o in ("-e", "--setTemperature"):
      userTemperature = int(a)
      if userTemperature < 0 or userTemperature > 100:
        print("[ERROR] Temperature must be between 0 and 100 (in %)")
        sys.exit(2)
      atLeastOneRequestDone = True
      res = milight.setTemperature(temperature=userTemperature, zoneId=zone)
      returnValue &= res
      print("Set temperature "+str(userTemperature)+"% to zone "+str(zone)+": "+str(res))

    # In case an error occured in any of the request, stop the program
    if not returnValue:
      break

  if not atLeastOneRequestDone:
    print("[ERROR] You must call one action, use '-h' to get more information.")
    sys.exit(1)

  if not returnValue:
    print("[ERROR] Request failed")
    sys.exit(1)

  if atLeastOneRequestDone and returnValue:
    sys.exit(0)
# Allow using this module directly as a command line tool
if __name__ == '__main__':
  main()
| 37.090703 | 163 | 0.610646 |
02c2fced0a87e7a137014b222d4278f9278017f9 | 2,602 | py | Python | Homework/HW5/Test.py | zhufyaxel/ML_SaltyFish | 84b839fa236c471e1fa8600093f0096ff79e4097 | [
"MIT"
] | null | null | null | Homework/HW5/Test.py | zhufyaxel/ML_SaltyFish | 84b839fa236c471e1fa8600093f0096ff79e4097 | [
"MIT"
] | null | null | null | Homework/HW5/Test.py | zhufyaxel/ML_SaltyFish | 84b839fa236c471e1fa8600093f0096ff79e4097 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
from Base import Train, Predict
if __name__ == "__main__":
strProjectFolder = os.path.dirname(__file__)
getTest(boolNormalize=True, boolDeep=False, boolBias=True, strProjectFolder=strProjectFolder) | 35.643836 | 104 | 0.65834 |
02c3032f7fcf222c4dcbaeab9a4990232ced4e2b | 3,948 | py | Python | auxilium/__init__.py | sonntagsgesicht/auxilium | f4e34089d6230e4a2957cf8c2f462210c6c714f0 | [
"Apache-2.0"
] | null | null | null | auxilium/__init__.py | sonntagsgesicht/auxilium | f4e34089d6230e4a2957cf8c2f462210c6c714f0 | [
"Apache-2.0"
] | null | null | null | auxilium/__init__.py | sonntagsgesicht/auxilium | f4e34089d6230e4a2957cf8c2f462210c6c714f0 | [
"Apache-2.0"
] | 1 | 2020-03-12T22:51:27.000Z | 2020-03-12T22:51:27.000Z | # -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.2.8, copyright Friday, 14 January 2022
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from logging import log, basicConfig, getLogger, NullHandler
from os import getcwd, name as os_name
from os.path import basename, split, join
from pathlib import Path
from re import findall
from sys import exit, executable
from configparser import ConfigParser
from .add_arguments import add_parser
from .methods.root import do
from .tools.const import CONFIG_PATH, VERBOSITY_LEVELS, ICONS
getLogger(__name__).addHandler(NullHandler())
__doc__ = 'Python project for an automated test and deploy toolkit.'
__version__ = '0.2.8'
__dev_status__ = '4 - Beta'
__date__ = 'Saturday, 15 January 2022'
__author__ = 'sonntagsgesicht'
__email__ = __author__ + '@icloud.com'
__url__ = 'https://github.com/' + __author__ + '/' + __name__
__license__ = 'Apache License 2.0'
__dependencies__ = 'pip', 'dulwich', 'regtest', 'flake8', 'bandit', \
'coverage', 'twine', 'sphinx', 'sphinx-rtd-theme', \
'sphinx-math-dollar', 'karma-sphinx-theme', \
'sphinx-pytype-substitution'
__dependency_links__ = ()
__data__ = ('data/pkg.zip',)
__scripts__ = ('auxilium=auxilium:auxilium',)
__theme__ = 'karma-sphinx-theme'
''' todo
auxilium create --clone url
auxilium build --archive as zip -r Derivate.zip Derivate -x "*/.*"
'black' python code linter incl. correction
'isort . --profile black' sorts imports
'darglint' rst doc linter
'poetry' run safety check dependency management
'Cookiecutter' project templates
'pipenv'
'pyscaffold' project generator for bootstrapping high quality Python packages
'''
| 33.457627 | 79 | 0.636272 |
02c4f7ff3bc4a17aba2744ac9fa57ef882474bfa | 7,875 | py | Python | importtime_output_wrapper.py | Victor333Huesca/importtime-output-wrapper | 15941ffe30a93a2d5ec1832e16df160caa1d51e4 | [
"MIT"
] | 1 | 2021-02-10T13:15:47.000Z | 2021-02-10T13:15:47.000Z | importtime_output_wrapper.py | dominikwalk/importtime_output_wrapper | 67c94371cd92ea66f4dbdd8840cf6120db4160c0 | [
"MIT"
] | 1 | 2021-09-01T19:25:33.000Z | 2021-09-01T19:25:33.000Z | importtime_output_wrapper.py | dominikwalk/importtime_output_wrapper | 67c94371cd92ea66f4dbdd8840cf6120db4160c0 | [
"MIT"
] | null | null | null | import re
import subprocess
import shutil
import sys
import json
import argparse
from typing import List, NamedTuple, Optional, Sequence
PATTERN_IMPORT_TIME = re.compile(r"^import time:\s+(\d+) \|\s+(\d+) \|(\s+.*)")
def get_import_time(module: str) -> str:
    """
    Import *module* in a subprocess started with ``-X importtime`` and
    return the raw report that CPython writes to stderr.
    """
    command = (sys.executable, "-Ximporttime", "-c", f"import {module}")
    try:
        completed = subprocess.run(
            command,
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
            encoding="UTF-8",
        )
    except subprocess.CalledProcessError:
        # A non-zero exit status means the module could not be imported at all
        raise InvalidInput(f'Invalid input: Could not import module "{module}"')
    return completed.stderr
def parse_import_time(s: str) -> List[Import]:
    """
    Recursively parse the importtime stderr output into a uniform tree structure.

    Returns a one-element list holding a synthetic "root" node whose nested
    imports are the top-level imported modules.
    """
    # NOTE(review): `Import` is defined elsewhere in this module. It is built
    # positionally as (name, t_self, t_cumu, depth, childs) here but read through
    # a `nested_imports` attribute below -- confirm the two names refer to the
    # same field in Import's definition.
    root = Import("root", 0, 0, 0, [])
    import_stack = [root]
    # The -X importtime report lists children before their parent, so the lines
    # are walked bottom-up to meet parents after their children.
    for line in reversed(s.splitlines()):
        m = PATTERN_IMPORT_TIME.match(line)
        if m:
            t_self = int(m[1])
            t_cumu = int(m[2])
            name = str(m[3])
            # Nesting depth is encoded as two leading spaces per level in the name column
            depth = int((len(name) - len(name.lstrip()) - 1) / 2) + 1
            new_imp = Import(
                name=name.strip(), t_self=t_self, t_cumu=t_cumu, depth=depth, childs=[]
            )
            # Pop back up to the parent of the current depth, then attach the new node
            for _ in range(len(import_stack) - depth):
                import_stack.pop()
            import_stack[-1].nested_imports.insert(0, new_imp)
            import_stack.append(new_imp)
    if root.nested_imports == []:
        raise InvalidInput("Invalid input: could not parse any imports")
    return [root]
def prune_import_depth(
    imports: List[Import], depth: Optional[int] = None
) -> List[Import]:
    """
    Prune the unified tree structure to the desired depth level.

    With ``depth=None`` (the default) the tree is returned unchanged.
    """
    # NOTE(review): prune_children is defined elsewhere in this module and
    # appears to mutate the tree in place; the +1 presumably accounts for the
    # synthetic root level -- confirm against its definition.
    if depth is not None:
        prune_children(imports, depth + 1)
    return imports
def sort_imports(imports: List[Import], sort_by="self") -> List[Import]:
    """
    Sort the unified tree structure according to the desired time key.
    """
    # NOTE(review): `sort_by` is accepted but not forwarded to sort_children
    # (defined elsewhere in this module) -- verify whether the helper picks the
    # key up through other means or whether the argument should be passed on.
    sort_children(imports)
    return imports
def import_tree_to_json_str(imports: List[Import]) -> str:
    """
    Serialize the imported modules tree to a pretty-printed json string.

    The synthetic "root" node is stripped so only the real imports appear.
    """
    # Fix: the parameter was declared as `imports=List[Import]`, assigning the
    # typing object as a *default value* where an annotation was intended.
    # NOTE(review): item access (["nested_imports"]) implies dict-like nodes
    # here while other functions use attribute access -- confirm against the
    # Import definition elsewhere in this module.
    exclude_root = imports[0]["nested_imports"]
    return json.dumps(exclude_root, indent=2)
# NOTE(review): `imports=List[Import]` assigns the typing object as a *default
# value*; an annotation (`imports: List[Import]`) was almost certainly intended.
def import_tree_to_waterfall(imports=List[Import], time_key="self", width=79) -> str:
    """
    Render the imported modules tree as a waterfall diagram and return it as a string.
    """
    output_str = ""
    waterfall_output = []
    max_time = 0
    max_name_len = 0
    imp = NamedTuple("imp", [("name", str), ("space", int), ("time", int)])
    # NOTE(review): create_name_str is expected to populate waterfall_output,
    # max_time and max_name_len; its definition is not visible in this part of
    # the file -- confirm it rebinds/mutates these names as a nested helper.
    create_name_str(imports[0]["nested_imports"])
    # Table header: module-name column padded to the longest name, then the bars
    header = "module name" + " " * ((max_name_len + 1) - len("module name")) + " "
    header += " import time (us)" + "\n" + "-" * width + "\n"
    output_str += header
    for node in waterfall_output:
        name = node.space * "." + str(node.name)
        offset = ((max_name_len - len(name)) + 3) * " "
        time_str = str(node.time)
        # Scale each bar relative to the slowest import so it fits the width
        water = "=" * int(
            (node.time / max_time)
            * (width - len(offset) - len(time_str) - len(name) - 2)
        )
        line_str = f"{name}{offset}{water}({time_str})\n"
        output_str += line_str
        # Warn when the requested width cannot fit even a single bar character
        min_width = round(1 / (node.time / max_time) + len(time_str) + len(name) + 2)
        if width < min_width:
            warning_msg = f"WARNING: The waterfall diagram may not be displayed correctly if the set width is too small!"
            output_str += warning_msg
    return output_str
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(
description="""
This script calls the python3 -X importtime implementation with a given module
and parses the stderr output into a json format, which can then be used to
search or display the given information. It can also display the data as a
waterfall diagram in the terminal.
"""
)
parser.add_argument("module", help="the module to import")
parser.add_argument(
"--format",
nargs="?",
default="json",
choices=["json", "waterfall"],
help="output format",
)
parser.add_argument(
"--sort",
nargs="?",
choices=["self", "cumulative"],
help="sort imported modules by import-time",
)
parser.add_argument(
"--time",
nargs="?",
choices=["self", "cumulative"],
help="time to use in waterfall format (default self)",
)
parser.add_argument(
"--width",
nargs="?",
type=int,
help="width of entries in waterfall format (default to "
"environement variable COLUMNS or terminal's width)",
)
parser.add_argument(
"--depth",
nargs="?",
type=int,
help="limit depth of output format (default unlimited)",
)
args = parser.parse_args(argv)
if args.time and args.format != "waterfall":
parser.error(
"--time requires format to be set to waterfall (--format waterfall)"
)
if args.width and args.format != "waterfall":
parser.error(
"--length requires format to be set to waterfall (--format waterfall)"
)
raw_output = get_import_time(module=str(args.module))
all_imports = parse_import_time(raw_output)
pruned_imports = prune_import_depth(all_imports, args.depth)
if args.sort:
output_imports = sort_imports(imports=pruned_imports, sort_by=args.sort)
else:
output_imports = pruned_imports
if args.format == "json":
print(import_tree_to_json_str(output_imports))
elif args.format == "waterfall":
width = args.width or shutil.get_terminal_size().columns
time = args.time or "self"
print(import_tree_to_waterfall(output_imports, time_key=time, width=width))
return 0
if __name__ == "__main__":
exit(main())
| 30.405405 | 117 | 0.590349 |
02c5550343d841d31714a9ed5ade721bffe3bee2 | 6,631 | py | Python | test/unit/test_params.py | davvil/sockeye | 188db761d314a913b88a5ff44395abb77797e5b9 | [
"Apache-2.0"
] | 1,117 | 2017-06-12T15:11:12.000Z | 2022-03-23T00:53:51.000Z | test/unit/test_params.py | davvil/sockeye | 188db761d314a913b88a5ff44395abb77797e5b9 | [
"Apache-2.0"
] | 553 | 2017-06-14T09:24:10.000Z | 2022-03-31T20:17:23.000Z | test/unit/test_params.py | davvil/sockeye | 188db761d314a913b88a5ff44395abb77797e5b9 | [
"Apache-2.0"
] | 369 | 2017-06-12T15:22:34.000Z | 2022-03-30T19:32:27.000Z | # Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import glob
import os.path
import tempfile
import mxnet as mx
import pytest
import sockeye.encoder
import sockeye.model
import sockeye.training
import sockeye.constants as C
| 47.028369 | 119 | 0.663248 |
02c5bed1bce16f210d130ab65cdc1e0b0a119b39 | 8,194 | py | Python | code/utils/data_preparation.py | cltl/positive-interpretations | 27640be255e8072b3333851f605c13c80e5d7ed3 | [
"Apache-2.0"
] | null | null | null | code/utils/data_preparation.py | cltl/positive-interpretations | 27640be255e8072b3333851f605c13c80e5d7ed3 | [
"Apache-2.0"
] | null | null | null | code/utils/data_preparation.py | cltl/positive-interpretations | 27640be255e8072b3333851f605c13c80e5d7ed3 | [
"Apache-2.0"
] | null | null | null | import csv
import collections
import pandas as pd
from random import shuffle
from tqdm import tqdm
def get_all_tokens_conll(conll_file):
    """
    Read a CoNLL-2011 file and return all tokens with their annotations in a
    dataframe, including the original sentence identifiers from OntoNotes.

    Two sentence counters are kept: `sent_id_part` restarts for every file part
    ("000", "001", ...) while `sent_id_file` keeps counting across all parts of
    the same source file.

    Assumes the file contains at least one token line (same precondition as before).
    """
    all_tokens = list()
    most_semroles = 0
    with open(conll_file, "r") as infile:
        for line in infile:
            # Get sentence identifiers: distinguish between sentence count per file and per file part
            # (some files are divided into multiple parts numbered as 000, 001, 002, ... etc.)
            if line.startswith("#begin document"):
                sent_id_part = 0
                part_id = line.split("; part ")[1].rstrip("\n")
                if part_id == "000":
                    # Part 000 means a new source file: restart the per-file counter
                    sent_id_file = 0
                else:
                    sent_id_file += 1
            elif line.startswith("#end document"):
                sent_id_file -= 1  # prevent counting too much (empty line followed by end document)
            elif line == "\n":
                # Blank line = sentence boundary
                sent_id_part += 1
                sent_id_file += 1
            else:
                columns = line.split()
                dict_token = {"file_id": columns[0],
                              "part_id": int(columns[1]),
                              "sent_id_part": int(sent_id_part),
                              "sent_id_file": int(sent_id_file),
                              "token_id": columns[2],
                              "word_form": columns[3],
                              "POS": columns[4],
                              "parse": columns[5],
                              "pred_lemma": columns[6],
                              "pred_frameset": columns[7],
                              "word_sense": columns[8],
                              "speaker": columns[9],
                              "NE": columns[10],
                              "coref": columns[-1].rstrip("\n")
                              }
                # One APRED column per predicate in the sentence (varies per row)
                semroles = {f"APRED{i}": role for i, role in enumerate(columns[11:-1], 1)}
                dict_token.update(semroles)
                all_tokens.append(dict_token)
                if len(semroles) > most_semroles:
                    most_semroles = len(semroles)
    # Build the column list from the fixed fields plus the widest APRED range
    # seen anywhere in the file. (Previously the columns were taken from the
    # *last* token only, which silently dropped APRED columns whenever the last
    # row had fewer predicates than the widest row.)
    base_cols = [col for col in dict_token.keys() if not col.startswith("APRED")]
    cols = base_cols + [f"APRED{i}" for i in range(1, most_semroles + 1)]
    df_tokens = pd.DataFrame(all_tokens, columns=cols)
    return df_tokens
def find_original_sent_ids(df_instances, df_conll):
    """
    Takes the file_id, part_id and sent_id indicating a specific sentence in the CoNLL-2011 data (where file is split
    into smaller parts and sent_id restarts for each part) and finds the corresponding 'original' sentence identifier.

    Mutates and returns `df_instances`, filling its "sent_id_file" column.
    Assumes every instance has at least one matching row in `df_conll`.
    """
    print("Finding original sentence identifiers")
    for index, row in tqdm(df_instances.iterrows(), total=len(df_instances)):
        # For each instance in the set, find the corresponding sent_id_file in the annotations of CoNLL-2011
        file_id = row["file_id"]
        part_id = row["part_id"]
        sent_id_part = row["sent_id_part"]
        matching_rows = df_conll.loc[(df_conll["file_id"] == file_id) & (df_conll["part_id"] == part_id) &
                                     (df_conll["sent_id_part"] == sent_id_part)]
        sent_id_file = matching_rows.iloc[0]["sent_id_file"]
        # DataFrame.set_value was deprecated in pandas 0.21 and removed in 1.0;
        # .at is the supported scalar setter with the same semantics.
        df_instances.at[index, "sent_id_file"] = sent_id_file
    return df_instances
def get_role_features_from_annotations(role_annotations):
    """Splits the verb and role information (in original annotations file) to separate values.

    Expects the annotation format ``"[(POS wordform)] span:label: tokens"``
    and returns the tuple (head_wf, head_pos, span, label, tokens).
    """
    head_part, role_part = role_annotations.split(")] ")
    # "[(POS wordform" -> strip the bracket prefix, then split on whitespace.
    head_pos, head_wf = head_part.lstrip("[(").split()
    # First whitespace separates "span:label:" from the token string.
    span_field, tokens = role_part.split(maxsplit=1)
    span, label = span_field.rstrip(":").split(":")
    return (head_wf, head_pos, span, label, tokens)
def rewrite_verb_and_role_features(df):
    """Rewrites the verb and role information in the original annotations file to separate columns.

    :param df: frame with raw "verb" and "role" annotation strings
    :return: a new frame where those two columns are replaced by the parsed
        verb_* and role_* feature columns
    """
    instances = df.to_dict("records")
    for inst in instances:
        # Parse verb features from the raw annotation string.
        verb_features = get_role_features_from_annotations(inst["verb"])
        verb_wf, verb_pos, verb_span, verb_label, verb_tokens = verb_features
        # Parse role features the same way.
        role_features = get_role_features_from_annotations(inst["role"])
        role_head_wf, role_head_pos, role_span, role_label, role_tokens = role_features
        # NOTE: the original dict literal listed "role_tokens" twice; the
        # duplicate (silently ignored by Python) has been removed.
        new_dict = {"verb_wf": verb_wf,
                    "verb_pos": verb_pos,
                    "verb_span": verb_span,
                    "verb_label": verb_label,
                    "verb_tokens": verb_tokens,
                    "role_head_wf": role_head_wf,
                    "role_head_pos": role_head_pos,
                    "role_span": role_span,
                    "role_label": role_label,
                    "role_tokens": role_tokens}
        # Mutate the record in place: replace raw strings with parsed columns.
        inst.update(new_dict)
        del inst["verb"]
        del inst["role"]
    columns = list(instances[0].keys())
    df = pd.DataFrame(instances, columns=columns)
    return df
def transform_labels_three(row):
    """Takes original score (label) and converts to tertiary classes.

    Mapping (integer scores): <= 1 -> 0, 2..3 -> 1, >= 4 -> 2.
    """
    score = int(row['label'])
    if score >= 4:
        return 2
    if score >= 2:
        return 1
    return 0
def transform_labels_two(row):
    """Takes original score (label) and converts to binary classes.

    Mapping (integer scores): <= 2 -> 0, otherwise -> 1.
    """
    return 0 if int(row['label']) <= 2 else 1
def categorize_scores(df):
    """Takes original score (label) and converts to tertiary/binary classes.

    Adds the class_tertiary and class_binary columns to *df* in place and
    returns the same frame.
    """
    # apply() accepts the functions directly; no lambda wrapper needed.
    df["class_tertiary"] = df.apply(transform_labels_three, axis=1)
    df["class_binary"] = df.apply(transform_labels_two, axis=1)
    return df
def split_train_test(df_instances, test_ratio=0.2, to_shuffle=True):
    """Splits the instances into train and test sets. Each negation is either assigned to the train or test set.

    :param df_instances: instance frame with file_id, sent_id_file and
        verb_span columns identifying the negation each row belongs to
    :param test_ratio: fraction of negations assigned to the test set
    :param to_shuffle: shuffle the negation ids before splitting
    :return: (df_train, df_test) tuple of DataFrames
    """
    instances = df_instances.to_dict("records")
    # One id per negation, so all its instances land on the same side of the split.
    neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
    if to_shuffle:
        shuffle(neg_ids)
    test_size = int(len(neg_ids) * test_ratio)
    # set gives O(1) membership tests in the two comprehensions below.
    test_ids = set(neg_ids[:test_size])
    test_instances = [inst for inst in instances if (inst["file_id"],
                                                     inst["sent_id_file"],
                                                     inst["verb_span"]) in test_ids]
    train_instances = [inst for inst in instances if (inst["file_id"],
                                                      inst["sent_id_file"],
                                                      inst["verb_span"]) not in test_ids]
    # Take the column order from the input frame so an empty split (e.g.
    # test_ratio=1.0) no longer crashes on train_instances[0].
    columns = list(df_instances.columns)
    df_train = pd.DataFrame(train_instances, columns=columns)
    df_test = pd.DataFrame(test_instances, columns=columns)
    return df_train, df_test
def k_fold(df_instances, k=10):
    """Divides all the samples in k groups of samples. Each negation is either assigned to the train or test set.

    :param df_instances: instance frame with file_id, sent_id_file and
        verb_span columns identifying the negation each row belongs to
    :param k: number of folds
    :return: list of k (df_train, df_test) tuples
    """
    # to_dict("records") keeps every row (the previous df.T.to_dict().values()
    # silently dropped rows with duplicate index labels) and matches the
    # representation used by split_train_test.
    instances = df_instances.to_dict("records")
    neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
    kf = []
    test_size = int(len(neg_ids) / k)
    columns = list(df_instances.columns)
    start = 0
    for _ in range(k):
        # set gives O(1) membership tests in the two comprehensions below.
        test_ids = set(neg_ids[start:start + test_size])
        test_instances = [inst for inst in instances if (inst["file_id"],
                                                         inst["sent_id_file"],
                                                         inst["verb_span"]) in test_ids]
        train_instances = [inst for inst in instances if (inst["file_id"],
                                                          inst["sent_id_file"],
                                                          inst["verb_span"]) not in test_ids]
        kf.append((pd.DataFrame(train_instances, columns=columns),
                   pd.DataFrame(test_instances, columns=columns)))
        start += test_size
    return kf
| 45.522222 | 117 | 0.569929 |
02c6de12abd9a20df4664b77466fae5d81958b59 | 800 | py | Python | {{ cookiecutter.project_name }}/setup.py | wlongxiang/cookiecutter-data-science | bbae41e22ac7db74430f3c6b457c2ff7f52537e1 | [
"MIT"
] | null | null | null | {{ cookiecutter.project_name }}/setup.py | wlongxiang/cookiecutter-data-science | bbae41e22ac7db74430f3c6b457c2ff7f52537e1 | [
"MIT"
] | null | null | null | {{ cookiecutter.project_name }}/setup.py | wlongxiang/cookiecutter-data-science | bbae41e22ac7db74430f3c6b457c2ff7f52537e1 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
import re
VERSIONFILE = "{{ cookiecutter.project_name }}/__init__.py"
with open(VERSIONFILE, "rt") as versionfle:
verstrline = versionfle.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(version_re, verstrline, re.M)
if mo:
ver_str = mo.group(1)
else:
raise ValueError("Unable to find version string in %s." % (VERSIONFILE,))
# add prod requires to setup so that pip can install dependencies for you
with open("requirements_prod.txt") as f:
required_pkgs = f.read().splitlines()
setup(
name='{{ cookiecutter.project_name }}',
packages=find_packages(),
version=ver_str,
description='{{ cookiecutter.description }}',
author='{{ cookiecutter.author_name }}',
install_requires=required_pkgs
)
| 30.769231 | 77 | 0.6975 |
02c7a6f45d41d451a3a0c3c43389880ddc7a1852 | 8,759 | py | Python | square/api/cards_api.py | codertjay/square-python-sdk | 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 | [
"Apache-2.0"
] | 1 | 2022-02-28T13:18:30.000Z | 2022-02-28T13:18:30.000Z | square/api/cards_api.py | codertjay/square-python-sdk | 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 | [
"Apache-2.0"
] | null | null | null | square/api/cards_api.py | codertjay/square-python-sdk | 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
| 36.194215 | 120 | 0.612513 |
02c8b5302247d3f0de4a0fcfd8043adc64146600 | 1,564 | py | Python | setup.py | nilp0inter/threadedprocess | 0120d6e795782c9f527397490846cd214d9196e1 | [
"PSF-2.0"
] | 9 | 2018-03-21T22:19:10.000Z | 2021-06-08T12:10:15.000Z | setup.py | nilp0inter/threadedprocess | 0120d6e795782c9f527397490846cd214d9196e1 | [
"PSF-2.0"
] | 3 | 2019-09-18T19:57:28.000Z | 2020-07-17T08:06:54.000Z | setup.py | nilp0inter/threadedprocess | 0120d6e795782c9f527397490846cd214d9196e1 | [
"PSF-2.0"
] | 4 | 2018-03-24T23:10:38.000Z | 2020-06-18T02:26:24.000Z | import os
from setuptools import setup
try:
import concurrent.futures
except ImportError:
CONCURRENT_FUTURES_PRESENT = False
else:
CONCURRENT_FUTURES_PRESENT = True
setup(
name="threadedprocess",
version="0.0.5",
author="Roberto Abdelkader Martinez Perez",
author_email="robertomartinezp@gmail.com",
description=(
"A `ThreadedProcessPoolExecutor` is formed by a modified "
"`ProcessPoolExecutor` that generates processes that use a "
"`ThreadPoolExecutor` instance to run the given tasks."),
license="BSD",
keywords="concurrent futures executor process thread",
url="https://github.com/nilp0inter/threadedprocess",
py_modules=['threadedprocess'],
long_description=read('README.rst'),
install_requires=[] if CONCURRENT_FUTURES_PRESENT else ["futures"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
| 34 | 71 | 0.658568 |
02c90b77315d19cadcdffd4cbada1b9dd920626e | 2,592 | py | Python | coremltools/converters/mil/mil/passes/const_elimination.py | VadimLevin/coremltools | 66c17b0fa040a0d8088d33590ab5c355478a9e5c | [
"BSD-3-Clause"
] | 3 | 2018-10-02T17:23:01.000Z | 2020-08-15T04:47:07.000Z | coremltools/converters/mil/mil/passes/const_elimination.py | holzschu/coremltools | 5ece9069a1487d5083f00f56afe07832d88e3dfa | [
"BSD-3-Clause"
] | null | null | null | coremltools/converters/mil/mil/passes/const_elimination.py | holzschu/coremltools | 5ece9069a1487d5083f00f56afe07832d88e3dfa | [
"BSD-3-Clause"
] | 1 | 2021-05-07T15:38:20.000Z | 2021-05-07T15:38:20.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
| 32 | 83 | 0.58179 |
02c92d6241ebe854f6a64f06a949f8d5440cd141 | 13,749 | py | Python | matching/retrieval.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 22 | 2022-01-24T10:08:39.000Z | 2022-03-31T10:47:05.000Z | matching/retrieval.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 3 | 2022-03-06T11:52:25.000Z | 2022-03-15T06:32:17.000Z | matching/retrieval.py | Macielyoung/sentence_representation_matching | aa33147eb870a805f69dbc54c2177b11a94cf814 | [
"Apache-2.0"
] | 5 | 2022-02-28T09:13:04.000Z | 2022-03-22T12:50:09.000Z | from simcse import SimCSE
from esimcse import ESimCSE
from promptbert import PromptBERT
from sbert import SBERT
from cosent import CoSent
from config import Params
from log import logger
import torch
from transformers import AutoTokenizer
simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
logger.info("start simcse model succussfully!")
esimcse_repeat_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_repeat_model, Params.esimcse_repeat_dropout)
logger.info("start esimcse repeat model succussfully!")
esimcse_same_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_same_model, Params.esimcse_same_dropout)
logger.info("start esimcse same model succussfully!")
esimcse_multi_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_multi_model, Params.esimcse_multi_dropout)
logger.info("start esimcse multi model succussfully!")
promptbert_retrieval = PromptBertRetrieval(Params.pretrained_model, Params.promptbert_model, Params.promptbert_dropout)
logger.info("start promptbert model succussfully!")
sbert_retrieval = SBERTRetrieval(Params.pretrained_model, Params.sbert_model, Params.sbert_pool_type, Params.sbert_dropout)
logger.info("start sbert model succussfully!")
cosent_retrieval = CoSentRetrieval(Params.pretrained_model, Params.cosent_model)
logger.info("start cosent model succussfully!")
if __name__ == "__main__":
# model_path = "models/esimcse_0.32_0.15_160.pth"
# model_path = "models/esimcse_multi_0.15_64.pth"
# model_path = "models/esimcse_0.15_64.pth"
# simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
# model_info = simcse_retrieval.print_checkpoint_info()
# print(model_info)
model_info = sbert_retrieval.print_checkpoint_info()
print(model_info)
while True:
print("input your sentence1:")
sentence1 = input()
print("input your sentence2:")
sentence2 = input()
sbert_sentence_similarity = sbert_retrieval.calculate_sentence_similarity(sentence1, sentence2)
# promptbert_sentence_similarity = prom.calculate_sentence_similarity(sentence1, sentence2)
# print("simcse sim: {}, promptbert sim: {}".format(simcse_sentence_similarity, promptbert_sentence_similarity))
print("sbert similarity: {}".format(sbert_sentence_similarity)) | 48.928826 | 128 | 0.630664 |
02c969f151e36baef658a4ae669ed82de7db3bc7 | 15,885 | py | Python | Backend/src/awattprice/notifications.py | a8/AWattPrice | 008df74b66f4790276f847eecb4e05536d66b518 | [
"BSD-3-Clause"
] | null | null | null | Backend/src/awattprice/notifications.py | a8/AWattPrice | 008df74b66f4790276f847eecb4e05536d66b518 | [
"BSD-3-Clause"
] | 1 | 2021-05-31T06:07:21.000Z | 2021-05-31T06:07:21.000Z | Backend/src/awattprice/notifications.py | a8/AWattPrice | 008df74b66f4790276f847eecb4e05536d66b518 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Check which users apply to receive certain notifications.
Send notifications via APNs to those users.
"""
import asyncio
import json
from datetime import datetime
from math import floor
from pathlib import Path
from typing import List, Optional, Tuple
import arrow # type: ignore
import httpx
import jwt
from box import Box # type: ignore
from configupdater import ConfigUpdater # type: ignore
from dateutil.tz import tzstr
from fastapi import status
from loguru import logger as log
from tenacity import retry, stop_after_attempt, stop_after_delay, wait_exponential # type: ignore
from awattprice import poll
from awattprice.defaults import CURRENT_VAT, Region
from awattprice.token_manager import APNsTokenManager
from awattprice.types import APNSToken
from awattprice.utils import before_log
| 41.25974 | 174 | 0.629902 |
02ca7b014cd9960cd4ff5fbac17c8225edc804e1 | 821 | py | Python | examples/charts/horizon.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2015-07-17T13:57:01.000Z | 2015-07-17T13:57:01.000Z | examples/charts/horizon.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | null | null | null | examples/charts/horizon.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2021-08-01T08:38:53.000Z | 2021-08-01T08:38:53.000Z | from collections import OrderedDict
import pandas as pd
from bokeh.charts import Horizon, output_file, show
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
output_file("horizon.html")
hp = Horizon(
xyvalues, index='Date',
title="horizon plot using stock inputs",
width=800, height=300
)
show(hp)
| 24.147059 | 77 | 0.662607 |
02cb62a1399f0fec841542f9ed5b1a63b1a1c4d2 | 4,023 | py | Python | backend.py | CameronStollery/road-trip-planner | 440207ccb9273123695c04ec3027b7822413bf2c | [
"MIT"
] | 1 | 2020-08-18T13:21:00.000Z | 2020-08-18T13:21:00.000Z | backend.py | CameronStollery/road-trip-planner | 440207ccb9273123695c04ec3027b7822413bf2c | [
"MIT"
] | 1 | 2020-08-11T10:24:49.000Z | 2020-08-11T14:49:22.000Z | backend.py | CameronStollery/road-trip-planner | 440207ccb9273123695c04ec3027b7822413bf2c | [
"MIT"
] | null | null | null | # from __future__ import print_function
import pymzn
import time
from pprint import pprint
from collections import OrderedDict
import openrouteservice
from openrouteservice.geocode import pelias_search
from openrouteservice.distance_matrix import distance_matrix
client = openrouteservice.Client(key='')
# routes = client.directions(coords)
# print(routes)
# TODO add error classes for distance matrix errors etc
if __name__ == '__main__':
"""
This just contains testing code. Delete before deploying to production environment. Code in this file shoudl only
be accessed through the compute_results function.
"""
loc_details = geocode('5 Bolinda Pl')
print(loc_details['features'][0]['geometry']['coordinates'])
print(loc_details['features'][0]['properties']['label'])
# compute_results(test_input)
# pprint(test_matrix())
# people = []
# # Prompt user to enter all names and addresses
# personId = 1
# name = ""
# while name != "DONE":
# name = input("Enter the name of person " + str(personId) + " or type \"DONE\" when you have entered everyone.")
# if name != "DONE":
# address = input("Enter their address: ")
# loc = geocode(address)
# # pprint(loc)
# people.append({'id': personId, 'address': address, 'coords': loc['features'][0]['geometry']['coordinates']})
# personId += 1
# if people == []:
# print("You haven't entered any addresses.")
# else:
# coordinates = []
# for person in people:
# coordinates.append(person['coords'])
# # print(coordinates)
# distances = matrix(coordinates)
# # distances = testMatrix()
# pprint(distances) | 34.384615 | 123 | 0.637584 |
02cbfb02bf42141ca374969c292f1c21a9ad8577 | 597 | py | Python | examples/python/django/load-generator.py | ScriptBox99/pyroscope | fbf5bd297caf6a987f9fb6ffd0240ed804eaf9b4 | [
"Apache-2.0"
] | 5,751 | 2021-01-01T18:58:15.000Z | 2022-03-31T19:19:39.000Z | examples/python/django/load-generator.py | ScriptBox99/pyroscope | fbf5bd297caf6a987f9fb6ffd0240ed804eaf9b4 | [
"Apache-2.0"
] | 913 | 2021-01-05T07:46:12.000Z | 2022-03-31T20:04:39.000Z | examples/python/django/load-generator.py | admariner/pyroscope | e13afb40348914ae29b813881bfad0ca3b89f250 | [
"Apache-2.0"
] | 329 | 2021-01-11T06:25:55.000Z | 2022-03-29T08:19:33.000Z | import random
import requests
import time
HOSTS = [
'us-east-1',
'us-west-1',
'eu-west-1',
]
VEHICLES = [
'bike',
'scooter',
'car',
]
if __name__ == "__main__":
    print(f"starting load generator")
    # Give the 'web' service time to come up before issuing requests.
    time.sleep(15)
    print('done sleeping')
    # Forever: hit a random vehicle endpoint on the web service, tagging each
    # request with a randomly chosen host label, then pause 0.2-0.4s.
    while True:
        host = HOSTS[random.randint(0, len(HOSTS) - 1)]
        vehicle = VEHICLES[random.randint(0, len(VEHICLES) - 1)]
        print(f"requesting {vehicle} from {host}")
        resp = requests.get(f'http://web:8000/{vehicle}')
        print(f"received {resp}")
        time.sleep(random.uniform(0.2, 0.4))
| 21.321429 | 64 | 0.582915 |
02cc889fddc76c8e78693be834e7b5343b8c87f5 | 3,148 | py | Python | python/Multi-Service/content_moderator_cs.py | kyichii/cognitive-services-quickstart-code | a48549dd6b1fbb795fbe3cc5286c888306b6eb79 | [
"MIT"
] | 2 | 2020-12-06T18:05:30.000Z | 2020-12-09T17:01:21.000Z | python/Multi-Service/content_moderator_cs.py | diberry/cognitive-services-quickstart-code | 53972838ff64937e099c6886ff4a3c019b2ef346 | [
"MIT"
] | null | null | null | python/Multi-Service/content_moderator_cs.py | diberry/cognitive-services-quickstart-code | 53972838ff64937e099c6886ff4a3c019b2ef346 | [
"MIT"
] | null | null | null | import os
from pprint import pprint
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
from azure.cognitiveservices.vision.contentmoderator.models import ( Evaluate, OCR, FoundFaces )
'''
This quickstart uses Content Moderator to moderate a list of images.
Uses the general Cognitive Services key/endpoint. It's used when you want to
combine many Cognitive Services with just one authentication key/endpoint.
Services are not combined here, but could be potentially.
Install the Content Moderator SDK from a command prompt or IDE terminal:
pip install --upgrade azure-cognitiveservices-vision-contentmoderator
The Content Moderator SDK:
https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-contentmoderator/?view=azure-python
'''
# Add your Cognitive Services subscription key and endpoint to your environment variables.
subscription_key = os.environ['COGNITIVE_SERVICES_SUBSCRIPTION_KEY']
endpoint = os.environ['COGNITIVE_SERVICES_ENDPOINT']
# List of URL images used to moderate.
IMAGE_LIST = [
"https://moderatorsampleimages.blob.core.windows.net/samples/sample2.jpg",
"https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png"
]
'''
AUTHENTICATE
Create a Content Moderator client.
'''
client = ContentModeratorClient(
endpoint=endpoint,
credentials=CognitiveServicesCredentials(subscription_key)
)
'''
CONTENT MODERATOR
This quickstart moderates an image, then text and faces within the image.
'''
print('IMAGE MODERATION')
print()
# Image moderation, using image at [0]
print("Evaluate the image '{}' for adult and racy content:".format(os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.evaluate_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[0])
assert isinstance(mod_image, Evaluate)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating text in an image, using image at [0]
print("\nDetect, extract, and moderate text for image {}:".format(
os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.ocr_url_input(language="eng", content_type="application/json",
data_representation="URL", value=IMAGE_LIST[0], cache_image=True)
assert isinstance(mod_image, OCR)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating faces in an image, using image at [1]
print("\nDetect faces and moderate for image {}:".format(
os.path.basename(IMAGE_LIST[1])))
mod_image = client.image_moderation.find_faces_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[1])
assert isinstance(mod_image, FoundFaces)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
print()
| 39.35 | 115 | 0.750318 |
02cee01f37525203bf54b532eb16ee060a8b571e | 1,498 | py | Python | usda_nutrition/admin.py | danielnaab/django-usda-nutrition | ba05bf741844a1858ad4bc2474e0640cba42994c | [
"BSD-3-Clause"
] | 11 | 2017-01-18T19:57:11.000Z | 2021-06-19T08:03:21.000Z | usda_nutrition/admin.py | danielnaab/django-usda-nutrition | ba05bf741844a1858ad4bc2474e0640cba42994c | [
"BSD-3-Clause"
] | 3 | 2017-09-24T01:09:42.000Z | 2021-08-11T02:44:55.000Z | usda_nutrition/admin.py | danielnaab/django-usda-nutrition | ba05bf741844a1858ad4bc2474e0640cba42994c | [
"BSD-3-Clause"
] | 8 | 2016-09-20T17:46:39.000Z | 2020-04-24T16:20:44.000Z | from django.contrib import admin
from . import models
admin.site.register(models.DerivationCode, DerivationCodeAdmin)
admin.site.register(models.FoodDescription, FoodDescriptionAdmin)
admin.site.register(models.FoodGroup, FoodGroupAdmin)
admin.site.register(models.Footnote, FootnoteAdmin)
admin.site.register(models.NutrientDefinition, NutrientDefinitionAdmin)
admin.site.register(models.SourceCode, SourceCodeAdmin)
admin.site.register(models.Weight, WeightAdmin)
| 28.264151 | 77 | 0.763017 |
02cf4f4a30c1e8ca50d5538321deac3fbf7ca247 | 1,779 | py | Python | FisherExactTest/FisherExactTest.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | FisherExactTest/FisherExactTest.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | FisherExactTest/FisherExactTest.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | from decimal import Decimal
def FisherExact(a: int, b: int,
                c: int, d: int) -> Decimal:
    """Calculate two-tailed Fisher's exact test for 2x2 continguency table

    Args:
        a: column 1 row 1
        b: column 2 row 1
        c: column 1 row 2
        c: column 2 row 2

    Returns:
        Result of two-tailed Fisher's exact test stored in Decimal class
    """
    # A perfectly uniform table is independent by construction.
    if a == b == c == d:
        return Decimal(1)
    # The observed table's probability is both the cutoff for the tails
    # and the first term of the two-tailed sum.
    cutoff = pvalue(a, b, c, d)
    total = cutoff
    total += Decimal(FisherLeftSide(a, b, c, d, cutoff))
    total += Decimal(FisherRightSide(a, b, c, d, cutoff))
    return total
02cfb28047d612da27deb76d6967a4e3c09ca214 | 1,528 | py | Python | Signature Detection and Analysis/signprocessing.py | andrevks/Document-Forgery-Detection | 77dcde3867732a55cd0f4604627d7bf67a5e79a5 | [
"MIT"
] | null | null | null | Signature Detection and Analysis/signprocessing.py | andrevks/Document-Forgery-Detection | 77dcde3867732a55cd0f4604627d7bf67a5e79a5 | [
"MIT"
] | null | null | null | Signature Detection and Analysis/signprocessing.py | andrevks/Document-Forgery-Detection | 77dcde3867732a55cd0f4604627d7bf67a5e79a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 31 04:01:49 2018
@author: abhilasha
"""
#Team: GuardiansOfGalaxy
from PIL import Image, ImageEnhance
if __name__ == "__main__":
filename = 'data\\Signature.jpg'
img = Image.open(filename)
enhance_signature(img)
get_boxed_signature()
| 25.466667 | 86 | 0.623037 |
02d171270f0a756f0c3935d386245a162e597707 | 23,802 | py | Python | crowdsource/views.py | Code-and-Response/ISAC-SIMO-Repo-2 | 5b0cb0099e128dbacfdc53bf686ef2b069a51bc6 | [
"PostgreSQL",
"Apache-2.0"
] | 5 | 2021-08-16T16:32:41.000Z | 2022-02-22T03:47:49.000Z | crowdsource/views.py | Code-and-Response/ISAC-SIMO-Repo-2 | 5b0cb0099e128dbacfdc53bf686ef2b069a51bc6 | [
"PostgreSQL",
"Apache-2.0"
] | 7 | 2021-04-12T14:48:48.000Z | 2022-02-14T08:30:57.000Z | crowdsource/views.py | Code-and-Response/ISAC-SIMO-Repo-2 | 5b0cb0099e128dbacfdc53bf686ef2b069a51bc6 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2021-04-28T14:28:34.000Z | 2021-04-28T14:28:34.000Z | from main.customdecorators import check_honeypot_conditional
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.http.response import HttpResponseRedirect, JsonResponse
from rest_framework.decorators import action
from api.models import ObjectType
from django.core.cache import cache
from crowdsource.forms import CrowdsourceForm, ImageShareForm
from crowdsource.helpers import delete_object, get_object, get_object_list, move_object, upload_object
from crowdsource.models import Crowdsource, ImageShare
from django.shortcuts import get_object_or_404, redirect, render
from main.authorization import login_url, is_admin_or_project_admin, is_admin
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core import serializers
from django.conf import settings
from django.http import HttpResponse
from django.contrib import messages
from django.core.paginator import Paginator
from django.db.models import Q
from rest_framework import generics, mixins, viewsets
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from .serializers import CrowdsourceSerializer, ImageShareSerializer
from rest_framework.response import Response
import uuid
import json
from django.utils import timezone
from datetime import timedelta
# View All Crowdsource Images + Update/Create
#######
# API #
#######
class ResponseInfo(object):
# Crowdsource Image API
# PRUNE old ImageShare requests (check and remove old > 60 days requests)
def prune_old_image_share():
if not cache.get('prune_image_share'):
ImageShare.objects.filter(created_at__lte=timezone.now()-timedelta(days=60)).delete()
cache.set("prune_image_share", True, 86400) # Prune every 24 hours
# Image Share Views
# Image Share API | 48.279919 | 150 | 0.56991 |
02d235dc4031cc79fd9ab325030c238874738554 | 2,232 | py | Python | epochCiCdApi/ita/viewsOperations.py | matsumoto-epoch/epoch | c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5 | [
"Apache-2.0"
] | null | null | null | epochCiCdApi/ita/viewsOperations.py | matsumoto-epoch/epoch | c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5 | [
"Apache-2.0"
] | null | null | null | epochCiCdApi/ita/viewsOperations.py | matsumoto-epoch/epoch | c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi # CGI
import cgitb
import sys
import requests
import json
import subprocess
import traceback
import os
import base64
import io
import logging
from django.shortcuts import render
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
ita_host = os.environ['EPOCH_ITA_HOST']
ita_port = os.environ['EPOCH_ITA_PORT']
ita_user = os.environ['EPOCH_ITA_USER']
ita_pass = os.environ['EPOCH_ITA_PASSWORD']
# ID
ite_menu_operation = '2100000304'
ita_restapi_endpoint='http://' + ita_host + ':' + ita_port + '/default/menu/07_rest_api_ver1.php'
logger = logging.getLogger('apilog')
| 28.987013 | 108 | 0.72043 |
02d4e6b3a3eee626ac3250b843b87270720d699e | 56 | py | Python | tests/test_init.py | keisuke-umezawa/chutil | df60440983c38a6dbbe4710019bcec5e83331904 | [
"MIT"
] | 1 | 2019-02-16T06:20:50.000Z | 2019-02-16T06:20:50.000Z | tests/test_init.py | keisuke-umezawa/chutil | df60440983c38a6dbbe4710019bcec5e83331904 | [
"MIT"
] | null | null | null | tests/test_init.py | keisuke-umezawa/chutil | df60440983c38a6dbbe4710019bcec5e83331904 | [
"MIT"
] | null | null | null | import chutil as module
| 9.333333 | 23 | 0.714286 |
02d56efb28c0baac4d608dce2e0ed1e45b667e10 | 932 | py | Python | src/service/uri_generator.py | HalbardHobby/git-LFS-for-Lambda | d19ba6fc4605d5dc2dba52acb4236c68787f8bde | [
"MIT"
] | null | null | null | src/service/uri_generator.py | HalbardHobby/git-LFS-for-Lambda | d19ba6fc4605d5dc2dba52acb4236c68787f8bde | [
"MIT"
] | null | null | null | src/service/uri_generator.py | HalbardHobby/git-LFS-for-Lambda | d19ba6fc4605d5dc2dba52acb4236c68787f8bde | [
"MIT"
] | null | null | null | """Generates pre-signed uri's for blob handling."""
from boto3 import client
import os
s3_client = client('s3')
def create_uri(repo_name, resource_oid, upload=False, expires_in=300):
    """Return a pre-signed S3 URL for the given oid and repo.

    Signs a GET by default; pass upload=True for a PUT URL. The URL
    expires after *expires_in* seconds.
    """
    action = 'put_object' if upload else 'get_object'
    params = {
        'Bucket': os.environ['LFS_S3_BUCKET_NAME'],
        'Key': repo_name + '/' + resource_oid,
    }
    return s3_client.generate_presigned_url(action, Params=params,
                                            ExpiresIn=expires_in)
def file_exists(repo_name, resource_oid):
    """Check if the file exists within the bucket."""
    key = repo_name + '/' + resource_oid
    # Prefix-list then compare exactly: the listing may contain other keys
    # that merely start with ours.
    response = s3_client.list_objects_v2(
        Bucket=os.environ['LFS_S3_BUCKET_NAME'], Prefix=key)
    return any(obj['Key'] == key for obj in response.get('Contents', []))
02d7c80b9c168487db13fab6edd36bd30ed15c3d | 4,919 | py | Python | rnn/chatbot/chatbot.py | llichengtong/yx4 | 17de7a6257a9f0c38e12089b2d1947927ec54c90 | [
"Apache-2.0"
] | 128 | 2017-03-04T08:53:44.000Z | 2020-06-05T11:19:16.000Z | rnn/chatbot/chatbot.py | github-jinwei/TensorFlowBook | 17de7a6257a9f0c38e12089b2d1947927ec54c90 | [
"Apache-2.0"
] | null | null | null | rnn/chatbot/chatbot.py | github-jinwei/TensorFlowBook | 17de7a6257a9f0c38e12089b2d1947927ec54c90 | [
"Apache-2.0"
] | 120 | 2017-02-07T09:41:25.000Z | 2022-03-17T00:57:59.000Z | # coding=utf8
import logging
import os
import random
import re
import numpy as np
import tensorflow as tf
from seq2seq_conversation_model import seq2seq_model
from seq2seq_conversation_model import data_utils
from seq2seq_conversation_model import tokenizer
from seq2seq_conversation_model.seq2seq_conversation_model import FLAGS, _buckets
from settings import SEQ2SEQ_MODEL_DIR
_LOGGER = logging.getLogger('track')
UNK_TOKEN_REPLACEMENT = [
'',
'',
'',
'',
]
ENGLISHWORD_PATTERN = re.compile(r'[a-zA-Z0-9]')
def trim(s):
    """
    1. delete every space between chinese words
    2. suppress extra spaces
    :param s: some python string (str or unicode; any other type is returned unchanged)
    :return: the trimmed string
    """
    # Python 2 code (unicode/xrange): non-string inputs pass through untouched.
    if not (isinstance(s, unicode) or isinstance(s, str)):
        return s
    unistr = s.decode('utf8') if type(s) != unicode else s
    unistr = unistr.strip()
    if not unistr:
        return ''
    trimmed_str = []
    # First character is kept unless it is a space (can't happen after strip,
    # kept as a defensive guard).
    if unistr[0] != ' ':
        trimmed_str.append(unistr[0])
    for ind in xrange(1, len(unistr) - 1):
        # Compare against the last KEPT character rather than the raw previous
        # one, so runs of several spaces collapse to at most one.
        prev_char = unistr[ind - 1] if len(trimmed_str) == 0 else trimmed_str[-1]
        cur_char = unistr[ind]
        maybe_trim = cur_char == ' '
        next_char = unistr[ind + 1]
        if not maybe_trim:
            trimmed_str.append(cur_char)
        else:
            # Keep a single space only between two characters accepted by
            # is_unichar_englishnum (presumably ASCII letters/digits — TODO
            # confirm against its definition); spaces next to other characters
            # (e.g. CJK) are dropped.
            if is_unichar_englishnum(prev_char) and is_unichar_englishnum(next_char):
                trimmed_str.append(cur_char)
            else:
                continue
    # Last character handled separately, mirroring the first.
    if unistr[-1] != ' ':
        trimmed_str.append(unistr[-1])
    return ''.join(trimmed_str)
if __name__ == "__main__":
m = Chatbot(SEQ2SEQ_MODEL_DIR + '/train/')
response = m.generate_answer(u'')
print response
| 36.708955 | 98 | 0.645456 |
02d7c976dba252653f990cef7776c119996e55c4 | 5,986 | py | Python | chip8_pygame_integration/config_test.py | Artoooooor/chip8 | d5132348f3081aeb9af19814d8251084ae723379 | [
"MIT"
] | null | null | null | chip8_pygame_integration/config_test.py | Artoooooor/chip8 | d5132348f3081aeb9af19814d8251084ae723379 | [
"MIT"
] | null | null | null | chip8_pygame_integration/config_test.py | Artoooooor/chip8 | d5132348f3081aeb9af19814d8251084ae723379 | [
"MIT"
] | null | null | null | import unittest
import pygame
from chip8_pygame_integration.config import get_config, KeyBind, to_text
# Baseline binding used by the tests: Ctrl+O mapped to 'some_command'.
DEFAULT = [KeyBind(pygame.K_o, pygame.KMOD_CTRL, 'some_command')]
if __name__ == '__main__':
    # Discover and run every unittest.TestCase defined in this module.
    unittest.main()
| 36.278788 | 77 | 0.664885 |
02d8c4f8a25b42b9035c973df73101d47ff6f388 | 1,934 | py | Python | Trees/Binary Trees/Preorder_Binary_Tree.py | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | 1 | 2021-03-21T18:54:34.000Z | 2021-03-21T18:54:34.000Z | Trees/Binary Trees/Preorder_Binary_Tree.py | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | null | null | null | Trees/Binary Trees/Preorder_Binary_Tree.py | jarvis-1805/DSAwithPYTHON | 872073d1b8d0001ea8b1a54b5e327dd0c1c406f2 | [
"Apache-2.0"
] | null | null | null | '''
Preorder Binary Tree
For a given Binary Tree of integers, print the pre-order traversal.
Input Format:
The first and the only line of input will contain the nodes data, all separated by a single space. Since -1 is used as an indication whether the left or right node data exist for root, it will not be a part of the node data.
Output Format:
The only line of output prints the pre-order traversal of the given binary tree.
Constraints:
1 <= N <= 10^6
Where N is the total number of nodes in the binary tree.
Time Limit: 1 sec
Sample Input 1:
5 6 10 2 3 -1 -1 -1 -1 -1 9 -1 -1
Sample Output 1:
5 6 2 3 9 10
Sample Input 2:
1 2 3 4 5 6 7 -1 -1 -1 -1 -1 -1 -1 -1
Sample Ouptut 2:
1 2 4 5 3 6 7
'''
from sys import stdin, setrecursionlimit
import queue
# Recursive traversal of a tree with up to 10^6 nodes would exceed the
# default recursion limit, so raise it first.
setrecursionlimit(10 ** 6)
#Following the structure used for Binary Tree
#Taking level-order input using fast I/O method
# Main: takeInput (level-order parser) and preOrder (traversal printer)
# are defined elsewhere in this file, not visible in this excerpt.
root = takeInput()
preOrder(root) | 21.977273 | 224 | 0.643226 |
02da14e11f8f22cf912a874caefb9a62ca916f39 | 1,693 | py | Python | test/pages/base_pages.py | gordonnguyen/10fastfingers-auto-type | 624a6667b67743904791929a36d12b0f12f50e05 | [
"MIT"
] | null | null | null | test/pages/base_pages.py | gordonnguyen/10fastfingers-auto-type | 624a6667b67743904791929a36d12b0f12f50e05 | [
"MIT"
] | 1 | 2021-03-16T13:31:33.000Z | 2021-03-16T13:31:33.000Z | test/pages/base_pages.py | gordonnguyen/10fastfingers-auto-type | 624a6667b67743904791929a36d12b0f12f50e05 | [
"MIT"
] | null | null | null | '''
Super classes for Page object
Serve as a base template for page
automating functions with selenium
'''
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from utils.best_buy.locators import Locators
from utils.best_buy import urls | 34.55102 | 106 | 0.705848 |
02db29d58f9fcbf982055980d5e6b51e86d8c020 | 2,419 | py | Python | Form-Filler.py | Zaidtech/AUTOMATION-SCRIPTS | 88c83e1edca02b0b86f3de4981a5f27f398b4441 | [
"MIT"
] | 4 | 2020-11-04T13:25:48.000Z | 2022-03-29T01:21:49.000Z | Form-Filler.py | Zaidtech/AUTOMATION-SCRIPTS | 88c83e1edca02b0b86f3de4981a5f27f398b4441 | [
"MIT"
] | null | null | null | Form-Filler.py | Zaidtech/AUTOMATION-SCRIPTS | 88c83e1edca02b0b86f3de4981a5f27f398b4441 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
This script has been tested on various custom google forms and other various forms with
few alteratios ..
Google forms which does include the input type "token" attribute are found
to be safer than those who don't.
Any form contains various fields.
1. input text fields
2. radio
3. checkboxes
4. textareas
5. Uploads --- important . still working.
"""
import re
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
params = {}
url = input("Enter the website url")
page = urlopen(url)
bs_obj = BeautifulSoup(page, 'html.parser')
# bs_obj.prettify() --> it's effects on the tags buried deep in the divs
requests.session()
input_tags = bs_obj.find_all('input')
# print(input_tags)
form_action = bs_obj.find('form') # some pages have multiple form tags ...
text_tags = bs_obj.find_all('textarea')
for text in text_tags:
try:
print(text['name'])
text['name'] = "Running around and fill this form"
except:
print('Key Error')
# if form_action.attrs['action'] == "" or None:
# print("Form action not specifies")
# else:
# print(form_action)
url = form_action.attrs['action']
print(f"Post request is send in here: {url}")
# there might be some custom fields which are to be looked and inspected manually as they skip the scrapper
# like params['entry.377191685'] = 'Faculty'
# params['tos'] = 'true'
# vary accordingly as at least an attck is just not that easy. ;-)
for tag in input_tags:
try:
print(tag.attrs['aria-label'])
except:
pass
try:
if tag.attrs['value'] == "" or None:
tag.attrs['value'] = input(f"Enter the value of {tag.attrs['name']}")
params[tag.attrs['name']] = tag.attrs['value']
# except:
# value= input(f"Enter the value of {tag.attrs['name']}")
# params[tag.attrs['name']] = value
else:
params[tag.attrs['name']] = tag.attrs['value'].strip('\n')
except:
pass
print(params)
# getting the dicts as printed here... which is to be submitted
while True:
requests.session()
r = requests.post(url, data=params)
print(r.status_code)
# 200 OK ---> submitted
# 400 BAD REQUEST ERROR --> input data corrupt or server incompatible
# 401 UNAOUTHORIZED ACCESS --> validation failed (need to deal with tokens and the cookies)
| 27.804598 | 107 | 0.653162 |
02dbc26cd1fbc18374360e0a5ad4732c9bed896a | 1,306 | py | Python | lektor_root_relative_path.py | a2csuga/lektor-root-relative-path | 5c200bdae50a78a2a295a3c0bb5440004b7fa72a | [
"MIT"
] | 2 | 2018-10-20T10:45:25.000Z | 2019-08-12T08:53:11.000Z | lektor_root_relative_path.py | a2csuga/lektor-root-relative-path | 5c200bdae50a78a2a295a3c0bb5440004b7fa72a | [
"MIT"
] | 2 | 2018-09-21T14:35:33.000Z | 2018-10-15T21:43:08.000Z | lektor_root_relative_path.py | a2csuga/lektor-root-relative-path | 5c200bdae50a78a2a295a3c0bb5440004b7fa72a | [
"MIT"
] | 1 | 2017-11-30T12:58:08.000Z | 2017-11-30T12:58:08.000Z | # -*- coding: utf-8 -*-
try:
# py3
from urllib.parse import urljoin, quote
except ImportError:
# py2
from urlparse import urljoin
from urllib import quote
from lektor.pluginsystem import Plugin
from furl import furl
| 31.853659 | 87 | 0.598009 |
02dbf3b5b09c9427c60b05103927121e020bab72 | 1,375 | py | Python | controllers/main.py | dduarte-odoogap/odoo_jenkins | 69bfcf088f75426c0e4b961a60b5c15a65b37979 | [
"BSD-2-Clause"
] | 5 | 2018-10-26T19:52:45.000Z | 2021-11-04T03:59:22.000Z | controllers/main.py | dduarte-odoogap/odoo_jenkins | 69bfcf088f75426c0e4b961a60b5c15a65b37979 | [
"BSD-2-Clause"
] | null | null | null | controllers/main.py | dduarte-odoogap/odoo_jenkins | 69bfcf088f75426c0e4b961a60b5c15a65b37979 | [
"BSD-2-Clause"
] | 6 | 2017-11-10T07:15:40.000Z | 2021-02-24T10:55:15.000Z | # -*- coding: utf-8 -*-
from odoo import http
from odoo.http import request
import jenkins
| 33.536585 | 95 | 0.611636 |
02dc9cb7558321b5cc7729d952a58e8fc90917a1 | 996 | py | Python | attendance.py | mykbgwl/Students-Attendance | bd9aef8cd12edff7fc47326fdeca6131eef575a6 | [
"Apache-2.0"
] | null | null | null | attendance.py | mykbgwl/Students-Attendance | bd9aef8cd12edff7fc47326fdeca6131eef575a6 | [
"Apache-2.0"
] | 1 | 2021-05-11T08:23:13.000Z | 2021-05-11T08:23:13.000Z | attendance.py | mykbgwl/Students-Attendance | bd9aef8cd12edff7fc47326fdeca6131eef575a6 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
import sys
import time
import pybase64
# Starting the webcam
capt = cv2.VideoCapture(0)
names = []
# Creating Attendees file
fob = open('attendees.txt', 'a+')
print('Reading code...')
# Function of Data present or not
while True:
_, frame = capt.read()
decodedObject = pyzbar.decode(frame)
for obj in decodedObject:
checkData(obj.data)
time.sleep(1)
cv2.imshow('Frame', frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
cv2.destroyAllWindows()
break
fob.close()
| 17.473684 | 63 | 0.567269 |
02dcb415d51e0799783c6d81ac253913ae5ccdc0 | 19,562 | py | Python | net.py | rishabnayak/SegAN | 6f9415a079d8417ecebec3279338423286decf1c | [
"MIT"
] | null | null | null | net.py | rishabnayak/SegAN | 6f9415a079d8417ecebec3279338423286decf1c | [
"MIT"
] | null | null | null | net.py | rishabnayak/SegAN | 6f9415a079d8417ecebec3279338423286decf1c | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch
from numpy.random import normal
from math import sqrt
import argparse
channel_dim = 3
ndf = 64
| 41.183158 | 86 | 0.517227 |
02dd42303eb7feb71bc2c94bd3f296e7b8ac9419 | 2,155 | py | Python | karmagrambot/__init__.py | caiopo/karmagrambot | 00935d0de228e516047bc3848344290be2cfcc0f | [
"MIT"
] | null | null | null | karmagrambot/__init__.py | caiopo/karmagrambot | 00935d0de228e516047bc3848344290be2cfcc0f | [
"MIT"
] | null | null | null | karmagrambot/__init__.py | caiopo/karmagrambot | 00935d0de228e516047bc3848344290be2cfcc0f | [
"MIT"
] | null | null | null | import dataset
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from .config import TOKEN, DB_URI
from .commands import HANDLERS
# Install default handlers on the root logger at import time (this module's
# only top-level side effect besides definitions).
logging.basicConfig()
| 21.767677 | 73 | 0.620418 |
02dff69165c131d9f3101aa1c12186dc1957dfcb | 647 | py | Python | app/services/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 1,875 | 2019-03-27T14:26:20.000Z | 2022-03-31T14:52:50.000Z | app/services/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 232 | 2019-04-11T11:05:48.000Z | 2022-03-05T10:23:50.000Z | app/services/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 433 | 2019-04-11T01:48:59.000Z | 2022-03-31T10:33:42.000Z | from slugify import slugify
from app.db.errors import EntityDoesNotExist
from app.db.repositories.articles import ArticlesRepository
from app.models.domain.articles import Article
from app.models.domain.users import User
| 26.958333 | 85 | 0.775889 |
02e1c3bcb817bb01646a68ea5b10f1ece433f8ce | 1,240 | py | Python | tests/test_decoder.py | carlosmouracorreia/python_mp3_decoder | 79ea61f3ceedc07a173a216538f4acbdf1c4c6c3 | [
"MIT"
] | 23 | 2016-06-22T14:18:28.000Z | 2020-11-23T12:39:01.000Z | tests/test_decoder.py | carlosmouracorreia/python_mp3_decoder | 79ea61f3ceedc07a173a216538f4acbdf1c4c6c3 | [
"MIT"
] | 3 | 2019-12-10T01:07:41.000Z | 2021-03-29T14:40:29.000Z | tests/test_decoder.py | carlosmouracorreia/python_mp3_decoder | 79ea61f3ceedc07a173a216538f4acbdf1c4c6c3 | [
"MIT"
] | 1 | 2019-10-29T11:55:08.000Z | 2019-10-29T11:55:08.000Z | from pymp3decoder import Decoder
import contextlib
import os
import math
import pyaudio
# Presumably the number of MP3 bytes fed to the decoder per read — confirm
# against the test bodies (not visible in this excerpt).
CHUNK_SIZE = 4096
| 24.313725 | 91 | 0.575806 |