hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a9b559c6d7c0db14da8219dc9c4e053b7a27ff8 | 442 | bzl | Python | tools/mirrors.bzl | kkiningh/slime | 85853115e284bda35b3da10957823d23428b65d3 | [
"Apache-2.0"
] | null | null | null | tools/mirrors.bzl | kkiningh/slime | 85853115e284bda35b3da10957823d23428b65d3 | [
"Apache-2.0"
] | null | null | null | tools/mirrors.bzl | kkiningh/slime | 85853115e284bda35b3da10957823d23428b65d3 | [
"Apache-2.0"
] | null | null | null | DEFAULT_MIRRORS = {
"bitbucket": [
"https://bitbucket.org/{repository}/get/{commit}.tar.gz",
],
"buildifier": [
"https://github.com/bazelbuild/buildtools/releases/download/{version}/{filename}",
],
"github": [
"https://github.com/{repository}/archive/{commit}.tar.gz",
],
"pypi": [
"https://files.pythonhosted.org/packages/source/{p}/{package}/{package}-{version}.tar.gz",
],
}
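# A hedged usage sketch (not part of this file): a repository macro could
# expand these templates with Starlark's str.format, e.g.
#
#     urls = [
#         m.format(repository = "bazelbuild/buildtools", commit = "abc123")
#         for m in DEFAULT_MIRRORS["github"]
#     ]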
| 29.466667 | 98 | 0.58371 |
4a9cb65121c1db2693bb70ca50a62adb977ff292 | 2,059 | py | Python | 201805_ChIP_ATAC/codes_old/read_txt.py | ScrippsPipkinLab/GenomeTracks | 89824daceba82f7a52cf8a31149845548fe1aa76 | [
"CC0-1.0"
] | null | null | null | 201805_ChIP_ATAC/codes_old/read_txt.py | ScrippsPipkinLab/GenomeTracks | 89824daceba82f7a52cf8a31149845548fe1aa76 | [
"CC0-1.0"
] | 2 | 2020-12-09T02:41:54.000Z | 2020-12-09T02:45:43.000Z | 201805_ChIP_ATAC/codes_old/read_txt.py | ScrippsPipkinLab/GenomeTracks | 89824daceba82f7a52cf8a31149845548fe1aa76 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 21:15:23 2017
@author: yolandatiao
"""
import csv
import glob
import os
from astropy.io import ascii # For using ascii table to open csv
from astropy.table import Table, Column # For using astropy table functions
os.chdir("/Volumes/Huitian/GSE88987/codes")
import fc_basic_astropy_subprocess as fc
os.chdir("/Volumes/Huitian/Genombrower/codes/txt")
flist=[]
for fname in glob.glob("*.txt"):
flist.append(fname)
nlist=[]
fnflist=[]
print len(flist)
for i in flist:
fnflist.append(i[:-4])
with open(i, "r") as fin:
rfin=csv.reader(fin, delimiter=",")
nlist.append(int(next(rfin)[0]))
#print nlist
outab=Table()
outab["filename_nf"]=fnflist
outab["bdgaccu"]=nlist
ascii.write(outab, "meta.csv", format="csv", overwrite=True)
metab=ascii.read("meta_write_bash.csv")
metab=fc.setcolnames(metab)
with open("bdgnorm.sh","r") as fin:
rfin=csv.reader(fin, delimiter=",")
inrow=next(rfin)[0]
print inrow
for x in xrange(0, len(metab)):
xshname="%s.sh"%x
with open(xshname, "w") as fout:
wfout=csv.writer(fout, delimiter="\t")
wfout.writerow(["cd /gpfs/home/hdiao/Geombrowser"])
outrow=inrow
osfactor=str(metab["1000000000_scalingfactor"][x])
ofname=str(metab["filename_nf"][x])
outrow=outrow.replace("sfactor", osfactor)
outrow=outrow.replace("inputfile", ofname)
fout.writelines(outrow)
with open("qsub.sh", "w") as fout:
for x in xrange(0, 66):
fout.writelines("qsub %s.sh"%x)
fout.writelines("\n")
os.chdir("/Volumes/Huitian/Genombrower/codes/rename")
meta=ascii.read("rename_meta.csv")
with open("rename.sh", "w") as fout:
for x in xrange(0, len(meta)):
fout.writelines("mv ")
fout.writelines(meta["oldname"][x])
fout.writelines(" ")
fout.writelines(meta["newnamenf"][x])
fout.writelines(".bdg")
fout.writelines("\n")
| 21.226804 | 78 | 0.629432 |
4a9cba0b5388d429f06edbee8329e6af7d50f140 | 674 | py | Python | tests/test_vendcrawler.py | josetaas/vendcrawler | 5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062 | [
"MIT"
] | null | null | null | tests/test_vendcrawler.py | josetaas/vendcrawler | 5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062 | [
"MIT"
] | null | null | null | tests/test_vendcrawler.py | josetaas/vendcrawler | 5cb497d0741f6dbd29a6e41fa9f1cb3374e8f062 | [
"MIT"
] | null | null | null | import unittest
from vendcrawler.scripts.vendcrawler import VendCrawler
if __name__ == '__main__':
unittest.main()
| 32.095238 | 73 | 0.615727 |
4aa2559e81941797f8eb297eceb0ea501eab99d6 | 7,104 | py | Python | services/spotify-service.py | thk4711/mediamanager | 8f6d21c220767aa9ee5d65635d2993dba07eceed | [
"MIT"
] | null | null | null | services/spotify-service.py | thk4711/mediamanager | 8f6d21c220767aa9ee5d65635d2993dba07eceed | [
"MIT"
] | null | null | null | services/spotify-service.py | thk4711/mediamanager | 8f6d21c220767aa9ee5d65635d2993dba07eceed | [
"MIT"
] | 1 | 2022-02-07T08:09:15.000Z | 2022-02-07T08:09:15.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import json
import os
import sys
import time
import urllib
import socket
import argparse
import requests
import lib.common as common
base_url = 'http://localhost:24879/player/'
#------------------------------------------------------------------------------#
# do something on startup #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# check if librespot-java is running #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# get metadata from spotify #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# get play status #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# get whats currently playing #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# get player data from API #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# read cover image fom spotify connect web #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# play next song #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# play previuous song #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# start playing #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# stop playing #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# handle http get request #
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# main program #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)
while True:
time.sleep(2000)
| 39.466667 | 91 | 0.366273 |
4aa2c859139d7fe0cc49624a8f1cffa727776312 | 9,913 | py | Python | Segment/models/other/fcn.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | Segment/models/other/fcn.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | Segment/models/other/fcn.py | YuHe0108/cvmodule | ea00a90fc9bbca5b2c7809791cbd1f7b0da526cd | [
"Apache-2.0"
] | null | null | null | # from tensorflow.keras import Model, Input
# from tensorflow.keras.applications import vgg16, resnet50
# from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
# from tensorflow.keras import layers
# import tensorflow as tf
#
# """
# FCN-8:
# 1. Replace the fully connected (fc) layers with fully convolutional layers.
# 2. Upsample the coarse score maps with transposed convolutions (deconv).
# 3. Fuse coarse and fine predictions through skip connections.
# 4. The skip branches (e.g. from pool3/pool4) are padded/cropped so the
#    lower-resolution layers are spatially aligned before being combined.
#
# FCN-8 vs. FCN-16 vs. FCN-32:
# Starting from an FCN feature map of shape [b, 16, 16, filters], a single
# 32x upsampling yields [b, 16*32, 16*32, n_classes]; that variant is FCN-32.
# FCN-16 and FCN-8 fuse skip connections first and then upsample by factors
# of 16 and 8, respectively.
# """
#
#
# def fcn8_helper(input_shape, num_classes, backbone):
# assert input_shape[0] % 32 == 0
# assert input_shape[1] % 32 == 0
#
# inputs = Input(input_shape)
# if backbone == 'vgg16':
# base_model = vgg16.VGG16(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=100)
# elif backbone == 'resnet50':
# base_model = resnet50.ResNet50(input_tensor=inputs,
# include_top=False,
# weights='imagenet',
# pooling=None,
# classes=1000)
# assert isinstance(base_model, Model)
#     base_model.trainable = False  # freeze the pretrained backbone weights
#
# out = Conv2D(
# filters=1024, kernel_size=7, padding="same", activation="relu", name="fc6")(base_model.output)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=1024, kernel_size=1, padding="same", activation="relu", name="fc7")(out)
# out = Dropout(rate=0.5)(out)
# out = Conv2D(
# filters=num_classes, kernel_size=(1, 1), padding="same", activation="relu",
# kernel_initializer="he_normal", name="score_fr")(out)
#
# # [B, 8, 8, filters] * 2 --> [None, 16, 16, n_classes]
# out = Conv2DTranspose(
# filters=num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score2")(out)
#
# fcn8 = Model(inputs=inputs, outputs=out)
# return fcn8
#
#
# def fcn8_model(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='vgg16')
#
# # "block4_pool" shape: [B, 16, 16, 512] :
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("block4_pool").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, filters]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("block3_pool").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 8, [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# def fcn8_model_resnet50(input_shape, num_classes):
# fcn8 = fcn8_helper(input_shape, num_classes, backbone='resnet50')
#
# # "block4_pool" shape: [B, 16, 16, 1024] :
# skip_con1 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool4")(fcn8.get_layer("conv4_block6_out").output)
# Summed = add(inputs=[skip_con1, fcn8.output])
#
# # [B, 32, 32, num_classes]
# x = Conv2DTranspose(
# num_classes, kernel_size=(2, 2), strides=(2, 2), padding="valid", activation=None, name="score4")(Summed)
#
# # block3_pool: [B, 32, 32, 512]
# skip_con2 = Conv2D(
# num_classes, kernel_size=(1, 1), padding="same", activation=None,
# kernel_initializer="he_normal", name="score_pool3")(fcn8.get_layer("conv3_block4_out").output)
# Summed2 = add(inputs=[skip_con2, x])
#
# # 8, [B, 32, 32, filters] --> [B, 32*8, 32*8, n_classes]
# outputs = Conv2DTranspose(
# num_classes, kernel_size=(8, 8), strides=(8, 8), padding="valid",
# activation='sigmoid', name="upsample")(Summed2)
#
# if num_classes == 1:
# outputs = layers.Activation('sigmoid')(outputs)
# else:
# outputs = layers.Softmax()(outputs)
#
# fcn_model = Model(inputs=fcn8.input, outputs=outputs, name='FCN8s')
# return fcn_model
#
#
# if __name__ == '__main__':
# # m = FCN8(15, 320, 320)
# # from keras.utils import plot_model
# #
# # plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# # print(len(m.layers))
# model_1 = fcn8_model_resnet50(input_shape=(256, 256, 3), num_classes=1)
# model_1.summary()
# # inputs = tf.keras.Input((256, 256, 3))
# # base_model = resnet50.ResNet50(input_tensor=inputs,
# # include_top=False,
# # weights='imagenet',
# # pooling=None,
# # classes=1000)
# # base_model.summary()
from tensorflow.keras.layers import (Conv2D, Conv2DTranspose, Cropping2D, add, Dropout, Reshape, Activation)
from tensorflow.keras.applications import vgg16, resnet50
from tensorflow.keras import Model, Input
from tensorflow.keras import layers
"""
FCN-8:
1. Replace the fully connected (fc) layers with fully convolutional layers.
2. Upsample the coarse score maps with transposed convolutions (deconv).
3. Fuse coarse and fine predictions through skip connections.
4. The skip branches (e.g. from pool3/pool4) are padded/cropped so the
   lower-resolution layers are spatially aligned before being combined.

FCN-8 vs. FCN-16 vs. FCN-32:
Starting from an FCN feature map of shape [b, 16, 16, filters], a single
32x upsampling yields [b, 16*32, 16*32, n_classes]; that variant is FCN-32.
FCN-16 and FCN-8 fuse skip connections first and then upsample by factors
of 16 and 8, respectively.
"""
if __name__ == '__main__':
# m = FCN8(15, 320, 320)
# from keras.utils import plot_model
#
# plot_model(m, show_shapes=True, to_file='model_fcn8.png')
# print(len(m.layers))
model_1 = fcn8_model(input_shape=(256, 256, 3), num_classes=1)
model_1.summary()
| 42.182979 | 121 | 0.605468 |
4aa366c3a95eb19c5533d5c2db8cc7a7e0760866 | 1,331 | py | Python | tests/Python/test_all_configs_output.py | lopippo/IsoSpec | dfc6d7dac213f174fb9c61a5ee018d3f6174febc | [
"BSD-2-Clause"
] | 27 | 2016-05-10T21:27:35.000Z | 2022-03-30T08:11:36.000Z | tests/Python/test_all_configs_output.py | lopippo/IsoSpec | dfc6d7dac213f174fb9c61a5ee018d3f6174febc | [
"BSD-2-Clause"
] | 30 | 2017-08-08T14:24:56.000Z | 2022-03-30T12:44:11.000Z | tests/Python/test_all_configs_output.py | lopippo/IsoSpec | dfc6d7dac213f174fb9c61a5ee018d3f6174febc | [
"BSD-2-Clause"
] | 10 | 2017-06-26T12:14:00.000Z | 2020-11-01T13:45:14.000Z | def binom(n, k):
"""Quickly adapted from https://stackoverflow.com/questions/26560726/python-binomial-coefficient"""
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def max_confs_cnt(formula=""):
"""Get the maximal number of configurations for a given chemical formula."""
from IsoSpecPy import IsoParamsFromFormula
f = IsoParamsFromFormula(formula)
if f.atomCount:
N = 1
for n, p in zip(f.atomCount, f.prob):
N *= binom(n+len(p)-1, n)
return N
else:
return 0
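# A small illustrative check (not in the original file), assuming IsoSpec
# models sulfur with its 4 stable isotopes: for "S10" the configuration
# count is the multiset coefficient binom(10 + 4 - 1, 10) = binom(13, 10).
#
#     assert binom(13, 10) == 286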
test_formulas = [ 'O100',
'O100N10S6',
'C100H202',
'S10H20' ]
def test_all_configs_output_cnt():
"""Test if IsoSpecPy output correctly all configurations."""
from IsoSpecPy import IsoThreshold
global test_formulas
for f in test_formulas:
I = IsoThreshold(formula=f, threshold=0.0, absolute=True)
assert len(I) == max_confs_cnt(f)
print("Seems OK!")
if __name__ == "__main__":
test_all_configs_output_cnt()
| 28.319149 | 103 | 0.602554 |
4aa38327240010c87a37f52f085b58c65fe79f76 | 5,090 | py | Python | tractseg/models/UNet_Pytorch_Regression.py | soichih/TractSeg | f78d0c6dc998905e593cbf4346745467e30d1979 | [
"Apache-2.0"
] | null | null | null | tractseg/models/UNet_Pytorch_Regression.py | soichih/TractSeg | f78d0c6dc998905e593cbf4346745467e30d1979 | [
"Apache-2.0"
] | null | null | null | tractseg/models/UNet_Pytorch_Regression.py | soichih/TractSeg | f78d0c6dc998905e593cbf4346745467e30d1979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adamax
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd import Variable
from tractseg.libs.PytorchUtils import PytorchUtils
from tractseg.libs.ExpUtils import ExpUtils
from tractseg.models.BaseModel import BaseModel
from tractseg.libs.MetricUtils import MetricUtils
from tractseg.libs.PytorchUtils import conv2d
from tractseg.libs.PytorchUtils import deconv2d
| 40.07874 | 167 | 0.678978 |
4aa4605e775071451ff4f02953c5854fc600fb27 | 1,619 | py | Python | platform/core/polyaxon/sidecar/sidecar/__main__.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/sidecar/sidecar/__main__.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | platform/core/polyaxon/sidecar/sidecar/__main__.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | import argparse
import time
from kubernetes.client.rest import ApiException
from polyaxon_client.client import PolyaxonClient
from polyaxon_k8s.manager import K8SManager
from sidecar import settings
from sidecar.monitor import is_pod_running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--app_label',
type=str
)
parser.add_argument(
'--container_id',
type=str
)
parser.add_argument(
'--sleep_interval',
default=2,
type=int
)
parser.add_argument(
'--max_restarts',
default=0,
type=int
)
args = parser.parse_args()
arguments = args.__dict__
container_id = arguments.pop('container_id')
app_label = arguments.pop('app_label')
sleep_interval = arguments.pop('sleep_interval')
max_restarts = arguments.pop('max_restarts')
k8s_manager = K8SManager(namespace=settings.K8S_NAMESPACE, in_cluster=True)
client = PolyaxonClient()
client.set_internal_health_check()
retry = 0
is_running = True
status = None
while is_running and retry < 3:
time.sleep(sleep_interval)
try:
is_running, status = is_pod_running(k8s_manager,
settings.POD_ID,
container_id,
max_restarts)
except ApiException:
retry += 1
            time.sleep(sleep_interval)  # We wait a bit more before trying again
if status:
client.reconcile(status=status)
| 27.440678 | 79 | 0.6084 |
4aa4e20dc8b2673c6655b3fbcb68df91576905a0 | 615 | py | Python | simple_robot_tests/src/test_odometry.py | plusangel/simple_robot | d9ad5ed8cd592f4aee14df13465435279b4d60d7 | [
"MIT"
] | 1 | 2022-03-02T14:55:27.000Z | 2022-03-02T14:55:27.000Z | simple_robot_tests/src/test_odometry.py | plusangel/simple_robot | d9ad5ed8cd592f4aee14df13465435279b4d60d7 | [
"MIT"
] | null | null | null | simple_robot_tests/src/test_odometry.py | plusangel/simple_robot | d9ad5ed8cd592f4aee14df13465435279b4d60d7 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
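class OdomTopicReader(object):
    """Minimal sketch of the class missing from this extract (an
    assumption, not the original code): subscribe to an odometry topic
    and log each incoming Odometry message."""

    def __init__(self, topic_name='/odom'):
        # Register the subscriber; messages are handled in _callback.
        self._sub = rospy.Subscriber(topic_name, Odometry, self._callback)

    def _callback(self, msg):
        rospy.loginfo(msg)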
if __name__ == "__main__":
rospy.init_node('odom_topic_subscriber')
odom_reader_object = OdomTopicReader()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
rate.sleep()
| 26.73913 | 85 | 0.681301 |
4aa67ef1976bb462a8e4797f9376dea3623f23b3 | 4,432 | py | Python | test/test_random.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 100 | 2020-12-01T02:40:12.000Z | 2021-09-09T08:14:22.000Z | test/test_random.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2021-01-05T00:16:17.000Z | 2021-09-10T03:24:01.000Z | test/test_random.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 25 | 2020-12-01T19:07:08.000Z | 2021-08-30T14:20:07.000Z | """Tests for quantization"""
import numpy as np
import unittest
import os
import shutil
import yaml
import tensorflow as tf
if __name__ == "__main__":
unittest.main()
| 32.115942 | 108 | 0.559792 |
4aa6a27eafd16cd1848b1b408b9c5ae618566d25 | 43,229 | py | Python | cirq/google/engine/engine_client_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | 1 | 2020-04-06T17:06:10.000Z | 2020-04-06T17:06:10.000Z | cirq/google/engine/engine_client_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | null | null | null | cirq/google/engine/engine_client_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | 1 | 2020-04-14T15:29:29.000Z | 2020-04-14T15:29:29.000Z | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for EngineClient."""
import datetime
from unittest import mock
import pytest
from google.api_core import exceptions
from google.protobuf.field_mask_pb2 import FieldMask
from google.protobuf.timestamp_pb2 import Timestamp
from cirq.google.engine.engine_client import EngineClient, EngineException
from cirq.google.engine.client import quantum
from cirq.google.engine.client.quantum_v1alpha1 import enums as qenums
from cirq.google.engine.client.quantum_v1alpha1 import types as qtypes
# yapf: disable
| 39.478539 | 80 | 0.637373 |
4aa74af42d1bc1038ceab671898746be1f6af4af | 3,163 | py | Python | google/ads/google_ads/v0/proto/services/media_file_service_pb2_grpc.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/services/media_file_service_pb2_grpc.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v0/proto/services/media_file_service_pb2_grpc.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v0.proto.resources import media_file_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2
from google.ads.google_ads.v0.proto.services import media_file_service_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2
def add_MediaFileServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetMediaFile': grpc.unary_unary_rpc_method_handler(
servicer.GetMediaFile,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.GetMediaFileRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_media__file__pb2.MediaFile.SerializeToString,
),
'MutateMediaFiles': grpc.unary_unary_rpc_method_handler(
servicer.MutateMediaFiles,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_media__file__service__pb2.MutateMediaFilesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v0.services.MediaFileService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
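# A hedged usage sketch (not part of the generated module), assuming a
# MyMediaFileServicer class exists that implements GetMediaFile and
# MutateMediaFiles:
#
#     from concurrent import futures
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     add_MediaFileServiceServicer_to_server(MyMediaFileServicer(), server)
#     server.add_insecure_port('[::]:50051')
#     server.start()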
| 48.661538 | 163 | 0.812204 |
4aa9aadd40d912fb75115061e304f8eab10a0530 | 15,044 | py | Python | docs/generate_example_images.py | KhaledSharif/kornia | 9bae28e032b092b065658117723a82816d09dbac | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | docs/generate_example_images.py | KhaledSharif/kornia | 9bae28e032b092b065658117723a82816d09dbac | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | docs/generate_example_images.py | KhaledSharif/kornia | 9bae28e032b092b065658117723a82816d09dbac | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import importlib
import math
import os
from pathlib import Path
from typing import Optional, Tuple
import cv2
import numpy as np
import requests
import torch
import kornia as K
if __name__ == "__main__":
main()
| 42.982857 | 120 | 0.545865 |
4aa9bb3cf3909a79588350f79db082251d5ab096 | 3,318 | py | Python | forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py | Pushkar-Bhuse/forte | b7402330cf0b2b26fe56234f0ae43c89b31c0082 | [
"Apache-2.0"
] | null | null | null | forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py | Pushkar-Bhuse/forte | b7402330cf0b2b26fe56234f0ae43c89b31c0082 | [
"Apache-2.0"
] | null | null | null | forte/processors/data_augment/algorithms/embedding_similarity_replacement_op.py | Pushkar-Bhuse/forte | b7402330cf0b2b26fe56234f0ae43c89b31c0082 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Tuple
import numpy as np
from texar.torch.data import Vocab, Embedding
from ft.onto.base_ontology import Annotation
from forte.common.configuration import Config
from forte.processors.data_augment.algorithms.text_replacement_op import (
TextReplacementOp,
)
__all__ = [
"EmbeddingSimilarityReplacementOp",
]
| 36.065217 | 77 | 0.676311 |
4aa9bbcefe6db481163c6d0a501873756cbebc17 | 565 | py | Python | src/sentry/receivers/experiments.py | FelixSchwarz/sentry | 7c92c4fa2b6b9f214764f48c82594acae1549e52 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/receivers/experiments.py | FelixSchwarz/sentry | 7c92c4fa2b6b9f214764f48c82594acae1549e52 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/receivers/experiments.py | FelixSchwarz/sentry | 7c92c4fa2b6b9f214764f48c82594acae1549e52 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, absolute_import
from sentry import analytics
from sentry.signals import join_request_created, join_request_link_viewed
| 33.235294 | 91 | 0.823009 |
4aaa0313e4b848ea3e028c07ae2b856db9916524 | 715 | py | Python | arturtamborskipl/urls.py | arturtamborski/arturtamborskipl | 9b93be045f58d5802d9a61568d7ecfbb12042b59 | [
"MIT"
] | 1 | 2017-05-05T12:01:43.000Z | 2017-05-05T12:01:43.000Z | arturtamborskipl/urls.py | arturtamborski/arturtamborskipl | 9b93be045f58d5802d9a61568d7ecfbb12042b59 | [
"MIT"
] | null | null | null | arturtamborskipl/urls.py | arturtamborski/arturtamborskipl | 9b93be045f58d5802d9a61568d7ecfbb12042b59 | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from django.conf import settings
from blog.sitemaps import ArticleSitemap
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}}, name='sitemap'),
url(r'^', include('blog.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| 29.791667 | 103 | 0.721678 |
4aaa0768bd968c91cbf077505e1dc0e7ee6365c8 | 34,840 | py | Python | ion_functions/qc/qc_functions.py | steinermg/ion-functions | cea532ad9af51e86768572c8deb48547d99567c5 | [
"Apache-2.0"
] | 10 | 2015-04-03T15:32:21.000Z | 2018-11-21T11:57:26.000Z | ion_functions/qc/qc_functions.py | steinermg/ion-functions | cea532ad9af51e86768572c8deb48547d99567c5 | [
"Apache-2.0"
] | 8 | 2015-01-07T15:19:22.000Z | 2015-12-08T18:14:04.000Z | ion_functions/qc/qc_functions.py | steinermg/ion-functions | cea532ad9af51e86768572c8deb48547d99567c5 | [
"Apache-2.0"
] | 17 | 2015-01-14T16:23:00.000Z | 2021-07-19T08:26:52.000Z | #!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author Christopher Mueller
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
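# Illustrative example (not from the DPS document): values outside the
# inclusive range [0, 10] are flagged 0, values inside are flagged 1.
#
#     >>> dataqc_globalrangetest(np.array([-1., 3., 17.]), [0, 10])
#     array([0, 1, 0], dtype=int8)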
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
        raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
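# Illustrative example (not from the DPS document): 1-D depth-dependent
# limits, [0, 10] at z = 0 tightening linearly to [0, 5] at z = 100; the
# interpolated upper limit at z = 50 is 7.5, so the middle value fails.
#
#     >>> dat = np.array([8., 8., 8.])
#     >>> z = np.array([0., 50., 100.])
#     >>> datlim = np.array([[0., 10.], [0., 5.]])
#     >>> datlimz = np.array([0., 100.])
#     >>> dataqc_localrangetest(dat, z, datlim, datlimz)
#     array([1, 0, 0], dtype=int8)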
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
        The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
        L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
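# Illustrative example (not from the DPS document): with the default
# N = 5 and L = 5, the 100 deviates from its four flat neighbors by far
# more than N * max(R, acc), so it is flagged as a spike.
#
#     >>> dat = np.array([1., 1., 1., 100., 1., 1., 1.])
#     >>> dataqc_spiketest(dat, acc=0.1)
#     array([1, 1, 1, 0, 1, 1, 1], dtype=int8)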
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
2013-05-30: Christopher Mueller. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
qcflag = Boolean, 0 a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
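# Illustrative example (not from the DPS document): a purely linear record
# is fully explained by the order-1 fit, so a trend is detected and every
# point is flagged 0.
#
#     >>> t = np.arange(10.)
#     >>> dataqc_polytrendtest(0.5 * t, t)
#     array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8)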
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
qcflag = =dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
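# Illustrative example (not from the DPS document): with num = 3, the run
# of three values within reso of each other is flagged 0.
#
#     >>> x = np.array([1., 2., 2., 2., 3., 4.])
#     >>> dataqc_stuckvaluetest(x, reso=1e-3, num=3)
#     array([1, 0, 0, 0, 1, 1], dtype=int8)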
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
threshold is exceeded, following dat are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
toldat = tolerance value (scalar) for dat; threshold to within which
dat must return to be counted as good, after exceeding a ddatdx
threshold detected bad data.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
            raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be montonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here, they are scalards and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
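# Illustrative example (not from the DPS document): with an allowed
# gradient of [-1, 1] per unit x, the jump to 10 is flagged bad; data are
# good again once they return to within toldat of the last good value, so
# the expected flags are [1, 1, 0, 1, 1].
#
#     >>> dat = np.array([1., 2., 10., 3., 4.])
#     >>> x = np.array([0., 1., 2., 3., 4.])
#     >>> dataqc_gradienttest(dat, x, [-1, 1], mindx=0, startdat=0., toldat=1.)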
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: Christopher Wingard. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
        lon = longitude (east is positive) [decimal degrees]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
Examples
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
OOI (2012). Data Product Specification for Solar Elevation. Document
Control Number 1341-100011.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
# size and can either be a scalar or a vecotr. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
for Matlab.
2013-04-06: Christopher Wingard. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
            raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
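# Illustrative example (not from the DPS document): the derived product is
# good only where every source flag is good.
#
#     >>> inflags = np.array([[1, 0, 1, 1],
#     ...                     [1, 1, 0, 1]])
#     >>> dataqc_propagateflags(inflags)
#     array([1, 0, 0, 1], dtype=int8)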
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivty record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
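# Illustrative example (not from the DPS document): re-scaling a 3.5 S/m
# conductivity after a 10 dbar pressure update, using the default cpcor.
#
#     >>> dataqc_condcompress(p_orig=1000.0, p_new=1010.0, c_orig=3.5)
#     3.500003...  # c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)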
| 37.746479 | 215 | 0.632319 |
4aaa32daecbc845e7f79a56464fde4fa9e4bd81d | 10,702 | py | Python | datatest/__past__/api08.py | avshalomt2/datatest | f622b0e990b53c73f56730a9009b39af7653df20 | [
"Apache-2.0"
] | null | null | null | datatest/__past__/api08.py | avshalomt2/datatest | f622b0e990b53c73f56730a9009b39af7653df20 | [
"Apache-2.0"
] | null | null | null | datatest/__past__/api08.py | avshalomt2/datatest | f622b0e990b53c73f56730a9009b39af7653df20 | [
"Apache-2.0"
] | null | null | null | """Backward compatibility for version 0.8 API."""
from __future__ import absolute_import
import inspect
import datatest
from datatest._compatibility import itertools
from datatest._compatibility.collections.abc import Sequence
from datatest._load.get_reader import get_reader
from datatest._load.load_csv import load_csv
from datatest._load.temptable import load_data
from datatest._load.temptable import new_table_name
from datatest._load.temptable import savepoint
from datatest._load.temptable import table_exists
from datatest._query.query import DEFAULT_CONNECTION
from datatest._query.query import BaseElement
from datatest._utils import file_types
from datatest._utils import string_types
from datatest._utils import iterpeek
from datatest.allowance import BaseAllowance
from datatest import Invalid
from datatest.difference import NOTFOUND
datatest.DataResult = datatest.Result
datatest.DataQuery = DataQuery
datatest.DataSource = DataSource
datatest.allowed_key = allowed_key
datatest.allowed_args = allowed_args
datatest.DataTestCase.subject = property(get_subject, set_subject)
datatest.DataTestCase.reference = property(get_reference, set_reference)
datatest.DataTestCase._find_data_source = staticmethod(_find_data_source)
def allowedKey(self, function, msg=None):
"""Allows differences in a mapping where *function* returns True.
For each difference, function will receive the associated mapping
**key** unpacked into one or more arguments.
"""
return allowed_key(function, msg)
datatest.DataTestCase.allowedKey = allowedKey
def allowedArgs(self, function, msg=None):
"""Allows differences where *function* returns True. For the
'args' attribute of each difference (a tuple), *function* must
accept the number of arguments unpacked from 'args'.
"""
return allowed_args(function, msg)
datatest.DataTestCase.allowedArgs = allowedArgs
def _require_sequence(data, sequence): # New behavior in datatest 0.8.3
"""Compare *data* against a *sequence* of values. Stops at the
first difference found and returns an AssertionError. If no
differences are found, returns None.
"""
if isinstance(data, str):
raise ValueError("uncomparable types: 'str' and sequence type")
data_type = getattr(data, 'evaluation_type', data.__class__)
if not issubclass(data_type, Sequence):
type_name = data_type.__name__
msg = "expected sequence type, but got " + repr(type_name)
raise ValueError(msg)
message_prefix = None
previous_element = NOTFOUND
zipped = itertools.zip_longest(data, sequence, fillvalue=NOTFOUND)
for index, (actual, expected) in enumerate(zipped):
if actual == expected:
previous_element = actual
continue
if actual == NOTFOUND:
message_prefix = ('Data sequence is missing '
'elements starting with index {0}').format(index)
message_suffix = 'Expected {0!r}'.format(expected)
elif expected == NOTFOUND:
message_prefix = ('Data sequence contains extra '
'elements starting with index {0}').format(index)
message_suffix = 'Found {0!r}'.format(actual)
else:
message_prefix = \
'Data sequence differs starting at index {0}'.format(index)
message_suffix = \
'Found {0!r}, expected {1!r}'.format(actual, expected)
break
else: # <- NOBREAK!
return None # <- EXIT!
leading_elements = []
if index > 1:
leading_elements.append('...')
if previous_element != NOTFOUND:
leading_elements.append(repr(previous_element))
actual_repr = repr(actual) if actual != NOTFOUND else '?????'
caret_underline = '^' * len(actual_repr)
trailing_elements = []
next_tuple = next(zipped, NOTFOUND)
if next_tuple != NOTFOUND:
trailing_elements.append(repr(next_tuple[0]))
if next(zipped, NOTFOUND) != NOTFOUND:
trailing_elements.append('...')
if leading_elements:
leading_string = ', '.join(leading_elements) + ', '
else:
leading_string = ''
leading_whitespace = ' ' * len(leading_string)
if trailing_elements:
trailing_string = ', ' + ', '.join(trailing_elements)
else:
trailing_string = ''
sequence_string = leading_string + actual_repr + trailing_string
message = '{0}:\n\n {1}\n {2}{3}\n{4}'.format(message_prefix,
sequence_string,
leading_whitespace,
caret_underline,
message_suffix)
return AssertionError(message)
datatest.validation._require_sequence = _require_sequence
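# Illustrative sketch of the failure message (sample values invented):
# >>> err = _require_sequence(['a', 'x', 'c'], ['a', 'b', 'c'])
# >>> print(err)
# Data sequence differs starting at index 1:
#
#   'a', 'x', 'c'
#        ^^^
# Found 'x', expected 'b'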
| 36.03367 | 86 | 0.652775 |
4aab427f1e96678aba34462ced9f7928129f2aef | 7,288 | py | Python | lib/reinteract/editor.py | jonkuhn/reinteract-jk | 319c8d930f142cf3c3b8693fbff1b84fd582387c | [
"BSD-2-Clause"
] | 1 | 2016-05-08T14:35:25.000Z | 2016-05-08T14:35:25.000Z | lib/reinteract/editor.py | jonkuhn/reinteract-jk | 319c8d930f142cf3c3b8693fbff1b84fd582387c | [
"BSD-2-Clause"
] | null | null | null | lib/reinteract/editor.py | jonkuhn/reinteract-jk | 319c8d930f142cf3c3b8693fbff1b84fd582387c | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2008 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import gobject
import gtk
import pango
from application import application
from format_escaped import format_escaped
from notebook import NotebookFile
from shell_buffer import ShellBuffer
from shell_view import ShellView
from save_file import SaveFileBuilder
| 29.387097 | 109 | 0.565587 |
4aabc93bad87b0dbf891bc8cb36cc3c5cdca1038 | 5,975 | py | Python | python/scripts/compare_events.py | tvogels01/arthur-redshift-etl | 477f822d16cd3a86b3bf95cfa28915cb7470a6e4 | [
"MIT"
] | null | null | null | python/scripts/compare_events.py | tvogels01/arthur-redshift-etl | 477f822d16cd3a86b3bf95cfa28915cb7470a6e4 | [
"MIT"
] | 44 | 2021-11-22T02:18:41.000Z | 2022-03-28T02:13:32.000Z | python/scripts/compare_events.py | tvogels01/arthur-redshift-etl | 477f822d16cd3a86b3bf95cfa28915cb7470a6e4 | [
"MIT"
] | null | null | null | """
This script compares events from two ETLs to highlight differences in elapsed times or row counts.
* Pre-requisites
You need to have a list of events for each ETL. Arthur can provide this using the
"query_events" command.
For example:
```
arthur.py query_events -p development 37ACEC7440AB4620 -q > 37ACEC7440AB4620.events
arthur.py query_events -p development 96BE11B234F84F39 -q > 96BE11B234F84F39.events
```
* Usage
Once you have the files, you use this script:
```
compare_events.py 37ACEC7440AB4620.events 96BE11B234F84F39.events
```
The order of those two files is: "older ETL" => "newer ETL".
"""
import csv
import re
import sys
from collections import defaultdict, namedtuple
from math import isclose
from tabulate import tabulate
def extract_values(filename):
"""Find elapsed time and rowcount for each target relation."""
# The "lambda: None" trick allows us to use 'd[]' instead of 'd.get()' later.
elapsed = defaultdict(lambda: None)
rowcount = defaultdict(lambda: None)
for row in parse_file(filename):
elapsed[row.step, row.target] = float(row.elapsed) if row.elapsed != "---" else None
rowcount[row.step, row.target] = int(row.rowcount) if row.rowcount != "---" else None
return elapsed, rowcount
def delta(a, b):
"""
Return change in percent (or None if undefined).
The delta in percent is rounded to one decimal.
"""
if a is None or b is None:
return None
if a == 0.0 and b == 0.0:
return 0.0
assert a != 0.0 and b != 0.0
return round((b - a) * 1000.0 / a) / 10.0
def show_delta(previous_value, current_value, column):
"""
Return whether the change from previous event to current event is "significant".
If the values appear to be equal or almost equal, there's no need to report a delta.
Also, if the values are really small and any change is inflated, skip reporting the delta.
Note that for row count, a decrease in rows is always shown.
"""
if previous_value is None or current_value is None:
return False
if previous_value == current_value:
return False
if column == "elapsed":
# Decrease trigger-happiness for quick loads:
if previous_value < 10.0 and current_value < 10.0:
return False
if previous_value < 30.0 or current_value < 30.0:
return not isclose(previous_value, current_value, abs_tol=20.0)
if previous_value < 60.0 or current_value < 60.0:
return not isclose(previous_value, current_value, rel_tol=0.5)
if previous_value < 300.0 or current_value < 300.0:
return not isclose(previous_value, current_value, rel_tol=0.2)
if column == "rowcount":
# We expect to move forward with growing tables so smaller row counts are suspect.
if previous_value > current_value:
return True
# Increase trigger-happiness for small (dimensional) tables:
if previous_value < 1000 or current_value < 1000:
return not isclose(previous_value, current_value, abs_tol=10)
return not isclose(previous_value, current_value, rel_tol=0.1)
def print_comparison_table(previous_values, current_values, column):
"""Print differences between runs, sorted by relation."""
all_events = frozenset(previous_values).union(current_values)
has_large_diff = frozenset(
event
for event in all_events
if show_delta(previous_values[event], current_values[event], column)
)
table = sorted(
(
(
event[1], # target
event[0], # step
previous_values[event],
current_values[event],
delta(previous_values[event], current_values[event]),
)
for event in has_large_diff
),
key=lambda row: row[:2], # Avoid comparison with None values in the columns
)
print("Differences for '{}':\n".format(column))
print(
tabulate(
table,
headers=("target", "step", "prev. " + column, "cur. " + column, "delta %"),
tablefmt="presto",
)
)
def main():
if len(sys.argv) >= 2 and sys.argv[1] in ("-h", "--help"):
print(__doc__)
sys.exit(0)
if len(sys.argv) != 3:
print(
"Usage: {prog} previous_events current_events".format(prog=sys.argv[0]),
file=sys.stderr,
)
sys.exit(1)
previous_events_file, current_events_file = sys.argv[1:3]
previous_elapsed, previous_rowcount = extract_values(previous_events_file)
current_elapsed, current_rowcount = extract_values(current_events_file)
print_comparison_table(previous_elapsed, current_elapsed, "elapsed")
print()
print_comparison_table(previous_rowcount, current_rowcount, "rowcount")
if __name__ == "__main__":
main()
| 33.948864 | 101 | 0.654226 |
4aacfc97b162e67687e0053e093dc275ef1915a8 | 4,163 | py | Python | harness/drifter.py | cmu-sei/augur-code | d8c1e29ce3276037b26b65ea316d251752529449 | [
"BSD-3-Clause"
] | null | null | null | harness/drifter.py | cmu-sei/augur-code | d8c1e29ce3276037b26b65ea316d251752529449 | [
"BSD-3-Clause"
] | null | null | null | harness/drifter.py | cmu-sei/augur-code | d8c1e29ce3276037b26b65ea316d251752529449 | [
"BSD-3-Clause"
] | null | null | null | # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact permission@sei.cmu.edu for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 Jonathan E. Taylor, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
import shutil
from drift import drift_generator
from utils import arguments
from utils.config import Config
from utils import logging
from datasets import dataset
LOG_FILE_NAME = "drifter.log"
DEFAULT_CONFIG_FILENAME = "./drifter_config.json"
DRIFT_EXP_CONFIG_FOLDER = "../experiments/drifter"
def load_dataset(dataset_filename, dataset_class_name):
"""Load dataset to drift."""
dataset_class = dataset.load_dataset_class(dataset_class_name)
base_dataset = dataset_class()
base_dataset.load_from_file(dataset_filename)
return base_dataset
if __name__ == '__main__':
main()
| 54.064935 | 513 | 0.764833 |
4aadfd2d97ab81dd6472cc9d6d7741a3c62a553c | 2,316 | py | Python | server/server-flask/app/docs/admin/survey/survey.py | DSM-DMS/Project-DMS-Web | 73a5d8fc2310bca90169414abf50f541ca0724c7 | [
"MIT"
] | 11 | 2017-07-04T07:44:07.000Z | 2017-09-19T12:56:55.000Z | server/server-flask/app/docs/admin/survey/survey.py | DSM-DMS/DMS | 73a5d8fc2310bca90169414abf50f541ca0724c7 | [
"MIT"
] | null | null | null | server/server-flask/app/docs/admin/survey/survey.py | DSM-DMS/DMS | 73a5d8fc2310bca90169414abf50f541ca0724c7 | [
"MIT"
] | 2 | 2017-10-23T06:11:16.000Z | 2017-10-26T03:27:57.000Z | SURVEY_POST = {
'tags': [' '],
'description': ' ',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': ' ',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'start_date',
'description': ' (YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'end_date',
'description': ' (YYYY-MM-DD)',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'target',
'description': ' ',
'in': 'formData',
'type': 'list',
'required': True
}
],
'responses': {
'201': {
'description': ' '
},
'403': {
'description': ' '
}
}
}
QUESTION_POST = {
'tags': [' '],
'description': ' ',
'parameters': [
{
'name': 'Authorization',
'description': 'JWT Token',
'in': 'header',
'type': 'str',
'required': True
},
{
'name': 'id',
'description': ' ID',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'title',
'description': ' ',
'in': 'formData',
'type': 'str',
'required': True
},
{
'name': 'is_objective',
'description': ' ',
'in': 'formData',
'type': 'bool',
'required': True
},
{
'name': 'choice_paper',
'description': ' ',
'in': 'formData',
'type': 'list',
'required': False
}
],
'responses': {
'201': {
'description': ' '
},
'403': {
'description': ' '
}
}
}
| 23.16 | 47 | 0.345423 |
4aae62b164701dc61724cb01ba008cf15083826f | 8,528 | py | Python | network/baselines_archive/resnet_3d101.py | xuyu0010/ARID_v1 | b03d0975f41547e8aa78929b8e26a62248f8e18f | [
"CC-BY-4.0"
] | 5 | 2020-06-24T07:33:36.000Z | 2021-11-30T17:52:08.000Z | network/baselines_archive/resnet_3d101.py | xuyu0010/ARID_v1 | b03d0975f41547e8aa78929b8e26a62248f8e18f | [
"CC-BY-4.0"
] | 1 | 2022-03-29T05:23:24.000Z | 2022-03-29T06:19:57.000Z | network/baselines_archive/resnet_3d101.py | xuyu0010/ARID_v1 | b03d0975f41547e8aa78929b8e26a62248f8e18f | [
"CC-BY-4.0"
] | 3 | 2021-02-06T10:56:30.000Z | 2022-01-18T18:50:12.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
import logging
import os
try:
from . import initializer
from .utils import load_state
except:
import initializer
from utils import load_state
__all__ = ['ResNet', 'RESNET101']
def get_fine_tuning_parameters(model, ft_begin_index):
if ft_begin_index == 0:
return model.parameters()
ft_module_names = []
for i in range(ft_begin_index, 5):
ft_module_names.append('layer{}'.format(i))
ft_module_names.append('fc')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
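# Usage sketch (hypothetical values): parameter groups before the chosen
# layer keep lr 0.0, so with ft_begin_index=3 only layer3, layer4 and fc
# are actually fine-tuned by the optimizer.
# >>> params = get_fine_tuning_parameters(model, ft_begin_index=3)
# >>> optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9)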
def RESNET101(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == "__main__":
import torch
logging.getLogger().setLevel(logging.DEBUG)
# ---------
net1 = RESNET101(num_classes=11, pretrained=True)
data = torch.randn(1,3,16,224,224)
output1 = net1(data)
    print(output1.shape)
| 31.238095 | 124 | 0.533654 |
4ab01eaed0874fd5f366410cee4ae62597dd8de5 | 4,167 | py | Python | tests/ninety_nine_problems/test_miscellaneous_problems.py | gecBurton/inference_logic | 2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6 | [
"MIT"
] | 3 | 2020-10-19T20:35:24.000Z | 2020-10-21T07:13:02.000Z | tests/ninety_nine_problems/test_miscellaneous_problems.py | gecBurton/inference_logic | 2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6 | [
"MIT"
] | 2 | 2020-11-10T16:54:13.000Z | 2020-11-10T18:51:31.000Z | tests/ninety_nine_problems/test_miscellaneous_problems.py | gecBurton/inference_logic | 2531d8f8fb0154b3bd42ac86eccc44d7038f6ef6 | [
"MIT"
] | 1 | 2020-10-21T07:13:14.000Z | 2020-10-21T07:13:14.000Z | import pytest
from inference_logic import Rule, Variable, search
from inference_logic.data_structures import Assert, Assign
| 35.313559 | 88 | 0.538037 |
4ab03ce1ed84ecb90d03ef035bc80050cf57b143 | 4,856 | py | Python | airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 4 | 2017-10-15T23:32:24.000Z | 2019-12-26T12:31:53.000Z | airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 6 | 2017-09-05T13:52:00.000Z | 2017-12-01T14:18:27.000Z | airbus_cobot_gui/src/airbus_cobot_gui/diagnostics/diagnostics.py | ipa320/airbus_coop | 974564807ba5d24096e237a9991311608a390da1 | [
"Apache-2.0"
] | 4 | 2017-09-04T08:14:36.000Z | 2017-09-18T07:22:21.000Z | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.res import R
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from airbus_pyqt_extend.QtAgiGui import QAgiPopup
from rqt_robot_monitor.status_item import StatusItem
import rqt_robot_monitor.util_robot_monitor as util
## @class DiagnosticsStatus
## @brief Class for defining different control statuses.
#OK = 0
#WARN = 1
#ERROR = 2
#STALE = 3
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
app = QApplication(sys.argv)
main = QMainWindow()
main.setCentralWidget(TranslatorUi(Context(main)))
main.show()
app.exec_()
#End of file
| 35.97037 | 150 | 0.691928 |
4ab09ca1eb1f8969f1a877484a232ef5df5d51ce | 253 | py | Python | sanansaattaja/website/forms/comment_form.py | KEZKA/YL-WEB-PROJECT | dcefb490bdd6a1ae8449b3cbd5d6b36219506e8f | [
"MIT"
] | 3 | 2020-04-04T12:48:11.000Z | 2020-05-03T04:12:34.000Z | sanansaattaja/website/forms/comment_form.py | KEZKA/YL-WEB-PROJECT | dcefb490bdd6a1ae8449b3cbd5d6b36219506e8f | [
"MIT"
] | null | null | null | sanansaattaja/website/forms/comment_form.py | KEZKA/YL-WEB-PROJECT | dcefb490bdd6a1ae8449b3cbd5d6b36219506e8f | [
"MIT"
] | 3 | 2020-03-24T15:49:50.000Z | 2020-05-04T14:00:27.000Z | from flask_wtf import FlaskForm
from wtforms import SubmitField, TextAreaField
from wtforms.validators import DataRequired
| 28.111111 | 61 | 0.790514 |
4ab14530560ea0c6ff68422c45af6c1228280da2 | 758 | py | Python | graphgallery/functional/dense/onehot.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | 1 | 2020-07-29T08:00:32.000Z | 2020-07-29T08:00:32.000Z | graphgallery/functional/dense/onehot.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | graphgallery/functional/dense/onehot.py | dongzizhu/GraphGallery | c65eab42daeb52de5019609fe7b368e30863b4ae | [
"MIT"
] | null | null | null | import numpy as np
from ..transform import DenseTransform
from ..decorators import multiple
from ..transform import Transform
__all__ = ['onehot', 'Onehot']
| 25.266667 | 84 | 0.634565 |
4ab1c757764878f4d5cd5d264e6b8d82bbff63ba | 12,893 | py | Python | models.py | Bileonaire/api-ridemyway | af5a669c811356998e1935ace555ba955de1e8d0 | [
"MIT"
] | null | null | null | models.py | Bileonaire/api-ridemyway | af5a669c811356998e1935ace555ba955de1e8d0 | [
"MIT"
] | null | null | null | models.py | Bileonaire/api-ridemyway | af5a669c811356998e1935ace555ba955de1e8d0 | [
"MIT"
] | 1 | 2018-10-20T12:00:53.000Z | 2018-10-20T12:00:53.000Z | """Handles data storage for Users, rides and requests
"""
# pylint: disable=E1101
import datetime
from flask import make_response, jsonify, current_app
from werkzeug.security import generate_password_hash
import psycopg2
import config
from databasesetup import db
| 39.307927 | 173 | 0.556891 |
4ab3a002c74475748d23b9510c6318a19949f281 | 752 | py | Python | lesson06/liqi/test.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson06/liqi/test.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson06/liqi/test.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | import configparser
'''
config = configparser.ConfigParser()
config.read('db.ini')
print(config.sections())
print(dict(config['mysqld'])['symbolic-links'])
'''
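# ReadConfig is called below but its definition was missing from this file.
# The implementation here is a minimal, assumed sketch: it returns
# (value, True) when the section/option exists and (None, False) otherwise.
# allow_no_value=True is an assumption so option-only lines in db.ini parse.
def ReadConfig(filename, section, option):
    config = configparser.ConfigParser(allow_no_value=True)
    config.read(filename)
    if config.has_option(section, option):
        return config.get(section, option), True
    return None, False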
result, ok = ReadConfig('db.ini', 'mysqld', 'socket')
print(ok)
print(result)
if __name__ == '__main__':
ReadConfig('db.ini','mysqld','socket') | 22.117647 | 53 | 0.640957 |
4ab3ac9ae685aecfb387f1a734cc96132d725108 | 1,947 | py | Python | core/forms.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | core/forms.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | core/forms.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Forms wrapper
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from django import forms
from django.utils.encoding import force_unicode
from django.utils.html import escape
| 32.45 | 83 | 0.558295 |
4ab43c897df779b46c4155028b30eff4d2ad17d1 | 1,990 | py | Python | ersteops/unit/views.py | Prescrypto/ErsteOps | 0b744173fb4f500003c96c4dcb26fb67d6eaa5ec | [
"MIT"
] | null | null | null | ersteops/unit/views.py | Prescrypto/ErsteOps | 0b744173fb4f500003c96c4dcb26fb67d6eaa5ec | [
"MIT"
] | 33 | 2017-11-24T19:44:57.000Z | 2022-02-12T07:02:53.000Z | ersteops/unit/views.py | Prescrypto/ErsteOps | 0b744173fb4f500003c96c4dcb26fb67d6eaa5ec | [
"MIT"
] | 1 | 2017-12-11T09:15:04.000Z | 2017-12-11T09:15:04.000Z | import json
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.http import HttpResponse
from .models import Unit
from .utils import UNIT_LIST_FIELD
BAD_REQUEST = HttpResponse(json.dumps({'error': 'Bad Request'}), status=400, content_type='application/json')
def unit_json_list(request):
''' List Json View for local available units '''
if request.is_ajax():
units = Unit.objects.available_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
_raw_data = json.loads(data)
for unit in _raw_data:
if unit['fields']['is_alliance']:
unit['fields'].update({'identifier': '{}{}'.format(unit['fields']['identifier'],' (Alianza)')})
else:
continue
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def detail_unit_json(request, id_unit):
''' Detail view of unit '''
if request.is_ajax():
unit = Unit.objects.filter(pk=id_unit)
if len(unit) == 0:
return HttpResponse(json.dumps({'error': 'Unidad no encontrada'}), status=404, content_type='application/json')
data = serializers.serialize('json', unit, fields=UNIT_LIST_FIELD)
# Add crew list
_raw_data = json.loads(data)
_raw_data[0]['fields'].update({
'crew_list' : unit.first().get_crew_list
})
return HttpResponse(json.dumps(_raw_data), content_type='application/json', status=200)
else:
return BAD_REQUEST
def alliance_unit_json_list(request):
''' List Json View for alliance available units '''
if request.is_ajax():
units = Unit.objects.available_alliance_units()
data = serializers.serialize('json', list(units), fields=UNIT_LIST_FIELD)
return HttpResponse(data, content_type='application/json', status=200)
else:
return BAD_REQUEST
| 39.019608 | 123 | 0.665327 |
4ab456260f6c742ad312aaa99e3e2590ddc0675c | 731 | py | Python | olamundo.py/exercicios_refeitos/ex029.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios_refeitos/ex029.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | olamundo.py/exercicios_refeitos/ex029.py | gabrielviticov/exercicios-python | 4068cb0029513f8ab8bd12fa3a9055f37b4040d4 | [
"MIT"
] | null | null | null | '''
ex029: Write a program that reads a car's speed. If it exceeds 80 km/h, show a message saying the driver has been fined. The fine costs R$ 7.00 for each km/h over the limit.
'''
from colorise import set_color, reset_color
cor = {
'limpa':'\033[m',
'white':'\033[1;97m'
}
set_color(fg='green')
velocidade_carro = int(input('Enter the car speed in km/h: '))
if velocidade_carro > 80:
multa = (velocidade_carro - 80) * 7.00
    print('\nFINED! YOU EXCEEDED THE PERMITTED LIMIT, SO YOU WILL HAVE TO PAY ', end='')
reset_color()
print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
set_color(fg='green')
    print('\nKEEP IT UP. DRIVING SAFELY!')
| 36.55 | 195 | 0.679891 |
4ab62b5efbeb5c0a7886f27f8824551ce65f3eab | 256 | py | Python | fruit/mixin/drawable.py | felko/fruit | 4768fd333ac3b7c0bd6d339304b23e20e312d2d1 | [
"MIT"
] | 4 | 2017-06-14T14:50:05.000Z | 2019-07-29T16:51:24.000Z | fruit/mixin/drawable.py | felko/fruit | 4768fd333ac3b7c0bd6d339304b23e20e312d2d1 | [
"MIT"
] | null | null | null | fruit/mixin/drawable.py | felko/fruit | 4768fd333ac3b7c0bd6d339304b23e20e312d2d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.4
# coding: utf-8
| 15.058824 | 64 | 0.648438 |
4ab74d6454f4022c0cd33cf7aa9d2924c227290a | 2,394 | py | Python | src/action/tests/test_logic.py | uts-cic/ontask_b | b313e2352c77b40655f41dd5acba3a7635e6f3b3 | [
"MIT"
] | 3 | 2018-08-24T10:48:40.000Z | 2020-05-29T06:33:23.000Z | src/action/tests/test_logic.py | Lukahm/ontask | f16bdaa06ea450ee56d4581340e611b1076bed16 | [
"MIT"
] | null | null | null | src/action/tests/test_logic.py | Lukahm/ontask | f16bdaa06ea450ee56d4581340e611b1076bed16 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.core.management import call_command
import test
from dataops import pandas_db
from workflow.models import Workflow
| 38.612903 | 202 | 0.704678 |
4ab90259acfbeda3412addc434ad2001de65b77a | 5,371 | py | Python | obniz/parts/Moving/StepperMotor/__init__.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 11 | 2019-03-22T12:02:11.000Z | 2021-01-21T04:57:18.000Z | obniz/parts/Moving/StepperMotor/__init__.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 5 | 2019-03-02T08:28:25.000Z | 2021-02-02T22:06:37.000Z | obniz/parts/Moving/StepperMotor/__init__.py | izm51/obniz-python-sdk | 40a738b5fe2c0a415cdc09f46d28c143982bfb07 | [
"MIT"
] | 3 | 2019-07-20T06:55:09.000Z | 2019-12-04T05:05:00.000Z | from attrdict import AttrDefault
import asyncio
def current_distance(self):
return self.current_step / self.milli_meter_step_count
def _get_step_instructions(self):
return self._step_instructions[self._step_type] | 37.559441 | 135 | 0.571216 |
4ab990bdf241333adbd85027e848b2433fdf4b53 | 52 | py | Python | basic_assignment/39.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null | basic_assignment/39.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null | basic_assignment/39.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null | input_num = raw_input()
print(str(eval(input_num)))
| 17.333333 | 27 | 0.75 |
4ab9a2726c38f9bfc3c6566bc523e4832e60605f | 2,808 | py | Python | website/website/apps/entry/admin.py | SimonGreenhill/Language5 | c59f502dda7be27fc338f0338cc3b03e63bad9c8 | [
"MIT"
] | 1 | 2020-08-17T05:56:16.000Z | 2020-08-17T05:56:16.000Z | website/website/apps/entry/admin.py | SimonGreenhill/Language5 | c59f502dda7be27fc338f0338cc3b03e63bad9c8 | [
"MIT"
] | 5 | 2020-06-05T17:51:56.000Z | 2022-01-13T00:42:51.000Z | website/website/apps/entry/admin.py | SimonGreenhill/Language5 | c59f502dda7be27fc338f0338cc3b03e63bad9c8 | [
"MIT"
] | 1 | 2015-02-23T22:54:00.000Z | 2015-02-23T22:54:00.000Z | from django.contrib import admin
from django.db.models import Count
from reversion.admin import VersionAdmin
from website.apps.lexicon.models import Lexicon
from website.apps.entry.models import Task, TaskLog, Wordlist, WordlistMember
from website.apps.core.admin import TrackedModelAdmin
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
admin.site.register(Wordlist, TaskWordlistAdmin)
| 33.428571 | 103 | 0.666667 |
4abb4683ee2e4ff41f7985424a94c70975cdae94 | 356 | py | Python | src/modules/python.py | fest2bash/fest2bash | 008282f67d4d4415c27b3b9b6162daf54f8d6028 | [
"MIT"
] | null | null | null | src/modules/python.py | fest2bash/fest2bash | 008282f67d4d4415c27b3b9b6162daf54f8d6028 | [
"MIT"
] | null | null | null | src/modules/python.py | fest2bash/fest2bash | 008282f67d4d4415c27b3b9b6162daf54f8d6028 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
sys.dont_write_bytecode = True
from pprint import pprint
from base import BaseFest2Bash
| 19.777778 | 49 | 0.710674 |
4abb8389f46537b21c77c0aa5024c68649d338e4 | 2,241 | py | Python | opennem/utils/scrapyd.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 22 | 2020-06-30T05:27:21.000Z | 2022-02-21T12:13:51.000Z | opennem/utils/scrapyd.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 71 | 2020-08-07T13:06:30.000Z | 2022-03-15T06:44:49.000Z | opennem/utils/scrapyd.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 13 | 2020-06-30T03:28:32.000Z | 2021-12-30T08:17:16.000Z | #!/usr/bin/env python
"""
Srapyd control methods
"""
import logging
from typing import Any, Dict, List
from urllib.parse import urljoin
from opennem.settings import settings
from opennem.utils.http import http
from opennem.utils.scrapy import get_spiders
logger = logging.getLogger("scrapyd.client")
| 23.103093 | 87 | 0.635431 |
4abeb59415a08109665cd4a0b2b19c7296f2ab4d | 6,316 | py | Python | src/abaqus/Material/Elastic/Linear/Elastic.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Material/Elastic/Linear/Elastic.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Material/Elastic/Linear/Elastic.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
from .FailStrain import FailStrain
from .FailStress import FailStress
| 32.22449 | 103 | 0.572198 |
4abedd6e3a784e93e18495ecbb7c4ea6ddcaa98a | 3,255 | py | Python | apps/pyscrabble/pyscrabble-hatchet/setup.py | UWSysLab/diamond | 1beec323c084d9d477c770ca6b9625c8f5682a39 | [
"MIT"
] | 19 | 2016-08-22T23:54:24.000Z | 2021-03-19T08:08:35.000Z | apps/pyscrabble/pyscrabble-hatchet/setup.py | UWSysLab/diamond | 1beec323c084d9d477c770ca6b9625c8f5682a39 | [
"MIT"
] | 3 | 2020-12-02T18:29:32.000Z | 2021-06-23T20:26:09.000Z | apps/pyscrabble/pyscrabble-hatchet/setup.py | UWSysLab/diamond | 1beec323c084d9d477c770ca6b9625c8f5682a39 | [
"MIT"
] | 5 | 2017-01-25T19:31:49.000Z | 2018-07-25T05:08:19.000Z | # setup.py for pyscrabble
from distutils.core import setup
try:
import py2exe
HAS_PY2EXE = True
except ImportError:
HAS_PY2EXE = False
import glob
import os
import pkg_resources
import sys
from pyscrabble.constants import VERSION
from pyscrabble import util
from pyscrabble import dist
kwargs = {
'name': 'pyscrabble',
'version': VERSION,
'author': 'Kevin Conaway',
'author_email': 'kevin.a.conaway@gmail.com',
'url': 'http://pyscrabble.sourceforge.net',
'data_files': dist.getDataFiles(),
'packages': ['pyscrabble', 'pyscrabble.command', 'pyscrabble.game', 'pyscrabble.gui', 'pyscrabble.net']
}
if HAS_PY2EXE and 'py2exe' in sys.argv:
#eggpacks = pkg_resources.require("nevow")
#for egg in eggpacks:
# if os.path.isdir(egg.location):
# sys.path.insert(0, egg.location)
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com",p)
for extra in ["win32com.shell"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
                modulefinder.AddPackagePath(extra, p)
except ImportError:
print 'import error'
kwargs['py_modules'] = ['pyscrabble-main', 'server_console', 'db_upgrade']
kwargs['options'] = {
"py2exe": {
"packages": "encodings, nevow",
"includes": "pango,atk,gobject,decimal,dumbdbm,dbhash,xml.sax.expatreader",
"dll_excludes": ["iconv.dll","intl.dll","libatk-1.0-0.dll",
"libgdk_pixbuf-2.0-0.dll","libgdk-win32-2.0-0.dll",
"libglib-2.0-0.dll","libgmodule-2.0-0.dll",
"libgobject-2.0-0.dll","libgthread-2.0-0.dll",
"libgtk-win32-2.0-0.dll","libpango-1.0-0.dll",
"libpangowin32-1.0-0.dll"],
}
}
kwargs['windows'] = [{
"script": "pyscrabble-main.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['console'] = [{
"script": "server_service.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}, {
"script": "server_console.py",
"icon_resources" : [(1, "resources/images/py.ico")]
}]
kwargs['service'] = ['server_service']
kwargs['data_files'] += [('.', ['CHANGELOG.txt'])]
kwargs['data_files'] += [('.', ['LICENSE.txt'])]
#for egg in eggpacks:
# kwargs['data_files'] += dist.getResourceDirs(egg.location, ensureLower=False, basePath=None, outdir='extra')
else:
kwargs['scripts'] = ['pyscrabble-main.py', 'server_console.py', 'db_upgrade.py']
kwargs['data_files'] = [fix_path(x) for x in kwargs['data_files']]
kwargs['cmdclass'] = {'install_lib': dist.InstallLib, 'install_scripts' : dist.InstallScripts}
setup(**kwargs) | 37.848837 | 118 | 0.580031 |
4abf6af83131868287dda032df11a21439ed9d49 | 1,164 | py | Python | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py | liuxiaomiao123/NeuroMathAcademy | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | [
"CC-BY-4.0"
] | 2 | 2020-07-03T04:39:09.000Z | 2020-07-12T02:08:31.000Z | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2020-06-22T22:57:03.000Z | 2020-06-22T22:57:03.000Z | tutorials/W2D2_LinearSystems/solutions/W2D2_Tutorial1_Solution_437c0b24.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2021-03-29T21:08:26.000Z | 2021-03-29T21:08:26.000Z | # Imports added so this solution also runs outside the tutorial notebook,
# which normally provides them.
import numpy as np
import matplotlib.pyplot as plt
def integrate_exponential(a, x0, dt, T):
"""Compute solution of the differential equation xdot=a*x with
initial condition x0 for a duration T. Use time step dt for numerical
solution.
Args:
a (scalar): parameter of xdot (xdot=a*x)
x0 (scalar): initial condition (x at time 0)
dt (scalar): timestep of the simulation
T (scalar): total duration of the simulation
Returns:
ndarray, ndarray: `x` for all simulation steps and the time `t` at each step
"""
# Initialize variables
t = np.arange(0, T, dt)
x = np.zeros_like(t, dtype=complex)
x[0] = x0
# Step through system and integrate in time
for k in range(1, len(t)):
# for each point in time, compute xdot = a*x
xdot = (a*x[k-1])
# update x by adding xdot scaled by dt
x[k] = x[k-1] + xdot * dt
return x, t
# choose parameters
a = -0.5 # parameter in f(x)
T = 10 # total Time duration
dt = 0.001 # timestep of our simulation
x0 = 1. # initial condition of x at time 0
x, t = integrate_exponential(a, x0, dt, T)
with plt.xkcd():
fig = plt.figure(figsize=(8, 6))
plt.plot(t, x.real)
plt.xlabel('Time (s)')
plt.ylabel('x') | 27.714286 | 80 | 0.640893 |
4abf7b9f84deaebd77faef58a9ebbc8bcdd69360 | 1,199 | py | Python | PyTemp/gis/shapefile_to_geojson.py | SwaggerKhan/PatrolGis | 89b1a398ffd6171ac35ea9d023bce98a0fc7e930 | [
"MIT"
] | null | null | null | PyTemp/gis/shapefile_to_geojson.py | SwaggerKhan/PatrolGis | 89b1a398ffd6171ac35ea9d023bce98a0fc7e930 | [
"MIT"
] | null | null | null | PyTemp/gis/shapefile_to_geojson.py | SwaggerKhan/PatrolGis | 89b1a398ffd6171ac35ea9d023bce98a0fc7e930 | [
"MIT"
] | null | null | null | import json
import geojson
import geopandas as gpd
| 31.552632 | 95 | 0.605505 |
4abf8e0a8ee60fe90e1a20e373c9a2a3d84d695d | 3,504 | py | Python | ssbio/databases/pdbflex.py | JoshuaMeyers/ssbio | 624618602437e2c2e4adf90962adcef3af2d5b40 | [
"MIT"
] | 76 | 2017-03-06T02:50:38.000Z | 2022-02-08T08:08:48.000Z | ssbio/databases/pdbflex.py | JoshuaMeyers/ssbio | 624618602437e2c2e4adf90962adcef3af2d5b40 | [
"MIT"
] | 30 | 2017-03-09T14:54:05.000Z | 2021-10-06T10:57:45.000Z | ssbio/databases/pdbflex.py | JoshuaMeyers/ssbio | 624618602437e2c2e4adf90962adcef3af2d5b40 | [
"MIT"
] | 21 | 2017-09-01T23:00:31.000Z | 2022-02-23T14:04:30.000Z | import requests
import ssbio.utils
import os.path as op
# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID of structure you are interested in
# chainID of chain you are interested in
#
# [{"pdbID":"1a50",
# "chainID":"A",
# "parentClusterID":"4hn4A",
# "avgRMSD":"0.538",
# "maxRMSD":"2.616",
# "flexibilityLabel":"Low",
# "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
# "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
# "clusterName":"4hn4A",
# "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID PDB ID of structure you are interested in
# chainID Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]
| 40.275862 | 161 | 0.627854 |
4abff12be5f57f68704691116cfaac62253e2192 | 1,065 | py | Python | api/insights/insights/infrastructure/mysql/read/modify_notes.py | manisharmagarg/qymatix | 0dc240970359429ae5105db79f9aebf1a99ba6fd | [
"Apache-2.0"
] | null | null | null | api/insights/insights/infrastructure/mysql/read/modify_notes.py | manisharmagarg/qymatix | 0dc240970359429ae5105db79f9aebf1a99ba6fd | [
"Apache-2.0"
] | null | null | null | api/insights/insights/infrastructure/mysql/read/modify_notes.py | manisharmagarg/qymatix | 0dc240970359429ae5105db79f9aebf1a99ba6fd | [
"Apache-2.0"
] | null | null | null | """
Modify Notes
"""
# pylint: disable=too-few-public-methods
from ...mysql.mysql_connection import MySqlConnection
from ...mysql.orm.autogen_entities import Task
| 29.583333 | 68 | 0.642254 |
4ac08cf9f315cf058d8ec6ec1e3e396023b3a1de | 1,834 | py | Python | desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from tests.base import BaseTestCase
from pyasn1.type import namedval
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| 31.084746 | 96 | 0.598691 |
4ac2c45edfed557313913a01b6d6e982c2b62143 | 858 | py | Python | setup.py | methane/pymemcache | 0ff5430cdcef7ed52fb3edc2a90c1c7d208ad77f | [
"Apache-2.0"
] | null | null | null | setup.py | methane/pymemcache | 0ff5430cdcef7ed52fb3edc2a90c1c7d208ad77f | [
"Apache-2.0"
] | null | null | null | setup.py | methane/pymemcache | 0ff5430cdcef7ed52fb3edc2a90c1c7d208ad77f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from pymemcache import __version__
setup(
name = 'pymemcache',
version = __version__,
author = 'Charles Gordon',
author_email = 'charles@pinterest.com',
packages = find_packages(),
tests_require = ['nose>=1.0'],
install_requires = ['six'],
description = 'A comprehensive, fast, pure Python memcached client',
long_description = open('README.md').read(),
license = 'Apache License 2.0',
url = 'https://github.com/Pinterest/pymemcache',
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'License :: OSI Approved :: Apache Software License',
'Topic :: Database',
],
)
| 29.586207 | 72 | 0.632867 |
4ac2c549f6e7bc96012e6af6cdb10885c9451aa4 | 543 | py | Python | torch_geometric/read/ply.py | DL-85/pytorch_geometric | eb12a94a667e881c4a6bff26b0453428bcb72393 | [
"MIT"
] | 2 | 2019-10-10T07:01:07.000Z | 2020-11-04T06:26:42.000Z | torch_geometric/read/ply.py | cloudyyyyy/pytorch_geometric | 61d389b5f8ee700dda4d18cadca72f24c978fce1 | [
"MIT"
] | null | null | null | torch_geometric/read/ply.py | cloudyyyyy/pytorch_geometric | 61d389b5f8ee700dda4d18cadca72f24c978fce1 | [
"MIT"
] | 1 | 2019-10-31T01:15:03.000Z | 2019-10-31T01:15:03.000Z | import torch
from plyfile import PlyData
from torch_geometric.data import Data
| 23.608696 | 76 | 0.607735 |
4ac4732c076aba6b6bc386af069168643221a2c1 | 2,679 | py | Python | ml-agents/mlagents/trainers/brain_conversion_utils.py | ranguera/ml-agents | 68779b407b32fce2ea14b16ef1bc26dea7d5e5a8 | [
"Apache-2.0"
] | 2 | 2019-12-13T22:00:11.000Z | 2019-12-14T00:47:32.000Z | ml-agents/mlagents/trainers/brain_conversion_utils.py | almartson/ml-agents | ee748705b777ddd365c55065366e83596c615811 | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/brain_conversion_utils.py | almartson/ml-agents | ee748705b777ddd365c55065366e83596c615811 | [
"Apache-2.0"
] | null | null | null | from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
| 37.732394 | 87 | 0.670773 |
4ac65a293f32905c196e86dcfb72e76e3b1b85d2 | 853 | py | Python | mrdc_ws/src/mrdc_serial/setup.py | SoonerRobotics/MRDC22 | 00c1360138e468bf313eefc93fbde11f289ece82 | [
"MIT"
] | null | null | null | mrdc_ws/src/mrdc_serial/setup.py | SoonerRobotics/MRDC22 | 00c1360138e468bf313eefc93fbde11f289ece82 | [
"MIT"
] | 1 | 2021-12-01T01:21:22.000Z | 2021-12-01T01:21:22.000Z | mrdc_ws/src/mrdc_serial/setup.py | SoonerRobotics/MRDC22 | 00c1360138e468bf313eefc93fbde11f289ece82 | [
"MIT"
] | 1 | 2021-09-28T23:43:07.000Z | 2021-09-28T23:43:07.000Z | from setuptools import find_packages, setup
from glob import glob
import os
package_name = 'mrdc_serial'
setup(
name=package_name,
version='1.0.0',
packages=find_packages(),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'launch'),
glob(os.path.join('launch', '*.xml')))
],
install_requires=['setuptools'],
maintainer='Dylan Zemlin',
maintainer_email='dylan.zemlin@gmail.com',
description='The MRDC Serial package that controls communication to the arduino',
license='MIT License',
entry_points={
'console_scripts': [
'remote = mrdc_serial.remote:main',
'serial = mrdc_serial.serial:main'
],
},
)
| 28.433333 | 85 | 0.630715 |
4ac74e03723bd148ef8b0804cbefc4d25af183f4 | 2,577 | py | Python | orders/views.py | DobromirZlatkov/anteya | 9c66c64643350ad1710bcf60e2e38169e389a66b | [
"MIT"
] | null | null | null | orders/views.py | DobromirZlatkov/anteya | 9c66c64643350ad1710bcf60e2e38169e389a66b | [
"MIT"
] | null | null | null | orders/views.py | DobromirZlatkov/anteya | 9c66c64643350ad1710bcf60e2e38169e389a66b | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from . import forms
from . import models
from custommixins import mixins
| 37.897059 | 102 | 0.67404 |
43493e4caf41318515d94514d68ea22bde6fccc6 | 5,219 | py | Python | PytorchRouting/Examples/run_experiments.py | oleksost/RoutingNetworks | 7e3e9219b7389d5af2a832a4882bc9fda0e7fd21 | [
"Apache-2.0"
] | 63 | 2018-07-19T20:12:55.000Z | 2022-03-31T14:59:37.000Z | PytorchRouting/Examples/run_experiments.py | oleksost/RoutingNetworks | 7e3e9219b7389d5af2a832a4882bc9fda0e7fd21 | [
"Apache-2.0"
] | 2 | 2019-08-08T18:28:13.000Z | 2019-09-24T16:46:22.000Z | PytorchRouting/Examples/run_experiments.py | oleksost/RoutingNetworks | 7e3e9219b7389d5af2a832a4882bc9fda0e7fd21 | [
"Apache-2.0"
] | 16 | 2018-07-25T05:56:51.000Z | 2021-01-09T02:47:05.000Z | """
This file defines some simple experiments to illustrate how Pytorch-Routing functions.
"""
import numpy as np
import tqdm
import torch
from PytorchRouting.DecisionLayers import REINFORCE, QLearning, SARSA, ActorCritic, GumbelSoftmax, PerTaskAssignment, \
WPL, AAC, AdvantageLearning, RELAX, EGreedyREINFORCE, EGreedyAAC
from PytorchRouting.Examples.Models import PerTask_all_fc, RoutedAllFC, PerTask_1_fc, PerDecisionSingleAgent, \
Dispatched
from PytorchRouting.Examples.Datasets import CIFAR100MTL
if __name__ == '__main__':
# MNIST
# dataset = MNIST_MTL(64, data_files=['./Datasets/mnist.pkl.gz'])
# model = PerTask_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
# model = WPL_routed_all_fc(1, 288, 2, dataset.num_tasks, dataset.num_tasks)
cuda = False
# cuda = True
# CIFAR
dataset = CIFAR100MTL(10, data_files=['./Datasets/cifar-100-py/train', './Datasets/cifar-100-py/test'], cuda=cuda)
model = RoutedAllFC(WPL, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(RELAX, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(EGreedyREINFORCE, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = RoutedAllFC(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = PerDecisionSingleAgent(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
# model = Dispatched(AdvantageLearning, 3, 128, 5, dataset.num_tasks, dataset.num_tasks)
learning_rates = {0: 3e-3, 5: 1e-3, 10: 3e-4}
routing_module_learning_rate_ratio = 0.3
if cuda:
model.cuda()
run_experiment(model, dataset, learning_rates, routing_module_learning_rate_ratio)
'''
WPL_routed_all_fc(3, 512, 5, dataset.num_tasks, dataset.num_tasks)
Training averages: Model loss: 0.427, Routing loss: 8.864, Accuracy: 0.711
Testing averages: Model loss: 0.459, Routing loss: 9.446, Accuracy: 0.674
'''
| 46.185841 | 119 | 0.650699 |
434a5580172de0ca0736b7166ab6de48eed316fe | 121 | py | Python | output/models/ms_data/regex/re_g22_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_g22_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_g22_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.regex.re_g22_xsd.re_g22 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| 12.1 | 59 | 0.603306 |
434b79d3907786ef5e26df8cc123133e8b35acdc | 6,670 | py | Python | code/image-manipulation.py | rgeirhos/object-recognition | 4679f7c60665bd9fb274c6c4372fc0fa34b51485 | [
"CC-BY-4.0"
] | 33 | 2017-06-22T21:51:25.000Z | 2021-09-03T01:59:58.000Z | code/image-manipulation.py | rgeirhos/object-recognition | 4679f7c60665bd9fb274c6c4372fc0fa34b51485 | [
"CC-BY-4.0"
] | null | null | null | code/image-manipulation.py | rgeirhos/object-recognition | 4679f7c60665bd9fb274c6c4372fc0fa34b51485 | [
"CC-BY-4.0"
] | 20 | 2017-06-24T01:48:19.000Z | 2021-05-12T08:41:23.000Z | #!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
return (1-contrast_level)/2.0 + image.dot(contrast_level)
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
    return np.all(np.logical_and(mat >= low, mat <= high))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to graycale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
| 31.913876 | 75 | 0.556822 |
434c335f1ca44ae4f15f8789642b629548cea61b | 659 | py | Python | students/K33402/Akhmetzhanov Alisher/lr2/main/forms.py | AlishKZ/ITMO_ICT_WebDevelopment_2020-2021 | b3ce82e17392d26d815e64343f5103f1bd46cd81 | ["MIT"] | null | null | null | students/K33402/Akhmetzhanov Alisher/lr2/main/forms.py | AlishKZ/ITMO_ICT_WebDevelopment_2020-2021 | b3ce82e17392d26d815e64343f5103f1bd46cd81 | ["MIT"] | null | null | null | students/K33402/Akhmetzhanov Alisher/lr2/main/forms.py | AlishKZ/ITMO_ICT_WebDevelopment_2020-2021 | b3ce82e17392d26d815e64343f5103f1bd46cd81 | ["MIT"] | null | null | null | from django.db.models import fields
from main.models import RoomReservation, UserRoom
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login
from django.contrib.auth import get_user_model
| 31.380952 | 75 | 0.775417 |
434cb653784b20b7295c5b100050122451d7d139 | 4,855 | py | Python | emmet-core/emmet/core/vasp/calc_types.py | espottesmith/emmet | bd28b91d240da9f0c996a2b2efb7e67da9176a09 | ["BSD-3-Clause-LBNL"] | null | null | null | emmet-core/emmet/core/vasp/calc_types.py | espottesmith/emmet | bd28b91d240da9f0c996a2b2efb7e67da9176a09 | ["BSD-3-Clause-LBNL"] | 78 | 2020-11-16T06:46:43.000Z | 2022-03-28T03:02:51.000Z | emmet-core/emmet/core/vasp/calc_types.py | utf/emmet | 27a51a7ad4c300e280de5ba9b59a311dd77cffdd | ["BSD-3-Clause-LBNL"] | null | null | null | """ Module to define various calculation types as Enums for VASP """
import datetime
from itertools import groupby, product
from pathlib import Path
from typing import Dict, Iterator, List
import bson
import numpy as np
from monty.json import MSONable
from monty.serialization import loadfn
from pydantic import BaseModel
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.structure import Structure
from typing_extensions import Literal
from emmet.core import SETTINGS
from emmet.core.utils import ValueEnum
_RUN_TYPE_DATA = loadfn(str(Path(__file__).parent.joinpath("run_types.yaml").resolve()))
_TASK_TYPES = [
"NSCF Line",
"NSCF Uniform",
"Dielectric",
"DFPT",
"DFPT Dielectric",
"NMR Nuclear Shielding",
"NMR Electric Field Gradient",
"Static",
"Structure Optimization",
"Deformation",
]
_RUN_TYPES = (
[
rt
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ [
f"{rt}+U"
for functional_class in _RUN_TYPE_DATA
for rt in _RUN_TYPE_DATA[functional_class]
]
+ ["LDA", "LDA+U"]
)
RunType = ValueEnum( # type: ignore
"RunType", dict({"_".join(rt.split()).replace("+", "_"): rt for rt in _RUN_TYPES})
)
RunType.__doc__ = "VASP calculation run types"
TaskType = ValueEnum("TaskType", {"_".join(tt.split()): tt for tt in _TASK_TYPES}) # type: ignore
TaskType.__doc__ = "VASP calculation task types"
CalcType = ValueEnum( # type: ignore
"CalcType",
{
f"{'_'.join(rt.split()).replace('+','_')}_{'_'.join(tt.split())}": f"{rt} {tt}"
for rt, tt in product(_RUN_TYPES, _TASK_TYPES)
},
)
CalcType.__doc__ = "VASP calculation types"
def run_type(parameters: Dict) -> RunType:
"""
Determines the run_type from the VASP parameters dict
This is adapted from pymatgen to be far less unstable
Args:
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
if parameters.get("LDAU", False):
is_hubbard = "+U"
else:
is_hubbard = ""
def _variant_equal(v1, v2) -> bool:
"""
helper function to deal with strings
"""
if isinstance(v1, str) and isinstance(v2, str):
return v1.strip().upper() == v2.strip().upper()
else:
return v1 == v2
# This is to force an order of evaluation
for functional_class in ["HF", "VDW", "METAGGA", "GGA"]:
for special_type, params in _RUN_TYPE_DATA[functional_class].items():
if all(
[
_variant_equal(parameters.get(param, None), value)
for param, value in params.items()
]
):
return RunType(f"{special_type}{is_hubbard}")
return RunType(f"LDA{is_hubbard}")
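# Usage sketch (assumes the bundled run_types.yaml maps the INCAR tag
# GGA="PE" to the "PBE" run type, as in upstream emmet):
#
#   run_type({"GGA": "PE"})                # -> RunType("PBE")
#   run_type({"GGA": "PE", "LDAU": True})  # -> RunType("PBE+U")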
def task_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict]
) -> TaskType:
"""
Determines the task type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
"""
calc_type = []
incar = inputs.get("incar", {})
if incar.get("ICHARG", 0) > 10:
try:
kpts = inputs.get("kpoints") or {}
kpt_labels = kpts.get("labels") or []
num_kpt_labels = len(list(filter(None.__ne__, kpt_labels)))
except Exception as e:
raise Exception(
"Couldn't identify total number of kpt labels: {}".format(e)
)
if num_kpt_labels > 0:
calc_type.append("NSCF Line")
else:
calc_type.append("NSCF Uniform")
elif incar.get("LEPSILON", False):
if incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
calc_type.append("Dielectric")
elif incar.get("IBRION", 0) > 6:
calc_type.append("DFPT")
elif incar.get("LCHIMAG", False):
calc_type.append("NMR Nuclear Shielding")
elif incar.get("LEFG", False):
calc_type.append("NMR Electric Field Gradient")
elif incar.get("NSW", 1) == 0:
calc_type.append("Static")
elif incar.get("ISIF", 2) == 3 and incar.get("IBRION", 0) > 0:
calc_type.append("Structure Optimization")
elif incar.get("ISIF", 3) == 2 and incar.get("IBRION", 0) > 0:
calc_type.append("Deformation")
return TaskType(" ".join(calc_type))
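# Usage sketch (illustrative INCAR fragments; only the "incar" key matters
# for these branches):
#
#   task_type({"incar": {"NSW": 0}})                # -> TaskType("Static")
#   task_type({"incar": {"ISIF": 3, "IBRION": 2}})  # -> TaskType("Structure Optimization")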
def calc_type(
inputs: Dict[Literal["incar", "poscar", "kpoints", "potcar"], Dict],
parameters: Dict,
) -> CalcType:
"""
Determines the calc type
Args:
inputs: inputs dict with an incar, kpoints, potcar, and poscar dictionaries
parameters: Dictionary of VASP parameters from Vasprun.xml
"""
rt = run_type(parameters).value
tt = task_type(inputs).value
return CalcType(f"{rt} {tt}")
| 28.391813 | 98 | 0.618332 |
434cd47f7ba08d7cc16a2a4b4b972a8099060404 | 207 | py | Python | sensors/__init__.py | dawnos/robotcar-to-rosbag | c51035d7fd7e08487629a9b06d84a86890f7cc03 | ["MIT"] | null | null | null | sensors/__init__.py | dawnos/robotcar-to-rosbag | c51035d7fd7e08487629a9b06d84a86890f7cc03 | ["MIT"] | null | null | null | sensors/__init__.py | dawnos/robotcar-to-rosbag | c51035d7fd7e08487629a9b06d84a86890f7cc03 | ["MIT"] | null | null | null |
from mono_left import MonoLeft
from mono_right import MonoRight
from mono_rear import MonoRear
from stereo_left import StereoLeft
from stereo_right import StereoRight
from stereo_centre import StereoCentre
| 25.875 | 38 | 0.879227 |
434df89c1b80cf68699882387250cf1a06bd4617 | 4,165 | py | Python | models/train_classifier.py | YiWang-Evonne/disaster_response | 824f646920ac85a01419101e17e92f592a505782 | ["MIT"] | null | null | null | models/train_classifier.py | YiWang-Evonne/disaster_response | 824f646920ac85a01419101e17e92f592a505782 | ["MIT"] | null | null | null | models/train_classifier.py | YiWang-Evonne/disaster_response | 824f646920ac85a01419101e17e92f592a505782 | ["MIT"] | null | null | null | import sys
import pandas as pd
from sqlalchemy import create_engine
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pickle
from sklearn.model_selection import GridSearchCV
def load_data(database_filepath):
"""
load data from sql db
:param database_filepath: sql db path
:return: pandas dataframe
"""
engine = create_engine("sqlite:///"+database_filepath)
df = pd.read_sql_table('modeling_data', engine)
yvar = [item for item in list(df) if item not in ['message', 'original', 'genre', 'id']]
X = df['message']
Y = df[yvar]
return X.values, Y.values, list(Y)
def tokenize(text):
"""
processing the text input
:param text: text inputs
:return:
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
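# Usage sketch: URLs are replaced with a placeholder before tokenizing,
# and tokens are lemmatized and lower-cased.
#
#   tokenize("Visit https://example.com for INFO")
#   # -> ['visit', 'urlplaceholder', 'for', 'info']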
def build_model():
"""
build model pipeline
:return: model pipeline
"""
model_pipeline = Pipeline([
('features', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('clf', RandomForestClassifier())
])
return model_pipeline
def evaluate_model(model, X_test, Y_test, category_names):
"""
evaluate model performances
:param model: model obj
:param X_test: test x
:param Y_test: test y
:param category_names: y names
:return:
"""
y_pred = model.predict(X_test)
print(classification_report(Y_test, y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
save model to local path
:param model: model obj
:param model_filepath: saving path
:return:
"""
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
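def model_gridsearch(model, parameters, X, Y):
    """
    run a hyperparameter grid search (minimal sketch, assuming a plain
    GridSearchCV wrapper; the signature is inferred from the call in main())
    :param model: model pipeline
    :param parameters: dict of hyperparameter grids
    :param X: training features
    :param Y: training labels
    :return: fitted GridSearchCV object
    """
    cv = GridSearchCV(model, param_grid=parameters)
    cv.fit(X, Y)
    return cv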
def main():
"""
CLI to fit the model
:return:
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
# model.fit(X_train, Y_train)
parameters = {
'clf__n_estimators': [100, 400, 800],
# 'clf__criterion':["gini", "entropy"]
}
        cv = model_gridsearch(model, parameters, X_train, Y_train)
best_model_pipeline = cv.best_estimator_
print('Evaluating model...')
evaluate_model(best_model_pipeline, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(best_model_pipeline, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | 30.181159 | 96 | 0.657143 |
434e153d430f769d0af982184da673ab7f398f75 | 6,213 | py | Python | terra/terra/emails.py | dymaxionlabs/platform | 98fe893d4632d62fea3e2357f16d970014037cdf | ["BSD-3-Clause"] | null | null | null | terra/terra/emails.py | dymaxionlabs/platform | 98fe893d4632d62fea3e2357f16d970014037cdf | ["BSD-3-Clause"] | null | null | null | terra/terra/emails.py | dymaxionlabs/platform | 98fe893d4632d62fea3e2357f16d970014037cdf | ["BSD-3-Clause"] | null | null | null | import os
from datetime import date
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from mailchimp3 import MailChimp
def notify(subject, body='.'):
send_mail(subject, body, 'damian@dymaxionlabs.com',
['monitor@dymaxionlabs.com'])
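# Usage sketch (requires Django email settings to be configured; the subject
# and body below are illustrative):
#
#   notify("[terra] import finished", "All rasters processed.")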
| 27.860987 | 77 | 0.600998 |
434e8c387b837394ff0f03da5e59c67d77ad7f7c | 7,456 | py | Python | experimental/attentive_uncertainty/toy_regression/datasets.py | miksu/edward2 | 973acdb23701f320ebaee8a56fc44d4414acfa4e | ["Apache-2.0"] | null | null | null | experimental/attentive_uncertainty/toy_regression/datasets.py | miksu/edward2 | 973acdb23701f320ebaee8a56fc44d4414acfa4e | ["Apache-2.0"] | null | null | null | experimental/attentive_uncertainty/toy_regression/datasets.py | miksu/edward2 | 973acdb23701f320ebaee8a56fc44d4414acfa4e | ["Apache-2.0"] | null | null | null | # coding=utf-8
# Copyright 2019 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parses real and synthetic datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import collections
import tensorflow as tf
NPRegressionDescription = collections.namedtuple(
"NPRegressionDescription",
("context_x", "context_y", "target_x", "target_y"))
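# Usage sketch (illustrative tensor shapes [batch, num_points, dims]):
#
#   desc = NPRegressionDescription(
#       context_x=tf.zeros([1, 10, 1]), context_y=tf.zeros([1, 10, 1]),
#       target_x=tf.zeros([1, 20, 1]), target_y=tf.zeros([1, 20, 1]))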
| 37.467337 | 80 | 0.666309 |
434ee97c218201d658ac3ee9f3df8bd8d8383c79 | 1,287 | py | Python | critiquebrainz/frontend/views/index.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | ["Apache-2.0"] | null | null | null | critiquebrainz/frontend/views/index.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | ["Apache-2.0"] | null | null | null | critiquebrainz/frontend/views/index.py | shagun6/critiquebrainz | b7ae41fb09ff4dd4e34847b294fbee4ccc76bad5 | ["Apache-2.0"] | 1 | 2019-10-20T05:48:53.000Z | 2019-10-20T05:48:53.000Z | from flask import Blueprint, render_template
from flask_babel import format_number
import critiquebrainz.db.users as db_users
import critiquebrainz.db.review as db_review
from bs4 import BeautifulSoup
from markdown import markdown
DEFAULT_CACHE_EXPIRATION = 10 * 60 # seconds
frontend_bp = Blueprint('frontend', __name__)
| 31.390244 | 110 | 0.740482 |
434fad48264cdf3b340402e86c40cd6b6db05bc8 | 2,406 | py | Python | Enigma/Enigma-master/GBS/gbsHelper.py | Q-Alpha/Hackathon2020 | c0ed45b4c1cc4f475f83786e641b859dad94f863 | ["MIT"] | 12 | 2020-07-23T17:11:22.000Z | 2022-02-03T12:44:56.000Z | Enigma/Enigma-master/GBS/gbsHelper.py | Q-Alpha/Hackathon2020 | c0ed45b4c1cc4f475f83786e641b859dad94f863 | ["MIT"] | 1 | 2020-07-28T13:35:51.000Z | 2020-07-28T13:35:51.000Z | Enigma/Enigma-master/GBS/gbsHelper.py | Q-Alpha/Hackathon2020 | c0ed45b4c1cc4f475f83786e641b859dad94f863 | ["MIT"] | 25 | 2020-07-22T14:32:17.000Z | 2021-09-08T11:43:55.000Z | import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
| 34.869565 | 101 | 0.554032 |
434fcaaddceb714a13ca57fae4621f94efbd1d3d | 10,781 | py | Python | happy/HappyNodeJoin.py | jenniexie/happy | 6ba01586e20bb3e4f92e180fd8dce3752519f7c9 | ["Apache-2.0"] | null | null | null | happy/HappyNodeJoin.py | jenniexie/happy | 6ba01586e20bb3e4f92e180fd8dce3752519f7c9 | ["Apache-2.0"] | null | null | null | happy/HappyNodeJoin.py | jenniexie/happy | 6ba01586e20bb3e4f92e180fd8dce3752519f7c9 | ["Apache-2.0"] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements HappyNodeJoin class through which a virtual node join a network.
#
# When a node joins a network, an TAP interface is created in the node and in
# the network. Then TUN is setup on the node.
#
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyLink import HappyLink
from happy.HappyNetwork import HappyNetwork
from happy.HappyNode import HappyNode
import happy.HappyLinkAdd
import happy.HappyNodeAddress
import happy.HappyNodeRoute
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap"] = False
options["network_id"] = None
options["fix_hw_addr"] = None
options["customized_eui64"] = None
| 35.463816 | 102 | 0.626472 |
434feac939e1b8979a11ce2e5fb237601f1fd855 | 46,866 | py | Python | __init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | ["Apache-2.0"] | null | null | null | __init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | ["Apache-2.0"] | null | null | null | __init__.py | SDRAST/Data_Reduction | f007d716b5c28c086910a81206cffaf37ff6368c | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
suplerclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data..
Examples
========
Here we initiate a base class after mixing in the data getter. The first line of
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
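# Usage sketch (the returned paths follow the patterns above; the arguments
# are illustrative):
#
#   projdata, projwork, rawdata = get_obs_dirs("SolarPatrol", 28, 2012, 127,
#                                              datafmt="HDF5")
#   # projdata -> '/usr/local/project_data/SolarPatrol/dss28/2012/127/'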
# --------- old stuff to be discarded still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
# get the station
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
| 35.370566 | 91 | 0.615307 |
43503b600179235461951733f8a10d6ec6cc9d94 | 219 | py | Python | PyGRB/__init__.py | HughPaynter/PyGRB | 2eaf834cf3c62a639a056285ca9518456daa4b7c | ["BSD-3-Clause"] | null | null | null | PyGRB/__init__.py | HughPaynter/PyGRB | 2eaf834cf3c62a639a056285ca9518456daa4b7c | ["BSD-3-Clause"] | null | null | null | PyGRB/__init__.py | HughPaynter/PyGRB | 2eaf834cf3c62a639a056285ca9518456daa4b7c | ["BSD-3-Clause"] | null | null | null | """
PyGRB.
A GRB light-curve analysis package.
"""
__version__ = "0.0.5"
__author__ = 'James Paynter'
from . import backend
from . import fetch
from . import main
from . import postprocess
from . import preprocess
| 13.6875 | 35 | 0.716895 |
4350f1b77cfb0dfefe4845923da0aa75d35da2ca | 363 | py | Python | src/config.py | john9384/PyblogRestAPI | f8cd42b6ffd5ccc3224d18f71cbea654f05023d0 | ["MIT"] | null | null | null | src/config.py | john9384/PyblogRestAPI | f8cd42b6ffd5ccc3224d18f71cbea654f05023d0 | ["MIT"] | null | null | null | src/config.py | john9384/PyblogRestAPI | f8cd42b6ffd5ccc3224d18f71cbea654f05023d0 | ["MIT"] | null | null | null | import os
from dotenv import load_dotenv
load_dotenv()
| 25.928571 | 60 | 0.730028 |
435242d1a3384ab078fa9b2a0a84286b9581b8f8 | 8,483 | py | Python | Context_Guided_RelRep/train.py | Huda-Hakami/Context-Guided-Relation-Embeddings | 520ce89fe7bad3aba2f3eb112329300625bb55f7 | ["Apache-2.0"] | 1 | 2019-10-06T03:54:53.000Z | 2019-10-06T03:54:53.000Z | Context_Guided_RelRep/train.py | Huda-Hakami/Context-Guided-Relation-Embeddings | 520ce89fe7bad3aba2f3eb112329300625bb55f7 | ["Apache-2.0"] | null | null | null | Context_Guided_RelRep/train.py | Huda-Hakami/Context-Guided-Relation-Embeddings | 520ce89fe7bad3aba2f3eb112329300625bb55f7 | ["Apache-2.0"] | null | null | null | import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
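# Usage sketch: batches are yielded in order, with a short final batch.
#
#   for batch in next_batch(2, [1, 2, 3, 4, 5]):
#       print(batch)   # [1, 2] then [3, 4] then [5]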
# -------------------------------------------------------
# -------------------------------------------------------
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
| 37.870536 | 150 | 0.707651 |
43525bbf3ff2f6151c746e2a0599b8ee3f2bbfcc | 1,071 | py | Python | synch_integrate.py | HerculesJack/grtrans | bc005307d81dac1bdb9520e776e7627126dd690a | ["MIT"] | 25 | 2016-02-11T01:52:14.000Z | 2021-06-16T02:15:42.000Z | synch_integrate.py | RAnantua/grtrans | a0353a8516335412b27fe4866eabafcfc0fe498f | ["MIT"] | 6 | 2016-11-10T15:25:20.000Z | 2018-01-18T15:15:57.000Z | synch_integrate.py | RAnantua/grtrans | a0353a8516335412b27fe4866eabafcfc0fe498f | ["MIT"] | 6 | 2016-02-11T14:13:01.000Z | 2022-03-10T01:56:02.000Z | from radtrans_integrate import radtrans_integrate
from polsynchemis import polsynchemis
import numpy as np
import scipy.integrate
# calculate synchrotron emissivity for given coefficients
| 36.931034 | 109 | 0.659197 |
4354188cd39459be1c39fa882aceb00bf1b969f5 | 1,683 | py | Python | actions/lib/Template_Parser.py | pjimmybrcd/campus_ztp_nps | 2ab266b32fbcddcbdf9031138aabc40942914c3a | ["Apache-2.0"] | null | null | null | actions/lib/Template_Parser.py | pjimmybrcd/campus_ztp_nps | 2ab266b32fbcddcbdf9031138aabc40942914c3a | ["Apache-2.0"] | null | null | null | actions/lib/Template_Parser.py | pjimmybrcd/campus_ztp_nps | 2ab266b32fbcddcbdf9031138aabc40942914c3a | ["Apache-2.0"] | null | null | null | """
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta
| 39.139535 | 91 | 0.699346 |
43559b99540d7f4aefb586b6180111026c8c9f97 | 461 | py | Python | lca_writer/data/loader.py | line-mind/lca_writer | 0f356cf20285ba684826dfdd18b75d0f0ebea120 | ["BSD-3-Clause"] | 1 | 2022-02-10T21:32:54.000Z | 2022-02-10T21:32:54.000Z | lca_writer/data/loader.py | line-mind/lca_writer | 0f356cf20285ba684826dfdd18b75d0f0ebea120 | ["BSD-3-Clause"] | 1 | 2018-12-22T23:12:30.000Z | 2018-12-22T23:12:30.000Z | lca_writer/data/loader.py | line-mind/lca_writer | 0f356cf20285ba684826dfdd18b75d0f0ebea120 | ["BSD-3-Clause"] | 1 | 2019-10-09T07:03:09.000Z | 2019-10-09T07:03:09.000Z | import os
__all__ = ['DATA_FOLDER', 'load_data']
DATA_FOLDER = os.path.dirname(os.path.abspath(__file__))
def load_data(name):
"""
Loads an Excel form from the data folder with the specified name.
Parameters
----------
name : str
The name of the form without file extension.
"""
from ..lca_writer import LCAWriter # to prevent recursive import
p = os.path.join(DATA_FOLDER, name + '.xlsx')
return LCAWriter(p)
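# Usage sketch ('demo' is a hypothetical form name; it must exist as
# demo.xlsx inside DATA_FOLDER):
#
#   writer = load_data("demo")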
| 20.954545 | 69 | 0.652928 |
4356791d282fbcbf316192672ada1b6fc8d08206 | 45 | py | Python | main.py | Dephilia/pipenv-docker-development | 3be5f63120638922fe98336b6ee5b3b0f6f182dc | ["MIT"] | null | null | null | main.py | Dephilia/pipenv-docker-development | 3be5f63120638922fe98336b6ee5b3b0f6f182dc | ["MIT"] | null | null | null | main.py | Dephilia/pipenv-docker-development | 3be5f63120638922fe98336b6ee5b3b0f6f182dc | ["MIT"] | null | null | null | var = "Docker"
print(f"Hello {var} world!")
| 11.25 | 28 | 0.622222 |
4356793fe5f5eb5615052cdcbe88626695d774de | 841 | py | Python | app/v1/utils/mixins.py | pndemo/yummy-recipes-api | ae6729bd1c886ce9872d83488a6eaa99e92be513 | ["MIT"] | null | null | null | app/v1/utils/mixins.py | pndemo/yummy-recipes-api | ae6729bd1c886ce9872d83488a6eaa99e92be513 | ["MIT"] | 3 | 2019-12-20T23:17:20.000Z | 2022-03-21T22:16:25.000Z | app/v1/utils/mixins.py | pndemo/yummy-recipes-api | ae6729bd1c886ce9872d83488a6eaa99e92be513 | ["MIT"] | 1 | 2017-12-13T12:44:11.000Z | 2017-12-13T12:44:11.000Z | """ Model mixin classes for auth, category and recipe modules """
from app import db
# pylint: disable=C0103
# pylint: disable=E1101
| 29 | 81 | 0.67063 |
435728a0cb21ad40d2d8c25c033f2746e09d0952 | 4,239 | py | Python | apps/dash-port-analytics/app/ui/tab_map_controls.py | JeroenvdSande/dash-sample-apps | 106fa24693cfdaf47c06466a0aed78e642344f91 | ["MIT"] | 2,332 | 2019-05-10T18:24:20.000Z | 2022-03-30T21:46:29.000Z | apps/dash-port-analytics/app/ui/tab_map_controls.py | JeroenvdSande/dash-sample-apps | 106fa24693cfdaf47c06466a0aed78e642344f91 | ["MIT"] | 384 | 2019-05-09T19:19:56.000Z | 2022-03-12T00:58:24.000Z | apps/dash-port-analytics/app/ui/tab_map_controls.py | JeroenvdSande/dash-sample-apps | 106fa24693cfdaf47c06466a0aed78e642344f91 | ["MIT"] | 3,127 | 2019-05-16T17:20:45.000Z | 2022-03-31T17:59:07.000Z | import dash_core_components as dcc
import dash_html_components as html
from config import strings
def make_tab_port_map_controls(
port_arr: list,
port_val: str,
vessel_types_arr: list,
vessel_type_val: str,
year_arr: list,
year_val: int,
month_arr: list,
month_val: int,
) -> html.Div:
"""
Returns a HTML div of user controls found on top of the map tab.
:param port_arr: list, all possible ports
:param port_val: str, current port value
:param vessel_types_arr: list, all possible vessel types
:param vessel_type_val: str, current vessel type value
:param year_arr: list, all possible years
    :param year_val: int, current year value
    :param month_arr: list, all possible months
    :param month_val: int, current month value
:return: HTML div
"""
return html.Div(
className="tab-port-map-controls",
children=[
html.Div(
className="tab-port-map-single-control-container area-a",
children=[
html.Label(
className="control-label", children=[strings.LABEL_PORT]
),
dcc.Dropdown(
id="port-map-dropdown-port",
clearable=False,
options=[{"label": port, "value": port} for port in port_arr],
value=port_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-b"),
html.Div(
className="tab-port-map-single-control-container area-c",
children=[
html.Label(
className="control-label", children=[strings.LABEL_VESSEL]
),
dcc.Dropdown(
id="port-map-dropdown-vessel-type",
clearable=False,
options=[
{"label": vessel_type, "value": vessel_type}
for vessel_type in vessel_types_arr
],
value=vessel_type_val,
),
],
),
html.Div(className="tab-port-map-single-control-separator area-d"),
html.Div(
className="tab-port-map-single-control-container date-grid area-e",
children=[
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label", children=[strings.LABEL_YEAR]
),
dcc.Dropdown(
id="port-map-dropdown-year",
clearable=False,
options=[
{"label": year, "value": year} for year in year_arr
],
value=year_val,
),
],
),
html.Div(
className="tab-port-map-single-control-separator smaller-line"
),
html.Div(
className="tab-port-map-single-control-container-date",
children=[
html.Label(
className="control-label",
children=[strings.LABEL_MONTH],
),
dcc.Dropdown(
id="port-map-dropdown-month",
clearable=False,
options=[
{"label": month, "value": month}
for month in month_arr
],
value=month_val,
),
],
),
],
),
],
)
| 38.889908 | 88 | 0.420854 |
43580621cfd0f7e6c205651bbcde02772c3c846a | 628 | py | Python | subs2srs/gui/state.py | TFarla/subs2srs-cross-platform | 79158a313ca4099adb20df97207b19d7bc948697 | ["MIT"] | 3 | 2020-07-04T22:34:50.000Z | 2020-08-10T18:18:51.000Z | subs2srs/gui/state.py | TFarla/subs2srs-cross-platform | 79158a313ca4099adb20df97207b19d7bc948697 | ["MIT"] | 5 | 2020-07-04T08:34:36.000Z | 2021-05-19T01:27:04.000Z | subs2srs/gui/state.py | TFarla/subs2srs-cross-platform | 79158a313ca4099adb20df97207b19d7bc948697 | ["MIT"] | null | null | null | from typing import List
from subs2srs.core.preview_item import PreviewItem
| 27.304348 | 93 | 0.694268 |
435956da8c173c0f00fa6d13687b5307a4d9b2a5 | 499 | py | Python | sync_ends/main.py | nirav1997/sync_ends | 04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb | ["MIT"] | null | null | null | sync_ends/main.py | nirav1997/sync_ends | 04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb | ["MIT"] | null | null | null | sync_ends/main.py | nirav1997/sync_ends | 04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb | ["MIT"] | null | null | null | import sys
sys.path.append("..")
from src.sync_ends_service import SyncEnd
from src.parser import Parser
if __name__ == "__main__":
main()
| 22.681818 | 100 | 0.709419 |
435a70dd7b6f4dda69b0f2a7703c3f754714213d | 22,429 | py | Python | graphql_compiler/compiler/workarounds/orientdb_query_execution.py | 0xflotus/graphql-compiler | 0c892f5254d0cf3d03a68012080d0b736bc49913 | ["Apache-2.0"] | null | null | null | graphql_compiler/compiler/workarounds/orientdb_query_execution.py | 0xflotus/graphql-compiler | 0c892f5254d0cf3d03a68012080d0b736bc49913 | ["Apache-2.0"] | 1 | 2019-04-18T18:23:16.000Z | 2019-04-18T18:23:16.000Z | graphql_compiler/compiler/workarounds/orientdb_query_execution.py | 0xflotus/graphql-compiler | 0c892f5254d0cf3d03a68012080d0b736bc49913 | ["Apache-2.0"] | 1 | 2019-11-21T02:38:27.000Z | 2019-11-21T02:38:27.000Z | # Copyright 2018-present Kensho Technologies, LLC.
"""Workarounds for OrientDB scheduler issue that causes poor query planning for certain queries.
For purposes of query planning, the OrientDB query planner ignores "where:" clauses
that hit indexes but do not use the "=" operator. For example, "CONTAINS" can be used to check
that a field covered by an index is in a specified list of values, and can therefore be covered
by an index, but OrientDB will ignore this. When no equality ("=") checks on indexed columns
are present, OrientDB will generate a query plan that starts execution at the class with
lowest cardinality, which can lead to excessive numbers of scanned and discarded records.
Assuming the query planner creates a query plan where a location with CONTAINS is
the first in the execution order, the execution system will apply indexes
to speed up this operation. Therefore, it's sufficient to trick the query planner into
always creating such a query plan, even though it thinks indexes cannot be used in the query.
Valid query execution start points for the OrientDB query planner must satisfy the following:
- Must not be "optional: true".
- Must not have a "while:" clause nor follow a location that has one.
- Must have a "class:" defined. This class is used for cardinality estimation, and to
look for available indexes that may cover any "where:" clause that may be present.
The optimizations in this file improve performance by enabling execution start points according
to the following assumptions:
1. Start points with "where:" clauses that reference only local fields (i.e. not tagged values
from other query locations) are always better than start points without a "where:".
This is because the filter will have to be applied one way or the other, so we might as well
apply it early.
2. If no such start points are available, we'd like to make available as many start points
as possible, since we'd like OrientDB to start at the start point whose class has
the lowest possible cardinality.
The process of applying the optimizations is as follows:
- Exclude and ignore all query steps that are inside a fold, optional, or recursion scope,
or have a "where:" clause that references a non-local (i.e. tagged) field.
- Find all remaining query steps with "where:" clauses that reference only local fields.
- If any are found, we guide our actions from assumption 1 above:
- Ensure they have a defined "class:" -- i.e. the OrientDB scheduler will consider them
valid start points.
- Then, prune all other query steps (ones without such "where:" clauses) by removing their
"class:" clause, making them invalid as query start points for OrientDB's scheduler.
- If none are found, we guide our actions from assumption 2 above:
- Ensure that all query points not inside fold, optional, or recursion scope contain
a "class:" clause. That increases the number of available query start points,
so OrientDB can choose the start point of lowest cardinality.
"""
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
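# The mutable-dict pattern above is equivalent to this Python-3-only sketch
# using "nonlocal" (shown for illustration; not used here, to keep Python 2
# compatibility):
#
#   def _is_local_filter_py3(filter_block):
#       is_local = True
#       def visitor_fn(expression):
#           nonlocal is_local
#           if isinstance(expression, (ContextField, ContextFieldExistence)):
#               is_local = False
#           return expression
#       filter_block.predicate.visit_and_update(visitor_fn)
#       return is_local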
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause with non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
                # This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
| 58.257143 | 100 | 0.664452 |
435b03494c0e0f08adce48e2055f1eb32e5446ba | 3,763 | py | Python | traffic_light/core.py | ofalk/cleware-traffic-light | be319fec8e190811463ade8aabc37ca2b4f17e57 | ["MIT"] | null | null | null | traffic_light/core.py | ofalk/cleware-traffic-light | be319fec8e190811463ade8aabc37ca2b4f17e57 | ["MIT"] | null | null | null | traffic_light/core.py | ofalk/cleware-traffic-light | be319fec8e190811463ade8aabc37ca2b4f17e57 | ["MIT"] | null | null | null | from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
BM_REQUEST_TYPE = 0x21
B_REQUEST = 0x09
W_VALUE = 0x200
W_INDEX = 0x00
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
    def __getattr__(self, name):
        """Parses attribute calls (e.g. red_on) into color and state arguments"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
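    # Usage sketch (assumes an instance of the surrounding class, with the
    # Color and State enums defined in this module):
    #
    #   light.red_on()     # __getattr__ resolves this to set_led(Color.RED, State.ON)
    #   light.green_off()  # resolves to set_led(Color.GREEN, State.OFF)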
    def __str__(self):
        """Converts instance into string with important information"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
| 32.439655 | 122 | 0.60962 |
435b7f5d139890173dc2cf9019b51215cc554d6e | 3,646 | py | Python | sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null | sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | ["MIT"] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_orchestration_app_luis_response_async.py
DESCRIPTION:
    This sample demonstrates how to analyze a user query using an orchestration project.
    In this sample, the orchestration project's top intent will map to a LUIS project.
For more info about how to set up a CLU orchestration project, see the README.
USAGE:
python sample_analyze_orchestration_app_luis_response_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.
2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.
3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME - project name for your CLU orchestration project.
4) AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME - deployment name for your CLU orchestration project.
"""
import asyncio
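# A minimal sketch of the coroutine this sample drives (the function body is not
# shown above; this assumes the azure-ai-language-conversations async API shape —
# check the SDK docs for the exact task schema before relying on it).
import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations.aio import ConversationAnalysisClient


async def main():
    # all four values come from the environment variables documented above
    endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
    key = os.environ["AZURE_CONVERSATIONS_KEY"]
    project_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT_NAME"]
    deployment_name = os.environ["AZURE_CONVERSATIONS_WORKFLOW_DEPLOYMENT_NAME"]

    client = ConversationAnalysisClient(endpoint, AzureKeyCredential(key))
    async with client:
        result = await client.analyze_conversation(
            task={
                "kind": "Conversation",
                "analysisInput": {
                    "conversationItem": {
                        "participantId": "1",
                        "id": "1",
                        "modality": "text",
                        "language": "en",
                        "text": "Reserve a table for 2 at the Italian restaurant",
                    }
                },
                "parameters": {
                    "projectName": project_name,
                    "deploymentName": deployment_name,
                    "verbose": True,
                },
            }
        )
    # the orchestration project's top intent routes the query to the LUIS app
    print(result["result"]["prediction"]["topIntent"])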
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | 39.630435 | 106 | 0.637685 |
435b8874fd825cd72ac9feb3e9f96907066c1541 | 141 | py | Python | src/sunstruck/schemas/__init__.py | la-mar/sunstruck-api | 90074a55d3b243f7f0eee6e897a98699d2cebc43 | [
"MIT"
] | 3 | 2021-04-04T07:48:48.000Z | 2022-02-19T17:42:12.000Z | src/sunstruck/schemas/__init__.py | la-mar/sunstruck-api | 90074a55d3b243f7f0eee6e897a98699d2cebc43 | [
"MIT"
] | null | null | null | src/sunstruck/schemas/__init__.py | la-mar/sunstruck-api | 90074a55d3b243f7f0eee6e897a98699d2cebc43 | [
"MIT"
] | null | null | null | # flake8: noqa
from schemas.client_credentials import *
from schemas.message import *
from schemas.token import *
from schemas.user import *
| 23.5 | 40 | 0.794326 |
435f04515eafc16cb9b3781591916aadd65a8bd3 | 2,499 | py | Python | intro/deploy.py | terziev-viktor/SolidityCourse | 6f10852e94eec69438c5e577795d317694227337 | [
"MIT"
] | null | null | null | intro/deploy.py | terziev-viktor/SolidityCourse | 6f10852e94eec69438c5e577795d317694227337 | [
"MIT"
] | null | null | null | intro/deploy.py | terziev-viktor/SolidityCourse | 6f10852e94eec69438c5e577795d317694227337 | [
"MIT"
] | null | null | null | import json
from web3 import Web3
from solcx import compile_standard, install_solc
with open("./SimpleStorage.sol", "r") as file:
simple_storage_src = file.read()
# install solcx
install_solc("0.8.0")
# compile the source
compiled_sol = compile_standard(
{
"language": "Solidity",
"sources": {"SimpleStorage.sol": {"content": simple_storage_src}},
"settings":
{
"outputSelection":
{
"*":
{
"*": ["abi", "metadata", "evm.bytecode", "evm.sourceMap"]
}
}
},
},
solc_version = "0.8.0"
)
with open("./out.json", "w") as file:
json.dump(compiled_sol, file)
# getting the bytecode
bytecode = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["evm"]["bytecode"]["object"]
# getting the abi
abi = compiled_sol["contracts"]["SimpleStorage.sol"]["SimpleStorage"]["abi"]
# connecting to ganache
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:7545"))
chain_id = 1337
my_address = "0x02ECDdb09504C4d4B2ba2c7Ec80d77d44f6e631c"
private_key = "0xa9ddbecce894fdad11cd9864d9c58f794d23bd5f0d78d1c2eea204b284edfefc"
# Create the contract in python
SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)
# Get the latest test transaction
nonce = w3.eth.getTransactionCount(my_address)
# 1. Build a transaction
# 2. Sing the transaction
# 3. Send the transaction
transaction = SimpleStorage.constructor().buildTransaction({"gasPrice": w3.eth.gas_price, "chainId": chain_id, "from": my_address, "nonce": nonce})
signed_txn = w3.eth.account.sign_transaction(transaction, private_key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
# confirm transaction is received
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
print("tx_hash=", tx_hash)
print("receipt=", tx_receipt)
# working on-chain
simple_storage = w3.eth.contract(address=tx_receipt.contractAddress, abi=abi)
print(simple_storage.functions.retrieve().call())
store_transaction = simple_storage.functions.store(15).buildTransaction({
"gasPrice": w3.eth.gas_price,
"chainId": chain_id,
"from": my_address,
"nonce": nonce + 1
}
)
singed_store_transaction = w3.eth.account.sign_transaction(store_transaction, private_key)
store_transaction_hash = w3.eth.send_raw_transaction(singed_store_transaction.rawTransaction)
store_transaction_receipt = w3.eth.wait_for_transaction_receipt(store_transaction_hash)
| 31.2375 | 147 | 0.708283 |
435f7637a4041b297cabc29fa1091a243b6bc151 | 2,154 | py | Python | noise/extras/meta/protocol/protocol.py | mgp25/noise | 8560849fa4a1d6e938adde27d26572f4da16e422 | [
"MIT"
] | 6 | 2019-05-02T09:40:53.000Z | 2021-05-18T00:18:30.000Z | noise/extras/meta/protocol/protocol.py | mgp25/noise | 8560849fa4a1d6e938adde27d26572f4da16e422 | [
"MIT"
] | null | null | null | noise/extras/meta/protocol/protocol.py | mgp25/noise | 8560849fa4a1d6e938adde27d26572f4da16e422 | [
"MIT"
] | null | null | null | from noise.dh.dh import DH
from noise.cipher.cipher import Cipher
from noise.hash.hash import Hash
from noise.processing.handshakepatterns.handshakepattern import HandshakePattern
from noise.processing.impl.handshakestate import HandshakeState
from noise.processing.impl.symmetricstate import SymmetricState
from noise.processing.impl.cipherstate import CipherState
def create_symmetricstate(self, cipherstate=None, hash=None):
"""
:param cipherstate:
:type cipherstate: CipherState
:param hash:
:type hash: Hash
:return:
:rtype: SymmetricState
"""
return SymmetricState(cipherstate or self.create_cipherstate(), hash or self._hash)
def create_handshakestate(self, symmetricstate=None, dh=None):
"""
:param symmetricstate:
:type symmetricstate: SymmetricState
:param dh:
:type dh: DH
:return:
:rtype: HandshakeState
"""
return HandshakeState(symmetricstate or self.create_symmetricstate(), dh or self._dh)
| 27.265823 | 103 | 0.627205 |
4361a9278aa18283e07b14ec0d517fca7051b980 | 9,550 | py | Python | info_popup.py | cartazio/SublimeHaskell | e6f12ea69de939d12212a6ec594bf0aae0603f6d | [
"MIT"
] | 2 | 2021-07-07T16:41:48.000Z | 2021-11-17T11:08:50.000Z | info_popup.py | cartazio/SublimeHaskell | e6f12ea69de939d12212a6ec594bf0aae0603f6d | [
"MIT"
] | null | null | null | info_popup.py | cartazio/SublimeHaskell | e6f12ea69de939d12212a6ec594bf0aae0603f6d | [
"MIT"
] | null | null | null | import urllib.parse
import webbrowser
import json
from xml.etree import ElementTree
import sublime
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.internals.unicode_opers as UnicodeOpers
import SublimeHaskell.symbols as symbols
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.parseoutput as ParseOutput
import SublimeHaskell.types as types
# Unused module variable:
# style_header = "<style>" \
# "a { text-decoration: underline; }" \
# ".type { color: red; }" \
# ".tyvar { color: blue; }" \
# ".operator { color: green; }" \
# ".comment { color: gray; font-style: italic; }" \
# ".docs { color: gray; }" \
# "</style>"
| 42.070485 | 126 | 0.566387 |
4361a9d08c25b0f208bbec15d3be738264785d14 | 4,126 | py | Python | modules/google_home_lights.py | artizanatweb/ghome-assistant | dba2bc58979ebae48afc71c356ae2d40b8830eee | [
"Apache-2.0"
] | null | null | null | modules/google_home_lights.py | artizanatweb/ghome-assistant | dba2bc58979ebae48afc71c356ae2d40b8830eee | [
"Apache-2.0"
] | null | null | null | modules/google_home_lights.py | artizanatweb/ghome-assistant | dba2bc58979ebae48afc71c356ae2d40b8830eee | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2017 Seeed Technology Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modules.pixel_ring import pixel_ring
import numpy
import time
import threading
try:
import queue as Queue
except ImportError:
import Queue as Queue
lights = GoogleHomeLights()
if __name__ == '__main__':
while True:
try:
lights.wakeup()
time.sleep(3)
lights.think()
time.sleep(3)
lights.speak()
time.sleep(3)
lights.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixel_ring.off() | 24.128655 | 74 | 0.542414 |
43626cff0461fc1edbacac7b7a76a2f308ada971 | 5,016 | py | Python | tensortools/optimize/mncp_hals.py | klmcguir/tensortools | 38262f5bad9d3171286e34e5f15d196752dda939 | [
"MIT"
] | null | null | null | tensortools/optimize/mncp_hals.py | klmcguir/tensortools | 38262f5bad9d3171286e34e5f15d196752dda939 | [
"MIT"
] | null | null | null | tensortools/optimize/mncp_hals.py | klmcguir/tensortools | 38262f5bad9d3171286e34e5f15d196752dda939 | [
"MIT"
] | null | null | null | """
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
Fits nonnegtaive CP Decomposition using the Hierarcial Alternating Least
Squares (HALS) Method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
This implemenation is using the Hierarcial Alternating Least Squares Method.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Mask missing elements.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
return result.finalize() | 40.780488 | 94 | 0.60626 |
4363164b554bb6ade5f87250305647778400993f | 18,079 | py | Python | raredecay/tools/data_tools.py | jonas-eschle/raredecay | 6285f91e0819d01c80125f50b24e60ee5353ae2e | [
"Apache-2.0"
] | 7 | 2016-11-19T17:28:07.000Z | 2020-12-29T19:49:37.000Z | raredecay/tools/data_tools.py | mayou36/raredecay | 5b319ada66ebe54f81e216efad81fc9f06237a30 | [
"Apache-2.0"
] | 23 | 2017-03-13T19:13:58.000Z | 2021-05-30T21:48:50.000Z | raredecay/tools/data_tools.py | jonas-eschle/raredecay | 6285f91e0819d01c80125f50b24e60ee5353ae2e | [
"Apache-2.0"
] | 5 | 2016-12-17T19:24:13.000Z | 2021-05-31T14:32:34.000Z | """
@author: Jonas Eschle "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet implemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
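# Illustrative use on hypothetical signal/background arrays: keep the window
# holding 98% of the signal and read off the background fraction inside it.
#
#     cuts, frac_bkg_in_window = apply_cuts(signal, bkg, percent_sig_to_keep=98)
#     lower_cut, upper_cut = cuts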
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
The name of the branch, i.e. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what is that if-statement doing there? An assertion, maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data for which we apply the weights. Usual 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
If weights contains fewer "weight-containing array-like objects" than
data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
# convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterating through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves the list but splits the strings
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want to.
Solution: use to_list(obj), which creates a new list in case the object is
a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
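# Example behaviour (illustrative):
#     to_list("abc")   -> ["abc"]      # a string stays whole
#     to_list((1, 2))  -> [1, 2]       # containers are converted
#     to_list([1, 2])  -> [1, 2]       # lists pass through unchanged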
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
root_index = None
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
raise TypeError("Could not convert data to pandas. Data: " + data_in)
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
| 34.969052 | 117 | 0.636872 |
4363297eb771b020c864cdfbc69be70aff1727b6 | 2,052 | py | Python | toontown/coghq/boardbothq/BoardOfficeManagerAI.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 1 | 2018-06-16T23:06:38.000Z | 2018-06-16T23:06:38.000Z | toontown/coghq/boardbothq/BoardOfficeManagerAI.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/coghq/boardbothq/BoardOfficeManagerAI.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from direct.directnotify import DirectNotifyGlobal
import DistributedBoardOfficeAI
from toontown.toonbase import ToontownGlobals
from toontown.coghq.boardbothq import BoardOfficeLayout
from direct.showbase import DirectObject
import random
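# Minimal sketch of the elided manager class (assumed shape, following the
# usual Toontown *ManagerAI pattern built on the imports above):
class BoardOfficeManagerAI(DirectObject.DirectObject):
    notify = DirectNotifyGlobal.directNotify.newCategory('BoardOfficeManagerAI')

    def __init__(self, air):
        DirectObject.DirectObject.__init__(self)
        self.air = air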
| 41.04 | 141 | 0.639376 |
436393af32e8421a7a3401c8eb82314850e79873 | 2,144 | py | Python | ansiblemetrics/utils.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | 1 | 2020-04-24T16:09:14.000Z | 2020-04-24T16:09:14.000Z | ansiblemetrics/utils.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | ansiblemetrics/utils.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | from typing import Union
def key_value_list(d: Union[dict, list], key=None) -> list:
"""
This function iterates over all the key-value pairs of a dictionary and returns a list of (key, value) tuples where the value is a primitive (i.e., no list or dict), e.g., a string or a number.
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return []
key_values = []
if isinstance(d, list):
for entry in d:
if isinstance(entry, dict):
key_values.extend(key_value_list(entry))
else:
key_values.append((key, entry))
else:
for k, v in d.items():
if k is None or v is None:
continue
if not isinstance(v, dict) and type(v) != list:
key_values.append((k, v))
elif isinstance(v, list):
key_values.extend(key_value_list(v, k))
else:
key_values.extend(key_value_list(v))
return key_values
def all_keys(d: Union[dict, list]) -> list:
"""
Returns a list of all the keys of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if d is None or not isinstance(d, dict) and not isinstance(d, list):
return []
keys = []
if isinstance(d, list):
for entry in d:
keys.extend(all_keys(entry))
else:
for k, v in d.items():
keys.append(k)
keys.extend(all_keys(v))
return keys
def all_values(d: Union[dict, list]) -> list:
"""
Returns a list of all the primitive values of a dictionary (duplicates included)
d -- a dictionary to iterate through
"""
if not d:
return []
if not isinstance(d, dict) and not isinstance(d, list):
return [d]
values = []
if isinstance(d, list):
for entry in d:
values.extend(all_values(entry))
else:
for k, v in d.items():
values.extend(all_values(v))
return values
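# Quick demonstration of the three helpers on a small nested structure
# (illustrative; not part of the original module):
if __name__ == '__main__':
    task = {'name': 'demo', 'copy': {'src': 'a.txt', 'dest': '/tmp', 'mode': 600}}
    print(key_value_list(task))  # [('name', 'demo'), ('src', 'a.txt'), ('dest', '/tmp'), ('mode', 600)]
    print(all_keys(task))        # ['name', 'copy', 'src', 'dest', 'mode']
    print(all_values(task))      # ['demo', 'a.txt', '/tmp', 600]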
| 25.831325 | 206 | 0.564366 |
4364633db5685f14b086dbb59f77e9958e56ad15 | 2,913 | py | Python | yampy/apis/groups.py | Kunal-Shah-Bose/yam-python | 1d24b4b5c4bfb512804183efe741a2f7a75889e5 | [
"Apache-2.0"
] | null | null | null | yampy/apis/groups.py | Kunal-Shah-Bose/yam-python | 1d24b4b5c4bfb512804183efe741a2f7a75889e5 | [
"Apache-2.0"
] | null | null | null | yampy/apis/groups.py | Kunal-Shah-Bose/yam-python | 1d24b4b5c4bfb512804183efe741a2f7a75889e5 | [
"Apache-2.0"
] | 1 | 2019-01-10T18:50:35.000Z | 2019-01-10T18:50:35.000Z | from yampy.apis.utils import ArgumentConverter, none_filter, stringify_booleans
from yampy.models import extract_id
| 28.281553 | 79 | 0.599725 |
4364ccde24cc2af35ff42479b35b005f175a3209 | 24,502 | py | Python | phy/gui/actions.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 118 | 2019-06-03T06:19:43.000Z | 2022-03-25T00:05:26.000Z | phy/gui/actions.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 761 | 2015-01-08T11:17:41.000Z | 2019-05-27T16:12:08.000Z | phy/gui/actions.py | ycanerol/phy | 7a247f926dd5bf5d8ab95fe138e8f4a0db11b068 | [
"BSD-3-Clause"
] | 70 | 2019-05-30T11:05:26.000Z | 2022-03-30T11:51:23.000Z | # -*- coding: utf-8 -*-
"""Actions and snippets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
from functools import partial, wraps
import logging
import re
import sys
import traceback
from .qt import QKeySequence, QAction, require_qt, input_dialog, busy_cursor, _get_icon
from phylib.utils import Bunch
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Snippet parsing utilities
# -----------------------------------------------------------------------------
def _parse_arg(s):
"""Parse a number or string."""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
def _parse_list(s):
"""Parse a comma-separated list of values (strings or numbers)."""
# Range: 'x-y'
if '-' in s:
m, M = map(_parse_arg, s.split('-'))
return list(range(m, M + 1))
# List of ids: 'x,y,z'
elif ',' in s:
return list(map(_parse_arg, s.split(',')))
else:
return _parse_arg(s)
def _parse_snippet(s):
"""Parse an entire snippet command."""
return tuple(map(_parse_list, s.split(' ')))
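# Example of the snippet grammar handled above (illustrative):
#     _parse_snippet('select 1-3 5,7')  ==  ('select', [1, 2, 3], [5, 7])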
def _prompt_args(title, docstring, default=None):
"""Display a prompt dialog requesting function arguments.
'default' is a function returning the default value for the proposed input dialog.
"""
# There are args, need to display the dialog.
# Extract Example: `...` in the docstring to put a predefined text
# in the input dialog.
logger.debug("Prompting arguments for %s", title)
r = re.search('Example: `([^`]+)`', docstring)
docstring_ = docstring[:r.start()].strip() if r else docstring
try:
text = str(default()) if default else (r.group(1) if r else None)
except Exception as e: # pragma: no cover
logger.error("Error while handling user input: %s", str(e))
return
s, ok = input_dialog(title, docstring_, text)
if not ok or not s:
return
# Parse user-supplied arguments and call the function.
args = _parse_snippet(s)
return args
# -----------------------------------------------------------------------------
# Show shortcut utility functions
# -----------------------------------------------------------------------------
def _get_shortcut_string(shortcut):
"""Return a string representation of a shortcut."""
if not shortcut:
return ''
if isinstance(shortcut, (tuple, list)):
return ', '.join([_get_shortcut_string(s) for s in shortcut])
if isinstance(shortcut, str):
if hasattr(QKeySequence, shortcut):
shortcut = QKeySequence(getattr(QKeySequence, shortcut))
else:
return shortcut.lower()
assert isinstance(shortcut, QKeySequence)
s = shortcut.toString() or ''
return str(s).lower()
def _get_qkeysequence(shortcut):
"""Return a QKeySequence or list of QKeySequence from a shortcut string."""
if shortcut is None:
return []
if isinstance(shortcut, (tuple, list)):
return [_get_qkeysequence(s) for s in shortcut]
assert isinstance(shortcut, str)
if hasattr(QKeySequence, shortcut):
return QKeySequence(getattr(QKeySequence, shortcut))
sequence = QKeySequence.fromString(shortcut)
assert not sequence.isEmpty()
return sequence
def _show_shortcuts(shortcuts):
"""Display shortcuts."""
out = []
for n in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[n])
if not n.startswith('_') and not shortcut.startswith('-'):
out.append('- {0:<40} {1:s}'.format(n, shortcut))
if out:
print('Keyboard shortcuts')
print('\n'.join(out))
print('')
def _show_snippets(snippets):
"""Display snippets."""
out = []
for n in sorted(snippets):
snippet = snippets[n]
if not n.startswith('_'):
out.append('- {0:<40} :{1:s}'.format(n, snippet))
if out:
print('Snippets')
print('\n'.join(out))
print('')
def show_shortcuts_snippets(actions):
"""Show the shortcuts and snippets of an Actions instance."""
print(actions.name)
print('-' * len(actions.name))
print()
_show_shortcuts(actions.shortcuts)
_show_snippets(actions._default_snippets)
# -----------------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Snippets
# -----------------------------------------------------------------------------
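# (Assumed scaffolding: the methods below belong to a `Snippets` helper class;
# its constructor, the `_snippet_chars` string, the `command` property and the
# `_make_func`/`_backspace` helpers are omitted here.)
class Snippets(object):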
def _enter(self):
"""Disable the snippet mode and execute the command."""
command = self.command
logger.log(5, "Snippet keystroke `Enter`.")
# NOTE: we need to set back the actions (mode_off) before running
# the command.
self.mode_off()
self.run(command)
def _create_snippet_actions(self):
"""Add mock Qt actions for snippet keystrokes.
Used to enable snippet mode.
"""
# One action per allowed character.
for i, char in enumerate(self._snippet_chars):
# Lowercase letters.
self.actions.add(
name='_snippet_{}'.format(i),
shortcut=char,
callback=_make_func(char))
# Uppercase letters.
if char in self._snippet_chars[:26]:
self.actions.add(
name='_snippet_{}_upper'.format(i),
shortcut='shift+' + char,
callback=_make_func(char.upper()))
self.actions.add(
name='_snippet_backspace', shortcut='backspace', callback=self._backspace)
self.actions.add(
name='_snippet_activate', shortcut=('enter', 'return'), callback=self._enter)
self.actions.add(
name='_snippet_disable', shortcut='escape', callback=self.mode_off)
def run(self, snippet):
"""Execute a snippet command.
May be overridden.
"""
assert snippet[0] == ':'
snippet = snippet[1:]
snippet_args = _parse_snippet(snippet)
name = snippet_args[0]
logger.debug("Processing snippet `%s`.", snippet)
try:
# Try to run the snippet on all attached Actions instances.
for actions in self.gui.actions:
try:
actions.run(name, *snippet_args[1:])
return
except ValueError:
# This Actions instance doesn't contain the requested
# snippet, trying the next attached Actions instance.
pass
logger.warning("Couldn't find action `%s`.", name)
except Exception as e:
logger.warning("Error when executing snippet: \"%s\".", str(e))
logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
def is_mode_on(self):
"""Whether the snippet mode is enabled."""
return self.command.startswith(':')
def mode_on(self):
"""Enable the snippet mode."""
logger.debug("Snippet mode enabled, press `escape` to leave this mode.")
# Save the current status message.
self._status_message = self.gui.status_message
self.gui.lock_status()
# Silent all actions except the Snippets actions.
for actions in self.gui.actions:
if actions != self.actions:
actions.disable()
self.actions.enable()
self.command = ':'
def mode_off(self):
"""Disable the snippet mode."""
self.gui.unlock_status()
# Reset the GUI status message that was set before the mode was
# activated.
self.gui.status_message = self._status_message
# Re-enable all actions except the Snippets actions.
self.actions.disable()
for actions in self.gui.actions:
if actions != self.actions:
actions.enable()
# The `:` shortcut should always be enabled.
self.actions.enable('enable_snippet_mode')
| 35.305476 | 99 | 0.587952 |
436637ae94348f41cc38697c102e03126553cd4f | 807 | py | Python | PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/Tools/cleanpyc.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | """
delete all .pyc bytecode files in a directory tree: use the
command line arg as root if given, else current working dir
"""
import os, sys
findonly = False
rootdir = os.getcwd() if len(sys.argv) == 1 else sys.argv[1]
found = removed = 0
for (thisDirLevel, subsHere, filesHere) in os.walk(rootdir):
for filename in filesHere:
if filename.endswith('.pyc'):
fullname = os.path.join(thisDirLevel, filename)
print('=>', fullname)
if not findonly:
try:
os.remove(fullname)
removed += 1
except:
type, inst = sys.exc_info()[:2]
print('*'*4, 'Failed:', filename, type, inst)
found += 1
print('Found', found, 'files, removed', removed)
| 31.038462 | 65 | 0.553903 |
436697d1038207224e723ad6677937d00c16b249 | 207 | py | Python | apps.py | louxfaure/sudoc_recouv | da3f094a0a9554c0b3911a365d1feea6d2758fec | [
"MIT"
] | 1 | 2022-02-28T13:00:48.000Z | 2022-02-28T13:00:48.000Z | apps.py | louxfaure/sudoc_recouv | da3f094a0a9554c0b3911a365d1feea6d2758fec | [
"MIT"
] | null | null | null | apps.py | louxfaure/sudoc_recouv | da3f094a0a9554c0b3911a365d1feea6d2758fec | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 25.875 | 56 | 0.768116 |
4366c0b4bfc3d82921cf8860654a9fdf8156bfc0 | 893 | py | Python | src/states.py | amancevice/terraform-aws-slack-interactive-components | 819a9b6a408b36cd1a0100859801bc47c437fdc8 | [
"MIT"
] | 24 | 2018-10-17T04:42:56.000Z | 2022-03-03T10:27:56.000Z | src/states.py | amancevice/terraform-aws-slack-interactive-components | 819a9b6a408b36cd1a0100859801bc47c437fdc8 | [
"MIT"
] | 5 | 2019-03-01T17:14:48.000Z | 2022-01-21T23:11:39.000Z | src/states.py | amancevice/terraform-aws-slack-interactive-components | 819a9b6a408b36cd1a0100859801bc47c437fdc8 | [
"MIT"
] | 11 | 2019-03-01T15:16:24.000Z | 2022-03-03T10:27:59.000Z | import boto3
from logger import logger
| 35.72 | 69 | 0.693169 |
43670f7c99a2ebd5fc17181669e6be4597ca4939 | 25,401 | py | Python | apps/controllerx/cx_core/type/light_controller.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | [
"MIT"
] | null | null | null | apps/controllerx/cx_core/type/light_controller.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | [
"MIT"
] | null | null | null | apps/controllerx/cx_core/type/light_controller.py | clach04/controllerx | b5cd92d3371c352c50f7d5ba7dae4538d7c15dfe | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional, Type, Union
from cx_const import Light, PredefinedActionsMapping
from cx_core.color_helper import get_color_wheel
from cx_core.controller import action
from cx_core.feature_support.light import LightSupport
from cx_core.integration import EventData
from cx_core.integration.deconz import DeCONZIntegration
from cx_core.integration.z2m import Z2MIntegration
from cx_core.release_hold_controller import ReleaseHoldController
from cx_core.stepper import Stepper
from cx_core.stepper.circular_stepper import CircularStepper
from cx_core.stepper.minmax_stepper import MinMaxStepper
from cx_core.type_controller import Entity, TypeController
DEFAULT_MANUAL_STEPS = 10
DEFAULT_AUTOMATIC_STEPS = 10
DEFAULT_MIN_BRIGHTNESS = 1
DEFAULT_MAX_BRIGHTNESS = 255
DEFAULT_MIN_WHITE_VALUE = 1
DEFAULT_MAX_WHITE_VALUE = 255
DEFAULT_MIN_COLOR_TEMP = 153
DEFAULT_MAX_COLOR_TEMP = 500
DEFAULT_TRANSITION = 300
DEFAULT_ADD_TRANSITION = True
DEFAULT_TRANSITION_TURN_TOGGLE = False
ColorMode = str
# Once the minimum supported version of Python is 3.8,
# we can declare the ColorMode as a Literal
# ColorMode = Literal["auto", "xy_color", "color_temp"]
| 37.686944 | 117 | 0.565332 |
4367a493fbe503c8a8ff6c69a39f88b75c5407aa | 125 | py | Python | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 18 | 2019-02-14T13:10:07.000Z | 2021-11-26T07:10:13.000Z | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 2 | 2019-02-17T14:06:42.000Z | 2019-09-15T18:05:54.000Z | kts/core/types.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | [
"MIT"
] | 2 | 2019-09-15T13:12:42.000Z | 2020-04-15T14:05:54.000Z | from typing import Union
import pandas as pd
from kts.core.frame import KTSFrame
AnyFrame = Union[pd.DataFrame, KTSFrame]
| 15.625 | 40 | 0.792 |
4368cab14b8dd0a73f5639ed6a1c9ef3a5f4c07f | 354 | py | Python | krispy/mod_user/models.py | jlaura/krispy | b1b2bf8a3e315608152c7dad15d384d0669f5e27 | [
"0BSD"
] | 2 | 2016-03-31T05:35:28.000Z | 2017-04-12T00:11:59.000Z | krispy/mod_user/models.py | jlaura/krispy | b1b2bf8a3e315608152c7dad15d384d0669f5e27 | [
"0BSD"
] | null | null | null | krispy/mod_user/models.py | jlaura/krispy | b1b2bf8a3e315608152c7dad15d384d0669f5e27 | [
"0BSD"
] | null | null | null | from app import db
from flask.ext.login import UserMixin
| 29.5 | 69 | 0.717514 |
4369ad9700348a9af2bc92b402bcac16112c9914 | 16,746 | py | Python | blog_app/blog/views.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | 1 | 2019-03-27T02:24:22.000Z | 2019-03-27T02:24:22.000Z | blog_app/blog/views.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | null | null | null | blog_app/blog/views.py | flxj/Django_blog | 01eb12553335115fee5faecafe8cacf2f0615135 | [
"MIT"
] | null | null | null | import markdown
from comments.forms import CommentForm,BookCommentForm,MovieCommentForm
from django.shortcuts import render, get_object_or_404
from.models import Post,Category,Tag, Book,Movie
#from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from django.db.models import Q
"""
def index(request):
#post_list = Post.objects.all().order_by('-created_time')
post_list = Post.objects.all()
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
#
"""
def detail(request, pk):
post = get_object_or_404(Post, pk=pk)
# +1
post.increase_views()
post.body = markdown.markdown(post.body,
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
'markdown.extensions.tables',
])
form = CommentForm()
# post
comment_list = post.comment_set.all()
# detail.html
context = {'post': post,
'form': form,
'comment_list': comment_list
}
return render(request, 'blog/detail.html', context=context)
"""
#
"""
def archives(request, year, month):
post_list = Post.objects.filter(created_time__year=year,
created_time__month=month
).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
#
"""
def category(request, pk):
cate = get_object_or_404(Category, pk=pk)
post_list = Post.objects.filter(category=cate).order_by('-created_time')
return render(request, 'blog/index.html', context={'post_list': post_list})
"""
#
# full-text search over post titles and bodies
q = request.GET.get('q')
error_msg = ''
if not q:
error_msg = ""
return render(request, 'blog/index.html', {'error_msg': error_msg})
post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))
return render(request, 'blog/index.html', {'error_msg': error_msg,
'post_list': post_list})
#
# about page
return render(request, 'blog/about.html') | 33.967546 | 96 | 0.582706 |
436a1ebb3d99a1475a443393df66a840b227b6bf | 4,916 | py | Python | src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 1 | 2019-10-01T10:29:15.000Z | 2019-10-01T10:29:15.000Z | src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 3 | 2019-07-12T22:10:38.000Z | 2019-07-12T22:10:49.000Z | src/command_modules/azure-cli-security/azure/cli/command_modules/security/_params.py | jfcoz/azure-cli | 8459ef3fd3c76d9f99defd95d4c980923891fa6d | [
"MIT"
] | 1 | 2019-06-21T05:08:09.000Z | 2019-06-21T05:08:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands.parameters import resource_group_name_type
from knack.arguments import CLIArgumentType
from ._validators import (validate_alert_status,
validate_auto_provisioning_toggle,
validate_pricing_tier)
name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME', help='name of the resource to be fetched')
home_region_arg_type = CLIArgumentType(options_list=('--home-region', '-hr'), metavar='HOMEREGION', help='home region that was selected for the subscription')
location_arg_type = CLIArgumentType(options_list=('--location', '-l'), metavar='LOCATION', help='location of the resource')
# Alerts
alert_status_arg_type = CLIArgumentType(options_list=('--status'), metavar='STATUS', help='target status of the alert. possible values are "dismiss" and "activate"')
# Auto Provisioning
auto_provisioning_auto_provision_arg_type = CLIArgumentType(options_list=('--auto-provision'), metavar='AUTOPROVISION', help='Automatic provisioning toggle. possible values are "on" or "off"')
# Contacts
contact_email_arg_type = CLIArgumentType(options_list=('--email'), metavar='EMAIL', help='E-mail of the security contact')
contact_phone_arg_type = CLIArgumentType(options_list=('--phone'), metavar='PHONE', help='Phone of the security contact')
contact_alert_notifications_arg_type = CLIArgumentType(options_list=('--alert-notifications'), metavar='ALERTNOTIFICATIONS', help='Whether to send mail notifications to the security contacts')
contact_alerts_admins_arg_type = CLIArgumentType(options_list=('--alerts-admins'), metavar='ALERTADMINS', help='Whether to send mail notifications to the subscription administrators')
# Pricing
pricing_tier_arg_type = CLIArgumentType(options_list=('--tier'), metavar='TIER', help='pricing tier type')
# Workspace settings
workspace_setting_target_workspace_arg_type = CLIArgumentType(options_list=('--target-workspace'), metavar='TARGETWORKSPACE', help='An ID of the workspace resource that will hold the security data')
| 47.728155 | 198 | 0.621237 |
436a24c432c8bd3a3066c5adcc757a189d209bf5 | 332 | py | Python | utils/path_utils.py | kuyu12/pygame_fight_game | 3bbc286b9f33c6d6d9db9bea21f9b7af15247df5 | [
"MIT"
] | 1 | 2020-08-03T07:54:59.000Z | 2020-08-03T07:54:59.000Z | utils/path_utils.py | kuyu12/pygame_fight_game | 3bbc286b9f33c6d6d9db9bea21f9b7af15247df5 | [
"MIT"
] | null | null | null | utils/path_utils.py | kuyu12/pygame_fight_game | 3bbc286b9f33c6d6d9db9bea21f9b7af15247df5 | [
"MIT"
] | null | null | null | import sys
IMAGES_PATH = sys.path[1] + "/Images"
BACKGROUND_IMAGES_PATH = IMAGES_PATH + '/background'
USER_INFO_BACKGROUND_PATH = BACKGROUND_IMAGES_PATH+"/blue_background.jpg"
SPRINT_IMAGE_PATH = IMAGES_PATH + '/sprite'
PROFILE_IMAGES_PATH = IMAGES_PATH + '/profile'
CONFIGURATION_FILES_PATH = sys.path[1] + "/configuration_files" | 36.888889 | 73 | 0.795181 |
436a53c20b8a7b3181b33290aeb94d9c5458f945 | 1,558 | py | Python | tests/models/test_transformers.py | Alicegaz/torchok | 7b8f95df466a25b1ad8ee93bed1a3c7516440cf4 | [
"Apache-2.0"
] | 8 | 2021-10-12T05:39:20.000Z | 2022-03-31T10:55:01.000Z | tests/models/test_transformers.py | Alicegaz/torchok | 7b8f95df466a25b1ad8ee93bed1a3c7516440cf4 | [
"Apache-2.0"
] | 1 | 2022-03-30T19:23:42.000Z | 2022-03-30T19:23:42.000Z | tests/models/test_transformers.py | Alicegaz/torchok | 7b8f95df466a25b1ad8ee93bed1a3c7516440cf4 | [
"Apache-2.0"
] | 5 | 2021-11-17T07:38:28.000Z | 2022-01-31T10:46:36.000Z | import unittest
import torch
from parameterized import parameterized
from src.constructor import create_backbone
from src.models.backbones.utils import list_models
from .test_segmentation import example_backbones
| 38.95 | 99 | 0.717587 |
436c11b07a0ae268fa5c1da96fe20213b0b714a7 | 3,501 | py | Python | aiogram/types/inline_query.py | SvineruS/aiogram | 7892edf45302fa195544430ac5db11dcbcbf7ae6 | [
"MIT"
] | 1 | 2021-01-10T18:04:25.000Z | 2021-01-10T18:04:25.000Z | aiogram/types/inline_query.py | SvineruS/aiogram | 7892edf45302fa195544430ac5db11dcbcbf7ae6 | [
"MIT"
] | 5 | 2021-02-13T14:30:27.000Z | 2021-02-13T17:27:58.000Z | aiogram/types/inline_query.py | SvineruS/aiogram | 7892edf45302fa195544430ac5db11dcbcbf7ae6 | [
"MIT"
] | 1 | 2022-02-10T14:57:27.000Z | 2022-02-10T14:57:27.000Z | import typing
from . import base
from . import fields
from .inline_query_result import InlineQueryResult
from .location import Location
from .user import User
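# Minimal sketch of the elided type (assumed fields and helper, following
# aiogram v2 conventions and the Telegram Bot API `InlineQuery` object):
class InlineQuery(base.TelegramObject):
    id: base.String = fields.Field()
    from_user: User = fields.Field(alias='from', base=User)
    location: Location = fields.Field(base=Location)
    query: base.String = fields.Field()
    offset: base.String = fields.Field()

    async def answer(self, results: typing.List[InlineQueryResult], **kwargs):
        # delegate to the bound bot instance (assumed helper shape)
        return await self.bot.answer_inline_query(self.id, results=results, **kwargs)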
| 52.253731 | 118 | 0.625821 |