hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40e730ac41b56af4d3f51d091a10e9b22fdce408 | 2,200 | py | Python | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.factory import StorageFactory
from programy.config.brain.braintree import BrainBraintreeConfiguration
| 55 | 120 | 0.782727 |
40ea3c645ea543c1874475b7543e5383d030798e | 6,095 | py | Python | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
] | null | null | null | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
] | null | null | null | reana_commons/publisher.py | marcdiazsan/reana-commons | 6e3a64db6798ab86aa521da02fa889459a382083 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Commons module to manage AMQP connections on REANA."""
import json
import logging
from kombu import Connection, Exchange, Queue
from .config import (
MQ_CONNECTION_STRING,
MQ_DEFAULT_EXCHANGE,
MQ_DEFAULT_FORMAT,
MQ_DEFAULT_QUEUES,
MQ_PRODUCER_MAX_RETRIES,
)
| 34.828571 | 85 | 0.620673 |
40ea5c5e0176d43f5d51fa89b969ce72cc0fce56 | 1,219 | py | Python | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | 3 | 2020-03-24T08:06:37.000Z | 2020-03-29T08:53:55.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | 7 | 2020-03-23T12:36:01.000Z | 2020-04-11T08:14:06.000Z | model/commit.py | uniaim-event-team/pullre-kun | 60ee86c399d34254c82974a5debcdcb7d332f2a1 | [
"MIT"
] | null | null | null | from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
| 31.25641 | 108 | 0.721903 |
40eaa3da9e931ca4a3dcce107069762aa322fa53 | 24 | py | Python | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
] | null | null | null | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
] | null | null | null | drae/__init__.py | hso/drae.py | b78772fa055fe5f8acb2bb44d7e7573af277226b | [
"MIT"
] | null | null | null | from drae import search
| 12 | 23 | 0.833333 |
40ead0d637c17ba1e4a9c64f3e4137d28ac75a83 | 13,825 | py | Python | tests/components/template/test_select.py | JeffersonBledsoe/core | 3825f80a2dd087ae70654079cd9f3071289b8423 | [
"Apache-2.0"
] | 5 | 2017-01-26T16:33:09.000Z | 2018-07-20T13:50:47.000Z | tests/components/template/test_select.py | JeffersonBledsoe/core | 3825f80a2dd087ae70654079cd9f3071289b8423 | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | tests/components/template/test_select.py | yuvalkob/home-assistant | 6a5895222ec908acad3cf478897ca2455f88f730 | [
"Apache-2.0"
] | 3 | 2021-05-31T15:32:08.000Z | 2021-08-10T22:08:42.000Z | """The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Represent for select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
| 32.529412 | 132 | 0.502351 |
40eb080a05a597358c0a6ee395b1cbd8baf803e7 | 7,211 | py | Python | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 78d84e19403361e83ef562e738473849f9133bef | [
"RSA-MD"
] | null | null | null | import os
import random
import string
import base64
from django.utils import timezone
from django.contrib.auth.hashers import make_password, check_password
from django.test import TestCase
from parameterized import parameterized
from core.models import Module, EntryPoint, ExternalAuthorizationSession, User
AUTHORIZATION_MODULE_LIST = ["ihna", "google", "mailru"]
| 43.969512 | 113 | 0.627791 |
40eb7e71257ab84eead04db6c8b696939ea7b84e | 6,729 | py | Python | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
] | null | null | null | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
] | null | null | null | cmsfix/lib/macro.py | trmznt/cmsfix | 18d0be238f9247421db9603f1946478452336afb | [
"BSD-2-Clause"
] | null | null | null |
from rhombus.lib.utils import get_dbhandler
from rhombus.lib.tags import *
from cmsfix.models.node import Node
import re
# the pattern below is either
# ///123
# <<MacroName>>
# [[MacroName]]
pattern = re.compile('///(\d+)|///\{([\w-]+)\}|\<\;\<\;(.+)\>\;\>\;|\[\[(.+)\]\]')
# syntax for Macro is:
# [[MacroName|option1|option2|option3]]
def postrender(buffer, node, request):
""" return a new buffer """
dbh = get_dbhandler()
nb = ''
start_pos = 0
for m in pattern.finditer(buffer):
nb += buffer[start_pos:m.start()]
group = m.group()
print(group)
if group.startswith('///'):
nb += node_link(group, dbh)
elif group.startswith('[['):
nb += run_macro(group, node, dbh, request)
else:
nb += '{{ ERR: macro pattern unprocessed }}'
start_pos = m.end()
nb += buffer[start_pos:]
return nb
def postedit(content, node):
""" post edit the content, return a new modified content """
dbh = get_dbhandler()
nc = ''
start_pos = 0
for m in pattern.finditer(content):
nc += content[start_pos:m.start()]
group = m.group()
if group.startswith('///'):
if group[3] != '{':
# convert to UUID
node = dbh.get_node_by_id(int(group[3:]))
nc += ('///{' + str(node.uuid) + '}' if node else group)
else:
nc += group
else:
nc += group
start_pos = m.end()
nc += content[start_pos:]
return nc
_MACROS_ = {}
## -- MACRO --
##
## all macro functions should return either html or literal objects
##
| 24.558394 | 96 | 0.541537 |
40ed1faf7a529d9d2608043132523587818592bc | 2,629 | py | Python | xastropy/sdss/qso.py | bpholden/xastropy | 66aff0995a84c6829da65996d2379ba4c946dabe | [
"BSD-3-Clause"
] | 3 | 2015-08-23T00:32:58.000Z | 2020-12-31T02:37:52.000Z | xastropy/sdss/qso.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
] | 104 | 2015-07-17T18:31:54.000Z | 2018-06-29T17:04:09.000Z | xastropy/sdss/qso.py | Kristall-WangShiwei/xastropy | 723fe56cb48d5a5c4cdded839082ee12ef8c6732 | [
"BSD-3-Clause"
] | 16 | 2015-07-17T15:50:37.000Z | 2019-04-21T03:42:47.000Z | '''
#;+
#; NAME:
#; sdss.qso
#; Version 1.1
#;
#; PURPOSE:
#; Class for SDSS QSO
#; 2015 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
import os
from astropy.table import QTable, Column
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
| 30.218391 | 80 | 0.573602 |
40ee9a52429bac1502e511dda17968ae00643dd6 | 41 | py | Python | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
] | null | null | null | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
] | null | null | null | ez_sten/__init__.py | deadlift1226/ez-sten | 7f754e5648ce6b7d5207a901618b77a8e4382c86 | [
"MIT"
] | null | null | null | name = "module"
from .module import func
| 13.666667 | 24 | 0.731707 |
40ef2f9956caa7a12ca34a8e2817ab06584f9a11 | 3,110 | py | Python | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
] | 81 | 2015-01-19T18:17:31.000Z | 2022-03-17T07:14:43.000Z | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
] | 159 | 2015-02-05T01:54:52.000Z | 2022-03-30T22:44:39.000Z | wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | a4571e71cb5b9869c81790f8abb1bb7fba8fdb02 | [
"Apache-2.0"
] | 70 | 2015-01-02T15:22:39.000Z | 2022-02-11T00:33:07.000Z | import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
if __name__ == "__main__":
unittest.main()
| 40.921053 | 112 | 0.630868 |
40f05be8c6d026f9f65c428c8494f859b10c0a2f | 6,848 | py | Python | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
] | 1 | 2018-04-15T07:36:22.000Z | 2018-04-15T07:36:22.000Z | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
] | null | null | null | lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 5166a366fca850a72de66e5ac48c421d4bb766f4 | [
"Unlicense"
] | null | null | null | #-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example for linear regression in tensorflow
Which is a curve fitting example
written by Jaewook Kang @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
# which will be feeded by training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define relationshitp btw instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize error using MSE function
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
## Performance evaluation model ========================_y===========
# y : data output
# pred_y: prediction output by model, a x^3 + b x^2 + c x + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init) # this for variable use
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
# feed traing data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feedign training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
| 32.923077 | 131 | 0.631425 |
40f1379ab73e0f4b4e9297a1caebe96d0365e7e2 | 577 | py | Python | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | app/route/stats/route.py | LifeLaboratory/finopolis_backend | 56aac8e0b92193c627b68f3d029f6f804d001db3 | [
"MIT"
] | null | null | null | # coding=utf-8
from app.route.stats.processor import *
from app.api.base.base_router import BaseRouter
from app.api.base import base_name as names
| 25.086957 | 100 | 0.646447 |
40f148fc7af6cb3cf9e625820f51746d54b4fd9d | 1,168 | py | Python | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
] | 4 | 2021-12-18T20:33:16.000Z | 2022-01-03T02:54:13.000Z | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
] | null | null | null | script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 6e9ccfd281bd317a56b8c4e87b5386978eb8de45 | [
"MIT"
] | null | null | null | from Bio import TogoWS
import argparse
import sys
import os
if __name__ == '__main__':
## description - Text to display before the argument help (default: none)
parser=argparse.ArgumentParser(description='mbmeth')
parser.add_argument("-i", '--input', help="Input list")
parser.add_argument("-s", '--species', help="species")
options = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
summary(options)
| 29.948718 | 77 | 0.5625 |
40f24ffc2a5ce750fd7226190ea187a0e43d6f6d | 296 | py | Python | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
] | 51 | 2018-04-18T13:52:15.000Z | 2022-03-23T13:46:02.000Z | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
] | 26 | 2019-05-26T02:22:34.000Z | 2022-03-14T07:50:32.000Z | borax/patterns/singleton.py | kinegratii/borax | 3595f554b788c31d0f07be4099db68c854db65f7 | [
"MIT"
] | 7 | 2018-09-30T08:17:29.000Z | 2020-12-16T01:49:24.000Z | # coding=utf8
| 22.769231 | 62 | 0.597973 |
40f2de4fdec91fb98024a2bfc2b3ed4d725f2c72 | 5,108 | py | Python | aiida/backends/general/migrations/utils.py | pranavmodx/aiida-core | 0edbbf82dfb97ab130914d1674a6f2217eba5971 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | aiida/backends/general/migrations/utils.py | pranavmodx/aiida-core | 0edbbf82dfb97ab130914d1674a6f2217eba5971 | [
"BSD-2-Clause",
"MIT"
] | 2 | 2019-03-06T11:23:42.000Z | 2020-03-09T09:34:07.000Z | aiida/backends/general/migrations/utils.py | lorisercole/aiida-core | 84c2098318bf234641219e55795726f99dc25a16 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
| 34.053333 | 119 | 0.66758 |
40f3ddcdfc03bc9856328d9f89786ad5e9dd0772 | 88 | py | Python | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | src/models/__init__.py | DwaraknathT/sparsity | 705f2cba074e6ab4f7655c6af98882773cd826bf | [
"MIT"
] | null | null | null | __all__ = ["transformers", "vision"]
from .transformers import *
from .vision import *
| 17.6 | 36 | 0.715909 |
40f4220eb6198005a87664aaa2c6ba2fd068a95c | 350 | py | Python | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests the checker's ability to enforce
# type invariance for type arguments.
# pyright: strict
from typing import Dict, Union
foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}
# This should generate an error because
# both type parameters for Dict are invariant,
# and str isn't assignable to Union[int, str].
foo = bar
| 23.333333 | 52 | 0.72 |
40f50e67874d55319f2743b79ff2d604900796f7 | 224 | py | Python | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
] | null | null | null | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
] | null | null | null | test.py | Naveenkhasyap/udacity-ml | 6df851f7b21dee120a8e8f246df7961ea065eeac | [
"MIT"
] | null | null | null | how_many_snakes = 1
snake_string = """
Welcome to Python3!
____
/ . .\\
\\ ---<
\\ /
__________/ /
-=:___________/
<3, Juno
"""
print(snake_string * how_many_snakes) | 14 | 37 | 0.473214 |
40f5c3fea77f91c61ea3a74c27daae2c26011e43 | 658 | py | Python | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | webguru001/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | 5 | 2019-05-17T01:30:02.000Z | 2021-06-17T21:02:58.000Z | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | Nelson_Alvarez/Assignments/flask_fund/ninja_turtle/turtle.py | curest0x1021/Python-Django-Web | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import render_template, redirect, session, request
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
app.run(debug=True) | 22.689655 | 102 | 0.682371 |
40f5d8bb4fa97a86898d698a3335896827401fd2 | 941 | py | Python | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
] | 15 | 2018-02-27T13:07:00.000Z | 2021-01-29T10:27:41.000Z | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
] | 3 | 2021-03-20T05:43:51.000Z | 2022-02-11T03:47:50.000Z | neo/Network/Inventory.py | BSathvik/neo-python | 90eddde0128f8ba41207d88fd68041682e307315 | [
"MIT"
] | 6 | 2018-07-13T05:00:44.000Z | 2020-10-28T19:41:54.000Z | # -*- coding:utf-8 -*-
"""
Description:
Inventory Class
Usage:
from neo.Network.Inventory import Inventory
"""
from neo.IO.MemoryStream import MemoryStream
from neocore.IO.BinaryWriter import BinaryWriter
| 18.82 | 48 | 0.587673 |
40f5e193e0cc75def4b2ba8e4e082e5183a4bea7 | 4,748 | py | Python | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | 3 | 2021-05-14T08:13:09.000Z | 2021-05-26T11:25:35.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | 27 | 2021-05-13T08:43:19.000Z | 2021-08-24T17:19:36.000Z | tests/test_api_gateway/test_common/test_exceptions.py | Clariteia/api_gateway_common | e68095f31091699fc6cc4537bd6acf97a8dc6c3e | [
"MIT"
] | null | null | null | """
Copyright (C) 2021 Clariteia SL
This file is part of minos framework.
Minos framework can not be copied and/or distributed without the express permission of Clariteia SL.
"""
import unittest
from minos.api_gateway.common import (
EmptyMinosModelSequenceException,
MinosAttributeValidationException,
MinosConfigDefaultAlreadySetException,
MinosConfigException,
MinosException,
MinosMalformedAttributeException,
MinosModelAttributeException,
MinosModelException,
MinosParseAttributeException,
MinosRepositoryAggregateNotFoundException,
MinosRepositoryDeletedAggregateException,
MinosRepositoryException,
MinosRepositoryManuallySetAggregateIdException,
MinosRepositoryManuallySetAggregateVersionException,
MinosRepositoryNonProvidedException,
MinosRepositoryUnknownActionException,
MinosReqAttributeException,
MinosTypeAttributeException,
MultiTypeMinosModelSequenceException,
)
if __name__ == "__main__":
unittest.main()
| 39.566667 | 117 | 0.771272 |
40f7a744294465f0d9fa2d8e7fd481a7d36370d7 | 977 | py | Python | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 177 | 2018-01-05T01:46:07.000Z | 2018-03-09T05:32:45.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 15 | 2018-01-05T03:28:38.000Z | 2018-01-17T03:04:06.000Z | native_prophet.py | 1143048123/cddh | 52d91f02359af659343b8c4ad4f2ba349de20852 | [
"MIT"
] | 55 | 2018-01-05T05:24:55.000Z | 2018-01-25T11:53:38.000Z | # coding: utf-8
# quote from kmaiya/HQAutomator
#
import time
import json
import requests
import webbrowser
questions = []
if __name__ == '__main__':
main()
| 25.710526 | 87 | 0.58956 |
40f82a11d157a4c060d3cd0a073c10873cb2a999 | 21,936 | py | Python | src/utils/TensorflowModel_pb2.py | nicolas-ivanov/MimicAndRephrase | 446674e1e6af133618e0e9888c3650c0ce9012e4 | [
"MIT"
] | 12 | 2019-06-17T19:41:35.000Z | 2022-02-17T19:51:45.000Z | src/utils/TensorflowModel_pb2.py | nicolas-ivanov/MimicAndRephrase | 446674e1e6af133618e0e9888c3650c0ce9012e4 | [
"MIT"
] | 1 | 2021-02-23T15:28:32.000Z | 2021-02-23T15:28:32.000Z | src/utils/TensorflowModel_pb2.py | isabella232/MimicAndRephrase | bd29a995b211cb4f7933fa990b0bba1564c22450 | [
"MIT"
] | 3 | 2020-09-07T16:44:11.000Z | 2020-11-14T19:00:05.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: TensorflowModel.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='TensorflowModel.proto',
package='ai.eloquent',
syntax='proto3',
serialized_pb=_b('\n\x15TensorflowModel.proto\x12\x0b\x61i.eloquent\"\x8f\x01\n\x0fTensorflowModel\x12\x18\n\x10serialized_graph\x18\x01 \x01(\x0c\x12.\n\x0ctoken_mapper\x18\x02 \x01(\x0b\x32\x18.ai.eloquent.TokenMapper\x12\x16\n\x0etrain_set_size\x18\x04 \x01(\x03\x12\x1a\n\x12train_set_positive\x18\x05 \x01(\x03\"\x80\x01\n\x0cTokenMapping\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.ai.eloquent.TokenMappingType\x12\r\n\x05regex\x18\x02 \x01(\t\x12\x10\n\x08num_hash\x18\x03 \x01(\x05\x12\x12\n\ndebug_base\x18\x04 \x01(\t\x12\x0e\n\x06tokens\x18\x05 \x03(\t\"\x9d\x01\n\x0bTokenMapper\x12\x30\n\rtoken_mapping\x18\x01 \x03(\x0b\x32\x19.ai.eloquent.TokenMapping\x12.\n\x0bunk_mapping\x18\x02 \x03(\x0b\x32\x19.ai.eloquent.TokenMapping\x12,\n\x07vectors\x18\x03 \x03(\x0b\x32\x1b.ai.eloquent.TunedEmbedding\"\x1f\n\x0eTunedEmbedding\x12\r\n\x05value\x18\x01 \x03(\x02\"\xf0\x03\n\x1aTensorflowModelPerformance\x12\x0f\n\x07\x64\x65v_set\x18\x01 \x03(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x17\n\x0f\x64\x65v_set_version\x18\x03 \x01(\x03\x12\x16\n\x0etrain_set_size\x18\x04 \x01(\x03\x12\x1d\n\x15train_set_total_votes\x18\x05 \x01(\x03\x12\x14\n\x0c\x64\x65v_set_size\x18\x06 \x01(\x03\x12\x1b\n\x13\x64\x65v_set_total_votes\x18\x07 \x01(\x03\x12\x12\n\nbest_epoch\x18\x08 \x01(\x05\x12\x0f\n\x07\x64ropout\x18\t \x01(\x02\x12\x13\n\x0brandom_seed\x18\n \x01(\x05\x12\x12\n\nhidden_dim\x18\x0b \x01(\x05\x12\x15\n\rtrue_positive\x18\x0c \x01(\x03\x12\x16\n\x0e\x66\x61lse_positive\x18\r \x01(\x03\x12\x16\n\x0e\x66\x61lse_negative\x18\x0e \x01(\x03\x12\x15\n\rtrue_negative\x18\x0f \x01(\x03\x12\x11\n\tprecision\x18\x10 \x01(\x02\x12\x0e\n\x06recall\x18\x11 \x01(\x02\x12\n\n\x02\x66\x31\x18\x12 \x01(\x02\x12\x10\n\x08\x61\x63\x63uracy\x18\x13 \x01(\x02\x12@\n\x08\x65xamples\x18\x14 \x03(\x0b\x32..ai.eloquent.TensorflowModelPerformanceExample\"S\n!TensorflowModelPerformanceExample\x12\r\n\x05input\x18\x01 \x03(\t\x12\x0f\n\x07guesses\x18\x02 \x03(\x02\x12\x0e\n\x06labels\x18\x03 
\x03(\x05*2\n\x10TokenMappingType\x12\t\n\x05REGEX\x10\x00\x12\x08\n\x04HASH\x10\x01\x12\t\n\x05TOKEN\x10\x02\x42)\n\x10\x61i.eloquent.dataB\x15TensorflowModelProtosb\x06proto3')
)
_TOKENMAPPINGTYPE = _descriptor.EnumDescriptor(
name='TokenMappingType',
full_name='ai.eloquent.TokenMappingType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REGEX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HASH', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOKEN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1092,
serialized_end=1142,
)
_sym_db.RegisterEnumDescriptor(_TOKENMAPPINGTYPE)
TokenMappingType = enum_type_wrapper.EnumTypeWrapper(_TOKENMAPPINGTYPE)
REGEX = 0
HASH = 1
TOKEN = 2
_TENSORFLOWMODEL = _descriptor.Descriptor(
name='TensorflowModel',
full_name='ai.eloquent.TensorflowModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='serialized_graph', full_name='ai.eloquent.TensorflowModel.serialized_graph', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='token_mapper', full_name='ai.eloquent.TensorflowModel.token_mapper', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_size', full_name='ai.eloquent.TensorflowModel.train_set_size', index=2,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_positive', full_name='ai.eloquent.TensorflowModel.train_set_positive', index=3,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=182,
)
_TOKENMAPPING = _descriptor.Descriptor(
name='TokenMapping',
full_name='ai.eloquent.TokenMapping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='ai.eloquent.TokenMapping.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='regex', full_name='ai.eloquent.TokenMapping.regex', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_hash', full_name='ai.eloquent.TokenMapping.num_hash', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_base', full_name='ai.eloquent.TokenMapping.debug_base', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tokens', full_name='ai.eloquent.TokenMapping.tokens', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=185,
serialized_end=313,
)
_TOKENMAPPER = _descriptor.Descriptor(
name='TokenMapper',
full_name='ai.eloquent.TokenMapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token_mapping', full_name='ai.eloquent.TokenMapper.token_mapping', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unk_mapping', full_name='ai.eloquent.TokenMapper.unk_mapping', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='vectors', full_name='ai.eloquent.TokenMapper.vectors', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=316,
serialized_end=473,
)
_TUNEDEMBEDDING = _descriptor.Descriptor(
name='TunedEmbedding',
full_name='ai.eloquent.TunedEmbedding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='ai.eloquent.TunedEmbedding.value', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=475,
serialized_end=506,
)
_TENSORFLOWMODELPERFORMANCE = _descriptor.Descriptor(
name='TensorflowModelPerformance',
full_name='ai.eloquent.TensorflowModelPerformance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dev_set', full_name='ai.eloquent.TensorflowModelPerformance.dev_set', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='ai.eloquent.TensorflowModelPerformance.version', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_version', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_version', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_size', full_name='ai.eloquent.TensorflowModelPerformance.train_set_size', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_set_total_votes', full_name='ai.eloquent.TensorflowModelPerformance.train_set_total_votes', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_size', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_size', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dev_set_total_votes', full_name='ai.eloquent.TensorflowModelPerformance.dev_set_total_votes', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='best_epoch', full_name='ai.eloquent.TensorflowModelPerformance.best_epoch', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout', full_name='ai.eloquent.TensorflowModelPerformance.dropout', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='random_seed', full_name='ai.eloquent.TensorflowModelPerformance.random_seed', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hidden_dim', full_name='ai.eloquent.TensorflowModelPerformance.hidden_dim', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_positive', full_name='ai.eloquent.TensorflowModelPerformance.true_positive', index=11,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_positive', full_name='ai.eloquent.TensorflowModelPerformance.false_positive', index=12,
number=13, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_negative', full_name='ai.eloquent.TensorflowModelPerformance.false_negative', index=13,
number=14, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_negative', full_name='ai.eloquent.TensorflowModelPerformance.true_negative', index=14,
number=15, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='precision', full_name='ai.eloquent.TensorflowModelPerformance.precision', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='recall', full_name='ai.eloquent.TensorflowModelPerformance.recall', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='f1', full_name='ai.eloquent.TensorflowModelPerformance.f1', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accuracy', full_name='ai.eloquent.TensorflowModelPerformance.accuracy', index=18,
number=19, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='examples', full_name='ai.eloquent.TensorflowModelPerformance.examples', index=19,
number=20, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=509,
serialized_end=1005,
)
_TENSORFLOWMODELPERFORMANCEEXAMPLE = _descriptor.Descriptor(
name='TensorflowModelPerformanceExample',
full_name='ai.eloquent.TensorflowModelPerformanceExample',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ai.eloquent.TensorflowModelPerformanceExample.input', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='guesses', full_name='ai.eloquent.TensorflowModelPerformanceExample.guesses', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='labels', full_name='ai.eloquent.TensorflowModelPerformanceExample.labels', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1007,
serialized_end=1090,
)
_TENSORFLOWMODEL.fields_by_name['token_mapper'].message_type = _TOKENMAPPER
_TOKENMAPPING.fields_by_name['type'].enum_type = _TOKENMAPPINGTYPE
_TOKENMAPPER.fields_by_name['token_mapping'].message_type = _TOKENMAPPING
_TOKENMAPPER.fields_by_name['unk_mapping'].message_type = _TOKENMAPPING
_TOKENMAPPER.fields_by_name['vectors'].message_type = _TUNEDEMBEDDING
_TENSORFLOWMODELPERFORMANCE.fields_by_name['examples'].message_type = _TENSORFLOWMODELPERFORMANCEEXAMPLE
DESCRIPTOR.message_types_by_name['TensorflowModel'] = _TENSORFLOWMODEL
DESCRIPTOR.message_types_by_name['TokenMapping'] = _TOKENMAPPING
DESCRIPTOR.message_types_by_name['TokenMapper'] = _TOKENMAPPER
DESCRIPTOR.message_types_by_name['TunedEmbedding'] = _TUNEDEMBEDDING
DESCRIPTOR.message_types_by_name['TensorflowModelPerformance'] = _TENSORFLOWMODELPERFORMANCE
DESCRIPTOR.message_types_by_name['TensorflowModelPerformanceExample'] = _TENSORFLOWMODELPERFORMANCEEXAMPLE
DESCRIPTOR.enum_types_by_name['TokenMappingType'] = _TOKENMAPPINGTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorflowModel = _reflection.GeneratedProtocolMessageType('TensorflowModel', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODEL,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModel)
))
_sym_db.RegisterMessage(TensorflowModel)
TokenMapping = _reflection.GeneratedProtocolMessageType('TokenMapping', (_message.Message,), dict(
DESCRIPTOR = _TOKENMAPPING,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TokenMapping)
))
_sym_db.RegisterMessage(TokenMapping)
TokenMapper = _reflection.GeneratedProtocolMessageType('TokenMapper', (_message.Message,), dict(
DESCRIPTOR = _TOKENMAPPER,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TokenMapper)
))
_sym_db.RegisterMessage(TokenMapper)
TunedEmbedding = _reflection.GeneratedProtocolMessageType('TunedEmbedding', (_message.Message,), dict(
DESCRIPTOR = _TUNEDEMBEDDING,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TunedEmbedding)
))
_sym_db.RegisterMessage(TunedEmbedding)
TensorflowModelPerformance = _reflection.GeneratedProtocolMessageType('TensorflowModelPerformance', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODELPERFORMANCE,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModelPerformance)
))
_sym_db.RegisterMessage(TensorflowModelPerformance)
TensorflowModelPerformanceExample = _reflection.GeneratedProtocolMessageType('TensorflowModelPerformanceExample', (_message.Message,), dict(
DESCRIPTOR = _TENSORFLOWMODELPERFORMANCEEXAMPLE,
__module__ = 'TensorflowModel_pb2'
# @@protoc_insertion_point(class_scope:ai.eloquent.TensorflowModelPerformanceExample)
))
_sym_db.RegisterMessage(TensorflowModelPerformanceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\020ai.eloquent.dataB\025TensorflowModelProtos'))
# @@protoc_insertion_point(module_scope)
| 42.594175 | 2,175 | 0.746034 |
40f93ae054bebaa285f8c2f48242d86d8297b31f | 8,460 | py | Python | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | python/ht/nodes/styles/styles.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """Classes representing color entries and mappings."""
# =============================================================================
# IMPORTS
# =============================================================================
from __future__ import annotations
# Standard Library
import re
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
import hou
# =============================================================================
# CLASSES
# =============================================================================
class StyleRule:
"""This class represents a color application bound to a name.
:param name: The rule's name.
:param color: The rule's color.
:param color_type: The rule's color type.
:param shape: The rule's shape.
:param file_path: The path to the definition file.
:return:
"""
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# NON-PUBLIC METHODS
# -------------------------------------------------------------------------
def _get_typed_color_value(self) -> Tuple[float]:
"""Get the appropriately typed color values.
:return: The color value in the correct type.
"""
to_func = getattr(self.color, self.color_type.lower())
return to_func()
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def apply_to_node(self, node: hou.Node):
"""Apply styling to a node.
:param node: Node to apply to
:return:
"""
if self.color is not None:
node.setColor(self.color)
if self.shape is not None:
node.setUserData("nodeshape", self.shape)
class ConstantRule:
"""This class represents a style application bound to a named constant.
:param name: The rule's name.
:param constant_name: The constant name.
:param file_path: The path to the definition file.
:return:
"""
# -------------------------------------------------------------------------
# SPECIAL METHODS
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
| 28.389262 | 87 | 0.450473 |
40f9e62c7e463cdddcd04524566bd56b8cb59940 | 1,407 | py | Python | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | src/sntk/kernels/ntk.py | gear/s-ntk | 3cd72cef4c941941750e03820c9c2850b81d529e | [
"MIT"
] | null | null | null | import math
import numpy as np
# return an array K of size (d_max, d_max, N, N), K[i][j] is kernel value of depth i + 1 with first j layers fixed
# return an array K of size (N, N), depth d_max, first fix_dep layers fixed | 40.2 | 115 | 0.509595 |
40fbdeebc9d14240c78ed2bb4a08d9c0a87ce714 | 1,509 | py | Python | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | nlpproject/main/words.py | Hrishi2312/IR-reimagined | 2bcaf207a402bdae9fc39be516ccb607ce78d174 | [
"MIT"
] | null | null | null | import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.tokenize import sent_tokenize , word_tokenize
import glob
import re
import os
import numpy as np
import sys
nltk.download('stopwords')
nltk.download('punkt')
Stopwords = set(stopwords.words('english'))
all_words = []
dict_global = {}
file_folder = 'main/documents/*'
idx = 1
files_with_index = {}
for file in glob.glob(file_folder):
fname = file
file = open(file , "r")
text = file.read()
text = remove_special_characters(text)
text = re.sub(re.compile('\d'),'',text)
sentences = sent_tokenize(text)
words = word_tokenize(text)
words = [word for word in words if len(words)>1]
words = [word.lower() for word in words]
words = [word for word in words if word not in Stopwords]
dict_global.update(finding_all_unique_words_and_freq(words))
files_with_index[idx] = os.path.basename(fname)
idx = idx + 1
unique_words_all = set(dict_global.keys())
| 28.471698 | 64 | 0.705765 |
40fd39b618c9cae6572cdfad086049a95c4b491f | 4,911 | py | Python | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
] | null | null | null | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
] | 10 | 2015-02-10T17:10:33.000Z | 2018-04-05T10:05:01.000Z | oseoserver/operations/describeresultaccess.py | pyoseo/oseoserver | 8c97ee5a7d698cc989e1c8cab8cfe0db78491307 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Ricardo Garcia Silva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the OSEO DescribeResultAccess operation"""
from __future__ import absolute_import
import logging
import datetime as dt
from django.core.exceptions import ObjectDoesNotExist
import pytz
import pyxb
import pyxb.bundles.opengis.oseo_1_0 as oseo
from .. import errors
from .. import models
from ..models import Order
from .. import utilities
logger = logging.getLogger(__name__)
def describe_result_access(request, user):
"""Implements the OSEO DescribeResultAccess operation.
This operation returns the location of the order items that are
ready to be downloaded by the user.
The DescribeResultAccess operation only reports on the availability
of order items that specify onlineDataAccess as their delivery option.
Parameters
----------
request: oseo.DescribeResultAccess
The incoming request
user: django.contrib.auth.User
The django user that placed the request
Returns
-------
response: oseo.SubmitAck
The response SubmitAck instance
"""
try:
order = Order.objects.get(id=request.orderId)
except ObjectDoesNotExist:
raise errors.InvalidOrderIdentifierError()
if order.user != user:
raise errors.AuthorizationFailedError
completed_items = get_order_completed_items(order, request.subFunction)
logger.debug("completed_items: {}".format(completed_items))
order.last_describe_result_access_request = dt.datetime.now(pytz.utc)
order.save()
response = oseo.DescribeResultAccessResponse(status='success')
item_id = None
for item in completed_items:
iut = oseo.ItemURLType()
iut.itemId = item_id or item.item_specification.item_id
iut.productId = oseo.ProductIdType(
identifier=item.identifier,
)
iut.productId.collectionId = utilities.get_collection_identifier(
item.item_specification.collection)
iut.itemAddress = oseo.OnLineAccessAddressType()
iut.itemAddress.ResourceAddress = pyxb.BIND()
iut.itemAddress.ResourceAddress.URL = item.url
iut.expirationDate = item.expires_on
response.URLs.append(iut)
return response
def get_order_completed_items(order, behaviour):
"""Get the completed order items for product orders.
Parameters
----------
order: oseoserver.models.Order
The order for which completed items are to be returned
behaviour: str
Either 'allReady' or 'nextReady', as defined in the OSEO
specification
Returns
--------
list
The completed order items for this order
"""
batches = order.batches.all()
all_complete = []
for batch in batches:
complete_items = get_batch_completed_items(batch, behaviour)
all_complete.extend(complete_items)
return all_complete
| 34.584507 | 76 | 0.696192 |
40feb012148cebe6483dabf37d02607456645a00 | 2,210 | py | Python | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
] | null | null | null | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
] | null | null | null | utils/decorator/dasyncio.py | masonsxu/red-flask | e8b978ee08072efcb2b3b7964065f272d8c875ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# python Threading
import time
from functools import wraps
from threading import Thread
def async_call(fn):
"""()
Args:
:fn(function):
Return:
:wrapper(function):
"""
return wrapper
def async_pool(pool_links):
"""
Args:
:pool_links(int):
Returns:
:sub_wrapper(function):
"""
return sub_wrapper
def async_retry(retry_times, space_time):
""" call pool
Args:
:retry_times(int):
"""
return sub_wrapper
#
# @async_call
# def sleep2andprint():
# time.sleep(2)
# print('22222222')
# @async_pool(pool_links=5)
# def pools():
# time.sleep(1)
# print('hehe')
# @async_retry(retry_times=3, space_time=1)
# def check():
# a = 1
# b = '2'
# print(a + b)
# def check_all():
# print('async_call')
# print('111111')
# sleep2andprint()
# print('333333')
# print('333322222')
# print('async_pool')
# pools()
# print('5hehe')
# print('async_retry')
# check()
# print('')
# print(check.__name__)
# print(sleep2andprint.__name__)
# print(pools.__name__)
# check_all()
| 19.557522 | 69 | 0.570588 |
40ff8361da6ba11cdb915421c267126671120831 | 872 | py | Python | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
] | null | null | null | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
] | null | null | null | oo/pessoa.py | wfs18/pythonbirds | aa3332763f9109c1fb7f1140a82a4b51c6402fdb | [
"MIT"
] | null | null | null |
# Manual smoke test for the Person class (presumably defined earlier in this
# module): exercises attribute access, dynamic attributes, and static/class
# methods when the module is run directly.
if __name__ == '__main__':
    p = Person()
    eu = Person(name='marcio')
    wes = Person(eu, name='Wesley')
    print(p.cumprimentar())
    print(p.year) # instance attribute
    print(p.name) # class-level data attribute
    for filhos in wes.children:
        print(filhos.year)
    p.sobre = 'eu'  # attribute added dynamically to this one instance
    print(p.sobre)
    del p.sobre  # dynamically added attributes can be removed again
    print(p.__dict__)
    print(p.olhos)
    print(eu.olhos)
    print(p.metodo_estatico(), eu.metodo_estatico())
    print(p.metodo_classe(), eu.metodo_classe())
| 22.947368 | 53 | 0.605505 |
40ff943d89da7510322d2d4989457bad5b652c0f | 179 | py | Python | tests/integration/test_combined.py | jonathan-winn-geo/new-repo-example | 2fbc54b1d42c57ca1105b1066e47627832cc8185 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/test_combined.py | jonathan-winn-geo/new-repo-example | 2fbc54b1d42c57ca1105b1066e47627832cc8185 | [
"BSD-3-Clause"
] | 85 | 2020-08-12T15:59:48.000Z | 2022-01-17T10:28:56.000Z | tests/integration/test_combined.py | cma-open/cmatools | ce5743dca7c5bf1f6ab7fe3af24893a65d0c2db7 | [
"BSD-3-Clause"
] | null | null | null | """Test combined function."""
from cmatools.combine.combine import combined
def test_combined():
"""Test of combined function"""
assert combined() == "this hello cma"
| 17.9 | 45 | 0.692737 |
dc002c294c966dc124207adcde546a050c2603e1 | 1,323 | py | Python | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
] | null | null | null | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
] | 1 | 2018-10-05T14:38:22.000Z | 2018-10-05T14:38:22.000Z | elastalert_modules/top_count_keys_enhancement.py | OpenCoreCH/elastalert | 28502d8e81e67649976a6a3d2ccc198a5dd60631 | [
"Apache-2.0"
] | 4 | 2018-10-05T12:11:42.000Z | 2022-01-31T10:31:26.000Z | """Enhancement to reformat `top_events_X`
from match in order to reformat and put it
back to be able to use in alert message.
New format:
top_events_keys_XXX -- contains array of corresponding key values defined in `top_count_keys`,
where `XXX` key from `top_count_keys` array.
top_events_values_XXX -- contains array of corresponding counts.
Example:
Original:
{"top_events_KEY.NAME":{"key_value1": 10, "key_value2": 20}}
Reformatted:
{
"top_events_keys_KEY.NAME":["key_value1", "key_value2"]
"top_events_values_KEY.NAME":[10, 20]
}
Can be used in the rule like:
top_count_keys:
- 'KEY.NAME'
match_enhancements:
- 'elastalert_modules.top_count_keys_enhancement.Enhancement'
alert_text_args:
- top_events_keys_KEY.NAME[0]
"""
from elastalert.enhancements import BaseEnhancement
| 31.5 | 94 | 0.675737 |
dc0041528fa6c63f72d3e18e309efd1fc5282e9f | 4,054 | py | Python | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
] | null | null | null | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
] | null | null | null | nets.py | koreyou/SWEM-chainer | 728443fb5fc53409648d8bff3ae3e545fb9ac36c | [
"MIT"
] | null | null | null | import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import reporter
embed_init = chainer.initializers.Uniform(.25)
def block_embed(embed, x, dropout=0.):
    """Look up word embeddings and reshape them for convolution layers.

    Args:
        embed (callable): A :func:`~chainer.functions.embed_id` function
            or :class:`~chainer.links.EmbedID` link.
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable, which
            is a :math:`(B, L)`-shaped int array. Its first dimension
            :math:`(B)` is assumed to be the *minibatch dimension*.
            The second dimension :math:`(L)` is the length of padded
            sentences.
        dropout (float): Dropout ratio applied to the embeddings.

    Returns:
        ~chainer.Variable: Output variable. A float array with shape
        of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
        of word embedding.

    """
    embedded = F.dropout(embed(x), ratio=dropout)
    # (B, L, N) -> (B, N, L), then append a trailing singleton axis so the
    # result can be fed to 2-D convolution layers.
    embedded = F.transpose(embedded, (0, 2, 1))
    return embedded[:, :, :, None]
| 34.355932 | 88 | 0.620868 |
dc00b897bcfec50069749b3f13a2b807436fbaab | 904 | py | Python | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
] | null | null | null | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
] | null | null | null | src/entities/users.py | MillaKelhu/ohtu-lukuvinkkikirjasto | d195e53824bc5d13ded97112a8c388e05775666c | [
"MIT"
] | null | null | null | from flask_login import UserMixin
| 23.179487 | 60 | 0.634956 |
dc00c9713e8a8c4632743cc1feb90632ddde0bf5 | 13,726 | py | Python | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
] | null | null | null | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
] | null | null | null | artifacts/kernel_db/autotvm_scripts/tune_tilling_dense_select_codegen.py | LittleQili/nnfusion | 6c1a25db5be459a1053798f1c75bfbd26863ed08 | [
"MIT"
] | 1 | 2021-08-11T09:09:53.000Z | 2021-08-11T09:09:53.000Z | """
matmul autotvm
[batch,in_dim] x [in_dim,out_dim]
search_matmul_config(batch,in_dim,out_dim,num_trials):
input: batch,in_dim,out_dim,num_trials
[batch,in_dim] x [in_dim,out_dim]
num_trials: num of trials, default: 1000
output: log (json format)
use autotvm to search configs for the matmul
lookup_matmul_config():
find a proper matmul config
note: trade off kernel's performance and grid & block size
launch_matmul_from_config(config):
input: config (json string)
usage:
1. use search_matmul_config(batch,in_dim,out_dim,num_trials) to search configs
2. use lookup_matmul_config() to get a proper config
3. write the config (in json format) to "matmul_config.json"
4. use launch_matmul_from_config("matmul_config.json") to print the matmul kernel code
"""
import numpy as np
import tvm
import logging
import sys
from tvm import autotvm
import topi
import json
import os
from topi.util import get_const_tuple
import tensorflow as tf
flags = tf.flags
flags.DEFINE_string("input_path", "", "path of input file")
flags.DEFINE_string("autotvm_log", "../autotvm_logs/all_tuned_tilling_dense_nn.1000.log", "path of autotvm tuning log")
flags.DEFINE_string("tvm_profile_log",
"/tmp/tvm_profile.log", "path of tvm profile")
flags.DEFINE_string("output_path", "", "path of output file")
FLAGS = flags.FLAGS
output_log_file = "matmul_nn_autotvm_select_result.log"
if os.path.exists(output_log_file):
os.remove(output_log_file)
lookup_matmul_config(4, 256, 256, output_log_file)
lookup_matmul_config(16, 256, 256, output_log_file)
dot_ops = extract_ops_from_log()
topi_ops = generate_db_topi_ops(dot_ops, output_log_file)
with open(FLAGS.output_path, 'w') as fout:
json.dump(topi_ops, fout)
os.remove(output_log_file) | 35.4677 | 159 | 0.633396 |
dc00d047f5d2f7ce7b721b7c45d3556d9ebe4b5d | 2,240 | py | Python | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/activity/admin.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from .models import ActivityLog, ReviewActionReasonLog
from olympia.reviewers.models import ReviewActionReason
admin.site.register(ActivityLog, ActivityLogAdmin)
admin.site.register(ReviewActionReasonLog, ReviewActionReasonLogAdmin)
| 26.666667 | 87 | 0.634821 |
dc01dc4bc345b863361dbfcbff2946a74c676b49 | 1,261 | py | Python | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | 4 | 2021-04-22T19:19:13.000Z | 2022-02-10T09:26:58.000Z | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | null | null | null | modules/nmap_script/address_info.py | naimkowshik/reyna-eye | f729ec964e586ae3f63ff29fd524f7aed3748a74 | [
"MIT"
] | 1 | 2022-02-03T19:29:46.000Z | 2022-02-03T19:29:46.000Z | import subprocess
import sys
import time
import os

# ------------------------------------------------------------------
# ANSI escape sequences used to colour the interactive shell output.
# ------------------------------------------------------------------
R = "\033[1;31m"   # red
B = "\033[1;34m"   # blue
Y = "\033[1;33m"   # yellow
G = "\033[1;32m"   # green
RS = "\033[0m"     # reset
W = "\033[1;37m"   # white

os.system("clear")
print(" ")
print(f"{R}[{G}User Summary {R}]{RS}")
print("""
Shows extra information about IPv6 addresses, such as embedded MAC or IPv4 addresses when available.
Some IP address formats encode extra information; for example some IPv6 addresses encode an IPv4 address or MAC address
script can decode these address formats:
IPv4-compatible IPv6 addresses,
IPv4-mapped IPv6 addresses,
Teredo IPv6 addresses,
6to4 IPv6 addresses,
IPv6 addresses using an EUI-64 interface ID,
IPv4-embedded IPv6 addresses,
ISATAP Modified EUI-64 IPv6 addresses.
IPv4-translated IPv6 addresses and
See RFC 4291 for general IPv6 addressing architecture and the definitions of some terms.
""")
print(" ")
# Prompt for the target and run nmap's default-script service scan on it.
webb = input(f"{RS}[{B}ENTER TARGET {R}WEBSITE {Y}IP{RS}]{G}: {RS}")
subprocess.check_call(['nmap', '-sV', '-sC', webb])
dc022c593385d4751afcdb05a041b275d5e72149 | 2,041 | py | Python | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
] | null | null | null | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
] | null | null | null | tests/utilities/test_upgrade_checkpoint.py | cuent/pytorch-lightning | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | [
"Apache-2.0"
] | null | null | null | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities.upgrade_checkpoint import upgrade_checkpoint
| 40.82 | 110 | 0.677609 |
dc02390fc5cc8acb642fb9142268442719d14ed1 | 4,258 | py | Python | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
] | null | null | null | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
] | null | null | null | rnn/train_rnn_oneflow.py | XinYangDong/models | ea1ab12add5812c8a3e14ecfad6b39fa56a779a9 | [
"Apache-2.0"
] | null | null | null | import oneflow.experimental as flow
from oneflow.experimental import optim
import oneflow.experimental.nn as nn
from utils.dataset import *
from utils.tensor_utils import *
from models.rnn_model import RNN
import argparse
import time
import math
import numpy as np
flow.env.init()
flow.enable_eager_execution()
# refer to: https://blog.csdn.net/Nin7a/article/details/107631078
n_iters = 100000
print_every = 500
plot_every = 1000
learning_rate = (
0.005 # If you set this too high, it might explode. If too low, it might not learn
)
# decrease learning rate if loss goes to NaN, increase learnig rate if it learns too slow
if __name__ == "__main__":
args = _parse_args()
main(args)
| 30.198582 | 113 | 0.615782 |
dc0343ffb97fa10db053e01b9eed2a7adc7c042b | 4,763 | py | Python | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 2 | 2020-03-05T08:33:05.000Z | 2021-05-31T12:54:40.000Z | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 5 | 2021-12-12T21:04:10.000Z | 2022-01-22T21:05:58.000Z | metaflow/datastore/local_storage.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 2 | 2020-04-18T22:45:03.000Z | 2020-06-25T14:36:20.000Z | import json
import os
from ..metaflow_config import DATASTORE_LOCAL_DIR, DATASTORE_SYSROOT_LOCAL
from .datastore_storage import CloseAfterUse, DataStoreStorage
from .exceptions import DataException
| 34.766423 | 88 | 0.515431 |
dc03c7056424871c088a27b25411021c5ef255a8 | 669 | py | Python | src/Models/tools/quality.py | rahlk/MOOSE | e45b64cf625bb90aa8c1c24ab1c8f52ab485a316 | [
"MIT"
] | null | null | null | src/Models/tools/quality.py | rahlk/MOOSE | e45b64cf625bb90aa8c1c24ab1c8f52ab485a316 | [
"MIT"
] | 9 | 2015-09-14T21:07:06.000Z | 2015-12-08T01:38:08.000Z | src/Models/tools/quality.py | rahlk/MAPGen | 25bc1a84f07e30ab0dbb638cd2aa1ce416c510ff | [
"MIT"
] | null | null | null | from __future__ import division, print_function
from scipy.spatial.distance import euclidean
from numpy import mean
from pdb import set_trace | 30.409091 | 76 | 0.693572 |
dc0442493abb70d64838a4469e6b402804bec72d | 2,499 | py | Python | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | 1 | 2021-08-03T03:07:41.000Z | 2021-08-03T03:07:41.000Z | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | null | null | null | script/spider/www_chinapoesy_com.py | gitter-badger/poetry-1 | faf50558852d5d37d4fee68a8c5a114aba149689 | [
"MIT"
] | null | null | null |
'''
pip3 install BeautifulSoup4
pip3 install pypinyin
'''
import requests
import re
import os
import shutil
from bs4 import BeautifulSoup
from util import Profile, write_poem
def read_poem_list(page):
    '''
    Read one page of the modern-poetry listing.

    @param page:int  1-based page number of the listing to fetch
    @return (poem_list:Profile[], has_next_page:Boolean)
        ``poem_list`` holds the parsed poem profiles on this page and
        ``has_next_page`` tells whether a further listing page exists.
    '''
    page_url = 'http://www.chinapoesy.com/XianDaiList_' + str(page) + '.html'
    response = requests.get(page_url)
    # BUG FIX: the original compared with ``is not 200``.  Identity
    # comparison against an int literal is implementation-dependent
    # (CPython only interns small ints); use value equality instead.
    if response.status_code != 200:
        return ([], False)
    text = response.text
    soup = BeautifulSoup(text, features='lxml')
    # profiles
    main_table = soup.find('table', id='DDlTangPoesy')
    td_ = main_table.find_all('td')
    poet_list = []
    for td in td_:
        poem = parse_poem_profile_td(td)
        if poem is not None:
            poet_list.append(poem)
    # The "next page" arrow image is only rendered when more pages exist.
    img_neg = soup.find('img', src='/Images/Pager/nextn.gif')
    return (poet_list, img_neg is not None)
main()
| 25.5 | 77 | 0.609444 |
dc06b7c456a20378a588b26699aae0b601ae716d | 5,086 | py | Python | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
] | null | null | null | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
] | null | null | null | tests/test_events.py | hhtong/dwave-cloud-client | 45e4d1d4f187b10495e38d47478f2c8d87514434 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dwave.cloud.client import Client
from dwave.cloud.solver import Solver
from dwave.cloud.events import add_handler
| 34.835616 | 87 | 0.592411 |
dc06e2ba70d0080f14386cfea2dd13fc3ab64b71 | 12,084 | py | Python | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
] | 3 | 2020-03-06T19:15:28.000Z | 2020-03-09T10:29:38.000Z | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
] | null | null | null | ex3_nn_TF2.py | Melykuti/Ng_Machine_learning_exercises | c561190ee2705b6af9432323d7639f6655c973e5 | [
"BSD-3-Clause"
] | null | null | null | '''
Neural networks. Forward propagation in an already trained network in TensorFlow 2.0-2.1 (to use the network for classification).
TF 2.0:
Option 0 takes 0.08 sec.
Option 1 takes 0.08 sec.
Option 6 takes 0.08 sec.
Option 2 takes 4.7 sec.
Option 3 takes 1.6 sec.
Option 4 takes 5.2 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 0 takes 1.75 sec.
Option 1 takes 1.75 sec.
Option 6 takes 1.8 sec.
Option 2 takes 6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 6.3 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
TF 2.1: option==2, 3, 4, 5, 7 work; options 0, 1 and 6 fail with "AttributeError: 'RepeatedCompositeFieldContainer' object has no attribute 'append'" (But mine hasn't installed properly.)
Option 2 takes 4.5 sec.
Option 3 takes 1.5 sec.
Option 4 takes 4.4 sec.
Option 5 takes 0.08 sec.
Option 7 takes 0.06 sec.
If pred_digit = tf.map_fn(lambda x: ...) is used, then it's much slower:
Option 2 takes 5.7-6.1 sec.
Option 3 takes 3.1 sec.
Option 4 takes 5.7-6 sec.
Option 5 takes 1.8 sec.
Option 7 takes 1.8 sec.
Be careful:
According to tf.keras.layers.Dense (https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense):
output = activation(dot(input, kernel) + bias)
The kernel matrix multiplies from right! (And the inputs are seen as a row vector.) This is why I have to transpose the loaded network parameters Theta1 and Theta2.
Earlier, according to r1.15 tf.layers.dense documentation (https://www.tensorflow.org/api_docs/python/tf/layers/dense):
outputs = activation(inputs*kernel + bias)
[In version for Tensorflow 1.x, there used to be two independent choices in program flow:
Option 1 is with tf.layers.Input()
Option 2 is without tf.layers.Input()
Option a processes single inputs (single images), takes 1.5 sec
Option b does batch processing of all images at once, takes 0.3 sec
]
Bence Mélykúti
09-19/03/2018, 27/01-07/02, 28/02/2020
'''
import numpy as np
import scipy.io # to open Matlab's .mat files
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import time
### User input ###
option = 7 # {0, 1, ..., 7}
### End of input ###
# The network parameters are here for info, they are not actually used.
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
# =========== Part 1: Loading [and Visualizing] Data =============
data = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3data1.mat')
X = data['X']
y = data['y']
y = y % 10 # Transforming 10 to 0, which is its original meaning.
# ================ Part 2: Loading Pameters ================
# In this part of the exercise, we load the pre-initialized
# neural network parameters.
params = scipy.io.loadmat('../machine-learning-ex3/ex3/ex3weights.mat')
Theta1 = params['Theta1'] # Theta1 has size 25 x 401
Theta2 = params['Theta2'] # Theta2 has size 10 x 26
tf.keras.backend.clear_session()
start_time = time.time()
# ================= Part 3: Implement Predict =================
# After training a neural network, we would like to use it to predict
# the labels. You will now implement the "predict" function to use the
# neural network to predict the labels of the training set. This lets
# you compute the training set accuracy.
# Difference between tf.data.Dataset.from_tensors and tf.data.Dataset.from_tensor_slices: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices
# from_tensors reads all data at once; from_tensor_slices reads line by line, which is preferable for huge datasets
# With from_tensors, you'd also need to pull out each row from the tensor somehow.
# https://towardsdatascience.com/how-to-use-dataset-in-tensorflow-c758ef9e4428
# https://www.tensorflow.org/programmers_guide/datasets#consuming_numpy_arrays
# To narrow computation to a subset of data for quick testing:
#X, y = X[1990:2010,:], y[1990:2010,:]
if option==2 or option==3:
dataset = tf.data.Dataset.from_tensor_slices(X)
else:
dataset = tf.data.Dataset.from_tensor_slices(X).batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(64) # this is about the same speed as .batch(X.shape[0])
#dataset = tf.data.Dataset.from_tensor_slices(X).batch(1) # this also works but it is 1.5x-4x slower
# It also works with tf.keras.initializers.Constant() in place of tf.constant_initializer because these are only aliases: https://www.tensorflow.org/api_docs/python/tf/constant_initializer .
if option==0:
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(Theta1.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), input_shape=[X.shape[1]]))
model.add(tf.keras.layers.Dense(Theta2.shape[0], activation='sigmoid', use_bias=True, kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]))) # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
pred = model.predict(dataset)
elif option==1:
# input_shape=[X.shape[1]] could be left out below
layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid', input_shape=[X.shape[1]]),
tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.constant_initializer(Theta2[:,1:].T), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')] # One doesn't even need the second sigmoid activation function because it is monotone increasing and doesn't change the ordering for argmax.
# This doesn't work as tf.constant_initializer() doesn't take Tensors as input.
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta1[:,1:])), bias_initializer=tf.constant_initializer(Theta1[:,0]), activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer= tf.constant_initializer(tf.transpose(Theta2[:,1:])), bias_initializer=tf.constant_initializer(Theta2[:,0]), activation='sigmoid')]
# This doesn't work: ValueError: Could not interpret initializer identifier: tf.Tensor(...)
#layers = [tf.keras.layers.Dense(Theta1.shape[0], kernel_initializer=tf.transpose(Theta1[:,1:]), bias_initializer=Theta1[:,0], activation='sigmoid'),
# tf.keras.layers.Dense(Theta2.shape[0], kernel_initializer=tf.transpose(Theta2[:,1:]), bias_initializer=Theta2[:,0], activation='sigmoid')]
model = tf.keras.Sequential(layers)
#model = tf.keras.models.Sequential(layers) # This is just an alias of previous.
#model.build() # not necessary
pred = model.predict(dataset)
elif option==6:
model = NNModel(Theta1, Theta2)
pred = model.predict(dataset)
elif option in [2, 3, 4, 5]:
if option==2:
pred = []
for entry in dataset:
#pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), entry.numpy().reshape((1,-1)))) # numpy reshape might be faster than tf.reshape
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1)))) # doing it in TF
#pred = np.concatenate(pred, axis=0) # this also works
pred = tf.concat(pred, axis=0)
elif option==3:
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [1,-1])))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x)) # This doesn't work.
pred = tf.concat([entry for entry in pred], axis=0)
elif option==4:
pred = []
for batch in dataset:
for entry in batch:
pred.append(evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(entry, (1,-1))))
pred = tf.concat(pred, axis=0)
else: # option==5
pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), x))
#pred = dataset.map(lambda x: evaluation(tf.constant(Theta1.T), tf.constant(Theta2.T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
else: # option==7
pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), x))
#pred = dataset.map(lambda x: evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), tf.reshape(x, [-1,400]))) # This works, in same time.
pred = tf.concat([entry for entry in pred], axis=0)
# It does not work in this simplest form:
#pred = evaluation2(tf.constant(Theta1[:,1:].T), tf.constant(Theta1[:,0]), tf.constant(Theta2[:,1:].T), tf.constant(Theta2[:,0].T), dataset)
#tf.print(pred)
# The output layer (pred) has 10 units, for digits 1,2,...,9,0. After taking argmax, you have to map the result of argmax, 0,1,2,...,9 to the required 1,2,...,9,0.
pred_digit = (tf.argmax(pred, axis=1) + 1) % 10
#pred_digit = tf.map_fn(lambda x: (tf.argmax(x, axis=0, output_type=tf.int32)+1) % 10, pred, dtype=tf.int32) # This is rather slow!
pred_np = pred_digit.numpy().reshape(-1,1)
print('\nTraining Set Accuracy: {0:.2f}%.'.format(np.mean(pred_np == y) * 100))
print('Expected training error value on complete Training Set (approx.): 97.5%.')
print('\nTime elapsed: {:.2f} sec'.format(time.time() - start_time))
print()
if option in [0, 1, 6]:
tf.print(model.summary()) # This provides interesting output.
plt.scatter(np.arange(len(y)), y, label='Ground truth')
plt.scatter(np.arange(len(y)), pred_np, marker=".", c='r', label='Prediction')
plt.xlabel('Sample ID')
plt.ylabel('Digit')
plt.legend()
plt.show()
| 48.923077 | 347 | 0.70043 |
dc077fe63cc4f8d54762c53d45a473600de38902 | 3,843 | py | Python | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
] | null | null | null | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
] | null | null | null | instagram/models.py | kilonzijnr/instagram-clone | 1fa662248d70a64356ef3d48d52c7e38dea95aff | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
| 30.991935 | 83 | 0.650273 |
dc0981553f7be2b377b0b4a03e7bcb8ef94d1db4 | 846 | py | Python | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
] | null | null | null | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
] | null | null | null | addons/purchase_request/migrations/13.0.4.0.0/post-migration.py | jerryxu4j/odoo-docker-build | 339a3229192582c289c19e276347af1326ce683f | [
"CC-BY-3.0"
] | null | null | null | from odoo import SUPERUSER_ID, api
from odoo.tools.sql import column_exists
def _migrate_purchase_request_to_property(env):
"""Create properties for all products with the flag set on all companies"""
env.cr.execute("select id, coalesce(purchase_request, False) from product_template")
values = dict(env.cr.fetchall())
for company in env["res.company"].with_context(active_test=False).search([]):
env["ir.property"].with_context(force_company=company.id).set_multi(
"purchase_request", "product.template", values, False,
)
env.cr.execute("alter table product_template drop column purchase_request")
| 42.3 | 88 | 0.734043 |
dc0a134e4c11e64835152cefa26ff2db3778cd60 | 13,678 | py | Python | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
] | null | null | null | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
] | null | null | null | cfy/server.py | buhanec/cloudify-flexiant-plugin | da0c42a4330c9e5ffd55d9f5024a9a36f052af16 | [
"Apache-2.0"
] | null | null | null | # coding=UTF-8
"""Server stuff."""
from __future__ import print_function
from cfy import (create_server,
create_ssh_key,
attach_ssh_key,
wait_for_state,
wait_for_cond,
create_nic,
attach_nic,
get_resource,
get_server_status,
start_server,
stop_server,
delete_resource)
import socket
import errno
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cfy.helpers import (with_fco_api, with_exceptions_handled)
from resttypes import enums, cobjects
from paramiko import SSHClient, AutoAddPolicy
import spur
import spur.ssh
from time import sleep
from subprocess import call
from fabric.api import settings, run
import os
RT = enums.ResourceType
PROP_RESOURCE_ID = 'resource_id'
PROP_USE_EXISTING = 'use_existing'
PROP_IMAGE = 'image'
PROP_VDC = 'vdc'
PROP_NET = 'network'
PROP_SERVER_PO = 'server_type'
PROP_CPU_COUNT = 'cpu_count'
PROP_RAM_AMOUNT = 'ram_amount'
PROP_MANAGER_KEY = 'manager_key'
PROP_PRIVATE_KEYS = 'private_keys'
PROP_PUBLIC_KEYS = 'public_keys'
RPROP_UUID = 'uuid'
RPROP_DISKS = 'disks'
RPROP_NIC = 'nic'
RPROP_NICS = 'nics'
RPROP_IP = 'ip'
RPROP_USER = 'username'
RPROP_PASS = 'password'
| 36.281167 | 79 | 0.615222 |
dc0ae53c3bb6f54a76cfb756f32ba1e86d22317c | 7,317 | py | Python | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
] | 6 | 2019-06-28T12:47:01.000Z | 2022-02-14T18:18:53.000Z | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
] | null | null | null | markdown2dita.py | mattcarabine/markdown2dita | f4a02c3e9514d33eb3cea9c9b5d3c44817afad97 | [
"BSD-3-Clause"
] | 2 | 2018-02-09T22:17:48.000Z | 2020-02-20T13:59:30.000Z | # coding: utf-8
"""
markdown2dita
~~~~~~~~~~~~~
A markdown to dita-ot conversion tool written in pure python.
Uses mistune to parse the markdown.
"""
from __future__ import print_function
import argparse
import sys
import mistune
__version__ = '0.3'
__author__ = 'Matt Carabine <matt.carabine@gmail.com>'
__all__ = ['Renderer', 'Markdown', 'markdown', 'escape']
def escape(text, quote=False, smart_amp=True):
return mistune.escape(text, quote=quote, smart_amp=smart_amp)
def _parse_args(args):
parser = argparse.ArgumentParser(description='markdown2dita - a markdown '
'to dita-ot CLI conversion tool.')
parser.add_argument('-i', '--input-file',
help='input markdown file to be converted.'
'If omitted, input is taken from stdin.')
parser.add_argument('-o', '--output-file',
help='output file for the converted dita content.'
'If omitted, output is sent to stdout.')
return parser.parse_args(args)
def markdown(text, escape=True, **kwargs):
    """One-shot conversion: build a Markdown converter and run it on *text*."""
    converter = Markdown(escape=escape, **kwargs)
    return converter(text)
def main():
    """
    Command line entry point.

    Reads markdown from the input file (or stdin when no file is given and
    stdin is piped), converts it to DITA and writes the result to the output
    file (or stdout).
    """
    parsed_args = _parse_args(sys.argv[1:])
    if parsed_args.input_file:
        # Fix: close the input file deterministically instead of relying on
        # garbage collection of the anonymous open() file object.
        with open(parsed_args.input_file, 'r') as input_file:
            input_str = input_file.read()
    elif not sys.stdin.isatty():
        input_str = ''.join(line for line in sys.stdin)
    else:
        print('No input file specified and unable to read input on stdin.\n'
              "Use the '-h' or '--help' flag to see usage information",
              file=sys.stderr)
        exit(1)
    markdown = Markdown()
    dita_output = markdown(input_str)
    if parsed_args.output_file:
        with open(parsed_args.output_file, 'w') as output_file:
            output_file.write(dita_output)
    else:
        print(dita_output)
if __name__ == '__main__':
main()
| 31.403433 | 83 | 0.577012 |
dc0c391d6f0cc20589629aa4ecb77f77c49b34a1 | 2,957 | py | Python | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | 1 | 2022-02-08T03:09:51.000Z | 2022-02-08T03:09:51.000Z | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | 1 | 2022-03-21T07:27:34.000Z | 2022-03-21T07:27:34.000Z | tests/integration/test_reload_certificate/test.py | roanhe-ts/ClickHouse | 22de534fdcd3f05e27423d13f5875f97c3ba5f10 | [
"Apache-2.0"
] | null | null | null | import pytest
import os
from helpers.cluster import ClickHouseCluster
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/first.crt", "configs/first.key",
"configs/second.crt", "configs/second.key",
"configs/cert.xml"])
def change_config_to_key(name):
    '''
    * Generate config with certificate/key name from args.
    * Reload config.
    '''
    # Overwrite the server's TLS config in place inside the container: the
    # heredoc below is written verbatim to cert.xml, and {cur_name} selects
    # which certificate/key pair the server should serve on port 8443.
    node.exec_in_container(["bash", "-c" , """cat > /etc/clickhouse-server/config.d/cert.xml << EOF
<?xml version="1.0"?>
<clickhouse>
    <https_port>8443</https_port>
    <openSSL>
        <server>
            <certificateFile>/etc/clickhouse-server/config.d/{cur_name}.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/config.d/{cur_name}.key</privateKeyFile>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>
    </openSSL>
</clickhouse>
EOF""".format(cur_name=name)])
    # Ask the server to pick up the new certificate without a restart.
    node.query("SYSTEM RELOAD CONFIG")
def test_first_than_second_cert():
    ''' Consistently set first key and check that only it will be accepted, then repeat same for second key. '''

    def curl_with_cert(cert_name):
        # Run curl against the HTTPS port validating against the given CA cert
        # (no '-k', so TLS validation failures make exec_in_container raise).
        return node.exec_in_container(
            ['curl', '--silent', '--cacert',
             '/etc/clickhouse-server/config.d/{cur_name}.crt'.format(cur_name=cert_name),
             'https://localhost:8443/'])

    def assert_cert_rejected(cert_name):
        # Fix: the original wrapped ``assert False`` in a bare ``except:``,
        # which swallowed the AssertionError itself, so the negative check
        # could never fail.  Track the outcome with a flag instead and only
        # assert outside the try block.
        rejected = True
        try:
            curl_with_cert(cert_name)
            rejected = False
        except Exception:
            pass
        assert rejected, \
            "request validated with cert '%s' unexpectedly succeeded" % cert_name

    # Set first key
    change_config_to_key('first')
    # Command with correct certificate succeeds and echoes Ok.
    assert curl_with_cert('first') == 'Ok.\n'
    # Command with wrong certificate must be rejected by TLS validation
    assert_cert_rejected('second')

    # Change to other key
    change_config_to_key('second')
    assert curl_with_cert('second') == 'Ok.\n'
    assert_cert_rejected('first')
| 38.907895 | 142 | 0.622929 |
dc0d2dd1628c5437389a9030a61c8c8847b09265 | 1,331 | py | Python | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | examples/python/fling.py | arminfriedl/fling | 909606a9960fede8951436748c20a9600819b93a | [
"MIT"
] | null | null | null | import flingclient as fc
from flingclient.rest import ApiException
from datetime import datetime
# Per default the dockerized fling service runs on localhost:3000 In case you
# run your own instance, change the base url
configuration = fc.Configuration(host="http://localhost:3000")
# Every call, with the exception of `/api/auth`, is has to be authorized by a
# bearer token. Get a token by authenticating as admin and set it into the
# configuration. All subsequent calls will send this token in the header as
# `Authorization: Bearer <token> header`
admin_user = input("Username: ")
admin_password = input("Password: ")
authenticate(admin_user, admin_password)
with fc.ApiClient(configuration) as api_client:
# Create a new fling
fling_client = fc.FlingApi(api_client)
fling = fc.Fling(name="A Fling from Python", auth_code="secret",
direct_download=False, allow_upload=True,
expiration_time=datetime(2099, 12, 12))
fling = fling_client.post_fling()
print(f"Created a new fling: {fling}")
#
| 40.333333 | 86 | 0.75432 |
dc0d3f00ae59f64419ff5f7a5aba262466241f01 | 1,811 | py | Python | pretraining/python/download_tensorboard_logs.py | dl4nlp-rg/PTT5 | cee2d996ba7eac80d7764072eef01a7f9c38836c | [
"MIT"
] | 51 | 2020-08-11T13:34:07.000Z | 2022-01-20T23:09:32.000Z | pretraining/python/download_tensorboard_logs.py | dl4nlp-rg/PTT5 | cee2d996ba7eac80d7764072eef01a7f9c38836c | [
"MIT"
] | 4 | 2020-09-28T20:33:31.000Z | 2022-03-12T00:46:13.000Z | pretraining/python/download_tensorboard_logs.py | unicamp-dl/PTT5 | aee3e0d0b6ad1bb6f8c2d9afd1d2e89679301f6f | [
"MIT"
] | 6 | 2021-01-25T07:47:40.000Z | 2022-02-23T20:06:03.000Z | import tensorflow.compat.v1 as tf
import os
import tqdm
# Source bucket holding the pretraining runs and local destination directory.
GCS_BUCKET = 'gs://ptt5-1'
TENSORBOARD_LOGS_LOCAL = '../logs_tensorboard'
os.makedirs(TENSORBOARD_LOGS_LOCAL, exist_ok=True)
# where to look for events files - experiment names
base_paths = [
    # Main initial experiments - all weights are updated
    'small_standard_vocab',
    'base_standard_vocab',
    'large_standard_vocab',
    'small_custom_sentencepiece_vocab',
    'base_custom_sentencepiece_vocab',
    'large_custom_sentencepiece_vocab',
    # Only embeddings are updated
    'small_embeddings_only_standard_vocab',
    'base_embeddings_only_standard_vocab',
    'large_embeddings_only_standard_vocab',
    'small_embeddings_only_custom_sentencepiece_vocab',
    'base_embeddings_only_custom_sentencepiece_vocab',
    'large_embeddings_only_custom_sentencepiece_vocab',
    # Double batch size for large (128 = 64 * 2)
    'large_batchsize_128_custom_sentencepiece_vocab',
    'large_batchsize_128_standard_vocab',
]
# All paths share the structure <bucket>/<experiment>/models/<model size>,
# where the model size is the first token of the experiment name.
for base_path in base_paths:
    size = base_path.split('_')[0]
    full_path = os.path.join(GCS_BUCKET, base_path, 'models', size)
    download_dir = os.path.join(TENSORBOARD_LOGS_LOCAL, base_path)
    # Only download experiments not fetched before; an existing local
    # directory is treated as "already downloaded".
    if not os.path.exists(download_dir):
        os.makedirs(download_dir, exist_ok=True)
        print(f'Downloading files from {full_path} to {download_dir}')
        # Copy every TensorBoard event file for this experiment from GCS.
        for file in tqdm.tqdm(tf.gfile.Glob(os.path.join(full_path,
                                                         "events.*"))):
            tf.gfile.Copy(file,
                          os.path.join(download_dir, os.path.basename(file)),
                          overwrite=False)
    else:
        print(f'{base_path} logs already download. Delete folder'
              f'{download_dir} and run script to download again')
| 38.531915 | 77 | 0.699613 |
dc0e5e9f0de144528e9e2fd2507b7d3b024c5594 | 1,408 | py | Python | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 83 | 2019-08-20T09:34:27.000Z | 2022-03-24T13:42:36.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 15 | 2019-08-20T06:34:16.000Z | 2020-05-17T21:22:52.000Z | tests/TestPythonLibDir/RemotePkcs1Signer/__init__.py | q351941406/isign-1 | c24ce94fa88f15ebc6cc2dbda6852c6d17094fc6 | [
"Apache-2.0"
] | 6 | 2020-02-09T09:35:17.000Z | 2022-03-19T18:43:17.000Z | import base64
import requests
| 32.744186 | 106 | 0.599432 |
dc0f94e928edc42769b1d0d49b60f125df3ce1e6 | 4,497 | py | Python | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | 1 | 2021-08-13T01:37:29.000Z | 2021-08-13T01:37:29.000Z | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | null | null | null | architecture_tool_django/nodes/tasks.py | goldginkgo/architecture_tool_django | e4229c5938a4dd01d0877afa7b93daf68e09283b | [
"MIT"
] | 1 | 2021-07-19T07:57:54.000Z | 2021-07-19T07:57:54.000Z | import logging
import re
from celery import shared_task
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from django.urls import reverse
from django.utils import timezone
from architecture_tool_django.utils.confluence_wrapper import (
MyConfluence,
tiny_to_page_id,
)
from .models import Node
logger = logging.getLogger(__name__)
| 32.824818 | 99 | 0.683344 |
dc107c520e6be07939c0ec67b42b5fccd394dfb1 | 3,195 | py | Python | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
] | 4 | 2019-04-08T23:24:30.000Z | 2021-12-22T16:42:12.000Z | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
] | 12 | 2017-12-18T04:27:14.000Z | 2021-06-10T18:05:46.000Z | crosswalk/views/alias_or_create.py | cofin/django-crosswalk | 349ebbd5676d3ef3ccf889ec3849b2f1cff4be32 | [
"MIT"
] | 3 | 2019-08-12T14:36:04.000Z | 2020-10-17T20:54:09.000Z | from crosswalk.authentication import AuthenticatedView
from crosswalk.models import Domain, Entity
from crosswalk.serializers import EntitySerializer
from crosswalk.utils import import_class
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
| 32.272727 | 78 | 0.571831 |
dc10e734b445882a7de1ca38ba65c2b849b9fe68 | 3,629 | py | Python | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
] | null | null | null | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
] | null | null | null | hoist/fastapi_wrapper.py | ZeroIntensity/Hoist | 08388af0328f225fc3066cf09b8043c30cb900e3 | [
"MIT"
] | 2 | 2021-07-26T17:10:19.000Z | 2021-09-02T00:13:17.000Z | from fastapi import FastAPI, Response, WebSocket, WebSocketDisconnect
from threading import Thread
from .server import Server
from .errors import HoistExistsError
from .error import Error
from .version import __version__
from .flask_wrapper import HTML
import uvicorn
from typing import List, Callable
from fastapi.responses import HTMLResponse, JSONResponse
| 34.894231 | 124 | 0.58005 |
dc110c5732b9e3f42c8a0c8715b260a938e9705c | 4,874 | py | Python | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 3 | 2017-09-03T17:17:44.000Z | 2017-12-10T12:26:46.000Z | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | null | null | null | network/mqtt_client/main_mqtt_publisher.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 2 | 2017-10-01T01:10:55.000Z | 2018-07-15T19:49:29.000Z | # This file is executed on every boot (including wake-boot from deepsleep)
# 2017-1210 PePo send timestamp and temperature (Celsius) to MQTT-server on BBB
# 2017-1105 PePo add _isLocal: sensor data to serial port (False) of stored in file (True)
# 2017-0819 PePo add sensor, led and print to serial port
# 2017-0811 PePo updated: no debug, disable webrepl,
# source: https://youtu.be/yGKZOwzGePY - Tony D! MP ESP8266 HTTP examples
print('main.py executing...')
# connect to a personal Wifi network ---------
import wifinetwork as wifi
# TODO: JSON config-file with ssid:ww entry/entries
#wifi.connectTo("PePoDevNet", wifi.readPasswordFrom('pepodevnet.txt'))
print('Wifi: connect to PePoDevNet...')
wifi.connectTo("PePoDevNet")
# set the time from nptime ---------
#print('TODO: get current time from the web...')
print('getting time from the web...')
import nptime
print('... UTC time:', nptime.settime())
#print('\tTODO -local time')
# --- SUMMERTIME or not (=WINTERTIME) ---------------
_isSummerTime = False
print('... Summertime:', _isSummerTime)
# temperature ---------
import class_ds18b20
#get sensor at GPIO14
ds = class_ds18b20.DS18B20(14)
# --- location ---------------
_LOCATION = 'studyroom'
#7-segment display
import tm1637
from machine import Pin
import math
# create tm
tm = tm1637.TM1637(clk=Pin(5), dio=Pin(4))
#print('tm: ', tm)
# helper function: returns temperature-record as string
#''' store data in file temperature.txt
# default: 1 measuremtn per 30 seconds
# send data to MQTT-server
#main run() - by-default 1 measurement per 30 seconds
# go ahead and start getting, sending/storing the sensor data
if __name__ == "__main__":
run(60.0) # 1 measurement per minute
| 33.383562 | 164 | 0.622897 |
dc11cc17aee754089dc4fb18a3e6534b5f45cf92 | 1,724 | py | Python | 2015/07.py | Valokoodari/advent-of-code | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-27T18:59:11.000Z | 2022-01-10T02:31:36.000Z | 2015/07.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | null | null | null | 2015/07.py | Valokoodari/advent-of-code-2019 | c664987f739e0b07ddad34bad87d56768556a5a5 | [
"MIT"
] | 2 | 2021-12-23T17:29:10.000Z | 2021-12-24T03:21:49.000Z | #!/usr/bin/python3
lines = open("inputs/07.in", "r").readlines()
for i,line in enumerate(lines):
lines[i] = line.split("\n")[0]
l = lines.copy();
wires = {}
run()
print("Part 1: " + str(wires["a"]))
lines = l
wires = {"b": wires["a"]}
run()
print("Part 2: " + str(wires["a"]))
| 23.297297 | 53 | 0.487239 |
dc1360cdb290733689a5e8387a3d39ce467c6a9c | 1,659 | py | Python | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
] | 56 | 2016-12-25T22:29:00.000Z | 2022-01-06T04:42:00.000Z | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
] | 244 | 2021-04-05T03:22:25.000Z | 2022-03-31T16:47:36.000Z | soccer_embedded/Development/Ethernet/lwip-rtos-config/test_udp_echo.py | ghsecuritylab/soccer_ws | 60600fb826c06362182ebff00f3031e87ac45f7c | [
"BSD-3-Clause"
] | 7 | 2017-01-24T23:38:07.000Z | 2022-01-19T16:58:08.000Z | import socket
import time
import numpy
# This script sends a message to the board, at IP address and port given by
# server_address, using User Datagram Protocol (UDP). The board should be
# programmed to echo back UDP packets sent to it. The time taken for num_samples
# echoes is measured.
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = ('192.168.0.59', 7)
sock.bind(('', 7))
message = 'this is a message of length 80 chars. asdfghjklasdfghjklasdfghjklasdfghjkl ++++'.encode()
num_samples = 500
times = []
try:
# Send data
print('Sending "{}"'.format(message))
print('Measuring time taken for {} echoes'.format(num_samples))
total_time = 0
for i in range(num_samples):
t0 = time.perf_counter()
sent = sock.sendto(message, server_address)
# Receive response
data, server = sock.recvfrom(4096)
t1 = time.perf_counter()
dt = t1 - t0
total_time += dt
#print('received "{}"'.format(data))
times.append(dt)
f = open('times', 'a')
try:
f.write('\n')
for i in range(num_samples):
f.write('{},'.format(times[i]))
finally:
f.close()
times_array = numpy.array(times)
print('Took {} seconds for {} samples'.format(total_time, num_samples))
print('Average echo time: {} seconds'.format(numpy.average(times_array)))
print('Standard deviation: {} seconds'.format(numpy.std(times_array)))
print('Maximum: {} seconds, Minimum: {} seconds'.format(numpy.amax(times_array), numpy.amin(times_array)))
finally:
print('Closing socket')
sock.close()
| 27.65 | 110 | 0.650995 |
dc140fb927ee173544f8803200f7806b0546c054 | 16,058 | py | Python | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
] | 58 | 2017-04-04T18:59:36.000Z | 2022-02-16T14:54:09.000Z | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
] | 4 | 2017-06-28T13:56:04.000Z | 2021-07-02T03:42:21.000Z | test.py | keke185321/emotions | f7cef86c20880b99469c9a35b071d6062e56ac40 | [
"MIT"
] | 26 | 2017-08-22T14:41:28.000Z | 2022-03-08T05:41:03.000Z | #!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import argparse
import cv2
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
from faces import FaceDetector
from data import FaceData
from gabor import GaborBank
from emotions import EmotionsDetector
#---------------------------------------------
#---------------------------------------------
def main(argv):
    """
    Main entry of this script.
    Parameters
    ------
    argv: list of str
        Arguments received from the command line.
    """
    # Parse the command line
    args = parseCommandLine(argv)
    # Loads the video or starts the webcam
    if args.source == 'cam':
        video = cv2.VideoCapture(args.id)
        if not video.isOpened():
            print('Error opening webcam of id {}'.format(args.id))
            sys.exit(-1)
        # fps == 0 marks "live source": the display loop uses minimal delay.
        fps = 0
        frameCount = 0
        sourceName = 'Webcam #{}'.format(args.id)
    else:
        video = cv2.VideoCapture(args.file)
        if not video.isOpened():
            print('Error opening video file {}'.format(args.file))
            sys.exit(-1)
        fps = int(video.get(cv2.CAP_PROP_FPS))
        frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        sourceName = args.file
    # Force HD resolution (if the video was not recorded in this resolution or
    # if the camera does not support it, the frames will be stretched to fit it)
    # The intention is just to standardize the input (and make the help window
    # work as intended)
    video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280);
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720);
    # Create the helper class
    data = VideoData()
    # Text settings
    # NOTE(review): font/scale/thick/glow/color are set here but not used in
    # this function (drawInfo has its own settings) — confirm before removing.
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 1
    thick = 1
    glow = 3 * thick
    # Color settings
    color = (255, 255, 255)
    paused = False
    frameNum = 0
    # Process the video input
    while True:
        if not paused:
            start = datetime.now()
            ret, img = video.read()
            if ret:
                frame = img.copy()
            else:
                # End of stream: keep showing the last frame, paused.
                paused = True
        drawInfo(frame, frameNum, frameCount, paused, fps, args.source)
        data.detect(frame)
        data.draw(frame)
        cv2.imshow(sourceName, frame)
        if paused:
            # Block until a key is pressed while paused.
            key = cv2.waitKey(0)
        else:
            # Wait just long enough to keep the playback at the source fps,
            # accounting for the time spent processing this frame.
            end = datetime.now()
            delta = (end - start)
            if fps != 0:
                delay = int(max(1, ((1 / fps) - delta.total_seconds()) * 1000))
            else:
                delay = 1
            key = cv2.waitKey(delay)
        # Keyboard handling (numeric codes below are OpenCV key codes).
        if key == ord('q') or key == ord('Q') or key == 27:
            break
        elif key == ord('p') or key == ord('P'):
            paused = not paused
        elif args.source == 'video' and (key == ord('r') or key == ord('R')):
            frameNum = 0
            video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        elif args.source == 'video' and paused and key == 2424832: # Left key
            frameNum -= 1
            if frameNum < 0:
                frameNum = 0
            video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        elif args.source == 'video' and paused and key == 2555904: # Right key
            frameNum += 1
            if frameNum >= frameCount:
                frameNum = frameCount - 1
            # NOTE(review): unlike the Left-key branch, this branch does not
            # call video.set(cv2.CAP_PROP_POS_FRAMES, frameNum) — looks like
            # an omission; confirm against the intended stepping behavior.
        elif args.source == 'video' and key == 2162688: # Pageup key
            frameNum -= (fps * 10)
            if frameNum < 0:
                frameNum = 0
            video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        elif args.source == 'video' and key == 2228224: # Pagedown key
            frameNum += (fps * 10)
            if frameNum >= frameCount:
                frameNum = frameCount - 1
            video.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        elif key == 7340032: # F1
            showHelp(sourceName, frame.shape)
        if not paused:
            frameNum += 1
    video.release()
    cv2.destroyAllWindows()
#---------------------------------------------
def drawInfo(frame, frameNum, frameCount, paused, fps, source):
    """
    Draws the frame number, elapsed/total time and help hint onto *frame*.

    Parameters
    ----------
    frame: numpy.ndarray
        Image data where to draw the text info (modified in place).
    frameNum: int
        Number of the current frame.
    frameCount: int
        Total number of frames in the video (0 for webcam input).
    paused: bool
        Indication if the playback is paused.
    fps: int
        Frame rate of the video, used for the time calculation.
    source: str
        Source of the input images (either "video" or "cam").
    """
    # Font settings
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 0.5
    thick = 1
    glow = 3 * thick

    # Color settings
    black = (0, 0, 0)
    yellow = (0, 255, 255)

    def put_outlined(text, x, y):
        # Thick black pass first ("glow" outline), then the yellow foreground.
        cv2.putText(frame, text, (x, y), font, scale, black, glow)
        cv2.putText(frame, text, (x, y), font, scale, yellow, thick)

    # Current frame number (with total and pause marker for video input)
    if source == 'video':
        text = 'Frame: {:d}/{:d} {}'.format(frameNum, frameCount - 1,
                                            '(paused)' if paused else '')
    else:
        text = 'Frame: {:d} {}'.format(frameNum, '(paused)' if paused else '')
    size, _ = cv2.getTextSize(text, font, scale, thick)
    put_outlined(text, 5, frame.shape[0] - 2 * size[1])

    # Elapsed/total time, only meaningful for video input
    if source == 'video':
        timestamp = datetime.min + timedelta(seconds=(frameNum / fps))
        elapsedTime = datetime.strftime(timestamp, '%H:%M:%S')
        timestamp = datetime.min + timedelta(seconds=(frameCount / fps))
        totalTime = datetime.strftime(timestamp, '%H:%M:%S')
        text = 'Time: {}/{}'.format(elapsedTime, totalTime)
        put_outlined(text, 5, frame.shape[0] - 5)

    # Help hint in the bottom-right corner
    text = 'Press F1 for help'
    size, _ = cv2.getTextSize(text, font, scale, thick)
    put_outlined(text, frame.shape[1] - size[0] - 5, frame.shape[0] - size[1] + 5)
#---------------------------------------------
def showHelp(windowTitle, shape):
    """
    Render a white image listing the keyboard controls and wait for a key.

    Parameters
    ----------
    windowTitle: str
        Title of the window where to display the help.
    shape: tuple
        Height and width used to size the help image.
    """
    # Font settings
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = 1.0
    thick = 1

    # Color settings
    black = (0, 0, 0)
    red = (0, 0, 255)

    # White background image matching the video frame size
    image = np.ones((shape[0], shape[1], 3)) * 255

    # The help text is printed in one line per item in this list
    helpText = [
        'Controls:',
        '-----------------------------------------------',
        '[q] or [ESC]: quits from the application.',
        '[p]: toggles paused/playing the video/webcam input.',
        '[r]: restarts the video playback (video input only).',
        '[left/right arrow]: displays the previous/next frame (video input only).',
        '[page-up/down]: rewinds/fast forwards by 10 seconds (video input only).',
        ' ',
        ' ',
        'Press any key to close this window...'
    ]

    margin = 20  # between-lines margin in pixels

    # Measure every line once; the block is sized by the widest/tallest line.
    sizes = [cv2.getTextSize(line, font, scale, thick)[0] for line in helpText]
    textWidth = max(width for width, _ in sizes)
    lineHeight = max(height for _, height in sizes)
    textHeight = margin * (len(helpText) - 1) + sum(height for _, height in sizes)

    # Center the whole text block, then print line by line.
    x = image.shape[1] // 2 - textWidth // 2
    y = image.shape[0] // 2 - textHeight // 2
    for line in helpText:
        # Black outline pass first, then the red foreground pass.
        cv2.putText(image, line, (x, y), font, scale, black, thick * 3)
        cv2.putText(image, line, (x, y), font, scale, red, thick)
        y += margin + lineHeight

    # Show the image and wait for a key press
    cv2.imshow(windowTitle, image)
    cv2.waitKey(0)
#---------------------------------------------
def parseCommandLine(argv):
    """
    Parse the command line of this utility application.
    This function uses the argparse package to handle the command line
    arguments. In case of command line errors, the application will be
    automatically terminated.
    Parameters
    ------
    argv: list of str
        Arguments received from the command line.
    Returns
    ------
    object
        Object with the parsed arguments as attributes (refer to the
        documentation of the argparse package for details)
    """
    parser = argparse.ArgumentParser(description='Tests the face and emotion '
                                                 'detector on a video file input.')
    # nargs='?' together with default makes the positional optional; the
    # previous const='Yes' was dead code (const is only honoured for optional
    # arguments with nargs='?'), so it has been removed.
    parser.add_argument('source', nargs='?',
                        choices=['video', 'cam'], default='cam',
                        help='Indicate the source of the input images for '
                             'the detectors: "video" for a video file or '
                             '"cam" for a webcam. The default is "cam".')
    parser.add_argument('-f', '--file', metavar='<name>',
                        help='Name of the video file to use, if the source is '
                             '"video". The supported formats depend on the codecs '
                             'installed in the operating system.')
    parser.add_argument('-i', '--id', metavar='<number>', default=0, type=int,
                        help='Numerical id of the webcam to use, if the source '
                             'is "cam". The default is 0.')
    # Fix: the original called parser.parse_args() with no arguments, silently
    # ignoring the argv parameter and always reading sys.argv instead.
    args = parser.parse_args(argv)
    # -f is mandatory when reading from a video file
    if args.source == 'video' and args.file is None:
        parser.error('-f is required when source is "video"')
    return args
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:]) | 31.924453 | 80 | 0.5482 |
dc1410a8579c40952f7be96924032fe936ce5616 | 56 | py | Python | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
] | 7 | 2021-02-23T12:08:01.000Z | 2022-03-12T01:52:35.000Z | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
] | 1 | 2022-03-11T21:53:18.000Z | 2022-03-11T21:53:18.000Z | konform/cmd.py | openanalytics/konform | 8691575ec94e753987bf4748ac279b1510b6e04a | [
"Apache-2.0"
] | 1 | 2021-05-07T20:13:30.000Z | 2021-05-07T20:13:30.000Z | from . import Konform
| 9.333333 | 21 | 0.607143 |
dc1615d2555d04af3309f9652b1529186785aefa | 1,711 | py | Python | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 348 | 2015-01-13T11:48:07.000Z | 2022-03-31T08:33:07.000Z | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 1,274 | 2015-01-02T18:15:56.000Z | 2022-03-23T15:29:08.000Z | ichnaea/taskapp/app.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 149 | 2015-01-04T21:15:07.000Z | 2021-12-10T06:05:09.000Z | """
Holds global celery application state and startup / shutdown handlers.
"""
from celery import Celery
from celery.app import app_or_default
from celery.signals import (
beat_init,
worker_process_init,
worker_process_shutdown,
setup_logging,
)
from ichnaea.log import configure_logging
from ichnaea.taskapp.config import (
configure_celery,
init_beat,
init_worker,
shutdown_worker,
)
celery_app = Celery("ichnaea.taskapp.app")
configure_celery(celery_app)
| 24.442857 | 74 | 0.733489 |
dc16a13d387c0b0bc002823fb7755299735633f4 | 1,771 | py | Python | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
] | null | null | null | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
] | null | null | null | gmqtt/storage.py | sabuhish/gmqtt | b88aaaaa88b0d8eb1e2757a327060298524a976a | [
"MIT"
] | null | null | null | import asyncio
from typing import Tuple
import heapq
| 29.032787 | 73 | 0.648786 |
dc16d9cdd8796257d1bb841212fc202433a9eade | 10,638 | py | Python | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
] | 10 | 2016-11-16T16:26:16.000Z | 2021-04-26T17:20:28.000Z | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
] | 3 | 2017-04-20T11:29:17.000Z | 2017-11-06T17:12:12.000Z | test/testframework/runner.py | 5GExchange/escape | eb35d460597a0386b18dd5b6a5f62a3f30eed5fa | [
"Apache-2.0"
] | 10 | 2017-03-27T13:58:52.000Z | 2020-06-24T22:42:51.000Z | # Copyright 2017 Lajos Gerecs, Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import logging
import os
import sys
import threading
from collections import Iterable
import pexpect
import yaml
from yaml.error import YAMLError
log = logging.getLogger()
def kill_process (self):
    """
    Kill the process under test.

    Requests a graceful stop first (see ``stop``), records the kill in the
    name-mangled ``__killed`` flag, then force-terminates the child if it
    is still alive.
    """
    log.debug("Kill process...")
    self.stop()
    # Remember that the process was killed rather than exiting on its own.
    self.__killed = True
    if self.is_alive:
        # pexpect's terminate(force=True) escalates to SIGKILL if needed.
        self._process.terminate(force=True)
def stop (self):
    """
    Stop the process.

    Sends Ctrl-C to the child first (giving it a chance to shut down
    cleanly) and then terminates it if it is still running.

    :return: None
    """
    log.debug("Terminate program under test: %s" % self)
    if self._process:
        # Emulate a keyboard interrupt before resorting to terminate().
        self._process.sendcontrol('c')
        if self.is_alive:
            self._process.terminate()
def get_process_output_stream (self):
    """
    :return: the captured output buffer of the child process, or an empty
        string when nothing was captured.
    """
    buffered = self._process.before
    return buffered if buffered else ""
class ESCAPECommandRunner(CommandRunner):
    """
    Extended CommandRunner class for ESCAPE.
    Use threading.Event for signalling ESCAPE is up.
    """
    # Command-line flags of the ESCAPE binary used by callers of this runner.
    ESC_PARAM_QUIT = "--quit"
    ESC_PARAM_SERVICE = "--service"

    def execute (self, wait_for_up=True):
        """
        Create and start the process. Block until the process ends or timeout is
        exceeded.

        :param wait_for_up: block until ESCAPE reports itself as up before
            waiting for process termination (default: True)
        :return: self on normal termination, None on timeout/error
        """
        log.debug("\nStart program under test...")
        log.debug(self._command)
        try:
            self._process = pexpect.spawn(self._command[0],
                                          args=self._command[1:],
                                          timeout=self.kill_timeout,
                                          cwd=self._cwd,
                                          logfile=self.output_stream)
            if wait_for_up:
                # Wait for ESCAPE's startup banner, then signal readiness.
                # NOTE(review): __ready is presumably a threading.Event created
                # in __init__ (not visible in this fragment) — confirm.
                self._process.expect(pattern="ESCAPEv2 is up")
                self.__ready.set()
            # Block until the child process terminates.
            self._process.expect(pexpect.EOF)
            return self
        except pexpect.TIMEOUT:
            log.debug("Process running timeout(%ss) is exceeded!" % self.kill_timeout)
            self.kill_process()
            self.timeouted = True
        except pexpect.ExceptionPexpect as e:
            log.error("Got unexpected error:\n%s" % e.message)
            log.debug("\n\nError details:\n%s" % self._process.before)
            self.kill_process()

    def test (self, timeout=CommandRunner.KILL_TIMEOUT):
        """
        Start a presumably simple process and test if the process is executed
        successfully within the timeout interval or been killed.

        :param timeout: use the given timeout instead of the default kill timeout
        :type timeout: int
        :return: the process is stopped successfully
        :rtype: bool
        """
        try:
            proc = pexpect.spawn(self._command[0],
                                 args=self._command[1:],
                                 cwd=self._cwd,
                                 timeout=timeout)
            # Success means the process ran to completion (EOF) in time.
            proc.expect(pexpect.EOF)
            return True
        except pexpect.ExceptionPexpect:
            return False
class RunnableTestCaseInfo(object):
    """
    Container class for storing the relevant information and config values of a
    test case.
    """
    # File-name conventions inside a test case directory.
    CONFIG_FILE_NAME = "test-config.yaml"
    CONFIG_CONTAINER_NAME = "test"
    RUNNER_SCRIPT_NAME = "run.sh"
    README_FILE_NAME = "README.txt"

    def readme (self):
        """
        :return: load the README file
        :rtype: str
        """
        # NOTE(review): ``full_testcase_path`` is not defined in this
        # excerpt; presumably a property of this class — verify.
        with open(os.path.join(self.full_testcase_path,
                               self.README_FILE_NAME)) as f:
            readme = f.read()
        return readme if readme else ""

    def load_test_case_class (self):
        """
        :return: Return the TestCase class and it's parameters defined in the
        test case config file
        :rtype: tuple(object, dict)
        """
        test_args = {}
        try:
            with open(self.config_file_name, 'r') as f:
                config = yaml.safe_load(f)
        except (IOError, YAMLError) as e:
            log.error("Failed to load configuration file: %s" % e)
            # NOTE(review): this path returns a bare None while the other
            # paths return a 2-tuple — callers must handle both shapes.
            return None
        if self.CONFIG_CONTAINER_NAME in config:
            # Copy so that popping 'module'/'class' does not mutate the
            # loaded config dict.
            test_args = copy.copy(config[self.CONFIG_CONTAINER_NAME])
            try:
                m = test_args.pop('module')
                c = test_args.pop('class')
                return getattr(importlib.import_module(m), c), test_args
            except (KeyError, ImportError):
                # Missing/unimportable class: fall through to (None, args).
                pass
        return None, test_args
| 28.142857 | 80 | 0.650498 |
dc1774c173332a4ec6c00f25e59d94cce3123021 | 868 | py | Python | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
] | null | null | null | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
] | null | null | null | Calliope/13 Clock/Clock.py | frankyhub/Python | 323ef1399efcbc24ddc66ad069ff99b4999fff38 | [
"MIT"
] | null | null | null | from microbit import *
hands = Image.ALL_CLOCKS
#A centre dot of brightness 2.
ticker_image = Image("2\n").crop(-2,-2,5,5)
#Adjust these to taste
MINUTE_BRIGHT = 0.1111
HOUR_BRIGHT = 0.55555
#Generate hands for 5 minute intervals
#Generate hands with ticker superimposed for 1 minute intervals.
#Run a clock speeded up 60 times, so we can watch the animation.
for tick in ticks():
display.show(tick)
sleep(200) | 24.8 | 71 | 0.624424 |
dc18cde3ecea098343bc73407dcfa2ce64cc68f5 | 528 | py | Python | home/kakadu31/sabertooth.py | rv8flyboy/pyrobotlab | 4e04fb751614a5cb6044ea15dcfcf885db8be65a | [
"Apache-2.0"
] | 63 | 2015-02-03T18:49:43.000Z | 2022-03-29T03:52:24.000Z | home/kakadu31/sabertooth.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 16 | 2016-01-26T19:13:29.000Z | 2018-11-25T21:20:51.000Z | home/kakadu31/sabertooth.py | hirwaHenryChristian/pyrobotlab | 2debb381fc2db4be1e7ea6e5252a50ae0de6f4a9 | [
"Apache-2.0"
] | 151 | 2015-01-03T18:55:54.000Z | 2022-03-04T07:04:23.000Z | #Variables
#Working with build 2234
# NOTE(review): Runtime, sleep and python are injected by the MyRobotLab
# scripting environment — this script is meant to run inside MyRobotLab,
# not as a standalone Python program.
# Serial port where the Sabertooth motor controller is attached.
saberPort = "/dev/ttyUSB0"
#Initializing Motorcontroller
saber = Runtime.start("saber", "Sabertooth")
saber.connect(saberPort)
sleep(1)
#Initializing Joystick
joystick = Runtime.start("joystick","Joystick")
print(joystick.getControllers())
python.subscribe("joystick","publishJoystickInput")
joystick.setController(0)
# Ramp motor 1 up (0..99) in half-second steps...
for x in range(0,100):
    print("power", x)
    saber.driveForwardMotor1(x)
    sleep(0.5)
# ...then back down (100..0).  Note the up-ramp stops at 99 while the
# down-ramp starts at 100.
for x in range(100,-1,-1):
    print("power", x)
    saber.driveForwardMotor1(x)
    sleep(0.5)
dc19222afbe13a4d5207f36ba7d56c249b5d6019 | 4,542 | py | Python | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Weevely/core/backdoor.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | 1 | 2018-07-04T18:35:16.000Z | 2018-07-04T18:35:16.000Z | # -*- coding: utf-8 -*-
# This file is part of Weevely NG.
#
# Copyright(c) 2011-2012 Weevely Developers
# http://code.google.com/p/weevely/
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import base64, codecs
from random import random, randrange, choice, shuffle
from pollution import pollute_with_static_str
from core.utils import randstr
from core.moduleexception import ModuleException
from string import Template, ascii_letters, digits
# Characters accepted in backdoor passwords.
PERMITTED_CHARS = ascii_letters + digits + '_.~'
# User-facing password validation messages.
WARN_SHORT_PWD = 'Invalid password, use words longer than 3 characters'
WARN_CHARS = 'Invalid password, password permitted chars are \'%s\'' % PERMITTED_CHARS
| 34.409091 | 137 | 0.674373 |
dc19c0faf717f2a11500ab0d47cd0b71aa1f7557 | 4,638 | py | Python | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | 2 | 2020-06-22T13:33:28.000Z | 2020-12-30T15:09:00.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | 37 | 2020-02-18T12:15:00.000Z | 2021-12-13T20:01:14.000Z | musicscore/musicxml/types/complextypes/notations.py | alexgorji/music_score | b4176da52295361f3436826903485c5cb8054c5e | [
"MIT"
] | null | null | null | from musicscore.dtd.dtd import Sequence, GroupReference, Choice, Element
from musicscore.musicxml.attributes.optional_unique_id import OptionalUniqueId
from musicscore.musicxml.attributes.printobject import PrintObject
from musicscore.musicxml.groups.common import Editorial
from musicscore.musicxml.elements.xml_element import XMLElement
from musicscore.musicxml.types.complextypes.arpeggiate import ComplexTypeArpeggiate
from musicscore.musicxml.types.complextypes.articulations import ComplexTypeArticulations
from musicscore.musicxml.types.complextypes.complextype import ComplexType
from musicscore.musicxml.types.complextypes.dynamics import Dynamics
from musicscore.musicxml.types.complextypes.fermata import ComplexTypeFermata
from musicscore.musicxml.types.complextypes.ornaments import ComplexTypeOrnaments
from musicscore.musicxml.types.complextypes.slide import ComplexTypeSlide
from musicscore.musicxml.types.complextypes.slur import ComplexTypeSlur
from musicscore.musicxml.types.complextypes.technical import ComplexTypeTechnical
from musicscore.musicxml.types.complextypes.tied import ComplexTypeTied
from musicscore.musicxml.types.complextypes.tuplet import ComplexTypeTuplet
| 30.715232 | 118 | 0.684994 |
904fd225f8fe0b9727c74b7b31cf0eb0c1430fbd | 794 | py | Python | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
] | null | null | null | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
] | null | null | null | src/constants.py | MitraSeifari/pystackoverflow | 70da1c6a8407df34496fe9843e8ae7f4c15aac0e | [
"MIT"
] | null | null | null | from types import SimpleNamespace
from src.utils.keyboard import create_keyboard
# Button labels used across the bot UI.  The ':name:' tokens are emoji
# aliases, presumably resolved by create_keyboard/emojize at render time —
# TODO confirm.
keys = SimpleNamespace(
    settings=':gear: Settings',
    cancel=':cross_mark: Cancel',
    back=':arrow_left: Back',
    next=':arrow_right: Next',
    add=':heavy_plus_sign: Add',
    edit=':pencil: Edit',
    save=':check_mark_button: Save',
    delete=':wastebasket: Delete',
    yes=':white_check_mark: Yes',
    # NOTE(review): 'negetive' looks like a typo for 'negative' — if this
    # emoji alias fails to resolve, that is why.  Left unchanged because the
    # string is runtime data.
    no=':negetive_squared_cross_mark: No',
    ask_question=':red_question_mark: Ask a question',
    send_question=':envelope_with_arrow: Send question',
)
# Pre-built reply keyboards for each conversation screen.
keyboards = SimpleNamespace(
    main=create_keyboard(keys.ask_question, keys.settings),
    ask_question=create_keyboard(keys.cancel, keys.send_question),
)
# Finite-state identifiers for the conversation handler.
states = SimpleNamespace(
    main='MAIN',
    ask_question='ASK_QUESTION'
)
| 26.466667 | 66 | 0.715365 |
9051a1c1088095b37931ffbb5f87a6219186207b | 456 | py | Python | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
] | null | null | null | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
] | null | null | null | iirsBenchmark/exceptions.py | gAldeia/iirsBenchmark | 2211b4755405eb32178a09f1a01143d53dc6516d | [
"BSD-3-Clause"
] | null | null | null | # Author: Guilherme Aldeia
# Contact: guilherme.aldeia@ufabc.edu.br
# Version: 1.0.0
# Last modified: 08-20-2021 by Guilherme Aldeia
"""
Simple exception that is raised by explainers when they don't support local
or global explanations, or when they are not model agnostic. This should be
catched and handled in the experiments.
"""
| 32.571429 | 76 | 0.730263 |
90534359708ff8911197cad1bfec21d46c458905 | 1,302 | py | Python | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
] | null | null | null | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
] | 51 | 2020-05-31T17:36:37.000Z | 2020-06-24T05:23:19.000Z | covid_data_tracker/util.py | granularai/gh5050_covid_data_tracker | 7af3013ad9142a20cf42963e39c8968081cec7db | [
"MIT"
] | 1 | 2020-06-11T19:35:41.000Z | 2020-06-11T19:35:41.000Z | import click
from covid_data_tracker.registry import PluginRegistry
def plugin_selector(selected_country: str):
"""plugin selector uses COUNTRY_MAP to find the appropriate plugin
for a given country.
Parameters
----------
selected_country : str
specify the country of interest.
Returns
-------
covid_data_tracker.plugins.BasePlugin
More appropriately, returns an instance of a country-specific
subclass of BasePlugin.
"""
if selected_country in PluginRegistry.keys():
klass = PluginRegistry[selected_country]
instance = klass()
else:
raise AttributeError
click.echo('No country plugin available')
return instance
def country_downloader(country: str):
"""Finds country plugin, fetches data, and downloads
to csv with click alerts.
Parameters
----------
country : str
Name of country
Returns
-------
NoneType
"""
click.echo(f"selecting plugin for {country}")
country_plugin = plugin_selector(country)
click.echo(f"attempting to find available data for {country}")
country_plugin.fetch()
click.echo(f"downloading available data for {country}")
country_plugin.check_instance_attributes()
country_plugin.download()
| 25.529412 | 70 | 0.675115 |
90541de92a1d97d772f070e495cb4dccfca0eef7 | 1,416 | py | Python | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | dev/libs.py | karimwitani/webscraping | 58d4b2587d039fcea567db2caf86bbddb4e0b96f | [
"MIT"
] | null | null | null | import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
print("everything ok") | 40.457143 | 136 | 0.738701 |
905515ca4421e0d997a1e7e93a11455f5f918cff | 380 | py | Python | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | setup.py | dwastberg/osmuf | 0cef4e87401b3fc2d344d7e067b4d9ada25848a4 | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata for the osmuf distribution (consumed by setuptools/pip).
setup(name='osmuf',
      version='0.1',
      install_requires=[
          "seaborn",
      ],
      description='Urban Form analysis from OpenStreetMap',
      url='http://github.com/atelierlibre/osmuf',
      author='AtelierLibre',
      author_email='mail@atelierlibre.org',
      license='MIT',
      packages=['osmuf'],
      zip_safe=False)
905714b59b0d263f8c19b411a33bd80163e9bbb7 | 1,813 | py | Python | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
] | 41 | 2015-01-07T07:43:33.000Z | 2020-09-23T04:35:09.000Z | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
] | 20 | 2015-01-28T21:02:56.000Z | 2018-08-14T13:39:31.000Z | tests/test_model.py | artemudovyk/django-updown | 0353cf8ec5c50b4ffd869a56f51ede65b6368ef8 | [
"BSD-2-Clause"
] | 19 | 2015-01-06T12:50:05.000Z | 2022-01-21T17:01:56.000Z | # -*- coding: utf-8 -*-
"""
tests.test_model
~~~~~~~~~~~~~~~~
Tests the models provided by the updown rating app
:copyright: 2016, weluse (https://weluse.de)
:author: 2016, Daniel Banck <dbanck@weluse.de>
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import random
from django.test import TestCase
from django.contrib.auth.models import User
from updown.models import SCORE_TYPES
from updown.exceptions import CannotChangeVote
from tests.models import RatingTestModel
| 31.258621 | 75 | 0.629344 |
90571fc1423b9d2a5a71dbb91569f10170f5532e | 5,179 | py | Python | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | nlptk/ratings/rake/rake.py | GarryGaller/nlp_toolkit | df98ee25f8a1f4379e751fdd4fd9f5389ffbfd1b | [
"MIT"
] | null | null | null | import sys,os
from typing import List
from collections import defaultdict, Counter
from itertools import groupby, chain, product
import heapq
from pprint import pprint
import string
| 30.827381 | 85 | 0.519598 |
90572919b03e5c9195f95e3b9733b72ece7106bb | 5,623 | py | Python | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | depimpact/tests/test_functions.py | NazBen/dep-impact | 284e72bccfb6309110df5191dfae3c0a93ce813b | [
"MIT"
] | null | null | null | import numpy as np
import openturns as ot
def func_overflow(X, model=1, h_power=0.6):
    """Overflow model function.

    Parameters
    ----------
    X : np.ndarray, shape : N x 8
        Input variables
        - x1 : Flow,
        - x2 : Krisler Coefficient,
        - x3 : Zv, etc...
    model : bool, optional(default=1)
        If 1, the classical model. If 2, the economic model.
    h_power : float, optional(default=0.6)
        Exponent of the flow/height relation used to compute H.

    Returns
    -------
    Overflow S (if model=1) or Cost Cp (if model=2).
    """
    X = np.asarray(X)
    if X.shape[0] == X.size:  # It's a vector
        n = 1
        dim = X.size
        ids = None
    else:
        n, dim = X.shape
        ids = range(n)
    assert dim == 8, "Incorect dimension : dim = %d != 8" % dim
    # Unpack the 8 physical variables (column order is fixed).
    # NOTE(review): with a 1-D input, X[None, k] yields a length-1 array,
    # so the "vector" branch still returns arrays, not Python scalars.
    Q = X[ids, 0]
    Ks = X[ids, 1]
    Zv = X[ids, 2]
    Zm = X[ids, 3]
    Hd = X[ids, 4]
    Cb = X[ids, 5]
    L = X[ids, 6]
    B = X[ids, 7]
    # Water height H, then overflow S relative to Hd and Cb.
    H = (Q / (B * Ks * np.sqrt((Zm - Zv) / L)))**h_power
    S = Zv + H - Hd - Cb
    if model == 1:
        return S
    elif model == 2:
        # Economic model: the boolean masks (S > 0) / (S <= 0) select the
        # two cost regimes element-wise.
        Cp = (S > 0.) + (0.2 + 0.8 * (1. - np.exp(-1000. / (S**4)))) * (S <= 0.) + 1./20. * (Hd * (Hd > 8.) + 8*(Hd <= 8.))
        return Cp
    else:
        # BUG FIX: corrected the typo "Unknow" in the error message.
        raise AttributeError('Unknown model.')
# Marginal input distributions of the overflow model (openturns objects).
# Flow Q: Gumbel parameterized by (mu, sigma), truncated to [500, 3000].
tmp = ot.Gumbel()
tmp.setParameter(ot.GumbelMuSigma()([1013., 558.]))
dist_Q = ot.TruncatedDistribution(tmp, 500., 3000.)
# Strickler coefficient Ks: normal truncated below at 15.
dist_Ks = ot.TruncatedNormal(30., 8., 15., np.inf)
dist_Zv = ot.Triangular(49., 50., 51.)
dist_Zm = ot.Triangular(54., 55., 56.)
dist_Hd = ot.Uniform(7., 9.)
dist_Cb = ot.Triangular(55., 55.5, 56.)
dist_L = ot.Triangular(4990., 5000., 5010.)
dist_B = ot.Triangular(295., 300., 305.)
# Order must match the column order expected by func_overflow.
margins_overflow = [dist_Q, dist_Ks, dist_Zv, dist_Zm, dist_Hd, dist_Cb, dist_L, dist_B]
var_names_overflow = ["Q", "K_s", "Z_v", "Z_m", "H_d", "C_b", "L", "B"]
def func_sum(x, a=None):
    """Additive weighted model function.

    Parameters
    ----------
    x : array-like, shape (n, dim)
        The input values.
    a : np.ndarray, optional
        Weight vector/matrix; defaults to a column of ones (row sums).

    Returns
    -------
    y : a.x^t — a float for a single result, a 1-d array for one result per
        row, otherwise the full 2-d product.
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n_obs, dim = x.shape
    if a is None:
        # Unweighted case: a column of ones turns the dot product into a
        # plain row sum.
        a = np.ones((dim, 1))
    if a.ndim == 1:
        a = a.reshape(-1, 1)
        assert a.shape[0] == dim, "Shape not good"
    elif a.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    y = x @ a
    if y.size == 1:
        return y.item()
    if y.size == y.shape[0]:
        return y.ravel()
    return y
def func_prod(x, a=None):
    """Product model function.

    Parameters
    ----------
    x : array-like, shape (n, dim)
        The input values.
    a : np.ndarray, optional
        The input coefficients (validated for shape, currently unused by
        the product itself).

    Returns
    -------
    y : the per-row product of x — a float for a single row, otherwise a
        1-d array.
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n, dim = x.shape
    if a is None:
        a = np.ones((dim, 1))
    if a.ndim == 1:
        a = a.reshape(-1, 1)
        assert a.shape[0] == dim, "Shape not good"
    elif a.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    # BUG FIX: the original computed np.sum(x, axis=1), duplicating func_sum
    # even though the name and docstring promise a product.
    y = np.prod(x, axis=1)
    if y.size == 1:
        return y.item()
    elif y.size == y.shape[0]:
        return y.ravel()
    else:
        return y
def func_spec(x, a=(0.58, -1, -1.0, 0, 0., 0.)):
    """Polynomial/trigonometric test model function.

    Computes, per row of x:
    a0*prod(x^2) + a1*prod(x) + a2*sum(x^2) + a3*sum(x)
    + a4*sum(sin(x)) + a5*sum(cos(x)).

    Parameters
    ----------
    x : array-like, shape (n, dim)
        The input values.
    a : sequence of 6 floats, optional
        The term coefficients.  BUG FIX: the default is now a tuple — the
        original used a mutable list default (a classic Python pitfall).

    Returns
    -------
    y : a float for a single row, otherwise a 1-d array.
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n, dim = x.shape
    y = a[0]*(x**2).prod(axis=1) + \
        a[1]*x.prod(axis=1) + \
        a[2]*(x**2).sum(axis=1) + \
        a[3] * x.sum(axis=1) + \
        a[4] * np.sin(x).sum(axis=1) + \
        a[5] * np.cos(x).sum(axis=1)
    if y.size == 1:
        return y.item()
    elif y.size == y.shape[0]:
        return y.ravel()
    else:
        return y
def func_cum_sum_weight(x, weights=None, use_sum=True, const=[0., 0., 0., 1., 0., 0.]):
    """Pairwise-interaction model over every input pair (i, j) with i > j.

    Parameters
    ----------
    x : array-like, shape (n, dim)
        The input values.
    weights : np.ndarray, optional
        Pair weights; defaults to a lower-triangular ramp 1, 2, 3, ...
        normalized by the number of pairs dim*(dim-1)/2.
    use_sum : bool
        If True, multiply the (1 + weight*term) factors; otherwise sum the
        weighted terms.
    const : sequence of 6 floats
        Coefficients forwarded to func_spec for each pair.

    Returns
    -------
    y : scalar or array, depending on func_spec's output.
    """
    if isinstance(x, list):
        x = np.asarray(x)
    n_obs, dim = x.shape
    if weights is None:
        weights = np.zeros((dim, dim))
        n_pairs = dim * (dim - 1) / 2
        rank = 1
        for row in range(1, dim):
            for col in range(row):
                weights[row, col] = rank
                rank += 1
        weights /= n_pairs
    if weights.ndim == 1:
        weights = weights.reshape(-1, 1)
        assert weights.shape[0] == dim, "Shape not good"
    elif weights.ndim > 2:
        raise AttributeError('Dimension problem for constant a')
    y = 1 if use_sum else 0
    for row in range(1, dim):
        for col in range(row):
            pair = np.c_[x[:, row], x[:, col]]
            term = weights[row, col] * func_spec(pair, a=const)
            if use_sum:
                y = y * (1. + term)
            else:
                y = y + term
    return y
def multi_output_func_sum(x, output_dim=2):
    """Additive model function with multi output.

    Parameters
    ----------
    x : np.ndarray
        The input values, shape (n, dim).
    output_dim : int
        The number of output dimensions.

    Returns
    -------
    np.ndarray, shape (n, output_dim)
        Column k holds k times the row sums of x.
    """
    row_sums = x.sum(axis=1)
    scaled = [factor * row_sums for factor in range(output_dim)]
    return np.asarray(scaled).T
9059540a6a1df436a316a8b4d0bf19c43271fcb4 | 1,699 | py | Python | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null | app/main/forms.py | ingabire1/blog | 5fcee6027cee9fbdcd94057123862bd146a16e98 | [
"Unlicense"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField
from wtforms.validators import Required
# class LoginForm(FlaskForm):
# email = StringField('Your Email Address',validators=[Required(),Email()])
# password = PasswordField('Password',validators =[Required()])
# remember = BooleanField('Remember me')
# submit = SubmitField('Sign In')
| 42.475 | 94 | 0.712772 |
9059c31682941520b3a9802d364d8232668dc8f3 | 3,228 | py | Python | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
] | 4 | 2021-04-23T16:51:57.000Z | 2021-06-06T20:28:08.000Z | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
] | 15 | 2021-10-22T01:55:53.000Z | 2022-01-15T11:40:48.000Z | SEPHIRA/FastAPI/main.py | dman926/Flask-API | 49e052159a3915ec25305141ecdd6cdeb1d7a25c | [
"MIT"
] | 3 | 2021-03-21T22:29:05.000Z | 2021-06-06T20:30:18.000Z | from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette import status
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import ASGIApp
from config import APISettings, CORSSettings, FastAPISettings, PayPalSettings, UvicornSettings, ShopSettings, NowPaymentsSettings
import logging
####
# Custom Middlewares #
####
####
# #
####
# Log to a file; INFO level and above.  (The f-prefix on the format string
# is inert — it contains no interpolation fields.)
logging.basicConfig(filename="log.log", level=logging.INFO, format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
logger = logging.getLogger(__name__)
app = FastAPI(debug=FastAPISettings.DEBUG)
# Allow cross-origin requests from the configured frontend origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORSSettings.ALLOW_ORIGINS,
    allow_methods=['*'],
    allow_headers=['*']
)
# Optionally cap the request body size.
# NOTE(review): LimitPostContentSizeMiddleware is referenced but not defined
# in this excerpt — confirm it lives in the "Custom Middlewares" section.
if UvicornSettings.MAX_CONTENT_SIZE:
    app.add_middleware(
        LimitPostContentSizeMiddleware,
        max_upload_size=UvicornSettings.MAX_CONTENT_SIZE
    )
if __name__== '__main__':
import uvicorn
uvicorn.run('main:app', reload=UvicornSettings.USE_RELOADER, log_level=UvicornSettings.LOG_LEVEL, port=UvicornSettings.PORT) | 32.606061 | 160 | 0.763011 |
905b8e431341e337a25074cf4f7919a71c8959b2 | 94,831 | py | Python | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
] | 5 | 2020-03-30T13:26:12.000Z | 2021-04-02T07:10:49.000Z | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
] | null | null | null | bio_rtd/uo/sc_uo.py | open-biotech/bio-rtd | c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0 | [
"MIT"
] | 1 | 2020-06-03T07:50:56.000Z | 2020-06-03T07:50:56.000Z | """Semi continuous unit operations.
Unit operations that accept constant or box-shaped flow rate profile
and provide periodic flow rate profile.
"""
__all__ = ['AlternatingChromatography', 'ACC', 'PCC', 'PCCWithWashDesorption']
__version__ = '0.7.1'
__author__ = 'Jure Sencar'
import typing as _typing
import numpy as _np
import scipy.interpolate as _interp
from bio_rtd.chromatography import bt_load as _bt_load
import bio_rtd.utils as _utils
import bio_rtd.core as _core
import bio_rtd.pdf as _pdf
| 39.595407 | 79 | 0.571891 |
905ba6022a4c26013aa2a89c33571a5f24d93f3a | 1,640 | py | Python | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
] | null | null | null | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
] | null | null | null | src/tools/create_graphs_log.py | KatiaJDL/CenterPoly | 42811d9f5f85d9fef91a03275fe6ad113ccb163c | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
if __name__ == '__main__':
main()
| 24.848485 | 50 | 0.585366 |
905cb03976073d3a05d5e9b6aad19e20554ed770 | 551 | py | Python | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
] | 4 | 2020-09-09T14:58:10.000Z | 2021-12-04T14:11:44.000Z | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
] | 10 | 2020-09-15T14:05:32.000Z | 2022-01-20T11:46:07.000Z | fluree/query-generate.py | ivankoster/aioflureedb | d421391a7db1d2acaf8d39f6dfe2997e8097ade8 | [
"BSD-3-Clause"
] | 1 | 2020-12-01T10:10:00.000Z | 2020-12-01T10:10:00.000Z | #!/usr/bin/python3
import json
from aioflureedb.signing import DbSigner
privkey = "bf8a7281f43918a18a3feab41d17e84f93b064c441106cf248307d87f8a60453"
address = "1AxKSFQ387AiQUX6CuF3JiBPGwYK5XzA1A"
signer = DbSigner(privkey, address, "something/test")
free_test(signer)
| 27.55 | 76 | 0.716878 |
905d2dacd283245c26f6f827ba4beeef737df514 | 3,447 | py | Python | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
] | 5 | 2017-02-27T23:48:10.000Z | 2020-11-12T18:55:28.000Z | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
] | 5 | 2017-03-07T01:19:21.000Z | 2020-09-16T18:22:05.000Z | actions/delete_bridge_domain.py | StackStorm-Exchange/network_essentials | 99cb5a966812fb503d340c6689390dfb08c4e374 | [
"Apache-2.0"
] | 2 | 2017-06-20T00:52:58.000Z | 2021-01-28T17:45:48.000Z | # Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ne_base import NosDeviceAction
from ne_base import log_exceptions
import itertools
| 41.53012 | 100 | 0.607775 |
905dd4ceac49c186f37f935a9aa23bbcc3c6c3d1 | 1,182 | py | Python | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
] | 4 | 2021-01-14T14:01:06.000Z | 2021-06-21T12:41:32.000Z | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
] | null | null | null | python/signature.py | IUIDSL/kgap_lincs-idg | 1f781e5f34cc5d006a22b8357100dc01845a0690 | [
"CC0-1.0"
] | 1 | 2020-09-01T09:56:58.000Z | 2020-09-01T09:56:58.000Z | #!/usr/bin/env python3
###
# Based on signature.R
###
import sys,os,logging
import numpy as np
import pandas as pd
if __name__=="__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
if (len(sys.argv) < 3):
logging.error("3 file args required, LINCS sig info for GSE70138 and GSE92742, and output file.")
sys.exit(1)
fn1 = sys.argv[1] #GSE70138_Broad_LINCS_sig_info_2017-03-06.txt.gz
fn2 = sys.argv[2] #GSE92742_Broad_LINCS_sig_info.txt.gz
ofile = sys.argv[3] #signature.tsv
#
part1 = pd.read_table(fn1, "\t", na_values=["-666", "-666.0"])
logging.info(f"columns: {part1.columns}")
part1 = part1[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
part2 = pd.read_table(fn2, "\t", na_values=["-666", "-666.0"], dtype="str")
part2.pert_time = part2.pert_time.astype(np.int32)
logging.info(f"columns: {part2.columns}")
part2 = part2[["sig_id", "pert_id", "pert_iname", "pert_type", "cell_id", "pert_idose", "pert_itime"]]
#
sign = pd.concat([part1, part2])
sign.drop_duplicates(subset=["sig_id"], keep="first", inplace=True)
sign.to_csv(ofile, "\t", index=False)
| 35.818182 | 104 | 0.678511 |
905ec305866e4908924c5460c3c40007ef7a2438 | 8,289 | py | Python | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
] | null | null | null | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
] | null | null | null | HW3 - Contest Data Base/main.py | 916-Maria-Popescu/Fundamental-of-Programming | 6ddf951622bd6cfde16ede5ab6ee966cff657db2 | [
"MIT"
] | null | null | null | # ASSIGNMENT 3
"""
During a programming contest, each contestant had to solve 3 problems (named P1, P2 and P3).
Afterwards, an evaluation committee graded the solutions to each of the problems using integers between 0 and 10.
The committee needs a program that will allow managing the list of scores and establishing the winners.
Write a program that implements the functionalities exemplified below:
(A) Add the result of a new participant (add, insert)
(B) Modify scores (remove, remove between two postion, replace the score obtained by a certain participant at a
certain problem with other score obtained by other participant)
(C) Display participants whose score has different properties. """
def get(list, position):
    """ The function will extract a certain element from a list.

    :param list: the list to read from (note: parameter shadows the builtin)
    :param position: index of the wanted element; accepts int or numeric str
    :return: the element at index int(position)
    """
    return list[int(position)]
def set(list, element, position):
    """ The functin will set a certain element from a list.

    Replaces, in place, the element at ``position`` with ``element``.
    (Note: the function name shadows the builtin ``set``.)

    :param list: [ ['2', '4', '8'], ['3', '5', '6'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'] ]
    :param element: ['5', '8', '9']
    :param position: 1
    :return: [ ['2', '4', '8'], ['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10']
    """
    # BUG FIX: the original did insert() followed by remove(<old value>).
    # list.remove() deletes the FIRST occurrence of that value, so when the
    # displaced element's value also appeared earlier in the list the wrong
    # item was deleted.  Direct index assignment is correct and O(1).
    list[int(position)] = element
def make_a_list(sentence):
    """ Collect the three scores P1, P2, P3 from a tokenized command.

    :param sentence: the parsed command; the scores sit at indices 1-3
    :return: a new list [sentence[1], sentence[2], sentence[3]]
    """
    return [sentence[index] for index in range(1, 4)]
def add_scores(list, sentence):
    """ The function will add to the principal list (with all the scores of all the participants) a list with the
    scores of just one participant.

    :param list: the principal list of score triples (mutated in place)
    :param sentence: tokenized command whose indices 1-3 hold P1, P2, P3
    """
    list.append(make_a_list(sentence))
def insert_scores(list, sentence, position):
    """ The function will insert in a given position to the principal list (with all the scores of all the participants)
    a list with the scores of just one participant

    :param list: the principal list of score triples (mutated in place)
    :param sentence: tokenized command whose indices 1-3 hold P1, P2, P3
    :param position: index at which to insert; accepts int or numeric str
    """
    list.insert(int(position), make_a_list(sentence))
def remove_one_part(list, position):
    """ The function will set the scores of the participant at a given position to 0.
    So that, the participant <position> score P1=P2=P3= 0. """
    # The participant stays in the list; only the scores are zeroed.
    nul_element = ['0', '0', '0']
    set(list, nul_element, position)
def remove_more_part(list, first_position, last_position):
    """ The function will set the scores of all the participants between the first position and last position to 0.
    For all the participants between <first_position> and <last_position>, P1=P1=P3= 0 """
    nul_element = ['0', '0', '0']
    # Both endpoints are inclusive, hence the +1 on the range stop.
    # Note: every zeroed slot receives the SAME ['0', '0', '0'] list object.
    for i in range(int(first_position), int(last_position) + 1):
        set(list, nul_element, i)
def replace(list, problem, new_score):
    """ The function will replace a score obtained by a participant at a specific problem with a new score.
    List represents the list with the scores of a participant, where <problem> ( P1/P2/P3 ) will recive a new score

    :param list: one participant's scores, e.g. ['2', '4', '8']
    :param problem: problem name 'P1'/'P2'/'P3'; its digit selects the slot
    :param new_score: the replacement score
    """
    # 'P1' -> index 0, 'P2' -> index 1, 'P3' -> index 2.
    set(list, new_score, int(problem[1]) - 1)
def calc_average(list):
    """ Compute the arithmetic mean of a list of numbers stored as strings.

    :param list: [ '2', '4', '3' ]
    :return: 3.0 — sum of the int values divided by the list length
    """
    return sum(int(value) for value in list) / len(list)
def average_score_lesser(list, number):
    """ Return the participants whose average score is strictly below number.

    :param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
    :param number: 7
    :return: ['10', '4', '6'], ['9', '3', '2']
    """
    return [participant for participant in list
            if calc_average(participant) < number]
def average_score_equal(list, number):
    """ Return the participants whose average score equals number.

    Note: this is an exact float comparison — non-terminating averages
    (e.g. 7.333...) only match if ``number`` carries the same float value.

    :param list: [['5', '8', '9'], ['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
    :param number: 8
    :return: ['7', '8', '9']
    """
    return [participant for participant in list
            if calc_average(participant) == number]
def average_score_greater(list, number):
    """Collect every participant whose average score is strictly above <number>.

    :param list: [['10', '4', '6'], ['9', '3', '2'], ['10', '10', '10'], ['7', '8', '9']]
    :param number: 7
    :return: [['10', '10', '10'], ['7', '8', '9']]
    """
    selected = []
    index = 0
    while index < len(list):
        participant = get(list, index)
        if calc_average(participant) > number:
            selected.append(participant)
        index += 1
    return selected
def list_sorted(list):
    """ The function will return a list with participants sorted in decreasing order of average score.
    Note: the participant sub-lists are mutated in place while sorting (the
    average is temporarily inserted at index 0 and removed again afterwards),
    and the returned list shares its elements with the input list.
    :param list: [['5', '8', '9'], ['10', '4', '6'], ['10', '10', '10'], ['7', '8', '9'], ['10', '2', '9']]
    :return: [['10', '10', '10'], ['7', '8', '9'], ['5', '8', '9'], ['10', '2', '9'], ['10', '4', '6']]
    """
    l = []
    # Prefix each participant's scores with their average so that sorting the
    # sub-lists compares averages first (ties then fall back to comparing the
    # remaining score strings element by element).
    for i in range(0, len(list)):
        get(list, i).insert(0, calc_average(get(list, i)))
        l.append(get(list, i))
    l.sort(reverse=True)
    # Strip the temporary average from the front of every entry again.
    for i in range(0, len(l)):
        get(l, i)  # NOTE(review): bare call with no effect — presumably leftover debugging.
        get(l, i).remove(get(get(l, i), 0))
    return l
if __name__ == '__main__':
    # Entry point: show the menu once, then hand control to the interactive
    # command loop (both functions are defined earlier in this file).
    print_menu()
    run_menu()
| 37.849315 | 120 | 0.583183 |
905fb1174dc9f76a043ce3432db2989539fb3eae | 1,212 | py | Python | surface/ex_surface02.py | orbingol/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 48 | 2017-12-14T09:54:48.000Z | 2020-03-30T13:34:44.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 7 | 2020-05-27T04:27:24.000Z | 2021-05-25T16:11:39.000Z | surface/ex_surface02.py | GabrielJie/NURBS-Python_Examples | c99d8cd3d20e7523694ce62f72760b260582fa11 | [
"MIT"
] | 37 | 2017-10-14T08:11:11.000Z | 2020-05-04T02:51:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by Onur Rauf Bingol (c) 2016-2017
"""
import os
from geomdl import BSpline
from geomdl import utilities
from geomdl import exchange
from geomdl import operations
from geomdl.visualization import VisPlotly
# Run relative to this script's own directory so the .cpt data file resolves
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Create a B-Spline surface instance
surf = BSpline.Surface()
# Degree 3 (cubic) in both parametric directions
surf.degree_u = 3
surf.degree_v = 3
# Control points are read from a 2-dimensional text grid next to this script
surf.set_ctrlpts(*exchange.import_txt("ex_surface02.cpt", two_dimensional=True))
# Generate knot vectors for degree 3 with 6 control points per direction
surf.knotvector_u = utilities.generate_knot_vector(surf.degree_u, 6)
surf.knotvector_v = utilities.generate_knot_vector(surf.degree_v, 6)
# Evaluation step in parameter space (smaller delta = denser evaluated mesh)
surf.delta = 0.025
# Evaluate surface points
surf.evaluate()
# Plot the control point grid and the evaluated surface with the Plotly backend
vis_comp = VisPlotly.VisSurface()
surf.vis = vis_comp
surf.render()
# Evaluate surface tangent and normal at the given (u, v) parameter pair
uv = [0.2, 0.9]
surf_tangent = operations.tangent(surf, uv)
surf_normal = operations.normal(surf, uv)
# Good to have something here to put a breakpoint
pass
| 22.867925 | 80 | 0.763201 |
90600f2b374617aa571df4d29f498ce0b363ef8b | 1,380 | bzl | Python | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 169 | 2020-03-30T09:13:05.000Z | 2022-03-15T11:12:36.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 1,198 | 2020-03-24T17:26:18.000Z | 2022-03-31T08:06:15.000Z | dev/bazel/deps/micromkl.bzl | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 75 | 2020-03-30T11:39:58.000Z | 2022-03-26T05:16:20.000Z | #===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
load("@onedal//dev/bazel:repos.bzl", "repos")
# Prebuilt microMKL dependency: public headers plus the static libraries
# (threaded, sequential and the VML/IPP core); %{os} is substituted with
# the target platform by the repository rule.
micromkl_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
        "%{os}/include",
    ],
    libs = [
        "%{os}/lib/intel64/libdaal_mkl_thread.a",
        "%{os}/lib/intel64/libdaal_mkl_sequential.a",
        "%{os}/lib/intel64/libdaal_vmlipp_core.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkl.tpl.BUILD",
)
# DPC++ (SYCL) flavour of the same dependency: a single static library
# with no per-OS include layout.
micromkl_dpc_repo = repos.prebuilt_libs_repo_rule(
    includes = [
        "include",
    ],
    libs = [
        "lib/intel64/libdaal_sycl.a",
    ],
    build_template = "@onedal//dev/bazel/deps:micromkldpc.tpl.BUILD",
)
| 33.658537 | 80 | 0.603623 |
9061aefc06f55a6c43c18d036ea605173b84260a | 3,580 | py | Python | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/protocols/nsi2/bindings/p2pservices.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | ## Generated by pyxsdgen
from xml.etree import ElementTree as ET
# types
POINT2POINT_NS = 'http://schemas.ogf.org/nsi/2013/12/services/point2point'
p2ps = ET.QName(POINT2POINT_NS, 'p2ps')
capacity = ET.QName(POINT2POINT_NS, 'capacity')
parameter = ET.QName(POINT2POINT_NS, 'parameter')
| 33.773585 | 134 | 0.613966 |
90633c1edf956b4cbfebb1310e68eb561ac6fc3b | 87 | py | Python | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
] | null | null | null | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
] | null | null | null | Scripts/PyLecTest.py | DVecchione/DVEC | 8788310acefe948c1c40b2ecfd781b0af7027993 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np  # imported but not used below
# A single sample point (x, y)
x=20
y=1
# NOTE(review): plotting one scalar point without a marker style draws no
# visible glyph; plt.plot(x, y, 'o') would show the point — confirm intent.
plt.plot(x,y)
plt.show()
| 9.666667 | 31 | 0.724138 |
90667496af942d519fbd83a19bb664048a86c4ea | 3,708 | py | Python | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
] | null | null | null | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
] | null | null | null | examples/nested/mog4_fast.py | ivandebono/nnest | 490b0797312c22a1019f5f400db684b1be5e8fe5 | [
"MIT"
] | null | null | null | import os
import sys
import argparse
import copy
import numpy as np
import scipy.special
sys.path.append(os.getcwd())
def main(args):
from nnest import NestedSampler
g = GaussianMix()
volume_switch = 1.0 / (5 * args.num_slow)
sampler = NestedSampler(args.x_dim, loglike, transform=transform, log_dir=args.log_dir, num_live_points=args.num_live_points,
hidden_dim=args.hidden_dim, num_layers=args.num_layers, num_blocks=args.num_blocks, num_slow=args.num_slow,
use_gpu=args.use_gpu)
sampler.run(train_iters=args.train_iters, mcmc_steps=args.mcmc_steps, volume_switch=volume_switch, noise=args.noise)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--x_dim', type=int, default=5,
help="Dimensionality")
parser.add_argument('--train_iters', type=int, default=2000,
help="number of train iters")
parser.add_argument("--mcmc_steps", type=int, default=0)
parser.add_argument("--num_live_points", type=int, default=1000)
parser.add_argument('--switch', type=float, default=-1)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('-use_gpu', action='store_true')
parser.add_argument('--flow', type=str, default='nvp')
parser.add_argument('--num_blocks', type=int, default=5)
parser.add_argument('--noise', type=float, default=-1)
parser.add_argument('--run_num', type=str, default='')
parser.add_argument('--num_slow', type=int, default=2)
parser.add_argument('--log_dir', type=str, default='logs/mog4_fast')
args = parser.parse_args()
main(args)
| 34.654206 | 135 | 0.618932 |
9066a9157ffc22c0ce94777109f0d24999e2d0dd | 3,060 | py | Python | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
] | 85 | 2020-10-03T22:11:55.000Z | 2022-03-25T12:49:44.000Z | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
] | 13 | 2020-10-05T10:59:34.000Z | 2022-03-26T08:16:24.000Z | sendria/message.py | scottcove/sendria | 26e7581cc8d7673887ac8018d8d32ff4ad23cfbd | [
"MIT"
] | 13 | 2020-10-15T13:32:40.000Z | 2022-03-28T01:46:58.000Z | __all__ = ['Message']
import uuid
from email.header import decode_header as _decode_header
from email.message import Message as EmailMessage
from email.utils import getaddresses
from typing import Union, List, Dict, Any
| 34.382022 | 113 | 0.56732 |
9066b9980c0b3869cc716e1c22a3fe141c968868 | 1,705 | py | Python | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | null | null | null | myApps/test_web.py | Rocket-hodgepodge/NewsWeb | 7835b6ae4e754eb96f3f0d5983b2421c9464fee3 | [
"BSD-3-Clause"
] | 2 | 2018-07-04T01:43:36.000Z | 2018-07-04T06:12:47.000Z | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import unittest
if __name__ == '__main__':
    # Run the unittest test cases defined in this module;
    # warnings='ignore' keeps interpreter warnings out of the test output.
    unittest.main(warnings='ignore')
| 31.574074 | 89 | 0.775367 |
9067bc1c116c9890747e5871781d17c6c8744561 | 30,017 | py | Python | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
] | 7 | 2021-04-22T09:56:54.000Z | 2022-03-20T14:44:02.000Z | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T04:41:44.000Z | 2022-02-22T18:21:23.000Z | nce_glue/run_glue.py | salesforce/ebm_calibration_nlu | e0598923551c4587e0ea8c4feb001cb9cc736103 | [
"BSD-3-Clause"
] | 1 | 2021-06-21T09:06:24.000Z | 2021-06-21T09:06:24.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import dataclasses
import logging
import os, math
import sys, copy
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import BertModel, BertConfig
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
from my_robustness import MyRandomTokenNoise
from my_trainer import MyTrainer
from my_glue_dataset import MyGlueDataset
from my_modeling_roberta import MyRobertaForSequenceClassification, MyRobertaForNCESequenceClassification
from transformers.data.processors.utils import InputFeatures, InputExample
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
from my_utils import setLogger
#import checklist_utils
logger = logging.getLogger()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, CustomArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, my_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args, my_args = parser.parse_args_into_dataclasses()
all_args = (model_args, data_args, training_args, my_args)
#training_args.learning_rate = my_args.my_learning_rate
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
log_fn = training_args.output_dir + '/log_' + ('train_' if training_args.do_train else '') + ('eval_' if training_args.do_eval else '') + ('evalcalibration_' if my_args.do_eval_calibration else '') + '.txt'
print('logger file will be set to', log_fn)
os.system('mkdir -p ' + training_args.output_dir)
setLogger(logger, log_fn)
my_args.log_fn = log_fn
for kk in range(5): logger.info('==hostname %s', os.uname()[1])
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
num_labels = glue_tasks_num_labels[data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'normal':
assert('roberta' in model_args.model_name_or_path.lower())
#model = AutoModelForSequenceClassification.from_pretrained(
model = MyRobertaForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_mode == 'nce_noise':
#nce_model = MyRobertaForSequenceClassification(config)
assert('roberta' in model_args.model_name_or_path.lower())
model = MyRobertaForNCESequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
if my_args.train_from_scratch:
print('=== training from scratch! reinitilize weights')
embed_bak = copy.deepcopy(model.bert.embeddings)
layer_bak = copy.deepcopy(model.bert.encoder.layer)
model.init_weights()
LL = my_args.layer_num
print('=== applying layer_num', LL)
# Initializing a BERT bert-base-uncased style configuration
new_config = BertConfig(num_hidden_layers=LL)
# Initializing a model from the bert-base-uncased style configuration
new_bert = BertModel(new_config)
print('=== using pretrained embedding')
new_bert.embeddings = embed_bak
"""
for l in range(LL):
print('copying encoder layer', l)
new_bert.encoder.layer[l] = layer_bak[l]
"""
model.bert = new_bert
model.config.num_hidden_layers = LL
nce_noise_train_dataset, nce_noise_eval_dataset = None, None
if my_args.train_mode == 'nce_noise' and training_args.do_train:
# Get datasets
nce_noise_train_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_file, mode = 'train', for_noiselm = False, my_args = my_args))
nce_noise_eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, special_mode = 'nce_noise', nce_noise_file = my_args.nce_noise_eval_file, mode = 'dev', for_noiselm = False, my_args = my_args))
# Get datasets
train_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir, my_args = my_args)
)
eval_dataset = (MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, my_args = my_args))
test_dataset = (
MyGlueDataset(data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir, my_args = my_args)
if training_args.do_predict
else None
)
logger.info('constructing datasets (splitting eval_dataset) for calibration...')
dataset_cal_dev1 = copy.deepcopy(eval_dataset)
dataset_cal_dev2 = copy.deepcopy(eval_dataset)
dataset_cal_tr = copy.deepcopy(train_dataset)
cal_num = int(len(eval_dataset) / 2)
dataset_cal_dev1.features = dataset_cal_dev1.features[:cal_num]
dataset_cal_dev2.features = dataset_cal_dev2.features[-cal_num:]
#dataset_cal_tr.features = dataset_cal_tr.features[-cal_num:]
logger.info('setting eval_dataset to dataset_cal_dev2...')
eval_dataset = dataset_cal_dev2
# Initialize our Trainer
trainer = MyTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=build_compute_metrics_fn(data_args.task_name),
tokenizer = tokenizer,
my_args = my_args,
)
print('=== random_noise_rate:', my_args.my_random_noise_rate)
my_noise = MyRandomTokenNoise(tokenizer, my_args.my_random_noise_rate)
input_transform = None
if my_args.my_random_noise_rate > 0:
input_transform = my_noise.add_random_noise
# Training
final_evalres_savefn = None
if training_args.do_train:
#if my_args.train_mode == 'nce_noise':
# trainer.nce_train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform)
#else:
set_seed(training_args.seed) #set seed again before constructing suite, so that it will be the same thing when do_eval
suite = None
#suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
return_d = {}
trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, input_transform = input_transform, train_mode = my_args.train_mode, nce_noise_dataset = nce_noise_train_dataset, nce_noise_ratio = my_args.nce_noise_ratio, nce_noise_bz = my_args.nce_noise_batch_size, nce_mode = my_args.nce_mode, nce_noise_eval_dataset = nce_noise_eval_dataset, return_d = return_d, checklist_suite = suite, all_args = all_args)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
logger.info('===PRINTING EVAL_RES_LIS===')
for eval_res in return_d['eval_res_lis']:
logger.info(str(eval_res))
final_evalres_savefn = training_args.output_dir + '/eval_res_save/final_eval_res.save'
torch.save(return_d['eval_res_lis'], final_evalres_savefn)
logger.info('eval res saved to %s', final_evalres_savefn)
final_eval_results, final_checklist_eval_results = {}, {}
final_nce_eval_results, final_nce_train_results = {}, {}
# evaluation
eval_results = {}
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
logger.info('===SWITCHING to mnli-mm for test')
eval_dataset = GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
"""
logger.info('seed: %d', training_args.seed)
if training_args.do_eval:
logger.info("*** evaluate ***")
set_seed(training_args.seed) #set seed again before eval
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
#""" #we only look at the matched dev-set for mnli (mm is mismatched)
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
if my_args.train_mode == 'nce_noise':
eval_nce_result = trainer.nce_evaluate(nce_noise_eval_dataset)
final_nce_eval_results.update(eval_nce_result)
train_nce_result = trainer.nce_evaluate(nce_noise_train_dataset, max_step = 500)
final_nce_train_results.update(train_nce_result)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
eval_results.update(eval_result)
#final_eval_results['eval_acc'] = eval_result['eval_acc']
final_eval_results.update(eval_result)
if my_args.do_eval_checklist:
logger.info('*** eval checklist***')
set_seed(training_args.seed) #set seed again before eval
suite = checklist_utils.construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
cres = checklist_utils.run_checklist_suite(model, tokenizer, eval_dataset, all_args, given_suite = suite, verbose = True)
final_checklist_eval_results.update(cres)
"""
if data_args.task_name.lower() == 'qqp':
cres = checklist_utils.do_checklist_QQP(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'qnli':
cres = checklist_utils.do_checklist_QNLI(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
if data_args.task_name.lower() == 'sst-2':
cres = checklist_utils.do_checklist_SST2(model, tokenizer, eval_dataset, all_args)
final_checklist_eval_results.update(cres)
"""
"""
for checklist_trans in ['typo', 'typo^2']:
eval_checklist_dataset = MyGlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir, checklist_transform = checklist_trans, my_args = my_args)
eval_result = trainer.evaluate(eval_dataset=eval_checklist_dataset, input_transform = None)
for s in eval_result:
final_checklist_eval_results['checklist_{}_{}'.format(checklist_trans, s)] = eval_result[s]
"""
if my_args.do_eval_noise_robustness:
# loop to handle mnli double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
set_seed(training_args.seed) #set seed again before eval
"""
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
eval_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
)
""" #we only look at the matched dev-set for mnli (mm is mismatched)
for noise_rate in [0.1, 0.2]:
logger.info('*** eval_noise_robustness rate: %f ***', noise_rate)
my_noise = MyRandomTokenNoise(tokenizer, noise_rate)
input_transform = my_noise.add_random_noise
assert(len(eval_datasets) == 1)
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
#prediction_output = trainer.predict(test_dataset=eval_dataset)
eval_result = trainer.evaluate(eval_dataset=eval_dataset, input_transform = input_transform)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
if 'eval_mnli/acc' in eval_result: eval_result['eval_acc'] = eval_result['eval_mnli/acc']
final_eval_results['randomnoise{}_eval_acc'.format(noise_rate)] = eval_result['eval_acc']
import calibration as cal
from my_calibration import TScalCalibrator
if my_args.do_eval_calibration:
logger.info("*** do calbiration ***")
#if data_args.task_name.lower() == 'cola':
#it's cola, let's do evaluate for mcc
#res = trainer.evaluate(eval_dataset = dataset_cal_dev2)
set_seed(training_args.seed) #set seed again before eval
drawcal_res = trainer.eval_calibration(dataset_cal_dev2, verbose = True, fig_fn = training_args.output_dir + '/{}_calibration.pdf'.format(data_args.task_name))
save_fn = training_args.output_dir + '/drawcal.save'
logger.info('saving drawcal_res to %s', save_fn)
torch.save(drawcal_res, save_fn)
cal_res = do_cal(trainer, dataset_cal_dev2, do_postcal = False, ss = 'cal_ori_')
final_eval_results.update(cal_res)
if my_args.do_eval_scaling_binning_calibration:
logger.info('*** do scaling_binning calibration ***')
set_seed(training_args.seed)
cal_res = {}
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_dev1, ss = 'cal_dev_'))
cal_res.update(do_cal(trainer, dataset_cal_dev2, do_postcal = True, do_plattbin = False, do_tscal = True, tr_d = dataset_cal_tr, ss = 'cal_train_'))
logger.info('===scaling_binning_calibration %s', str(cal_res))
final_eval_results.update(cal_res)
if training_args.do_predict:
logging.info("*** Test ***")
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
test_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
)
for test_dataset in test_datasets:
predictions = trainer.predict(test_dataset=test_dataset).predictions
if output_mode == "classification":
predictions = np.argmax(predictions, axis=1)
output_test_file = os.path.join(
training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
item = test_dataset.get_labels()[item]
writer.write("%d\t%s\n" % (index, item))
if my_args.do_energy_analysis:
logger.info('*** do_energy_analysis ***')
eval_dataloader = trainer.get_eval_dataloader(dataset_cal_dev2)
logger.info('loading baseline model...')
if data_args.task_name.lower() == 'sst-2':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/SST-2/LR2e-5BA32MAXSTEP5233WARMSTEP314/')
if data_args.task_name.lower() == 'qnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/QNLI/LR2e-5BA32MAXSTEP8278WARMSTEP496')
if data_args.task_name.lower() == 'mrpc':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MRPC/LR1e-5BA16MAXSTEP2296WARMSTEP137')
if data_args.task_name.lower() == 'mnli':
base_model = MyRobertaForSequenceClassification.from_pretrained('./exps/glue_baseline_roberta-base/MNLI/LR2e-5BA32MAXSTEP30968WARMSTEP1858/')
base_model = base_model.cuda()
lis_energy, lis_logits, lis_logits_base = [], [], []
for step, inputs in enumerate(eval_dataloader):
has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.cuda()
return_d = {}
model.eval(); base_model.eval();
with torch.no_grad():
outputs = base_model(**inputs)
lis_logits_base.append(outputs[1])
inputs['special_mode'] = 'nce_noise'
inputs['nce_mode'] = my_args.nce_mode
inputs['return_d'] = return_d
inputs['nce_feed_type'] = 'data'
inputs['nce_noise_ratio'] = my_args.nce_noise_ratio
outputs = model(**inputs)
lis_energy.append(return_d['nce_logits'])
lis_logits.append(outputs[1])
all_energy = torch.cat(lis_energy, dim = 0).view(-1)
all_probs = torch.softmax(torch.cat(lis_logits, dim = 0), dim = -1)
all_probs_base = torch.softmax(torch.cat(lis_logits_base, dim = 0), dim = -1)
sorted_idx = all_energy.sort(descending = False)[1]
save_fn = training_args.output_dir + '/dev_energy.save'
logger.info('saving all_energy to %s', save_fn)
torch.save({'all_energy': all_energy.cpu(), 'all_probs': all_probs.cpu(), 'all_probs_base': all_probs_base.cpu()}, save_fn)
print('low energy:')
for idx in sorted_idx[:10].tolist():
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:100]))
print('high energy:')
for idx in sorted_idx[-10:].tolist():
if torch.argmax(all_probs_base[idx]).item() != dataset_cal_dev2[idx].label:
print(idx, '\tenergy:', all_energy[idx].item(), 'prediction prob:', all_probs[idx].tolist(), 'prediction prob baseline:', all_probs_base[idx].tolist(), 'label:', dataset_cal_dev2[idx].label, 'text:', tokenizer.decode(dataset_cal_dev2[idx].input_ids[:70]))
logger.info('output_dir: %s', training_args.output_dir)
if my_args.train_mode == 'nce_noise':
logger.info('===FINAL NCE_EVAL RESULT===')
report_str = '[EVAL_DATA] '
for idx in final_nce_eval_results: report_str += idx + ':' + str(final_nce_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
report_str = '[TRAIN_DATA] '
for idx in final_nce_train_results: report_str += idx + ':' + str(final_nce_train_results[idx])[:5] + ', '
logger.info('%s', report_str)
"""
logger.info('===FINAL CHECKLIST_EVAL RESULTS===')
report_str, ll = '', []
for idx in final_checklist_eval_results:
if idx != 'AVG':
report_str += idx + ':' + str(final_checklist_eval_results[idx] * 100)[:5] + '%, '
#ll.append(final_checklist_eval_results[idx])
logger.info('%s AVG: %s', report_str, str(final_checklist_eval_results['AVG'] * 100)[:5] + '%')
"""
logger.info('===FINAL EVAL RESULTS===')
report_str = ''
for idx in final_eval_results: report_str += idx + ':' + str(final_eval_results[idx])[:5] + ', '
logger.info('%s', report_str)
if final_evalres_savefn is not None:
logger.info(final_evalres_savefn)
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 50.279732 | 467 | 0.667922 |
9068b9974dcf2fb879760cc992a13d9cece6f426 | 43 | py | Python | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
] | 15 | 2015-01-18T18:02:16.000Z | 2021-08-02T09:20:35.000Z | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
] | null | null | null | tools/python/myriad/__init__.py | TU-Berlin-DIMA/myriad-toolkit | 5f7610e10b11e05591d6e2dc030c3ca5dc2a90b4 | [
"BSL-1.0"
] | 5 | 2015-08-10T21:50:39.000Z | 2018-03-14T15:31:28.000Z | __all__ = [ "assistant", "event", "error" ] | 43 | 43 | 0.604651 |
9068dd91546f900a5c60936212742aac5fb95fd0 | 577 | py | Python | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
] | null | null | null | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
] | null | null | null | Python/Advanced/Tuples And Sets/Lab/SoftUni Party.py | EduardV777/Softuni-Python-Exercises | 79db667028aea7dfecb3dbbd834c752180c50f44 | [
"Unlicense"
] | null | null | null | guests=int(input())
reservations=set([])
while guests!=0:
reservationCode=input()
reservations.add(reservationCode)
guests-=1
while True:
r=input()
if r!="END":
reservations.discard(r)
else:
print(len(reservations))
VIPS=[]; Regulars=[]
for e in reservations:
if e[0].isnumeric():
VIPS.append(e)
else:
Regulars.append(e)
VIPS.sort(); Regulars.sort()
for k in VIPS:
print(k)
for k in Regulars:
print(k)
break | 22.192308 | 37 | 0.514731 |
9068dfa377a4e3878aa69220570645e9c12f27ec | 404 | py | Python | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-remove_all_lights-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Create a plotter and remove all lights after initialization.
# Note how the mesh rendered is completely flat
#
import pyvista as pv
plotter = pv.Plotter()
plotter.remove_all_lights()
plotter.renderer.lights
# Expected:
## []
_ = plotter.add_mesh(pv.Sphere(), show_edges=True)
plotter.show()
#
# Note how this differs from a plot with default lighting
#
pv.Sphere().plot(show_edges=True, lighting=True)
| 25.25 | 62 | 0.762376 |
906c0d695c5d23512c396e22821fa9b115229101 | 880 | py | Python | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
] | null | null | null | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
] | null | null | null | einsum.py | odiak/einsum | c7c71f8daefcf33b4743cc8dca588577d03bdde6 | [
"MIT"
] | null | null | null | from typing import Dict, Tuple
import numpy as np
| 29.333333 | 87 | 0.494318 |
906c820368e4e2bf91a72f86c8e3c46b23314109 | 4,201 | py | Python | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
] | null | null | null | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
] | 7 | 2017-01-13T19:04:57.000Z | 2017-01-23T14:10:53.000Z | aarhus/get_roots.py | mikedelong/aarhus | 0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import pickle
import sys
import time
import pyzmail
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
reload(sys)
sys.setdefaultencoding("utf8")
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
if __name__ == '__main__':
run()
| 40.394231 | 160 | 0.650321 |
906d8e08da166b6c85abfbc022b056f7f3eb7ea0 | 1,547 | py | Python | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
] | 2 | 2018-06-19T05:43:32.000Z | 2018-06-23T10:04:56.000Z | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
] | null | null | null | src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py | siweilxy/openjdkstudy | 8597674ec1d6809faf55cbee1f45f4e9149d670d | [
"Apache-2.0"
] | null | null | null | #
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import mx
if mx.get_jdk(tag='default').javaCompliance < "1.9":
mx.abort('JAVA_HOME is not a JDK9: ' + mx.get_jdk(tag='default').home)
from mx_graal_9 import mx_post_parse_cmd_line, run_vm, get_vm, isJVMCIEnabled # pylint: disable=unused-import
import mx_graal_bench # pylint: disable=unused-import
| 45.5 | 109 | 0.66128 |
906df45a0cbaf0b269d84eb1b51d8ce436ca4a79 | 4,621 | py | Python | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
] | null | null | null | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
] | null | null | null | linear_regression.py | wail007/ml_playground | 5a8cd1fc57d3ba32a255e665fc3480f58eb9c3c2 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
| 29.062893 | 119 | 0.554425 |
906e0d5d4effa98640d75d6a7be5cc83893d3c38 | 84 | py | Python | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | 2 | 2018-05-24T14:36:59.000Z | 2019-06-29T23:50:08.000Z | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | null | null | null | pygments_lexer_solidity/__init__.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | 1 | 2019-11-11T23:24:17.000Z | 2019-11-11T23:24:17.000Z | from .lexer import SolidityLexer, YulLexer
__all__ = ['SolidityLexer', 'YulLexer']
| 21 | 42 | 0.761905 |
906e5ccc6b995d3e3569837e29fff36deedc118c | 1,174 | py | Python | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | optimal_buy_gdax/history.py | coulterj/optimal-buy-gdax | cdebd2af2cf54bdef34c0ff64a4a731e540bdcdb | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Float, DateTime, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
def get_session(engine):
engine = create_engine(engine)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
| 24.458333 | 63 | 0.721465 |
906f41f56725ceef73c59638d0fd312fa10a88f9 | 6,689 | py | Python | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtkmeshboundaryinspector.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | 1 | 2019-06-18T23:41:11.000Z | 2019-06-18T23:41:11.000Z | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshboundaryinspector.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.3 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vtkvmtk
from vmtk import vmtkrenderer
from vmtk import pypes
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 39.579882 | 132 | 0.697264 |