hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83cfac902258a852b3126b17d68f53a3484fe5c0 | 1,757 | py | Python | code_examples/Python/app_debugger/test_client/test_debugger.py | VPoser/docs-and-training | 55d4aa4c5b964f3c810d58008db4c5b75260322b | [
"Zlib"
] | null | null | null | code_examples/Python/app_debugger/test_client/test_debugger.py | VPoser/docs-and-training | 55d4aa4c5b964f3c810d58008db4c5b75260322b | [
"Zlib"
] | 4 | 2019-05-29T06:16:56.000Z | 2021-03-31T19:03:36.000Z | code_examples/Python/app_debugger/test_client/test_debugger.py | VPoser/docs-and-training | 55d4aa4c5b964f3c810d58008db4c5b75260322b | [
"Zlib"
] | 3 | 2019-04-02T08:48:31.000Z | 2020-09-23T08:13:35.000Z | #!/usr/bin/env python
"""Simple test client to call the debugger SOAP service"""
import os
import sys
import base64
import getpass
from suds.client import Client
from suds.cache import NoCache
from suds import WebFault, MethodNotFound
from clfpy import AuthClient
auth_endpoint = 'https://api.hetcomp.org/authManager/AuthManager?wsdl'
extra_pars = "auth={},WFM=dummy,".format(auth_endpoint)
def soap_call(wsdl_url, methodname, method_args):
    """Invoke a SOAP web method on the service described by *wsdl_url*.

    Mirrors the original contract: suds errors (unknown method or a
    server-side fault) are returned to the caller instead of raised.
    """
    service = Client(wsdl_url, cache=NoCache()).service
    try:
        bound_method = getattr(service, methodname)
    except MethodNotFound as error:
        return error
    try:
        return bound_method(*method_args)
    except WebFault as error:
        return error
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this excerpt -- the entry point
    # appears to have been stripped from this copy; confirm before running.
    main()
| 27.030769 | 135 | 0.682413 |
83cfd9aa79927b2baa0758f343509a236b7d9e4c | 393 | py | Python | bai01/keocatgiay.py | YtalYa/CSx101-A1-2021-02 | 5d95faa483c7a98d8ea75fb3a1720c12e1c1e727 | [
"MIT"
] | null | null | null | bai01/keocatgiay.py | YtalYa/CSx101-A1-2021-02 | 5d95faa483c7a98d8ea75fb3a1720c12e1c1e727 | [
"MIT"
] | null | null | null | bai01/keocatgiay.py | YtalYa/CSx101-A1-2021-02 | 5d95faa483c7a98d8ea75fb3a1720c12e1c1e727 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Read the two integers a and b for one test case from stdin.
a, b = input().split()
a = int(a)
b = int(b)
# First part: 1 + 2 + ... + (a - 1).
first_sum = a * (a - 1) // 2
# Second part: arithmetic series (b - a) + (b - a + 1) + ... + (b - 2),
# which also has a - 1 terms.
first_term = b - a
last_term = b - 2
second_sum = (first_term + last_term) * (a - 1) // 2
print(first_sum + second_sum)
# print(time() - st) | 17.086957 | 43 | 0.508906 |
83d10767f4acd2d7c3295abb41942f7d2223b741 | 6,295 | py | Python | data_manager/acs/gui_ACS_sched_blocks_script_0.py | IftachSadeh/ctaOperatorGUI | f6365a86440dd2404da0bc139cd9345eb3dcb566 | [
"MIT"
] | 3 | 2018-08-28T22:44:23.000Z | 2018-10-24T09:16:34.000Z | data_manager/acs/gui_ACS_sched_blocks_script_0.py | IftachSadeh/ctaOperatorGUI | f6365a86440dd2404da0bc139cd9345eb3dcb566 | [
"MIT"
] | 28 | 2020-04-02T14:48:29.000Z | 2021-05-27T08:10:36.000Z | data_manager/acs/gui_ACS_sched_blocks_script_0.py | IftachSadeh/ctaOperatorGUI | f6365a86440dd2404da0bc139cd9345eb3dcb566 | [
"MIT"
] | null | null | null | # import tcs
# import daqctrl, inspect
# ------------------------------------------------------------------
# install the script by:
# cd $INTROOT/config/scripts
# ln -s $guiInstalDir/ctaOperatorGUI/ctaGuiBack/ctaGuiBack/acs/guiACS_schedBlocks_script0.py
# ------------------------------------------------------------------
# ------------------------------------------------------------------
from random import Random
# Fixed seed so the simulated step durations below are reproducible.
rndGen = Random(10987268332)
# Simulated wait time per scheduling-block step, drawn once at import time.
# NOTE(review): presumably these are seconds -- confirm against the consumers.
waitTime = dict()
waitTime['config_daq'] = rndGen.randint(1, 3)
waitTime['config_camera'] = rndGen.randint(1, 5)
waitTime['config_mount'] = rndGen.randint(2, 7)
waitTime['finish_daq'] = rndGen.randint(1, 6)
waitTime['finish_camera'] = rndGen.randint(1, 3)
waitTime['finish_mount'] = rndGen.randint(1, 2)
# ------------------------------------------------------------------
# Ordered phases of a scheduling-block run; the config_*/finish_* names
# match the waitTime keys above.
# ------------------------------------------------------------------
__phases__ = [
    "configuring",
    "config_daq",
    "config_camera",
    "config_mount",
    "take_data",
    "closing",
    "finish_daq",
    "finish_camera",
    "finish_mount",
]
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# ------------------------------------------------------------------
| 28.2287 | 94 | 0.550278 |
83d111ca41c3bb0510b5e6661f1236eaf7537220 | 576 | py | Python | msort/check/age.py | leighmacdonald/msort | b9182d7e3f01ffdb85229dd6e74ad270c766a2d8 | [
"MIT"
] | 4 | 2015-02-22T04:27:23.000Z | 2021-11-30T14:39:10.000Z | msort/check/age.py | leighmacdonald/msort | b9182d7e3f01ffdb85229dd6e74ad270c766a2d8 | [
"MIT"
] | null | null | null | msort/check/age.py | leighmacdonald/msort | b9182d7e3f01ffdb85229dd6e74ad270c766a2d8 | [
"MIT"
] | null | null | null | """
Module to scan for empty folders and directories
"""
from time import time
from msort.check import BaseCheck, CheckSkip
| 30.315789 | 96 | 0.642361 |
83d13060a3394fdf762b857adb260865c20a7f38 | 143 | py | Python | AtC_Beg_Con_081-090/ABC089/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Beg_Con_081-090/ABC089/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Beg_Con_081-090/ABC089/B.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | n = int(input())
# Tokenise the verdict line; any "Y" means the contest spans four problems.
a = [str(token) for token in input().split()]
if "Y" in a:
    print("Four")
    exit()
print("Three") | 14.3 | 23 | 0.461538 |
83d316b3fd73a29aececfa45fc1d41b8ed48ae12 | 5,141 | py | Python | scripts/component_graph/server/fpm/package_manager.py | winksaville/Fuchsia | a0ec86f1d51ae8d2538ff3404dad46eb302f9b4f | [
"BSD-3-Clause"
] | 3 | 2020-08-02T04:46:18.000Z | 2020-08-07T10:10:53.000Z | scripts/component_graph/server/fpm/package_manager.py | winksaville/Fuchsia | a0ec86f1d51ae8d2538ff3404dad46eb302f9b4f | [
"BSD-3-Clause"
] | null | null | null | scripts/component_graph/server/fpm/package_manager.py | winksaville/Fuchsia | a0ec86f1d51ae8d2538ff3404dad46eb302f9b4f | [
"BSD-3-Clause"
] | 1 | 2020-08-07T10:11:49.000Z | 2020-08-07T10:11:49.000Z | #!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PackageManager provides an interface to the JSON FPM API.
The PackageManager interface provides a simple way to retrieve data from the
package manager. It combines this data with annotated data from the disk
(which would be packages if not packaged in BootFS due to implementation
details). It does minimal parsing on this data and passes it back to the user.
"""
import json
import os
import re
import urllib.request
from far.far_reader import far_read
from server.util.url import strip_package_version, package_to_url
from server.util.logging import get_logger
def read_package(far_buffer):
    """Read a FAR archive and decode the well-known package metadata files.

    meta/contents is split into a {name: value} mapping (one ``name=value``
    pair per line), while meta/package and any .cm/.cmx component manifest
    are parsed from JSON.
    """
    package_files = far_read(far_buffer)
    if "meta/contents" in package_files:
        lines = package_files["meta/contents"].decode().split("\n")
        entries = (line.rsplit("=", maxsplit=1) for line in lines if line)
        package_files["meta/contents"] = {name: value for name, value in entries}
    if "meta/package" in package_files:
        package_files["meta/package"] = json.loads(
            package_files["meta/package"].decode())
    for path in package_files:
        if path.endswith((".cm", ".cmx")):
            package_files[path] = json.loads(package_files[path])
    return package_files
| 40.480315 | 96 | 0.614861 |
83d375aa877a85c2432fbed5fdd969dd8542a727 | 977 | py | Python | D01/main.py | itscassie/advent-of-code-2021 | 731f7b8593e827de7d098f311ab19813f3f1a38d | [
"MIT"
] | null | null | null | D01/main.py | itscassie/advent-of-code-2021 | 731f7b8593e827de7d098f311ab19813f3f1a38d | [
"MIT"
] | null | null | null | D01/main.py | itscassie/advent-of-code-2021 | 731f7b8593e827de7d098f311ab19813f3f1a38d | [
"MIT"
] | null | null | null | """ Solve 2021 Day 1: Sonar Sweep Problem """
def solver_problem1(inputs):
    """Count how many readings are larger than the reading just before them.

    :param inputs: sequence of ints.
    :return: int - number of position-to-position increases.
    """
    return sum(1 for previous, current in zip(inputs, inputs[1:])
               if current > previous)
def solver_problem2(inputs):
    """Count increases between sums of consecutive 3-element sliding windows.

    Consecutive windows share two readings, so window i+1 exceeds window i
    exactly when inputs[i + 3] > inputs[i].

    :param inputs: sequence of ints.
    :return: int - number of window-to-window increases.
    """
    return sum(1 for older, newer in zip(inputs, inputs[3:]) if newer > older)
if __name__ == "__main__":
    # Read one integer depth measurement per line from the puzzle input file,
    # then print the answers to both parts.
    with open("./input/d01.txt", encoding='UTF-8') as file:
        data = [int(line.strip()) for line in file]
    print(solver_problem1(data))
    print(solver_problem2(data))
| 34.892857 | 86 | 0.590583 |
83d3b34c981cd51adb859cdd0943e06deba009df | 928 | py | Python | tests/test_power_converter.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5 | 2019-10-15T15:56:35.000Z | 2021-02-04T10:11:31.000Z | tests/test_power_converter.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 121 | 2020-01-06T14:32:30.000Z | 2021-09-23T11:26:11.000Z | tests/test_power_converter.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2019-10-21T08:36:05.000Z | 2021-03-26T10:37:17.000Z | from smooth.components.component_power_converter import PowerConverter
import oemof.solph as solph
| 33.142857 | 73 | 0.710129 |
83d597052cf5a96babe41243ddbe009226025de6 | 2,094 | py | Python | compecon/demos/demapp06.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | 23 | 2016-12-14T13:21:27.000Z | 2020-08-23T21:04:34.000Z | compecon/demos/demapp06.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | 1 | 2017-09-10T04:48:54.000Z | 2018-03-31T01:36:46.000Z | compecon/demos/demapp06.py | daniel-schaefer/CompEcon-python | d3f66e04a7e02be648fc5a68065806ec7cc6ffd6 | [
"MIT"
] | 13 | 2017-02-25T08:10:38.000Z | 2020-05-15T09:49:16.000Z | from demos.setup import np, plt
from compecon import BasisChebyshev, BasisSpline
from compecon.tools import nodeunif
__author__ = 'Randall'
# DEMAPP06 Chebychev and cubic spline derivative approximation errors
# Function to be approximated
# NOTE(review): `f` is used below but not defined in this excerpt; it is
# expected to return (values, first derivative, second derivative) -- confirm
# against the full source.
# Set degree of approximation and endpoints of approximation interval
a = -1 # left endpoint
b = 1 # right endpoint
n = 10 # order of interpolation
# Construct refined uniform grid for error plotting
x = nodeunif(1001, a, b)
# Compute actual and fitted values on grid
y, d, s = f(x) # actual values, first and second derivatives
# Construct and evaluate Chebychev interpolant
C = BasisChebyshev(n, a, b, f=f) # chose basis functions
yc = C(x) # values
dc = C(x, 1) # first derivative
sc = C(x, 2) # second derivative
# Construct and evaluate cubic spline interpolant
S = BasisSpline(n, a, b, f=f) # chose basis functions
ys = S(x) # values
ds = S(x, 1) # first derivative
ss = S(x, 2) # second derivative
# Plot function approximation error
plt.figure()
plt.subplot(2, 1, 1), # NOTE(review): trailing comma builds a discarded tuple -- harmless but likely unintended
plt.plot(x, y - yc[0])
plt.ylabel('Chebychev')
plt.title('Function Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, y - ys[0])
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot first derivative approximation error
plt.figure()
plt.subplot(2, 1, 1),
plt.plot(x, d - dc[0])
plt.ylabel('Chebychev')
plt.title('First Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, d - ds[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
# Plot second derivative approximation error
plt.figure()
plt.subplot(2, 1, 1),
plt.plot(x, s - sc[0])
plt.ylabel('Chebychev')
plt.title('Second Derivative Approximation Error')
plt.subplot(2, 1, 2)
plt.plot(x, s - ss[0], 'm')
plt.ylabel('Cubic Spline')
plt.xlabel('x')
plt.show()
| 26.506329 | 69 | 0.608883 |
83d5ab6a69ea7c486e04c2f09093c01b18d52c8b | 5,411 | py | Python | v3_inc_mem_dropout_dqn_model.py | kucharzyk-sebastian/aigym_dqn | eef88dafce3f2a1e13ab91a92089ea6a6c359cd6 | [
"MIT"
] | 2 | 2021-03-25T17:55:58.000Z | 2021-07-24T14:43:24.000Z | v3_inc_mem_dropout_dqn_model.py | kucharzyk-sebastian/aigym_dqn | eef88dafce3f2a1e13ab91a92089ea6a6c359cd6 | [
"MIT"
] | null | null | null | v3_inc_mem_dropout_dqn_model.py | kucharzyk-sebastian/aigym_dqn | eef88dafce3f2a1e13ab91a92089ea6a6c359cd6 | [
"MIT"
] | null | null | null | import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
import tensorflow as tf
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').disabled = True
# --- Experiment hyper-parameters ---
NUM_OF_AGENTS = 4            # independently trained models
NUM_OF_EPISODES = 75         # training episodes per model
FRAMES_PER_EPISODE = 1000    # hard cap on environment steps per episode
BATCH_SIZE = 16              # replay-buffer minibatch size
GAME_ID = "LunarLander-v2"
if __name__ == "__main__":
    # NOTE(review): SimpleDqnNpcV3 is referenced below but not defined in this
    # excerpt; presumably it is imported/defined elsewhere -- confirm.
    with tf.device('/device:CPU:0'):
        game = gym.make(GAME_ID)
        num_of_actions = game.action_space.n
        observation_size = game.observation_space.shape[0]
        npc = SimpleDqnNpcV3(observation_size, num_of_actions)
        is_done = False
        avgs = []
        for model in range(NUM_OF_AGENTS):
            scores = []
            for episode in range(NUM_OF_EPISODES):
                score = 0
                current_state = np.reshape(game.reset(), [1, observation_size])
                for frame in range(FRAMES_PER_EPISODE):
                    # game.render()
                    action = npc.act(current_state)
                    new_state, gained_reward, is_done, info = game.step(action)
                    new_state = np.reshape(new_state, [1, observation_size])
                    npc.retain(current_state, action, gained_reward, new_state, is_done)
                    score += gained_reward
                    current_state = new_state
                    # Learn once the replay memory holds at least one minibatch.
                    if len(npc.memory) > BATCH_SIZE:
                        npc.replay(BATCH_SIZE)
                    if is_done:
                        # Bug fix: the format string reused {5} (memory capacity)
                        # for the time field and never consumed the 7th argument;
                        # {6} now prints the frame count as intended.
                        print("episode: {0}/{1}; result: {2}; e: {3} used memory: {4}/{5}; time: {6}"
                              .format(episode, NUM_OF_EPISODES, score, npc._exploration_rate, len(npc.memory), npc.memory.maxlen, frame))
                        break
                scores.append(score)
                if not is_done:
                    print("episode: {0}/{1}; result: {2}; used memory: {3}/{4}; time: {5}"
                          .format(episode, NUM_OF_EPISODES, score, len(npc.memory), npc.memory.maxlen, frame))
            npc.save("evo_dqn_" + str(model) + ".h5")
            avgs.append(sum(scores) / len(scores))
        for i, avg in enumerate(avgs):
            print("Model {} has avarage: {}".format(i, avg))
        print("Overall avg: {}".format(sum(avgs) / len(avgs)))
| 40.684211 | 137 | 0.62798 |
83d7033bcfe2791f10a0c9ef5053fa59b2220a75 | 370 | py | Python | phlcensus/acs/percapitaincome.py | PhiladelphiaController/phlcensus | 8e15d7c993e397bec4cb06a2144e134ec96c48a1 | [
"MIT"
] | null | null | null | phlcensus/acs/percapitaincome.py | PhiladelphiaController/phlcensus | 8e15d7c993e397bec4cb06a2144e134ec96c48a1 | [
"MIT"
] | null | null | null | phlcensus/acs/percapitaincome.py | PhiladelphiaController/phlcensus | 8e15d7c993e397bec4cb06a2144e134ec96c48a1 | [
"MIT"
] | null | null | null | from .core import ACSDataset
import collections
__all__ = ["PerCapitaIncome"]
| 23.125 | 81 | 0.705405 |
83d7cca1abc5dcfe213ee77fb80532cd598c02d8 | 1,642 | py | Python | card-games/lists.py | vietanhtran2710/python-exercism | 1f88dfca56928276ab81a274e8259ce465a2d425 | [
"MIT"
] | null | null | null | card-games/lists.py | vietanhtran2710/python-exercism | 1f88dfca56928276ab81a274e8259ce465a2d425 | [
"MIT"
] | null | null | null | card-games/lists.py | vietanhtran2710/python-exercism | 1f88dfca56928276ab81a274e8259ce465a2d425 | [
"MIT"
] | null | null | null | """
Card games exercise
"""
def get_rounds(number):
    """Return the current round together with the two that follow it.

    :param number: int - current round number.
    :return: list - current round and the two that follow.
    """
    return [number, number + 1, number + 2]
def concatenate_rounds(rounds_1, rounds_2):
    """Join two lists of played rounds into a single list.

    :param rounds_1: list - first rounds played.
    :param rounds_2: list - second set of rounds played.
    :return: list - all rounds played, first set followed by the second.
    """
    return [*rounds_1, *rounds_2]
def list_contains_round(rounds, number):
    """Report whether a given round appears in the played rounds.

    :param rounds: list - rounds played.
    :param number: int - round number.
    :return: bool - was the round played?
    """
    return any(played == number for played in rounds)
def card_average(hand):
    """Compute the mean value of the cards in the hand.

    :param hand: list - cards in hand.
    :return: float - average value of the cards in the hand.
    """
    total = sum(hand)
    return total / len(hand)
def approx_average_is_average(hand):
    """Check whether either quick estimate matches the true average.

    The two estimates are the middle card and the mean of the first and
    last cards.

    :param hand: list - cards in hand.
    :return: bool - if approximate average equals the `true average`.
    """
    true_average = sum(hand) / len(hand)
    median_card = hand[len(hand) // 2]
    first_last_average = (hand[0] + hand[-1]) / 2
    return true_average == median_card or true_average == first_last_average
def average_even_is_average_odd(hand):
    """Compare the averages of even-indexed and odd-indexed cards.

    :param hand: list - cards in hand.
    :return: bool - are even and odd averages equal?
    """
    evens = hand[::2]
    odds = hand[1::2]
    return sum(evens) / len(evens) == sum(odds) / len(odds)
def maybe_double_last(hand):
    """Double the last card in place when it is a Jack (value 11).

    :param hand: list - cards in hand; mutated when the last card is a Jack.
    :return: list - the same hand object, with the Jack doubled if present.
    """
    last_card = hand[-1]
    if last_card == 11:
        hand[-1] = last_card * 2
    return hand
| 20.78481 | 81 | 0.612667 |
83d86b44c36b2efbdda4224e3eee5b832e8c3e4e | 3,109 | py | Python | stock_quantity_history_location/tests/test_stock_quantity_history_location.py | NextERP-Romania/addons_extern | d08f428aeea4cda1890adfd250bc359bda0c33f3 | [
"Apache-2.0"
] | null | null | null | stock_quantity_history_location/tests/test_stock_quantity_history_location.py | NextERP-Romania/addons_extern | d08f428aeea4cda1890adfd250bc359bda0c33f3 | [
"Apache-2.0"
] | null | null | null | stock_quantity_history_location/tests/test_stock_quantity_history_location.py | NextERP-Romania/addons_extern | d08f428aeea4cda1890adfd250bc359bda0c33f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import SavepointCase
| 39.35443 | 85 | 0.587327 |
83d8ec7a846eebb200f4cc5baae5280d08288d60 | 1,463 | py | Python | viterbi_tagging.py | cryingmiso/Natural-Language-Processing | 471c3e69c65cab90fb7c432d2b632801c87f7c8e | [
"MIT"
] | null | null | null | viterbi_tagging.py | cryingmiso/Natural-Language-Processing | 471c3e69c65cab90fb7c432d2b632801c87f7c8e | [
"MIT"
] | null | null | null | viterbi_tagging.py | cryingmiso/Natural-Language-Processing | 471c3e69c65cab90fb7c432d2b632801c87f7c8e | [
"MIT"
] | 1 | 2018-04-22T11:41:10.000Z | 2018-04-22T11:41:10.000Z | # -*- coding:utf-8 -*-
# BMES (Begin/Middle/End/Single) hidden states, the tag set commonly used
# for character-level word-segmentation style tagging.
states = ("B","M","E","S")
test_input = "BBMESBMEBEBESSMEBBME"
observations = [obs for obs in test_input]
# Initial probability of each hidden state.
start_prob = {"B":0.4,"M":0.2,"E":0.2,"S":0.2}
# State-to-state transition probabilities.
transit_prob = {"B": {"B": 0.1, "M": 0.4, "E": 0.4, "S": 0.1},
                "M": {"B": 0.1, "M": 0.4, "E": 0.4, "S": 0.1},
                "E": {"B": 0.4, "M": 0.1, "E": 0.1, "S": 0.4},
                "S": {"B": 0.4, "M": 0.1, "E": 0.1, "S": 0.4}}
# Emission probabilities: P(observed symbol | hidden state).
emission_prob = {'B': {"B": 0.4, "M": 0.2, "E": 0.2, "S": 0.2},
                 "M": {"B": 0.2, "M": 0.4, "E": 0.2, "S": 0.2},
                 "E": {"B": 0.2, "M": 0.2, "E": 0.4, "S": 0.2},
                 "S": {"B": 0.2, "M": 0.2, "E": 0.2, "S": 0.4}}
if __name__=="__main__":
    # NOTE(review): Python 2 print statements; `viterbi` is not defined in
    # this excerpt -- presumably provided elsewhere. Confirm before running.
    print observations
    per,last = viterbi(observations,states,
                       start_prob,transit_prob,emission_prob)
    print last
print per | 29.857143 | 64 | 0.455913 |
83d96b48773397f017510e5831d9b5aab1d08ad6 | 2,534 | py | Python | src/haddock/core/cns_paths.py | sverhoeven/haddock3 | d863106f21ebc128f18c6d73a0d15b97824d050c | [
"Apache-2.0"
] | null | null | null | src/haddock/core/cns_paths.py | sverhoeven/haddock3 | d863106f21ebc128f18c6d73a0d15b97824d050c | [
"Apache-2.0"
] | null | null | null | src/haddock/core/cns_paths.py | sverhoeven/haddock3 | d863106f21ebc128f18c6d73a0d15b97824d050c | [
"Apache-2.0"
] | null | null | null | """
Path to CNS-related files.
Most paths are defined by dictionaries that gather several related
paths. Here, instead of defining the dictionaries with static paths, we
have functions that create those dict-containing paths dynamically. The
default values are defined by:
- axis
- tensors
- translation_vectors
- water_box
But you can re-use the functions to create new dictionaries with updated
paths. This is useful for those cases when the `cns/` folder is moved
to a different folder.
"""
from pathlib import Path
from haddock import toppar_path
# exact file names as present in the cns/ scripts folder
PARAMETERS_FILE = "haddock.param"
TOPOLOGY_FILE = "haddock.top"
LINK_FILE = "protein-allhdg5-4-noter.link"
SCATTER_LIB = "scatter.lib"
INITIAL_POSITIONS_DIR = "initial_positions"
# default prepared paths
parameters_file = Path(toppar_path, PARAMETERS_FILE)
topology_file = Path(toppar_path, TOPOLOGY_FILE)
link_file = Path(toppar_path, LINK_FILE)
scatter_lib = Path(toppar_path, SCATTER_LIB)
def get_translation_vectors(path):
    """
    Generate paths for translation vectors.

    Parameters
    ----------
    path : pathlib.Path
        If absolute, paths will be absolute, if relative paths will be
        relative. Adds the INITIAL_POSITIONS_DIR path before the file
        name.
    """
    names = [f'trans_vector_{index}' for index in range(51)]
    return {name: Path(path, INITIAL_POSITIONS_DIR, name) for name in names}
def get_tensors(path):
    """Generate paths for the tensor topology (.psf) and structure (.pdb) files."""
    stems = ("tensor", "tensor_para", "tensor_dani")
    paths = {}
    for stem in stems:
        for extension in ("psf", "pdb"):
            paths[f"{stem}_{extension}"] = Path(path, f"{stem}.{extension}")
    return paths
def get_axis(path):
    """Generate paths for the axis .pro files."""
    return {
        name: Path(path, f"{name}.pro")
        for name in ("top_axis", "par_axis", "top_axis_dani")
    }
def get_water_box(path):
    """Generate paths for the water box PDB file."""
    water_box_pdb = Path(path, "boxtyp20.pdb")
    return {"boxtyp20": water_box_pdb}
axis = get_axis(toppar_path)
tensors = get_tensors(toppar_path)
translation_vectors = get_translation_vectors(toppar_path)
water_box = get_water_box(toppar_path)
| 27.543478 | 72 | 0.696527 |
83d9e4d213f9057ac120341c7210734a02cf3aa5 | 3,185 | py | Python | src/reanalysis_dbns/utils/__init__.py | azedarach/reanalysis-dbns | 160f405762fb33cfde38b1d3d63cc19e0bb3d591 | [
"MIT"
] | null | null | null | src/reanalysis_dbns/utils/__init__.py | azedarach/reanalysis-dbns | 160f405762fb33cfde38b1d3d63cc19e0bb3d591 | [
"MIT"
] | null | null | null | src/reanalysis_dbns/utils/__init__.py | azedarach/reanalysis-dbns | 160f405762fb33cfde38b1d3d63cc19e0bb3d591 | [
"MIT"
] | null | null | null | """
Provides helper routines for reanalysis DBNs study.
"""
# License: MIT
from __future__ import absolute_import
from .computation import (calc_truncated_svd, downsample_data,
meridional_mean,
pattern_correlation, select_lat_band,
select_latlon_box, select_lon_band,
standardized_anomalies, zonal_mean)
from .defaults import (get_coordinate_standard_name,
get_default_coefficient_name,
get_default_indicator_name, get_lat_name,
get_level_name, get_lon_name, get_time_name)
from .eofs import (eofs, reofs)
from .preprocessing import (construct_lagged_data,
get_offset_variable_name,
remove_polynomial_trend,
standardize_time_series)
from .time_helpers import datetime_to_string
from .validation import (check_array_shape, check_base_period,
check_fixed_missing_values,
check_max_memory, check_max_parents,
check_number_of_chains,
check_number_of_initializations,
check_number_of_iterations,
check_tolerance, check_warmup,
detect_frequency, ensure_data_array,
ensure_variables_in_data,
has_fixed_missing_values,
is_daily_data,
is_dask_array, is_data_array, is_dataset,
is_integer, is_monthly_data, is_pandas_dataframe,
is_pandas_object, is_pandas_series, is_scalar,
is_xarray_object, remove_missing_features,
restore_missing_features)
# Public names re-exported by `from reanalysis_dbns.utils import *`.
# Every entry must be bound by the imports above, otherwise star-imports
# raise AttributeError. Bug fix: 'get_valid_variables' was listed here
# without a corresponding import; it has been removed (re-add it together
# with its import if the helper exists elsewhere in the package).
__all__ = [
    'calc_truncated_svd',
    'check_array_shape',
    'check_fixed_missing_values',
    'check_base_period',
    'check_max_memory',
    'check_max_parents',
    'check_number_of_chains',
    'check_number_of_initializations',
    'check_number_of_iterations',
    'check_tolerance',
    'check_warmup',
    'construct_lagged_data',
    'datetime_to_string',
    'detect_frequency',
    'downsample_data',
    'ensure_data_array',
    'ensure_variables_in_data',
    'eofs',
    'get_coordinate_standard_name',
    'get_default_coefficient_name',
    'get_default_indicator_name',
    'get_lat_name',
    'get_level_name',
    'get_lon_name',
    'get_offset_variable_name',
    'get_time_name',
    'has_fixed_missing_values',
    'is_daily_data',
    'is_dask_array',
    'is_data_array',
    'is_dataset',
    'is_integer',
    'is_monthly_data',
    'is_pandas_dataframe',
    'is_pandas_object',
    'is_pandas_series',
    'is_scalar',
    'is_xarray_object',
    'meridional_mean',
    'pattern_correlation',
    'remove_missing_features',
    'remove_polynomial_trend',
    'restore_missing_features',
    'reofs',
    'select_lat_band',
    'select_latlon_box',
    'select_lon_band',
    'standardized_anomalies',
    'standardize_time_series',
    'zonal_mean'
]
83da20131082094621964e1f90f87f88548deff3 | 121 | py | Python | output/models/ms_data/regex/re_k14_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_k14_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_k14_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.regex.re_k14_xsd.re_k14 import (
Regex,
Doc,
)
__all__ = [
"Regex",
"Doc",
]
| 12.1 | 59 | 0.603306 |
83db648d31e6571eb460e05dda3b0b88c276583d | 2,364 | py | Python | generator/src/googleapis/codegen/utilities/json_expander.py | romulobusatto/google-api-php-client-services | 7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf | [
"Apache-2.0"
] | 709 | 2018-09-13T01:13:59.000Z | 2022-03-31T10:28:41.000Z | generator/src/googleapis/codegen/utilities/json_expander.py | romulobusatto/google-api-php-client-services | 7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf | [
"Apache-2.0"
] | 1,351 | 2018-10-12T23:07:12.000Z | 2022-03-05T09:25:29.000Z | generator/src/googleapis/codegen/utilities/json_expander.py | romulobusatto/google-api-php-client-services | 7f3d938a1e4b364afa633b5ba13a0d3c9bc156bf | [
"Apache-2.0"
] | 307 | 2018-09-04T20:15:31.000Z | 2022-03-31T09:42:39.000Z | #!/usr/bin/python2.7
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for simple JSON templates.
A JSON template is a dictionary of JSON data in which string values
may be simple templates in string.Template format (i.e.,
$dollarSignEscaping). By default, the template is expanded against
its own data, optionally updated with additional context.
"""
import json
from string import Template
import sys
__author__ = 'smulloni@google.com (Jacob Smullyan)'
def ExpandJsonTemplate(json_data, extra_context=None, use_self=True):
  """Recursively template-expand a json dict against itself or other context.

  The context for string expansion is the json dict itself by default, updated
  by extra_context, if supplied.

  Args:
    json_data: (dict) A JSON object where string values may be templates.
    extra_context: (dict) Additional context for template expansion.
    use_self: (bool) Whether to expand the template against itself, or only use
      extra_context.

  Returns:
    A dict where string template values have been expanded against
    the context.
  """
  # Build the expansion context: a shallow copy of the data itself (when
  # use_self is set), overridden by any caller-supplied extra_context.
  if use_self:
    context = dict(json_data)
  else:
    context = {}
  if extra_context:
    context.update(extra_context)
  # NOTE(review): `RecursiveExpand` is not defined in this excerpt -- it is
  # presumably a nested helper that closes over `context` (which is built
  # above but otherwise unused here); confirm against the full source.
  return RecursiveExpand(json_data)
if __name__ == '__main__':
  # Read the JSON template from the file named by the first CLI argument,
  # falling back to stdin, then print the expanded result to stdout.
  if len(sys.argv) > 1:
    json_in = open(sys.argv[1])
  else:
    json_in = sys.stdin
  data = json.load(json_in)
  expanded = ExpandJsonTemplate(data)
  json.dump(expanded, sys.stdout, indent=2)
| 31.52 | 79 | 0.730118 |
83dc4959d0371e253276a653ff679aa4ad785db5 | 10,237 | py | Python | pyeventbus/tests/IO_performance_testing.py | n89nanda/EventBus | d1e35fa1ce9a2cb502404ecc2328c6c59745fce6 | [
"MIT"
] | 24 | 2018-02-02T03:12:05.000Z | 2021-11-11T10:06:22.000Z | pyeventbus/tests/IO_performance_testing.py | n89nanda/EventBus | d1e35fa1ce9a2cb502404ecc2328c6c59745fce6 | [
"MIT"
] | 2 | 2018-08-13T14:08:51.000Z | 2020-02-18T20:11:19.000Z | pyeventbus/tests/IO_performance_testing.py | n89nanda/EventBus | d1e35fa1ce9a2cb502404ecc2328c6c59745fce6 | [
"MIT"
] | 2 | 2020-01-17T12:47:30.000Z | 2020-05-05T14:10:10.000Z | from pyeventbus import *
from timeit import default_timer as timer
import numpy
import sys
from os import getcwd
import json
if __name__ == '__main__':
    # NOTE(review): Python 2 syntax (print statement); PerformanceTester and
    # PerformanceExecuter are not defined in this excerpt -- presumably they
    # come from the `pyeventbus` star-import above. Confirm before running.
    tester = PerformanceTester()
    tester.register(tester)
    executer = PerformanceExecuter()
    executer.register(executer)
    # The first CLI argument selects which I/O-heavy benchmark to run.
    print sys.argv[1:][0]
    arg = sys.argv[1:][0]
    if arg == 'startIOHeavyTestInMain': tester.startIOHeavyTestInMain()
    elif arg == 'startIOHeavyTestInBackground': tester.startIOHeavyTestInBackground()
    elif arg == 'startIOHeavyTestInGreenlet': tester.startIOHeavyTestInGreenlet()
    elif arg == 'startIOHeavyTestInParallel': tester.startIOHeavyTestInParallel()
    elif arg == 'startIOHeavyTestInConcurrent': tester.startIOHeavyTestInConcurrent()
    # tester.startIOHeavyTestInMain()
    # tester.startIOHeavyTestInBackground()
    # tester.startIOHeavyTestInGreenlet()
    # tester.startIOHeavyTestInParallel()
    # tester.startIOHeavyTestInConcurrent()
| 38.197761 | 111 | 0.62069 |
83dcc185970f786453677691d5a450058ad2e7d1 | 511 | py | Python | book/migrations/0006_alter_book_cover_img.py | KhudadadKhawari/the-library | a6acd2e8ce9ca350339d99775f1e7906d343c7d4 | [
"MIT"
] | null | null | null | book/migrations/0006_alter_book_cover_img.py | KhudadadKhawari/the-library | a6acd2e8ce9ca350339d99775f1e7906d343c7d4 | [
"MIT"
] | null | null | null | book/migrations/0006_alter_book_cover_img.py | KhudadadKhawari/the-library | a6acd2e8ce9ca350339d99775f1e7906d343c7d4 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-15 09:04
from django.db import migrations, models
import django.utils.timezone
| 24.333333 | 109 | 0.639922 |
83dd12a100f10a5e78beecf49e3037dfe7dab6b8 | 79 | py | Python | hrsxrate.py | fabiovitoriano7/pythoncourse | cceb9b727abd15c4a63f08b5678b224011441997 | [
"MIT"
] | null | null | null | hrsxrate.py | fabiovitoriano7/pythoncourse | cceb9b727abd15c4a63f08b5678b224011441997 | [
"MIT"
] | null | null | null | hrsxrate.py | fabiovitoriano7/pythoncourse | cceb9b727abd15c4a63f08b5678b224011441997 | [
"MIT"
] | null | null | null | hrs = input("Enter Hours:")
rate = 2.75
# Bug fix: input() returns a string, so the hours must be converted to a
# number before multiplying. The original expression `float(rate) * hrs`
# multiplied a float by the raw string and raised
# "TypeError: can't multiply sequence by non-int of type 'float'".
print("Pay: " + str(float(hrs) * rate))
| 19.75 | 40 | 0.607595 |
83dda5970adb161d516652e3bbdec232d2bc568b | 34,005 | py | Python | main5.py | LinXueyuanStdio/MyTransE | 971901757aba6af22fc2791b5bb32028390b9625 | [
"Apache-2.0"
] | null | null | null | main5.py | LinXueyuanStdio/MyTransE | 971901757aba6af22fc2791b5bb32028390b9625 | [
"Apache-2.0"
] | null | null | null | main5.py | LinXueyuanStdio/MyTransE | 971901757aba6af22fc2791b5bb32028390b9625 | [
"Apache-2.0"
] | 1 | 2020-10-11T02:22:33.000Z | 2020-10-11T02:22:33.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _thread
import sys
import time
from math import exp
from random import random
from typing import List, Tuple, Set
from scipy import spatial
import numpy as np
import torch
from torch import nn
from torch.optim import optimizer
from torch.utils import tensorboard
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataloader import BidirectionalOneShotIterator
from dataloader import TrainDataset
from dataloader import TestDataset
import tensorflow as tf
import tensorboard as tb
import logging
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
torch.random.manual_seed(123456)
# region model
# endregion
# region
def get_logger(filename):
    """Configure console + file logging and return the shared 'logger' logger.

    basicConfig gives bare-message console output on the root logger, while
    *filename* receives timestamped records through a FileHandler attached
    to the root logger.  The returned logger is the one named 'logger'.
    """
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(file_handler)
    named_logger = logging.getLogger('logger')
    named_logger.setLevel(logging.INFO)
    return named_logger
logger = get_logger("./train.log")
# endregion
# region
# endregion
# region
def get_hits2(self, Lvec, Rvec, top_k=(1, 10, 50, 100)):
    """Compute Hits@k using the cityblock (L1) pairwise distance.

    Builds the (m, m) distance matrix between the rows of Lvec and Rvec
    with scipy's cdist and delegates the ranking to ``get_hits``.
    """
    sim = spatial.distance.cdist(Lvec, Rvec, metric='cityblock')
    return self.get_hits(Lvec, Rvec, sim, top_k)
def get_hits(self, Lvec, Rvec, sim, top_k=(1, 10, 50, 100)):
    """Compute bidirectional Hits@k for entity alignment.

    Lvec and Rvec are the (m, d) embedding matrices of the test seed
    pairs; row i of Lvec and row i of Rvec are a ground-truth aligned
    pair.  sim is the precomputed (m, m) distance matrix where
    sim[i, j] is the distance between Lvec row i and Rvec row j.
    Returns {"left": [...], "right": [...]} with (k, hit-rate %) tuples.
    """
    # Lvec (m, d), Rvec (m, d)
    # Left -> right direction: rank every Rvec candidate for each Lvec row.
    # sim = distance(Lvec, Rvec), shape (m, m)
    # sim[i, j] is the distance between Lvec row i and Rvec row j.
    top_lr = [0] * len(top_k)
    for i in range(Lvec.shape[0]):  # iterate over KG1 entities
        rank = sim[i, :].argsort()
        # sim[i, :] holds the distances from Lvec row i to every Rvec row.
        # argsort gives candidate indices ordered by increasing distance,
        # e.g. argsort([6, 3, 5]) -> [1, 2, 0] (positions of sorted values).
        rank_index = np.where(rank == i)[0][0]
        # np.where(rank == i) is equivalent to list(rank).index(i): the
        # position of the true counterpart i within the ranked candidates.
        for j in range(len(top_k)):
            if rank_index < top_k[j]:  # rank_index is 0-based, hence '<'
                top_lr[j] += 1
    # Right -> left direction: same procedure using the columns of sim.
    top_rl = [0] * len(top_k)
    for i in range(Rvec.shape[0]):
        rank = sim[:, i].argsort()
        rank_index = np.where(rank == i)[0][0]
        for j in range(len(top_k)):
            if rank_index < top_k[j]:
                top_rl[j] += 1
    logger.info('For each left:')
    left = []
    for i in range(len(top_lr)):
        hits = top_k[i]
        hits_value = top_lr[i] / len(self.test_seeds) * 100
        left.append((hits, hits_value))
        logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
    logger.info('For each right:')
    right = []
    for i in range(len(top_rl)):
        hits = top_k[i]
        hits_value = top_rl[i] / len(self.test_seeds) * 100
        right.append((hits, hits_value))
        logger.info('Hits@%d: %.2f%%' % (hits, hits_value))
    return {
        "left": left,
        "right": right,
    }
# endregion
# region
# Keys used inside the saved checkpoint dictionary.
_MODEL_STATE_DICT = "model_state_dict"
_OPTIMIZER_STATE_DICT = "optimizer_state_dict"
_EPOCH = "epoch"
_STEP = "step"
_BEST_SCORE = "best_score"
_LOSS = "loss"


def load_checkpoint(model: nn.Module, optim: optimizer.Optimizer,
                    checkpoint_path="./result/fr_en/checkpoint.tar") -> Tuple[int, int, float, float]:
    """Restore model and optimizer state from a training checkpoint.

    :param model: model whose weights are overwritten from the checkpoint
    :param optim: optimizer whose state is overwritten from the checkpoint
    :param checkpoint_path: path of the checkpoint file to read
    :return: tuple of (next epoch id, next step id, best score, last loss)
    """
    state = torch.load(checkpoint_path)
    model.load_state_dict(state[_MODEL_STATE_DICT])
    optim.load_state_dict(state[_OPTIMIZER_STATE_DICT])
    # Training resumes from the epoch/step *after* the ones that were saved.
    return (state[_EPOCH] + 1,
            state[_STEP] + 1,
            state[_BEST_SCORE],
            state[_LOSS])
# endregion
# region
# endregion
# train_model_for_fr_en()
# train_model_for_ja_en()
train_model_for_zh_en()
| 37.327113 | 118 | 0.589002 |
83deb844d22e41b2c14e852a19602c5b2980d2b2 | 25,395 | py | Python | cogs/profiles.py | Greenfoot5/BattleBot | f4318124bb85786c3d0ff562132121c382445c36 | [
"MIT"
] | 2 | 2020-01-13T22:58:22.000Z | 2020-02-19T16:47:17.000Z | cogs/profiles.py | Greenfoot5/BattleBot | f4318124bb85786c3d0ff562132121c382445c36 | [
"MIT"
] | 29 | 2020-01-13T23:30:03.000Z | 2020-06-26T18:08:01.000Z | cogs/profiles.py | Greenfoot5/BattleBot | f4318124bb85786c3d0ff562132121c382445c36 | [
"MIT"
] | 2 | 2020-01-15T00:20:10.000Z | 2020-02-18T00:02:55.000Z | import discord
import time
import random
import datetime
import asyncio
import json
import config
from discord.ext import commands
from data.data_handler import data_handler
from itertools import chain
from collections import OrderedDict
# Function to get a user's rank and remaining rp to next rank.
# Takes current rp as parameter
# Function to get profile pages (1 - 3)
# get reaction with number + vice versa
def get_reaction(number, reaction=None):
    """Translate between a number (1-10) and its keycap-emoji reaction.

    With ``reaction=None`` the keycap emoji for ``number`` is returned,
    or 0 when ``number`` is outside 1-10.  Otherwise the numeric value of
    ``reaction`` is returned; an unknown emoji raises ValueError (same as
    the original positional list lookup).
    """
    keycaps = {n: "{}\u20e3".format(n) for n in range(1, 11)}
    if reaction is None:
        return keycaps.get(number, 0)
    # Reverse lookup: the emoji's position in insertion order is its key's
    # position, so index into the ordered key list.
    ordered_emoji = list(keycaps.values())
    return list(keycaps)[ordered_emoji.index(reaction)]
# async handling of user reactions
def setup(bot):
    """discord.py extension entry point: register the Profiles cog on *bot*."""
    bot.add_cog(Profiles(bot))
| 42.680672 | 233 | 0.549478 |
83df200991f24e112dfb55e0124bf7a8c642cf9c | 7,985 | py | Python | blender/.blender/scripts/uvcalc_follow_active_coords.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 41 | 2021-02-18T05:56:26.000Z | 2021-12-06T07:58:15.000Z | blender/.blender/scripts/uvcalc_follow_active_coords.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 19 | 2021-02-18T05:59:03.000Z | 2022-01-13T01:00:52.000Z | blender/.blender/scripts/uvcalc_follow_active_coords.py | visnz/sketchfab_download | 976f667d5c2c2864b2bad65aceac0dab5ce51b74 | [
"Apache-2.0"
] | 18 | 2021-02-22T13:32:56.000Z | 2022-01-22T12:38:29.000Z | #!BPY
"""
Name: 'Follow Active (quads)'
Blender: 242
Group: 'UVCalculation'
Tooltip: 'Follow from active quads.'
"""
__author__ = "Campbell Barton"
__url__ = ("blender", "blenderartists.org")
__version__ = "1.0 2006/02/07"
__bpydoc__ = """\
This script sets the UV mapping and image of selected faces from adjacent unselected faces.
for full docs see...
http://mediawiki.blender.org/index.php/Scripts/Manual/UV_Calculate/Follow_active_quads
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Campbell J Barton
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
from Blender import *
import bpy
import BPyMesh
if __name__ == '__main__':
main()
| 31.313725 | 195 | 0.707201 |
83df6ece272b6dd9b07c901d59a3ab3e529c228e | 1,196 | py | Python | bloom/editor/ror_constants.py | thomasrogers03/bloom | 5d49c18a241216aca354aa79971940691e6f33b4 | [
"Apache-2.0"
] | 9 | 2020-11-22T03:04:52.000Z | 2022-01-17T15:36:25.000Z | bloom/editor/ror_constants.py | thomasrogers03/bloom | 5d49c18a241216aca354aa79971940691e6f33b4 | [
"Apache-2.0"
] | null | null | null | bloom/editor/ror_constants.py | thomasrogers03/bloom | 5d49c18a241216aca354aa79971940691e6f33b4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Thomas Rogers
# SPDX-License-Identifier: Apache-2.0
# Sector tag values for linked upper/lower sector pairs (presumably Blood's
# "room over room" mechanic — TODO confirm against the game's sector docs).
LOWER_LINK_TAG = 6
UPPER_LINK_TAG = 7
UPPER_WATER_TAG = 9
LOWER_WATER_TAG = 10
UPPER_STACK_TAG = 11
LOWER_STACK_TAG = 12
UPPER_GOO_TAG = 13
LOWER_GOO_TAG = 14
# All tag values that mark the lower / upper half of a linked pair.
LOWER_LINK_TYPES = {LOWER_LINK_TAG, LOWER_WATER_TAG, LOWER_STACK_TAG, LOWER_GOO_TAG}
UPPER_LINK_TYPES = {UPPER_LINK_TAG, UPPER_WATER_TAG, UPPER_STACK_TAG, UPPER_GOO_TAG}
# Human-readable link-type names used as dictionary keys below.
ROR_TYPE_LINK = "Link"
ROR_TYPE_STACK = "Stack"
ROR_TYPE_WATER = "Water"
ROR_TYPE_GOO = "Goo"
# Link-type name -> tag for the upper sector of the pair.
UPPER_TAG_MAPPING = {
    ROR_TYPE_LINK: UPPER_LINK_TAG,
    ROR_TYPE_STACK: UPPER_STACK_TAG,
    ROR_TYPE_WATER: UPPER_WATER_TAG,
    ROR_TYPE_GOO: UPPER_GOO_TAG,
}
# Upper tag -> link-type name (inverse of UPPER_TAG_MAPPING).
UPPER_TAG_REVERSE_MAPPING = {
    UPPER_LINK_TAG: ROR_TYPE_LINK,
    UPPER_STACK_TAG: ROR_TYPE_STACK,
    UPPER_WATER_TAG: ROR_TYPE_WATER,
    UPPER_GOO_TAG: ROR_TYPE_GOO,
}
# Link-type name -> tag for the lower sector of the pair.
LOWER_TAG_MAPPING = {
    ROR_TYPE_LINK: LOWER_LINK_TAG,
    ROR_TYPE_STACK: LOWER_STACK_TAG,
    ROR_TYPE_WATER: LOWER_WATER_TAG,
    ROR_TYPE_GOO: LOWER_GOO_TAG,
}
# Link-type name -> tile (picnum) used when rendering the marker sprite.
ROR_TILE_MAPPING = {
    ROR_TYPE_LINK: 504,
    ROR_TYPE_STACK: 504,
    ROR_TYPE_WATER: 2915,
    ROR_TYPE_GOO: 1120,
}
# Link types that behave as a liquid surface.
ROR_TYPES_WITH_WATER = {
    ROR_TYPE_WATER,
    ROR_TYPE_GOO,
}
| 21.357143 | 84 | 0.76505 |
83e3a8eb149951bf1ec4846a449c1ac8b36faf3a | 6,107 | py | Python | tests/validation/tests/v3_api/test_sbx_custom_filter.py | sambabox/rancher | ccb6b40e5c8bb183dbe20f5a099513eb623ed806 | [
"Apache-2.0"
] | null | null | null | tests/validation/tests/v3_api/test_sbx_custom_filter.py | sambabox/rancher | ccb6b40e5c8bb183dbe20f5a099513eb623ed806 | [
"Apache-2.0"
] | null | null | null | tests/validation/tests/v3_api/test_sbx_custom_filter.py | sambabox/rancher | ccb6b40e5c8bb183dbe20f5a099513eb623ed806 | [
"Apache-2.0"
] | null | null | null | from .common import * # NOQA
import requests
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
'''
Prerequisite:
Enable SBX without TLS, and using testuser1 as admin user.
Description:
In this test, we are testing the customized user and group search filter
functionalities.
1) For customized user search filter:
The filter looks like:
(&(objectClass=person)(|(sAMAccountName=test*)(sn=test*)(givenName=test*))
[user customized filter])
Here, after we add
userSearchFilter = (memberOf=CN=testgroup5,CN=Users,DC=tad,DC=rancher,DC=io)
we will filter out only testuser40 and testuser41, otherwise, all users start
with search keyword "testuser" will be listed out.
2) For customized group search filter:
The filter looks like:
(&(objectClass=group)(sAMAccountName=test)[group customized filter])
Here, after we add groupSearchFilter = (cn=testgroup2)
we will filter out only testgroup2, otherwise, all groups has search
keyword "testgroup" will be listed out.
'''
# Config Fields
# Connection/credential settings for the auth-provider tests, all read from
# the environment so the same module can target different Rancher setups.
HOSTNAME_OR_IP_ADDRESS = os.environ.get("RANCHER_HOSTNAME_OR_IP_ADDRESS")
PORT = os.environ.get("RANCHER_PORT")
CONNECTION_TIMEOUT = os.environ.get("RANCHER_CONNECTION_TIMEOUT")
SERVICE_ACCOUNT_NAME = os.environ.get("RANCHER_SERVICE_ACCOUNT_NAME")
SERVICE_ACCOUNT_PASSWORD = os.environ.get("RANCHER_SERVICE_ACCOUNT_PASSWORD")
DEFAULT_LOGIN_DOMAIN = os.environ.get("RANCHER_DEFAULT_LOGIN_DOMAIN")
USER_SEARCH_BASE = os.environ.get("RANCHER_USER_SEARCH_BASE")
GROUP_SEARCH_BASE = os.environ.get("RANCHER_GROUP_SEARCH_BASE")
PASSWORD = os.environ.get('RANCHER_USER_PASSWORD', "")
# Rancher API endpoints derived from the auth provider name: login,
# provider config, principal search, and enable/disable actions.
CATTLE_AUTH_URL = \
    CATTLE_TEST_URL + \
    "/v3-public/"+AUTH_PROVIDER+"Providers/" + \
    AUTH_PROVIDER.lower()+"?action=login"
CATTLE_AUTH_PROVIDER_URL = \
    CATTLE_TEST_URL + "/v3/"+AUTH_PROVIDER+"Configs/"+AUTH_PROVIDER.lower()
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
CATTLE_AUTH_ENABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=testAndApply"
CATTLE_AUTH_DISABLE_URL = CATTLE_AUTH_PROVIDER_URL + "?action=disable"
| 37.012121 | 77 | 0.667267 |
83e3b262a987de45abbd2e106414db47c397b8e3 | 7,738 | py | Python | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/DynamicsMetaBlock.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 58 | 2015-04-22T10:41:03.000Z | 2022-03-29T16:04:34.000Z | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/DynamicsMetaBlock.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 12 | 2015-08-26T03:57:23.000Z | 2020-12-11T20:14:42.000Z | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/DynamicsMetaBlock.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 35 | 2015-01-10T12:21:03.000Z | 2020-09-09T08:18:16.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CoreIdentifiedObject import CoreIdentifiedObject
| 33.938596 | 222 | 0.674334 |
83e3deec67e89aa7e42ab0f38a20a3246b563ad9 | 1,551 | py | Python | official/cv/ADNet/export_model.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/ADNet/export_model.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/ADNet/export_model.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import numpy as np
from src.options.general import opts
from src.models.ADNet import adnet
from mindspore import Tensor, export, context
# Parse CLI options, build the ADNet network with the given checkpoint and
# export it to a MindSpore MINDIR file.
parser = argparse.ArgumentParser(
    description='ADNet test')
parser.add_argument('--weight_file', default='', type=str, help='The pretrained weight file')
parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--target_device', type=int, default=0)
args = parser.parse_args()
# Run in PyNative mode on the selected device.
context.set_context(device_target=args.device_target, mode=context.PYNATIVE_MODE, device_id=args.target_device)
opts['num_videos'] = 1
net, domain_specific_nets = adnet(opts, trained_file=args.weight_file)
# Dummy input batch (128 x 3 x 112 x 112, float32) used to trace the graph.
input_ = np.random.uniform(0.0, 1.0, size=[128, 3, 112, 112]).astype(np.float32)
export(net, Tensor(input_), file_name='ADNet', file_format='MINDIR')
print('export finished')
| 43.083333 | 111 | 0.728562 |
83e465c1f4e10369e60b79f24679537b6a23af68 | 189 | py | Python | pyradex/tests/setup_package_data.py | SpacialTree/pyradex | 722f9fdc45ff080cdcb151e37aa7075fab548f68 | [
"BSD-3-Clause"
] | 12 | 2016-01-26T13:39:56.000Z | 2021-09-01T07:38:04.000Z | pyradex/tests/setup_package_data.py | SpacialTree/pyradex | 722f9fdc45ff080cdcb151e37aa7075fab548f68 | [
"BSD-3-Clause"
] | 27 | 2015-05-29T16:01:31.000Z | 2022-01-31T23:41:36.000Z | pyradex/tests/setup_package_data.py | SpacialTree/pyradex | 722f9fdc45ff080cdcb151e37aa7075fab548f68 | [
"BSD-3-Clause"
] | 13 | 2015-01-13T10:40:50.000Z | 2022-01-25T22:24:46.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
| 27 | 63 | 0.693122 |
83e5340e1845145c339f0d7b935ed161bcb52088 | 566 | py | Python | ipaqe_provision_hosts/backend/loader.py | apophys/idm-prepare-hosts | 8075600cab44a1b0c4dbe6fe14a8235725eb06d1 | [
"MIT"
] | 1 | 2017-04-04T14:35:57.000Z | 2017-04-04T14:35:57.000Z | ipaqe_provision_hosts/backend/loader.py | apophys/idm-prepare-hosts | 8075600cab44a1b0c4dbe6fe14a8235725eb06d1 | [
"MIT"
] | null | null | null | ipaqe_provision_hosts/backend/loader.py | apophys/idm-prepare-hosts | 8075600cab44a1b0c4dbe6fe14a8235725eb06d1 | [
"MIT"
] | null | null | null | # Author: Milan Kubik, 2017
"""Backend entry point manipulation"""
import logging
from pkg_resources import iter_entry_points
# setuptools entry-point group under which backends register themselves.
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)


def load_backends(exclude=()):
    """Discover and load every backend registered under RESOURCE_GROUP.

    Entry points whose name appears in *exclude* are skipped.  Returns a
    mapping of entry-point name to the loaded object.
    """
    log.debug("Loading entry points from %s.", RESOURCE_GROUP)
    loaded = {}
    for entry_point in iter_entry_points(RESOURCE_GROUP):
        if entry_point.name in exclude:
            continue
        loaded[entry_point.name] = entry_point.load()
    log.debug("Loaded entry points: %s", loaded.keys())
    return loaded
| 25.727273 | 70 | 0.717314 |
83e5b68657474d465d5e1fcc4797976830c9d62f | 100 | py | Python | cfpland_bot/exceptions/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | 3 | 2019-04-23T14:16:11.000Z | 2019-04-24T06:21:10.000Z | cfpland_bot/exceptions/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | 2 | 2020-07-17T14:53:16.000Z | 2021-05-09T21:42:43.000Z | cfpland_bot/exceptions/__init__.py | jonatasbaldin/cfpland-telegram-bot | fdd846240705ff6ce7705413336f6d7169a2e7fc | [
"MIT"
] | null | null | null | from .exceptions import ( # noqa: F401
MissingCFPAttributes,
MissingEnvironmentVariable,
)
| 20 | 39 | 0.74 |
83e738fd60db75ae5d34cea420004504804a6032 | 8,309 | py | Python | main_tmp.py | tiffanydho/chip2probe | 2c7e00796e048d39ad4da85b90bf76d021c6be1c | [
"MIT"
] | null | null | null | main_tmp.py | tiffanydho/chip2probe | 2c7e00796e048d39ad4da85b90bf76d021c6be1c | [
"MIT"
] | null | null | null | main_tmp.py | tiffanydho/chip2probe | 2c7e00796e048d39ad4da85b90bf76d021c6be1c | [
"MIT"
] | null | null | null | import urllib.request
import os
import subprocess
import pandas as pd
from tqdm import tqdm
import sys
sys.path.append("probefilter")
sys.path.append("probefilter/libsvm-3.23/python")
from sitesfinder.imads import iMADS
from sitesfinder.imadsmodel import iMADSModel
from sitesfinder.plotcombiner import PlotCombiner
from sitesfinder.pbmescore import PBMEscore
from sitesfinder.sequence import Sequence
from sitesfinder.prediction.basepred import BasePrediction
from cooperative import coopfilter
'''
Summarize
lab-archive -> note the result
information about the data in the plot
'''
# Experiment name; also used to build the output directory below.
chipname = "ets1_GM12878"
# ENCODE bam files: r1/r2 are the ChIP replicates, c1/c2 the controls
# (c2 is intentionally empty for this experiment).
chipurls = {
    "r1":"https://www.encodeproject.org/files/ENCFF477EHC/@@download/ENCFF477EHC.bam",
    "r2":"https://www.encodeproject.org/files/ENCFF371ZBY/@@download/ENCFF371ZBY.bam",
    "c1":"https://www.encodeproject.org/files/ENCFF963CVB/@@download/ENCFF963CVB.bam",
    "c2":""
}
# Read length passed to MACS2.
tagsize = 36
#bedpath = "/data/gordanlab/vincentius/cooperative_probe/hg19_0005_Ets1.bed"
bedpath = "/Users/vincentiusmartin/Research/chip2gcPBM/resources/imads_preds/predictions/hg19_0005_Ets1_filtered.bed"
# Analysis directory
# PBM E-score table and the short->long index mapping used by PBMEscore.
escore_short_path = "/Users/vincentiusmartin/Research/chip2gcPBM/resources/escores/ets1_escores.txt"
escore_map_path = "/Users/vincentiusmartin/Research/chip2gcPBM/resources/escores/index_short_to_long.csv"
# for iMADS, must specify cores and model files
modelcores = ["GGAA", "GGAT"]
modelpaths = ["/Users/vincentiusmartin/Research/chip2gcPBM/resources/imads_preds/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAA_1a2a3mer_format.model",
              "/Users/vincentiusmartin/Research/chip2gcPBM/resources/imads_preds/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAT_1a2a3mer_format.model"]
modelwidth = 20 # TODO: confirm if we can get length without manually specifying it
imads_cutoff = 0.2128
model_kmers = [1,2,3]
escore_cutoff = 0.4
# ============================
outdir = "../result/%s" % chipname
# From https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads
if __name__=="__main__":
    # Pipeline driver: (1) resolve/download the ChIP-seq bams, (2) call
    # peaks with MACS2, (3) reproducibility filtering with IDR, (4) run the
    # R analysis, (5) plot binding sites and design mutated probe sequences.
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    chipdata_path = "%s/chipseq_data" % (outdir)
    if not os.path.exists(chipdata_path):
        os.makedirs(chipdata_path)
    chipdata = {}  # replicate/control key -> local path of the bam file
    chip_info = "ChIP-seq data for %s:\n" % chipname
    # ===== Download ChIP-seq data =====
    # Resolve the local target path for every file and log the mapping;
    # the actual download call is currently commented out.
    for key in chipurls:
        fname = os.path.basename(chipurls[key])
        saveto = os.path.join(chipdata_path, fname)
        chipdata[key] = saveto
        chip_info += "%s: %s\n" % (key,fname)
        print("Downloading %s to %s:" % (key,saveto))
        #download_url(chipurls[key], saveto)
    # Record which files belong to this run.
    with open("%s/chipinfo.txt" % (outdir), 'w') as f:
        f.write(chip_info)
    macs_result_path = "%s/macs_result" % (outdir)
    if not os.path.exists(macs_result_path):
        os.makedirs(macs_result_path)
    print("Running macs...")
    # Peak calling on both replicates + controls via the helper shell script.
    subprocess.call(["./macs2.sh",chipdata["r1"],chipdata["r2"],chipdata["c1"],chipdata["c2"],"%s/%s" % (macs_result_path,chipname), str(tagsize)],shell=False)
    print("Finished running macs, results are saved in %s" % macs_result_path)
    idr_result_path = "%s/idr_result" % (outdir)
    if not os.path.exists(idr_result_path):
        os.makedirs(idr_result_path)
    print("Running idrs...")
    subprocess.call(["./idr.sh","%s/%s" % (macs_result_path,chipname),idr_result_path],shell=False)
    analysis_result_path = "%s/analysis_result" % (outdir)
    if not os.path.exists(analysis_result_path):
        os.makedirs(analysis_result_path)
    print("Running analysis...")
    # Paths produced by the MACS/IDR steps, handed to the R analysis script.
    pwd = os.path.dirname(os.path.realpath(__file__))
    pu1_path = "%s/%s%s" % (macs_result_path,chipname,"_r1_treat_pileup.bdg")
    pu2_path = "%s/%s%s" % (macs_result_path,chipname,"_r2_treat_pileup.bdg")
    pu_both_path = "%s/%s%s" % (macs_result_path,chipname,"_bothrs_treat_pileup.bdg")
    nrwp_preidr_path = "%s/%s%s" % (macs_result_path,chipname,"_bothrs_peaks.narrowPeak")
    nrwp_postidr_path = "%s/%s" % (idr_result_path,"idr_001p_wlist.005i")
    args_rscript = [pu1_path, pu2_path, pu_both_path, nrwp_preidr_path, nrwp_postidr_path, bedpath, analysis_result_path, chipname]
    #print(["R_analysis/main.R",pwd] + args_rscript)
    #subprocess.call(["srun","Rscript","R_analysis/main.R",pwd] + args_rscript,shell=False)
    subprocess.call(["Rscript","R_analysis/main.R",pwd] + args_rscript,shell=False)
    # ============== PLOT AND FILTERING PART ==============
    # First, we can just load the models to avoid having to reload this on every iteration
    models = [iMADSModel(modelpath, modelcore, modelwidth, model_kmers) for modelpath, modelcore in zip(modelpaths, modelcores)]
    imads = iMADS(models, imads_cutoff) # 0.2128 is for the ETS1 cutoff
    escore = PBMEscore(escore_short_path, escore_map_path)
    sitelist_path = "%s/%s" % (analysis_result_path, "sitefiles_list.txt")
    with open(sitelist_path, 'r') as f:
        sitelist = [line.strip() for line in f.readlines()]
    for sitepath in sitelist:
        print(sitepath)
        filename = os.path.basename(os.path.splitext(sitepath)[0])
        print("Making sites plot for %s" % filename)
        seqdf = pd.read_csv(sitepath, sep='\t')
        # Make Escore object
        es_preds = escore.predict_sequences(seqdf)
        eplots = escore.plot(es_preds)
        # Make iMADS plot
        imads_preds = imads.predict_sequences(seqdf)
        imadsplots = imads.plot(imads_preds)
        plots = [imadsplots, eplots]
        pc = PlotCombiner() # can do this just once but not a big deal
        plotpath = "%s/sitesplot_%s.pdf" % (analysis_result_path, filename)
        pc.plot_seq_combine(plots, filepath=plotpath)
        # Keep only sequences with exactly two predicted binding sites:
        # the candidates for cooperative-binding probes.
        filtered_sites = {}
        print("Site filtering...")
        for key in es_preds:
            bs = Sequence(es_preds[key],imads_preds[key])
            if bs.site_count() == 2:
                filtered_sites[key] = bs
        #site_list = [{**{"key":site, "sequence":es_preds[site].sequence},**filtered_sites[site].get_sites_dict()} for site in filtered_sites]
        #columns = ["key", "site_start_1", "site_start_2", "site_end_1", "site_end_2", "site_pos_1", "site_pos_2", "imads_score_1", "imads_score_2", "sequence"]
        #pd.DataFrame(site_list).to_csv("%s/sitelist_%s.pdf" % (analysis_result_path), index=False, columns=columns, float_format='%.4f')
        seqdict = {}
        funcdict = {}
        filtered_probes = []
        # TODO: tmr look at 110,271
        # For every two-site sequence build the three mutants: m1 abolishes
        # site 1, m2 abolishes site 2, m3 abolishes both.
        for key in filtered_sites:
        #for key in ["sequence11"]:
            # Visualization part
            seqdict["%s-wt" % key] = filtered_sites[key].sequence
            for idx,mut in enumerate([[0],[1],[0,1]]):
                mutseq = filtered_sites[key].abolish_sites(mut,escore)
                seqdict["%s-m%d" % (key,idx + 1)] = mutseq.sequence
                funcdict["%s-m%d" % (key,idx + 1)] = mutseq.plot_functions
            if coopfilter.filter_coopseq(seqdict["%s-wt"%key], seqdict["%s-m1"%key],
                                         seqdict["%s-m2"%key], seqdict["%s-m3"%key],
                                         filtered_sites[key].get_sites_dict(), escore):
                filtered_probes.append({"key":key, "wt":seqdict["%s-wt"%key], "m1":seqdict["%s-m1"%key],
                                        "m2":seqdict["%s-m2"%key], "m3":seqdict["%s-m3"%key]})
        pp = escore.plot(escore.predict_sequences(seqdict),additional_functions=funcdict)
        pc.plot_seq_combine([pp], filepath="%s/plot_mut_%s.pdf" % (analysis_result_path,filename))
        # probably should check here if filtered_probes is empty
        pd.DataFrame(filtered_probes).to_csv("%s/mutated_probes_%s.tsv" % (analysis_result_path,filename),sep="\t",index=False,columns=["key","wt","m1","m2","m3"])
        #print(fname,header)
| 46.161111 | 175 | 0.672764 |
83e959ba13c92777006cc78510ef1401b37ed85b | 633 | py | Python | src/dagos/platform/__init__.py | DAG-OS/dagos | ac663ecf1cb9abe12669136e2b2e22b936ec88b5 | [
"MIT"
] | null | null | null | src/dagos/platform/__init__.py | DAG-OS/dagos | ac663ecf1cb9abe12669136e2b2e22b936ec88b5 | [
"MIT"
] | 8 | 2022-02-20T15:43:03.000Z | 2022-03-27T19:04:16.000Z | src/dagos/platform/__init__.py | DAG-OS/dagos | ac663ecf1cb9abe12669136e2b2e22b936ec88b5 | [
"MIT"
] | null | null | null | import dagos.platform.platform_utils as platform_utils
from .command_runner import CommandRunner
from .command_runner import ContainerCommandRunner
from .command_runner import LocalCommandRunner
from .platform_domain import CommandNotAvailableIssue
from .platform_domain import OperatingSystem
from .platform_domain import PlatformIssue
from .platform_domain import PlatformScope
from .platform_domain import UnsupportedOperatingSystemIssue
from .platform_exceptions import UnsupportedOperatingSystemException
from .platform_exceptions import UnsupportedPlatformException
from .platform_support_checker import PlatformSupportChecker
| 48.692308 | 68 | 0.903633 |
83eb304b78bbd24868418bb775b73ade9aefef43 | 1,593 | py | Python | scripts/find_guids_without_referents.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | scripts/find_guids_without_referents.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | scripts/find_guids_without_referents.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | """Finds Guids that do not have referents or that point to referents that no longer exist.
E.g. a node was created and given a guid but an error caused the node to
get deleted, leaving behind a guid that points to nothing.
"""
import sys
from modularodm import Q
from framework.guid.model import Guid
from website.app import init_app
from scripts import utils as scripts_utils
import logging
logger = logging.getLogger(__name__)
def get_targets():
"""Find GUIDs with no referents and GUIDs with referents that no longer exist."""
# Use a loop because querying MODM with Guid.find(Q('referent', 'eq', None))
# only catches the first case.
ret = []
# NodeFiles were once a GuidStored object and are no longer used any more.
# However, they still exist in the production database. We just skip over them
# for now, but they can probably need to be removed in the future.
# There were also 10 osfguidfile objects that lived in a corrupt repo that
# were not migrated to OSF storage, so we skip those as well. /sloria /jmcarp
for each in Guid.find(Q('referent.1', 'nin', ['nodefile', 'osfguidfile'])):
if each.referent is None:
logger.info('GUID {} has no referent.'.format(each._id))
ret.append(each)
return ret
if __name__ == '__main__':
main()
| 36.204545 | 90 | 0.702448 |
83eb4550225e76cac1d76f96f09f214fbc122c76 | 13,836 | py | Python | test/test_app.py | IoT-Partners/Platform | ecb17ca5e3e5cf447ecb48c22bfab36b102f01b0 | [
"MIT"
] | null | null | null | test/test_app.py | IoT-Partners/Platform | ecb17ca5e3e5cf447ecb48c22bfab36b102f01b0 | [
"MIT"
] | null | null | null | test/test_app.py | IoT-Partners/Platform | ecb17ca5e3e5cf447ecb48c22bfab36b102f01b0 | [
"MIT"
] | null | null | null | """
This script tests functions from the QRColorChecker modules by calling
them in several different ways.
@author: Eduard Cespedes Borràs
@mail: eduard@iot-partners.com
"""
import unittest
import hashlib
import dateutil
from chalicelib.server import Server
import sys
import json
from datetime import datetime
sys.path.append('../chalicelib')
| 37.700272 | 128 | 0.594681 |
83ecbdee9bb1d4607592c7d48726a571593fde4f | 3,497 | py | Python | test/test_config.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
] | 2 | 2019-08-13T15:13:58.000Z | 2019-10-04T09:09:24.000Z | test/test_config.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
] | 4 | 2021-02-08T20:23:42.000Z | 2022-03-11T23:27:07.000Z | test/test_config.py | beremaran/spdown | 59e5ea6996be51ad015f9da6758e2ce556b9fb94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import json
import unittest
from collections import OrderedDict
from spdown.config import Config
# Candidate config locations in lookup order: a file next to the tests
# first, then the per-user config under ~/.config/spdown.
TEST_CONFIG_PATHS = OrderedDict([
    ('local', 'config.json'),
    ('home', os.path.join(
        os.path.expanduser('~'), '.config',
        'spdown', 'config'
    ))
])
# Minimal configuration payload written/compared by the tests.
TEST_CONFIG = {
    'download_directory': '~/TestMusic'
}
if __name__ == "__main__":
    unittest.main()
| 31.223214 | 83 | 0.659994 |
83ed5076917201fcac6f1e8e51002b51c7395c85 | 2,167 | py | Python | external/emulation/tests/test_config.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-12-14T23:43:35.000Z | 2021-12-14T23:43:35.000Z | external/emulation/tests/test_config.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | external/emulation/tests/test_config.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | null | null | null | from emulation._emulate.microphysics import TimeMask
from emulation.config import (
EmulationConfig,
ModelConfig,
StorageConfig,
_load_nml,
_get_timestep,
_get_storage_hook,
get_hooks,
)
import emulation.zhao_carr
import datetime
| 25.494118 | 85 | 0.684818 |
83ed572ee1b1140fe9364cb212822f09bee7de36 | 323 | py | Python | sorting/insertion_sort.py | src24/algos | b1ac1049be6adaafedaa0572f009668e2c8d3809 | [
"MIT"
] | null | null | null | sorting/insertion_sort.py | src24/algos | b1ac1049be6adaafedaa0572f009668e2c8d3809 | [
"MIT"
] | null | null | null | sorting/insertion_sort.py | src24/algos | b1ac1049be6adaafedaa0572f009668e2c8d3809 | [
"MIT"
] | null | null | null | from typing import List
# O(n^2)
| 23.071429 | 63 | 0.4613 |
83ee40ca37d52089325ca67f4f809d3e842c7b0b | 8,939 | py | Python | tests/test_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from urllib.parse import urlsplit, parse_qs
from dateutil.tz import tzutc
import pystac
import pytest
from pystac_client import Client
from pystac_client.conformance import ConformanceClasses
from .helpers import STAC_URLS, TEST_DATA, read_data_file
    def test_invalid_url(self):
        # Client.open() requires a URL argument; calling it with no arguments
        # must fail with a TypeError before any request is attempted.
        with pytest.raises(TypeError):
            Client.open()
    def test_get_collections_with_conformance(self, requests_mock):
        """Checks that the "data" endpoint is used if the API published the collections conformance class."""
        pc_root_text = read_data_file("planetary-computer-root.json")
        pc_collection_dict = read_data_file("planetary-computer-aster-l1t-collection.json",
                                            parse_json=True)
        # Mock the root catalog
        requests_mock.get(STAC_URLS["PLANETARY-COMPUTER"], status_code=200, text=pc_root_text)
        api = Client.open(STAC_URLS["PLANETARY-COMPUTER"])
        # The fixture root advertises the COLLECTIONS conformance class.
        assert api._stac_io.conforms_to(ConformanceClasses.COLLECTIONS)
        # Get & mock the collections (rel type "data") link
        collections_link = api.get_single_link("data")
        requests_mock.get(collections_link.href,
                          status_code=200,
                          json={
                              "collections": [pc_collection_dict],
                              "links": []
                          })
        _ = next(api.get_collections())
        # Exactly two HTTP calls: the root catalog, then the "data" endpoint.
        history = requests_mock.request_history
        assert len(history) == 2
        assert history[1].url == collections_link.href
    def test_custom_request_parameters(self, requests_mock):
        # Parameters passed to Client.open() must be propagated as query
        # parameters on subsequent API requests (here: the collections call).
        pc_root_text = read_data_file("planetary-computer-root.json")
        pc_collection_dict = read_data_file("planetary-computer-collection.json", parse_json=True)
        requests_mock.get(STAC_URLS["PLANETARY-COMPUTER"], status_code=200, text=pc_root_text)
        init_qp_name = "my-param"
        init_qp_value = "some-value"
        api = Client.open(STAC_URLS['PLANETARY-COMPUTER'], parameters={init_qp_name: init_qp_value})
        # Ensure that the Client will use the /collections endpoint and not fall back
        # to traversing child links.
        assert api._stac_io.conforms_to(ConformanceClasses.COLLECTIONS)
        # Get the /collections endpoint
        collections_link = api.get_single_link("data")
        # Mock the request
        requests_mock.get(collections_link.href,
                          status_code=200,
                          json={
                              "collections": [pc_collection_dict],
                              "links": []
                          })
        # Make the collections request
        _ = next(api.get_collections())
        history = requests_mock.request_history
        assert len(history) == 2
        # Parse the query string of the second (collections) request.
        actual_qs = urlsplit(history[1].url).query
        actual_qp = parse_qs(actual_qs)
        # Check that the param from the init method is present
        assert init_qp_name in actual_qp
        assert len(actual_qp[init_qp_name]) == 1
        assert actual_qp[init_qp_name][0] == init_qp_value
    def test_get_collections_without_conformance(self, requests_mock):
        """Checks that child links are traversed when the API does not publish the collections conformance class."""
        pc_root_dict = read_data_file("planetary-computer-root.json", parse_json=True)
        pc_collection_dict = read_data_file("planetary-computer-aster-l1t-collection.json",
                                            parse_json=True)
        # Remove the collections conformance class
        pc_root_dict["conformsTo"].remove(
            "http://www.opengis.net/spec/ogcapi-features-1/1.0/conf/oas30")
        # Remove all child links except for the collection that we are mocking
        pc_collection_href = next(link["href"] for link in pc_collection_dict["links"]
                                  if link["rel"] == "self")
        pc_root_dict["links"] = [
            link for link in pc_root_dict["links"]
            if link["rel"] != "child" or link["href"] == pc_collection_href
        ]
        # Mock the root catalog
        requests_mock.get(STAC_URLS["PLANETARY-COMPUTER"], status_code=200, json=pc_root_dict)
        api = Client.open(STAC_URLS["PLANETARY-COMPUTER"])
        assert not api._stac_io.conforms_to(ConformanceClasses.COLLECTIONS)
        # Mock the collection
        requests_mock.get(pc_collection_href, status_code=200, json=pc_collection_dict)
        _ = next(api.get_collections())
        # Without the conformance class, the second request hits the child
        # collection link directly instead of a /collections endpoint.
        history = requests_mock.request_history
        assert len(history) == 2
        assert history[1].url == pc_collection_href
class TestAPISearch:
    """Tests for Client.search() behaviour."""

    def test_search_conformance_error(self, api):
        """Should raise a NotImplementedError if the API doesn't conform to the Item Search spec. Message should
        include information about the spec that was not conformed to."""
        # Set the conformance to only STAC API - Core
        api._stac_io._conformance = [api._stac_io._conformance[0]]
        with pytest.raises(NotImplementedError) as excinfo:
            api.search(limit=10, max_items=10, collections='mr-peebles')
        # The error message must name the missing ITEM_SEARCH conformance class.
        assert str(ConformanceClasses.ITEM_SEARCH) in str(excinfo.value)
| 41.193548 | 120 | 0.650632 |
83ee7b4543ab79bc0395dcd6db36fd4ba26a265c | 378 | py | Python | setup.py | mattpatey/text2qrcode | f0cbb006241ba20c76b16d67815836fd44890315 | [
"Xnet",
"X11"
] | 1 | 2020-11-13T20:59:08.000Z | 2020-11-13T20:59:08.000Z | setup.py | mattpatey/text2qrcode | f0cbb006241ba20c76b16d67815836fd44890315 | [
"Xnet",
"X11"
] | null | null | null | setup.py | mattpatey/text2qrcode | f0cbb006241ba20c76b16d67815836fd44890315 | [
"Xnet",
"X11"
] | null | null | null | from setuptools import (
find_packages,
setup,
)
# Package metadata for the text2qrcode distribution. Exposes a single
# console command, `t2qr`, mapped to text2qrcode.main:main.
setup(
    name="text2qrcode",
    version="1.0-a1",  # alpha pre-release of 1.0
    description="Render a QR code image from input text",
    author="Matt Patey",
    packages=find_packages(),
    # Runtime deps: qrcode builds the code matrix, pillow renders the image.
    install_requires=["qrcode", "pillow"],
    entry_points={
        "console_scripts": [
            "t2qr=text2qrcode.main:main"
        ]
    }
)
| 19.894737 | 57 | 0.595238 |
83ef28442d472afe61e0a90f60e0718bf2a46056 | 363 | py | Python | test/test_image.py | arkagogoldey/cloud_coverage_image_analysis | dde9954a27f70e77f9760455d12eeb6e458f8dba | [
"MIT"
] | 1 | 2021-10-16T09:26:53.000Z | 2021-10-16T09:26:53.000Z | test/test_image.py | arkagogoldey/cloud_coverage_image_analysis | dde9954a27f70e77f9760455d12eeb6e458f8dba | [
"MIT"
] | null | null | null | test/test_image.py | arkagogoldey/cloud_coverage_image_analysis | dde9954a27f70e77f9760455d12eeb6e458f8dba | [
"MIT"
] | null | null | null | import numpy as np
import random
from proyecto2.image import Image
| 24.2 | 49 | 0.584022 |
83effd89a13b4f1b810c9a266a94d6710b5a3afc | 2,236 | py | Python | test_autolens/unit/pipeline/phase/point_source/test_phase_point_source.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/unit/pipeline/phase/point_source/test_phase_point_source.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | test_autolens/unit/pipeline/phase/point_source/test_phase_point_source.py | agarwalutkarsh554/PyAutoLens | 72d2f5c39834446e72879fd119b591e52b36cac4 | [
"MIT"
] | null | null | null | from os import path
import numpy as np
import pytest
import autofit as af
import autolens as al
from autolens.mock import mock
# Module-level pytest mark: silences numpy's deprecated non-tuple
# multidimensional-indexing warning for every test collected from this file.
pytestmark = pytest.mark.filterwarnings(
    "ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
    "`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
    "either in an error or a different result."
)
# Absolute path of the directory containing this test module.
directory = path.dirname(path.realpath(__file__))
| 33.373134 | 119 | 0.654293 |
83f051af9726ef346dde4699fd1ff70473f62a92 | 1,737 | py | Python | convert.py | lfe999/xenforo-scraper | a06dd9412658941b269889932534d071ad30367e | [
"MIT"
] | 2 | 2021-07-30T03:11:06.000Z | 2022-03-07T15:40:30.000Z | convert.py | lfe999/xenforo-scraper | a06dd9412658941b269889932534d071ad30367e | [
"MIT"
] | null | null | null | convert.py | lfe999/xenforo-scraper | a06dd9412658941b269889932534d071ad30367e | [
"MIT"
] | 1 | 2021-07-07T16:05:07.000Z | 2021-07-07T16:05:07.000Z | formats = {"KiB": 1024, "KB": 1000,
"MiB": 1024**2, "MB": 1000**2,
"GiB": 1024**3, "GB": 1000**3,
"TiB": 1024**4, "TB": 1000**4}
# Converts shorthand into number of bytes, ex. 1KiB = 1024
# Converts the number of bytes into shorthand expression, ex. 2500 = 2.5KB
# Run tests only if file is ran as standalone.
if __name__ == '__main__':
# Tests
import pytest
assert shortToBytes("103kib") == 105472
assert shortToBytes("103GIB") == 110595407872
assert shortToBytes("0.5TB") == 500000000000
assert bytesToShort(105472) == "105.47KB"
assert bytesToShort(110595407872) == "110.6GB"
assert bytesToShort(500000000000) == "500.0GB"
with pytest.raises(Exception):
print(bytesToShort("k2jfzsk2"))
with pytest.raises(Exception):
print(shortToBytes("ad2wd2"))
with pytest.raises(Exception):
print(shortToBytes(25252))
| 35.44898 | 91 | 0.614853 |
83f147a88053ee096c8c450bcf0c3e2aae29aca2 | 12,023 | py | Python | old/pro/src/GUI/lofarBFgui.py | peijin94/LOFAR-Sun-tools | 23ace5a5e8c0bdaa0cbb5ab6e37f6527716d16f3 | [
"MIT"
] | null | null | null | old/pro/src/GUI/lofarBFgui.py | peijin94/LOFAR-Sun-tools | 23ace5a5e8c0bdaa0cbb5ab6e37f6527716d16f3 | [
"MIT"
] | null | null | null | old/pro/src/GUI/lofarBFgui.py | peijin94/LOFAR-Sun-tools | 23ace5a5e8c0bdaa0cbb5ab6e37f6527716d16f3 | [
"MIT"
] | null | null | null |
# The UI interface and analysis of the lofar solar beam from
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '..')
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.uic import loadUi
from PyQt5.QtCore import Qt
import matplotlib
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import griddata
from skimage import measure
import matplotlib.dates as mdates
import resource_rc
from lofarSun.lofarData import LofarDataBF
from pandas.plotting import register_matplotlib_converters
import platform
import matplotlib as mpl
# try to use the precise epoch
mpl.rcParams['date.epoch']='1970-01-01T00:00:00'
try:
    mdates.set_epoch('1970-01-01T00:00:00')
except:
    # NOTE(review): bare except swallows *everything* (even KeyboardInterrupt);
    # presumably guards older matplotlib without set_epoch -- consider
    # narrowing to Exception.
    pass
# Make pandas datetime types plottable by matplotlib.
register_matplotlib_converters()
if platform.system() != "Darwin":
    matplotlib.use('TkAgg')
else:
    print("Detected MacOS, using the default matplotlib backend: " +
          matplotlib.get_backend())
# Start the Qt event loop with the main window (MatplotlibWidget is defined
# earlier in this module, outside this excerpt).
app = QApplication([])
window = MatplotlibWidget()
window.show()
app.exec_()
| 38.909385 | 115 | 0.592198 |
83f17a06a8bc16cfd0111230bb492518bce41c73 | 2,169 | py | Python | otter/api.py | sean-morris/otter-grader | 72135c78a69836dbbc920e25f737d4382bee0ec1 | [
"BSD-3-Clause"
] | null | null | null | otter/api.py | sean-morris/otter-grader | 72135c78a69836dbbc920e25f737d4382bee0ec1 | [
"BSD-3-Clause"
] | null | null | null | otter/api.py | sean-morris/otter-grader | 72135c78a69836dbbc920e25f737d4382bee0ec1 | [
"BSD-3-Clause"
] | null | null | null | """
"""
__all__ = ["export_notebook", "grade_submission"]
import os
import sys
import shutil
import tempfile
from contextlib import redirect_stdout
try:
from contextlib import nullcontext
except ImportError:
from .utils import nullcontext # nullcontext is new in Python 3.7
from .argparser import get_parser
from .export import export_notebook
from .run import main as run_grader
# Otter's CLI argument parser, reused to build synthetic 'otter run' argv lists.
PARSER = get_parser()
# Every grading invocation starts as an 'otter run' subcommand.
ARGS_STARTER = ["run"]
def grade_submission(ag_path, submission_path, quiet=False, debug=False):
    """
    Runs non-containerized grading on a single submission at ``submission_path`` using the autograder
    configuration file at ``ag_path``.

    Creates a temporary grading directory using the ``tempfile`` library and grades the submission
    by replicating the autograder tree structure in that folder and running the autograder there. Does
    not run environment setup files (e.g. ``setup.sh``) or install requirements, so any requirements
    should be available in the environment being used for grading.

    Print statements executed during grading can be suppressed with ``quiet``.

    Args:
        ag_path (``str``): path to autograder zip file
        submission_path (``str``): path to submission file
        quiet (``bool``, optional): whether to suppress print statements during grading; default
            ``False``
        debug (``bool``, optional): whether to run the submission in debug mode (without ignoring
            errors)

    Returns:
        ``otter.test_files.GradingResults``: the results object produced during the grading of the
            submission.
    """
    dp = tempfile.mkdtemp()

    try:
        args_list = ARGS_STARTER.copy()
        args_list.extend([
            "-a", ag_path,
            "-o", dp,
            submission_path,
            "--no-logo",
        ])
        if debug:
            args_list.append("--debug")

        args = PARSER.parse_args(args_list)

        if quiet:
            # Redirect grader output to os.devnull; the 'with' block closes the
            # handle even if run_grader raises (the previous implementation
            # leaked both the handle and the temp dir on exceptions).
            with open(os.devnull, "w") as f, redirect_stdout(f):
                results = run_grader(**vars(args))
        else:
            results = run_grader(**vars(args))
    finally:
        # Always remove the temporary grading directory, even when grading raises.
        shutil.rmtree(dp)

    return results
| 27.1125 | 102 | 0.664361 |
83f188e156ec6c7d9f2733735708e0459183598e | 930 | py | Python | taurex/data/profiles/pressure/arraypressure.py | ucl-exoplanets/TauREx3_public | cf8da465448df44c3c4dcc2cd0002ef34edd3920 | [
"BSD-3-Clause"
] | 10 | 2019-12-18T09:19:16.000Z | 2021-06-21T11:02:06.000Z | taurex/data/profiles/pressure/arraypressure.py | ucl-exoplanets/TauREx3_public | cf8da465448df44c3c4dcc2cd0002ef34edd3920 | [
"BSD-3-Clause"
] | 10 | 2020-03-24T18:02:15.000Z | 2021-08-23T20:32:09.000Z | taurex/data/profiles/pressure/arraypressure.py | ucl-exoplanets/TauREx3_public | cf8da465448df44c3c4dcc2cd0002ef34edd3920 | [
"BSD-3-Clause"
] | 8 | 2020-03-26T14:16:42.000Z | 2021-12-18T22:11:25.000Z | from .pressureprofile import PressureProfile
import numpy as np
| 24.473684 | 66 | 0.62043 |
83f1b322463e935e9c59457e936e5b4e88b767fd | 2,857 | py | Python | bin/check_samplesheet.py | ggabernet/vcreport | fe5d315364c19d7286c5f7419cc5ff4599ed373d | [
"MIT"
] | 1 | 2021-08-23T20:15:15.000Z | 2021-08-23T20:15:15.000Z | bin/check_samplesheet.py | ggabernet/vcreport | fe5d315364c19d7286c5f7419cc5ff4599ed373d | [
"MIT"
] | null | null | null | bin/check_samplesheet.py | ggabernet/vcreport | fe5d315364c19d7286c5f7419cc5ff4599ed373d | [
"MIT"
] | 1 | 2021-09-09T09:40:11.000Z | 2021-09-09T09:40:11.000Z | #!/usr/bin/env python
# This script is based on the example at: https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
import os
import sys
import errno
import argparse
# TODO nf-core: Update the check_samplesheet function
def check_samplesheet(file_in):
    """
    This function checks that the samplesheet follows the following structure:
    sample,vcf
    SAMPLE1,sample1.vcf
    SAMPLE2,sample2.vcf
    For an example see:
    https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
    """
    # NOTE(review): initialised here but not populated in the visible portion
    # of this function -- confirm the remainder fills it with sample->vcf data.
    sample_mapping_dict = {}
    with open(file_in, "r") as fin:
        ## Check header
        MIN_COLS = 2  # both 'sample' and 'vcf' must be populated per row
        # TODO nf-core: Update the column names for the input samplesheet
        HEADER = ["sample", "vcf"]
        # Strip surrounding double quotes from each header field.
        header = [x.strip('"') for x in fin.readline().strip().split(",")]
        if header[: len(HEADER)] != HEADER:
            print("ERROR: Please check samplesheet header -> {} != {}".format(",".join(header), ",".join(HEADER)))
            sys.exit(1)
        ## Check sample entries
        for line in fin:
            lspl = [x.strip().strip('"') for x in line.strip().split(",")]
            # Check valid number of columns per row
            if len(lspl) < len(HEADER):
                # print_error is defined elsewhere in this script.
                print_error(
                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
                    "Line",
                    line,
                )
            num_cols = len([x for x in lspl if x])
            if num_cols < MIN_COLS:
                print_error(
                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
                    "Line",
                    line,
                )
if __name__ == "__main__":
sys.exit(main())
| 31.395604 | 159 | 0.60273 |
83f1ef1dcba662400bb9b8d83a966ab6acf3c9c8 | 2,093 | py | Python | mltraining.py | krumaska/FTIFTC | aff8a00a7a4c720801de9b2ac20ce69e9e2c561a | [
"MIT"
] | null | null | null | mltraining.py | krumaska/FTIFTC | aff8a00a7a4c720801de9b2ac20ce69e9e2c561a | [
"MIT"
] | null | null | null | mltraining.py | krumaska/FTIFTC | aff8a00a7a4c720801de9b2ac20ce69e9e2c561a | [
"MIT"
] | null | null | null | from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import random
lol = pd.read_csv('./data/sample_SilverKDA.csv')
lol.drop(['Unnamed: 0'],axis=1,inplace=True)
print(lol)
f, ax = plt.subplots(1, 2, figsize=(18, 8))
lol['gameResult'].value_counts().plot.pie(explode= [0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True)
ax[0].set_title('Pie plot - Game Result')
ax[0].set_ylabel('')
sns.countplot('gameResult', data=lol, ax=ax[1])
ax[1].set_title('Count plot - Game Result')
pd.crosstab(lol['JUNGLE'], lol['gameResult'], margins=True)
plt.show()
x = range(0,50)
print(x)
randInt = random.randint(0,lol['gameResult'].count()-50)
y0 = lol['gameResult'][randInt:randInt+50]
plt.plot(x, y0, label="gameResult")
y1 = lol['TOP'][randInt:randInt+50]
plt.plot(x, y1, label="TOP")
y2 = lol['JUNGLE'][randInt:randInt+50]
plt.plot(x, y2, label="JUNGLE")
y3 = lol['MIDDLE'][randInt:randInt+50]
plt.plot(x, y3, label="MIDDLE")
y4 = lol['BOTTOM'][randInt:randInt+50]
plt.plot(x, y4, label="BOTTOM")
y5 = lol['SUPPORT'][randInt:randInt+50]
plt.plot(x, y5, label="SUPPORT")
print(randInt)
plt.xlabel('count')
plt.ylabel('data')
plt.legend()
plt.show()
print(lol.head())
print(lol.info())
X = lol[['TOP','JUNGLE','MIDDLE','BOTTOM','SUPPORT']]
y = lol['gameResult']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=13)
lr = LogisticRegression(random_state=13, solver='liblinear')
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
print(accuracy_score(y_test, pred))
import numpy as np
thisPic = np.array([[1.43, 1.84, 1.92, 2.50, 3.92]])
winRate = lr.predict_proba(thisPic)[0,1]
if winRate >= 0.5 and winRate <=0.6:
print(".")
elif winRate <0.5 and winRate >=0.3:
print(" .")
elif winRate <0.3:
print(" .")
else:
print(" .")
print(' : ',lr.predict_proba(thisPic)[0,1]*100,"%")
| 26.833333 | 102 | 0.698041 |
83f254257c334bebe3b34129f3e77014a18affa5 | 1,300 | py | Python | timeboard.py | jtbarker/hiring-engineers | cd00fff1bb2be6374fc462891c3bf629e3c3ccb1 | [
"Apache-2.0"
] | null | null | null | timeboard.py | jtbarker/hiring-engineers | cd00fff1bb2be6374fc462891c3bf629e3c3ccb1 | [
"Apache-2.0"
] | null | null | null | timeboard.py | jtbarker/hiring-engineers | cd00fff1bb2be6374fc462891c3bf629e3c3ccb1 | [
"Apache-2.0"
] | null | null | null | from datadog import initialize, api
options = {
'api_key': '16ff05c7af6ed4652a20f5a8d0c609ce',
'app_key': 'e6a169b9b337355eef90002878fbf9a565e9ee77'
}
initialize(**options)
title = "Mymetric timeboard"
description = "Mymetric Timeboard"
graphs = [
{
"definition": {
"events": [],
"requests": [
{"q": "avg:mymetric{host:ubuntu-xenial}"}
],
"viz": "timeseries"
},
"title": "mymetric in timeseries"
},
{
"definition": {
"events": [],
"requests": [
{"q": "anomalies(avg:postgres.connections.current{host:ubuntu-xenial}, 'basic', 2)"}
],
"viz": "timeseries"
},
"title": "PostgreSQL connections"
},
{
"definition": {
"events": [],
"requests": [
{"q": "avg:mymetric{host:ubuntu-xenial}.rollup(sum, 3600)"}
],
"viz": "timeseries"
},
"title": "Rollup function mymetric"
},
]
template_variables = [{
"name": "ubuntu_xenial",
"prefix": "host",
"default": "host:my-host"
}]
read_only = True
api.Timeboard.create(title=title,description=description,graphs=graphs,template_variables=template_variables) | 26 | 109 | 0.529231 |
83f2cb0d5c42a6b58a56b8a67b072fa321682a58 | 1,104 | py | Python | tests/python/rlview/test-run.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 43 | 2018-07-26T07:20:42.000Z | 2022-03-02T10:23:12.000Z | tests/python/rlview/test-run.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 212 | 2018-09-21T10:44:07.000Z | 2022-03-22T14:33:05.000Z | tests/python/rlview/test-run.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 16 | 2018-07-25T15:00:36.000Z | 2022-03-22T14:19:46.000Z | #! /usr/bin/env python3
from subprocess import call
r = call(["python3", "-m", "korali.rlview", "--help"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--maxObservations", "10000", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--maxReward", "20.0", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--minReward", "-1.0", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--showCI", "0.2", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--averageDepth", "30", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "abf2d_vracer2", "--test"])
if r!=0:
exit(r)
r = call(["python3", "-m", "korali.rlview", "--dir", "abf2d_vracer1", "--output", "test.png", "--test"])
if r!=0:
exit(r)
exit(0)
| 26.926829 | 110 | 0.548913 |
83f2f7aa75a2c9e552bb8125bde1278a1b2c932e | 1,643 | bzl | Python | cmake/build_defs.bzl | benjaminp/upb | 901744a97e5170bfdd5b408a26b6603b1fbab9ad | [
"BSD-3-Clause"
] | null | null | null | cmake/build_defs.bzl | benjaminp/upb | 901744a97e5170bfdd5b408a26b6603b1fbab9ad | [
"BSD-3-Clause"
] | null | null | null | cmake/build_defs.bzl | benjaminp/upb | 901744a97e5170bfdd5b408a26b6603b1fbab9ad | [
"BSD-3-Clause"
] | null | null | null |
def generated_file_staleness_test(name, outs, generated_pattern):
    """Tests that checked-in file(s) match the contents of generated file(s).

    The resulting test will verify that all output files exist and have the
    correct contents. If the test fails, it can be invoked with --fix to
    bring the checked-in files up to date.

    Args:
      name: Name of the rule.
      outs: the checked-in files that are copied from generated files.
      generated_pattern: the pattern for transforming each "out" file into a
        generated file. For example, if generated_pattern="generated/%s" then
        a file foo.txt will look for generated file generated/foo.txt.
    """
    test_script = name + ".py"
    script_template = ":staleness_test.py"

    # Only keep outputs that already exist in the workspace, so Blaze does
    # not error out before the test itself gets a chance to run.
    present_outs = native.glob(include = outs)

    # The trailing entries (pattern, package path, rule name) are metadata
    # that the Config class in staleness_test_lib.py unpacks.
    config_list = outs + [generated_pattern, native.package_name() or ".", name]
    sed_replacement = "\\\n    ".join(config_list)

    # Copy the template script and splice the file list into the placeholder.
    make_script_cmd = ("cat $(location " + script_template + ") > $@; " +
                       "sed -i.bak -e 's|INSERT_FILE_LIST_HERE|" + sed_replacement + "|' $@")

    native.genrule(
        name = name + "_makescript",
        outs = [test_script],
        srcs = [script_template],
        testonly = 1,
        cmd = make_script_cmd,
    )

    native.py_test(
        name = name,
        srcs = [test_script],
        data = present_outs + [generated_pattern % out for out in outs],
        deps = [":staleness_test_lib"],
    )
| 36.511111 | 93 | 0.634814 |
83f32ec218d69fd1e8829338d9a53be2a269009b | 4,894 | py | Python | tests/transform_finding_test.py | aws-samples/aws-security-hub-analytic-pipeline | 3e4242c24297725f656ff8a560ff180604443223 | [
"MIT-0"
] | 7 | 2021-06-16T00:55:44.000Z | 2022-02-13T23:00:27.000Z | tests/transform_finding_test.py | QPC-database/aws-security-hub-analytic-pipeline | aff8cd7f5954c285b93fe8f67f8bef2482a1f686 | [
"MIT-0"
] | null | null | null | tests/transform_finding_test.py | QPC-database/aws-security-hub-analytic-pipeline | aff8cd7f5954c285b93fe8f67f8bef2482a1f686 | [
"MIT-0"
] | 2 | 2021-07-11T02:41:38.000Z | 2022-03-29T20:34:23.000Z | from assets.lambdas.transform_findings.index import TransformFindings
import boto3
from moto import mock_s3
| 56.252874 | 238 | 0.647323 |
83f41e9d7d2619c0ed48dbceaafa749c11834cc5 | 1,431 | py | Python | Breeze18/Breeze/migrations/0006_auto_20180110_2205.py | Breeze18/Breeze | 4215776e2f02fab3ce357e67b3b6ca378742049c | [
"Apache-2.0"
] | null | null | null | Breeze18/Breeze/migrations/0006_auto_20180110_2205.py | Breeze18/Breeze | 4215776e2f02fab3ce357e67b3b6ca378742049c | [
"Apache-2.0"
] | 1 | 2017-11-09T13:07:24.000Z | 2018-01-29T04:31:26.000Z | Breeze18/Breeze/migrations/0006_auto_20180110_2205.py | Breeze18/Breeze | 4215776e2f02fab3ce357e67b3b6ca378742049c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-10 16:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 30.446809 | 110 | 0.600978 |
83f4f90a2f2418b0454a8f8ffca04dc4c58e2aca | 25,414 | py | Python | plugin.video.rebirth/resources/lib/modules/libtools.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:38:10.000Z | 2019-03-05T09:38:10.000Z | plugin.video.rebirth/resources/lib/modules/libtools.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | plugin.video.rebirth/resources/lib/modules/libtools.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | # -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
try:
from sqlite3 import dbapi2 as database
except:
from pysqlite2 import dbapi2 as database
import datetime
import json
import os
import re
import sys
import urllib
import urlparse
import xbmc
from resources.lib.modules import control
from resources.lib.modules import cleantitle
| 43.666667 | 388 | 0.517235 |
83f68d37160408c53d642878608195421084467e | 9,670 | py | Python | frappe/website/page_renderers/template_page.py | sersaber/frappe | e0c25d2b2c19fe79f7c7848e7307d90a5f27c68a | [
"MIT"
] | null | null | null | frappe/website/page_renderers/template_page.py | sersaber/frappe | e0c25d2b2c19fe79f7c7848e7307d90a5f27c68a | [
"MIT"
] | null | null | null | frappe/website/page_renderers/template_page.py | sersaber/frappe | e0c25d2b2c19fe79f7c7848e7307d90a5f27c68a | [
"MIT"
] | null | null | null | import io
import os
import click
import frappe
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.router import get_base_template, get_page_info
from frappe.website.utils import (
cache_html,
extract_comment_tag,
extract_title,
get_frontmatter,
get_next_link,
get_sidebar_items,
get_toc,
is_binary_file,
)
# Attributes that a page's Python controller module may define to override
# rendering behaviour; picked up from the module by this renderer.
WEBPAGE_PY_MODULE_PROPERTIES = (
	"base_template_path",
	"template",
	"no_cache",
	"sitemap",
	"condition_field",
)

# Maps comment tags found in page source (via extract_comment_tag) to the
# (context key, value) pair each one sets.
COMMENT_PROPERTY_KEY_VALUE_MAP = {
	"no-breadcrumbs": ("no_breadcrumbs", 1),
	"show-sidebar": ("show_sidebar", 1),
	"add-breadcrumbs": ("add_breadcrumbs", 1),
	"no-header": ("no_header", 1),
	"add-next-prev-links": ("add_next_prev_links", 1),
	"no-cache": ("no_cache", 1),
	"no-sitemap": ("sitemap", 0),
	"sitemap": ("sitemap", 1),
}
| 31.193548 | 99 | 0.729369 |
83f948132b34592da621aabbd2b53119e725f6d1 | 17,013 | py | Python | mistral/db/v2/sqlalchemy/models.py | mail2nsrajesh/mistral | b19d87141563e00f18cd74c685392d0b9b70e351 | [
"Apache-2.0"
] | null | null | null | mistral/db/v2/sqlalchemy/models.py | mail2nsrajesh/mistral | b19d87141563e00f18cd74c685392d0b9b70e351 | [
"Apache-2.0"
] | null | null | null | mistral/db/v2/sqlalchemy/models.py | mail2nsrajesh/mistral | b19d87141563e00f18cd74c685392d0b9b70e351 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
import sys
from oslo_config import cfg
from oslo_log import log as logging
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
from mistral import exceptions as exc
from mistral.services import security
from mistral import utils
# Definition objects.
LOG = logging.getLogger(__name__)
def validate_long_type_length(cls, field_name, value):
    """Makes sure the value does not exceeds the maximum size."""
    if not value:
        # Nothing to validate for empty/None values.
        return

    # Configured per-field limit, in kilobytes.
    limit_kb = cfg.CONF.engine.execution_field_size_limit_kb

    if limit_kb < 0:
        # A negative limit means "unlimited" - skip the check entirely.
        return

    # Approximate serialized size of the value, in whole kilobytes.
    actual_kb = int(sys.getsizeof(str(value)) / 1024)

    if actual_kb <= limit_kb:
        return

    LOG.error(
        "Size limit %dKB exceed for class [%s], "
        "field %s of size %dKB.",
        limit_kb, str(cls), field_name, actual_kb
    )

    raise exc.SizeLimitExceededException(field_name, actual_kb, limit_kb)
def register_length_validator(attr_name):
"""Register an event listener on the attribute.
This event listener will validate the size every
time a 'set' occurs.
"""
for cls in utils.iter_subclasses(Execution):
if hasattr(cls, attr_name):
event.listen(
getattr(cls, attr_name),
'set',
lambda t, v, o, i: validate_long_type_length(cls, attr_name, v)
)
# There's no WorkbookExecution so we safely omit "Definition" in the name.
# Execution objects.
for cls in utils.iter_subclasses(Execution):
event.listen(
# Catch and trim Execution.state_info to always fit allocated size.
# Note that the limit is 65500 which is less than 65535 (2^16 -1).
# The reason is that utils.cut() is not exactly accurate in case if
# the value is not a string, but, for example, a dictionary. If we
# limit it exactly to 65535 then once in a while it may go slightly
# beyond the allowed maximum size. It may depend on the order of
# keys in a string representation and other things that are hidden
# inside utils.cut_dict() method.
cls.state_info,
'set',
lambda t, v, o, i: utils.cut(v, 65500),
retval=True
)
# Many-to-one for 'ActionExecution' and 'TaskExecution'.
ActionExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.action_executions = relationship(
ActionExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=ActionExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % ActionExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'WorkflowExecution' and 'TaskExecution'.
WorkflowExecution.task_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
nullable=True
)
TaskExecution.workflow_executions = relationship(
WorkflowExecution,
backref=backref('task_execution', remote_side=[TaskExecution.id]),
cascade='all, delete-orphan',
foreign_keys=WorkflowExecution.task_execution_id,
lazy='select'
)
sa.Index(
'%s_task_execution_id' % WorkflowExecution.__tablename__,
'task_execution_id'
)
# Many-to-one for 'TaskExecution' and 'WorkflowExecution'.
TaskExecution.workflow_execution_id = sa.Column(
sa.String(36),
sa.ForeignKey(WorkflowExecution.id, ondelete='CASCADE')
)
WorkflowExecution.task_executions = relationship(
TaskExecution,
backref=backref('workflow_execution', remote_side=[WorkflowExecution.id]),
cascade='all, delete-orphan',
foreign_keys=TaskExecution.workflow_execution_id,
lazy='select'
)
sa.Index(
'%s_workflow_execution_id' % TaskExecution.__tablename__,
TaskExecution.workflow_execution_id
)
# Other objects.
sa.Index(
'%s_execution_time' % DelayedCall.__tablename__,
DelayedCall.execution_time
)
# Register all hooks related to secure models.
mb.register_secure_model_hooks()
# TODO(rakhmerov): This is a bad solution. It's hard to find in the code,
# configure flexibly etc. Fix it.
# Register an event listener to verify that the size of all the long columns
# affected by the user do not exceed the limit configuration.
for attr_name in ['input', 'output', 'params', 'published']:
register_length_validator(attr_name)
sa.UniqueConstraint(NamedLock.name)
| 31.622677 | 79 | 0.673779 |
83fa1f4c15e8b0c13b7079f93983ba9e472a57f2 | 3,286 | bzl | Python | haskell/private/packages.bzl | andyscott/rules_haskell | 1c7341f885f62cecad705f6a9e8b610b73f11527 | [
"Apache-2.0"
] | null | null | null | haskell/private/packages.bzl | andyscott/rules_haskell | 1c7341f885f62cecad705f6a9e8b610b73f11527 | [
"Apache-2.0"
] | null | null | null | haskell/private/packages.bzl | andyscott/rules_haskell | 1c7341f885f62cecad705f6a9e8b610b73f11527 | [
"Apache-2.0"
] | null | null | null | """Package list handling"""
load(":private/set.bzl", "set")
def pkg_info_to_ghc_args(pkg_info):
"""
Takes the package info collected by `ghc_info()` and returns the actual
list of command line arguments that should be passed to GHC.
"""
args = [
# In compile.bzl, we pass this just before all -package-id
# arguments. Not doing so leads to bizarre compile-time failures.
# It turns out that equally, not doing so leads to bizarre
# link-time failures. See
# https://github.com/tweag/rules_haskell/issues/395.
"-hide-all-packages",
]
if not pkg_info.has_version:
args.extend([
# Macro version are disabled for all packages by default
# and enabled for package with version
# see https://github.com/tweag/rules_haskell/issues/414
"-fno-version-macros",
])
for package in pkg_info.packages:
args.extend(["-package", package])
for package_id in pkg_info.package_ids:
args.extend(["-package-id", package_id])
for package_db in pkg_info.package_dbs:
args.extend(["-package-db", package_db])
return args
def expose_packages(build_info, lib_info, use_direct, use_my_pkg_id, custom_package_caches, version):
"""
Returns the information that is needed by GHC in order to enable haskell
packages.
build_info: is common to all builds
version: if the rule contains a version, we will export the CPP version macro
All the other arguments are not understood well:
lib_info: only used for repl and linter
use_direct: only used for repl and linter
use_my_pkg_id: only used for one specific task in compile.bzl
custom_package_caches: override the package_caches of build_info, used only by the repl
"""
has_version = version != None and version != ""
# Expose all prebuilt dependencies
#
# We have to remember to specify all (transitive) wired-in
# dependencies or we can't find objects for linking
#
# Set use_direct if build_info does not have a direct_prebuilt_deps field.
packages = []
for prebuilt_dep in set.to_list(build_info.direct_prebuilt_deps if use_direct else build_info.prebuilt_dependencies):
packages.append(prebuilt_dep.package)
# Expose all bazel dependencies
package_ids = []
for package in set.to_list(build_info.package_ids):
# XXX: repl and lint uses this lib_info flags
# It is set to None in all other usage of this function
# TODO: find the meaning of this flag
if lib_info == None or package != lib_info.package_id:
# XXX: use_my_pkg_id is not None only in compile.bzl
if (use_my_pkg_id == None) or package != use_my_pkg_id:
package_ids.append(package)
# Only include package DBs for deps, prebuilt deps should be found
# auto-magically by GHC
package_dbs = []
for cache in set.to_list(build_info.package_caches if not custom_package_caches else custom_package_caches):
package_dbs.append(cache.dirname)
ghc_info = struct(
has_version = has_version,
packages = packages,
package_ids = package_ids,
package_dbs = package_dbs,
)
return ghc_info
| 36.921348 | 121 | 0.680158 |
83fa2b2bb34bdebf6c4cd5300d5d2f1279a8b7ff | 223 | py | Python | get_repo/git.py | florian42/get-repo | 5c1dbf5ecfbdb28f3a628bff57e3a0963ec0fdcc | [
"MIT"
] | null | null | null | get_repo/git.py | florian42/get-repo | 5c1dbf5ecfbdb28f3a628bff57e3a0963ec0fdcc | [
"MIT"
] | null | null | null | get_repo/git.py | florian42/get-repo | 5c1dbf5ecfbdb28f3a628bff57e3a0963ec0fdcc | [
"MIT"
] | null | null | null | import subprocess
| 24.777778 | 68 | 0.659193 |
83fa3e28d2e07cc9a136e3744d2ab599a591cc63 | 2,220 | py | Python | tests/test_models/test_state.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | null | null | null | tests/test_models/test_state.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | null | null | null | tests/test_models/test_state.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | 1 | 2021-07-07T21:37:54.000Z | 2021-07-07T21:37:54.000Z | #!/usr/bin/python3
"""
===============================================================================
===============================================================================
"""
from models.base_model import BaseModel
from models.state import State
import unittest
import json
import pep8
import datetime
| 38.275862 | 80 | 0.440991 |
83fa4a8d78bac76a3513caf7e33e512c2461662e | 39,191 | py | Python | src/generatorse/EESG_1.7.x.py | WISDEM/GeneratorSE | ee85646bb82f9d120a3efd39a5530e674062da08 | [
"Apache-2.0"
] | null | null | null | src/generatorse/EESG_1.7.x.py | WISDEM/GeneratorSE | ee85646bb82f9d120a3efd39a5530e674062da08 | [
"Apache-2.0"
] | 2 | 2017-04-19T20:55:22.000Z | 2019-05-06T10:48:39.000Z | src/generatorse/EESG_1.7.x.py | WISDEM/GeneratorSE | ee85646bb82f9d120a3efd39a5530e674062da08 | [
"Apache-2.0"
] | 3 | 2018-10-10T00:07:35.000Z | 2021-04-19T18:41:05.000Z | """EESG.py
Created by Latha Sethuraman, Katherine Dykes.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, Component,ExecComp,IndepVarComp,ScipyOptimizer,pyOptSparseDriver
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
from openmdao.drivers import *
import numpy as np
from numpy import array,float,min,sign
from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan
import pandas
####################################################Cost Analysis#######################################################################
####################################################OPTIMISATION SET_UP ###############################################################
def EESG_Opt_example():
opt_problem=Problem(root=EESG_Opt())
#Example optimization of an EESG for costs on a 5 MW reference turbine
# add optimizer and set-up problem (using user defined input on objective function)
#
opt_problem.driver=pyOptSparseDriver()
opt_problem.driver.options['optimizer'] = 'CONMIN'
opt_problem.driver.add_objective('Costs') # Define Objective
opt_problem.driver.opt_settings['IPRINT'] = 4
opt_problem.driver.opt_settings['ITRM'] = 3
opt_problem.driver.opt_settings['ITMAX'] = 10
opt_problem.driver.opt_settings['DELFUN'] = 1e-3
opt_problem.driver.opt_settings['DABFUN'] = 1e-3
opt_problem.driver.opt_settings['IFILE'] = 'CONMIN_EESG.out'
opt_problem.root.deriv_options['type']='fd'
# Specificiency target efficiency(%)
Eta_Target = 93.0
# Set bounds for design variables for an EESG designed for a 5MW turbine
opt_problem.driver.add_desvar('r_s',lower=0.5,upper=9.0)
opt_problem.driver.add_desvar('l_s', lower=0.5, upper=2.5)
opt_problem.driver.add_desvar('h_s', lower=0.06, upper=0.15)
opt_problem.driver.add_desvar('tau_p', lower=0.04, upper=0.2)
opt_problem.driver.add_desvar('N_f', lower=10, upper=300)
opt_problem.driver.add_desvar('I_f', lower=1, upper=500)
opt_problem.driver.add_desvar('n_r', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('h_yr', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('h_ys', lower=0.01, upper=0.25)
opt_problem.driver.add_desvar('b_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_r', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_wr', lower=0.001, upper=0.2)
opt_problem.driver.add_desvar('n_s', lower=5.0, upper=15.0)
opt_problem.driver.add_desvar('b_st', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('d_s', lower=0.1, upper=1.5)
opt_problem.driver.add_desvar('t_ws', lower=0.001, upper=0.2)
# set up constraints for the PMSG_arms generator
opt_problem.driver.add_constraint('B_symax',upper=2.0-1.0e-6) #1
opt_problem.driver.add_constraint('B_rymax',upper=2.0-1.0e-6) #2
opt_problem.driver.add_constraint('B_tmax',upper=2.0-1.0e-6) #3
opt_problem.driver.add_constraint('B_gfm',lower=0.617031,upper=1.057768) #4
opt_problem.driver.add_constraint('B_g',lower=0.7,upper=1.2) #5
opt_problem.driver.add_constraint('B_pc',upper=2.0) #6
opt_problem.driver.add_constraint('E_s',lower=500.0,upper=5000.0) #7
opt_problem.driver.add_constraint('con_uAs',lower=0.0+1.0e-6) #8
opt_problem.driver.add_constraint('con_zAs',lower=0.0+1.0e-6) #9
opt_problem.driver.add_constraint('con_yAs',lower=0.0+1.0e-6) #10
opt_problem.driver.add_constraint('con_uAr',lower=0.0+1.0e-6) #11
opt_problem.driver.add_constraint('con_zAr',lower=0.0+1.0e-6) #12
opt_problem.driver.add_constraint('con_yAr',lower=0.0+1.0e-6) #13
opt_problem.driver.add_constraint('con_TC2',lower=0.0+1.0e-6) #14
opt_problem.driver.add_constraint('con_TC3',lower=0.0+1e-6) #15
opt_problem.driver.add_constraint('con_br',lower=0.0+1e-6) #16
opt_problem.driver.add_constraint('con_bst',lower=0.0-1e-6) #17
opt_problem.driver.add_constraint('A_1',upper=60000.0-1e-6) #18
opt_problem.driver.add_constraint('J_s',upper=6.0) #19
opt_problem.driver.add_constraint('J_f',upper=6.0) #20
opt_problem.driver.add_constraint('A_Cuscalc',lower=5.0,upper=300) #22
opt_problem.driver.add_constraint('A_Curcalc',lower=10,upper=300) #23
opt_problem.driver.add_constraint('K_rad',lower=0.2+1e-6,upper=0.27) #24
opt_problem.driver.add_constraint('Slot_aspect_ratio',lower=4.0,upper=10.0)#25
opt_problem.driver.add_constraint('gen_eff',lower=Eta_Target) #26
opt_problem.driver.add_constraint('n_brushes',upper=6) #27
opt_problem.driver.add_constraint('Power_ratio',upper=2-1.0e-6) #28
opt_problem.setup()
# Specify Target machine parameters
opt_problem['machine_rating']=5000000.0
opt_problem['Torque']=4.143289e6
opt_problem['n_nom']=12.1
# Initial design variables
opt_problem['r_s']=3.2
opt_problem['l_s']=1.4
opt_problem['h_s']= 0.060
opt_problem['tau_p']= 0.170
opt_problem['I_f']= 69
opt_problem['N_f']= 100
opt_problem['h_ys']= 0.130
opt_problem['h_yr']= 0.120
opt_problem['n_s']= 5
opt_problem['b_st']= 0.470
opt_problem['n_r']=5
opt_problem['b_r']= 0.480
opt_problem['d_r']= 0.510
opt_problem['d_s']= 0.400
opt_problem['t_wr']=0.140
opt_problem['t_ws']=0.070
opt_problem['R_o']=0.43 #10MW: 0.523950817,#5MW: 0.43, #3MW:0.363882632 #1.5MW: 0.2775 0.75MW: 0.17625
# Costs
opt_problem['C_Cu']=4.786
opt_problem['C_Fe']= 0.556
opt_problem['C_Fes']=0.50139
#Material properties
opt_problem['rho_Fe']= 7700 #Magnetic Steel/iron density
opt_problem['rho_Fes']= 7850 #structural Steel density
opt_problem['rho_Copper']=8900 # Kg/m3 copper density
opt_problem['main_shaft_cm']=np.array([0.0, 0.0, 0.0])
opt_problem['main_shaft_length'] =2.0
#Run optimization
opt_problem.run()
"""Uncomment to print solution to screen/an excel file
raw_data = {'Parameters': ['Rating','Stator Arms', 'Stator Axial arm dimension','Stator Circumferential arm dimension',' Stator arm Thickness' ,'Rotor Arms', 'Rotor Axial arm dimension','Rotor Circumferential arm dimension',\
'Rotor Arm thickness', ' Rotor Radial deflection', 'Rotor Axial deflection','Rotor circum deflection', 'Stator Radial deflection',' Stator Axial deflection',' Stator Circumferential deflection','Air gap diameter', 'Stator length',\
'l/D ratio', 'Pole pitch', 'Stator slot height','Stator slot width','Slot aspect ratio','Stator tooth width', 'Stator yoke height', 'Rotor yoke height', 'Rotor pole height', 'Rotor pole width', 'Average no load flux density', \
'Peak air gap flux density','Peak stator yoke flux density','Peak rotor yoke flux density','Stator tooth flux density','Rotor pole core flux density','Pole pairs', 'Generator output frequency', 'Generator output phase voltage(rms value)', \
'Generator Output phase current', 'Stator resistance', 'Synchronous inductance','Stator slots','Stator turns','Stator conductor cross-section','Stator Current density ','Specific current loading','Field turns','Conductor cross-section',\
'Field Current','D.C Field resistance','MMF ratio at rated load(Rotor/Stator)','Excitation Power (% of Rated Power)','Number of brushes/polarity','Field Current density','Generator Efficiency', 'Iron mass', 'Copper mass','Mass of Arms','Total Mass','Total Cost'],\
'Values': [opt_problem['machine_rating']/1e6,opt_problem['n_s'],opt_problem['d_s']*1000,opt_problem['b_st']*1000,opt_problem['t_ws']*1000,opt_problem['n_r'],opt_problem['d_r']*1000,opt_problem['b_r']*1000,opt_problem['t_wr']*1000,opt_problem['u_Ar']*1000,\
opt_problem['y_Ar']*1000,opt_problem['z_A_r']*1000,opt_problem['u_As']*1000,opt_problem['y_As']*1000,opt_problem['z_A_s']*1000,2*opt_problem['r_s'],opt_problem['l_s'],opt_problem['K_rad'],opt_problem['tau_p']*1000,opt_problem['h_s']*1000,opt_problem['b_s']*1000,\
opt_problem['Slot_aspect_ratio'],opt_problem['b_t']*1000,opt_problem['h_ys']*1000,opt_problem['h_yr']*1000,opt_problem['h_p']*1000,opt_problem['b_p']*1000,opt_problem['B_gfm'],opt_problem['B_g'],opt_problem['B_symax'],opt_problem['B_rymax'],opt_problem['B_tmax'],\
opt_problem['B_pc'],opt_problem['p'],opt_problem['f'],opt_problem['E_s'],opt_problem['I_s'],opt_problem['R_s'],opt_problem['L_m'],opt_problem['S'],opt_problem['N_s'],opt_problem['A_Cuscalc'],opt_problem['J_s'],opt_problem['A_1']/1000,opt_problem['N_f'],opt_problem['A_Curcalc'],\
opt_problem['I_f'],opt_problem['R_r'],opt_problem['Load_mmf_ratio'],opt_problem['Power_ratio'],opt_problem['n_brushes'],opt_problem['J_f'],opt_problem['gen_eff'],opt_problem['Iron']/1000,opt_problem['Copper']/1000,opt_problem['Structural_mass']/1000,\
opt_problem['Mass']/1000,opt_problem['Costs']/1000],
'Limit': ['','','',opt_problem['b_all_s']*1000,'','','',opt_problem['b_all_r']*1000,'',opt_problem['u_all_r']*1000,opt_problem['y_all']*1000,opt_problem['z_all_r']*1000,opt_problem['u_all_s']*1000,opt_problem['y_all']*1000,opt_problem['z_all_s']*1000,\
'','','(0.2-0.27)','','','','(4-10)','','','','','','(0.62-1.05)','1.2','2','2','2','2','','(10-60)','','','','','','','','(3-6)','<60','','','','','','<2%','','(3-6)',Eta_Target,'','','','',''],
'Units':['MW','unit','mm','mm','mm','unit','mm','mm','mm','mm','mm','mm','mm','mm','mm','m','m','','','mm','mm','mm','mm','mm','mm','mm','mm','T','T','T','T','T','T','-','Hz','V','A','om/phase',\
'p.u','slots','turns','mm^2','A/mm^2','kA/m','turns','mm^2','A','ohm','%','%','brushes','A/mm^2','turns','%','tons','tons','tons','1000$']}
df=pandas.DataFrame(raw_data, columns=['Parameters','Values','Limit','Units'])
print df
df.to_excel('EESG_'+str(opt_problem['machine_rating']/1e6)+'MW_1.7.x.xlsx')
"""
if __name__=="__main__":
# Run an example optimization of EESG generator on cost
EESG_Opt_example()
| 48.264778 | 282 | 0.660177 |
83fc1d70cbfd496107dfaac0a519dc08e54e550f | 2,879 | py | Python | haiku/_src/integration/numpy_inputs_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 1,647 | 2020-02-21T14:24:31.000Z | 2022-03-31T04:31:34.000Z | haiku/_src/integration/numpy_inputs_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 169 | 2020-02-21T14:07:25.000Z | 2022-03-31T13:08:28.000Z | haiku/_src/integration/numpy_inputs_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 159 | 2020-02-21T19:31:02.000Z | 2022-03-29T12:41:35.000Z | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests whether modules produce similar output given np.ndarray inputs."""
import functools
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
from haiku._src import test_utils
from haiku._src.integration import descriptors
import jax
import jax.numpy as jnp
import numpy as np
ModuleFn = descriptors.ModuleFn
if __name__ == '__main__':
absltest.main()
| 31.637363 | 95 | 0.685655 |
83fc4f9ad87af1b6d3bc93e82f86f3dfb8315e07 | 519 | py | Python | OldStreamingExperiments/NeighbourReducerCounter.py | AldurD392/SubgraphExplorer | d7c5de234a9ae1a83a017e77074fde5fd1d430b9 | [
"MIT"
] | null | null | null | OldStreamingExperiments/NeighbourReducerCounter.py | AldurD392/SubgraphExplorer | d7c5de234a9ae1a83a017e77074fde5fd1d430b9 | [
"MIT"
] | null | null | null | OldStreamingExperiments/NeighbourReducerCounter.py | AldurD392/SubgraphExplorer | d7c5de234a9ae1a83a017e77074fde5fd1d430b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""A more advanced Reducer, using Python iterators and generators."""
from itertools import groupby
from operator import itemgetter
import sys
if __name__ == "__main__":
main()
| 24.714286 | 69 | 0.689788 |
83fd35f6f554a1bd8fc3e12924ca6da425b05086 | 486 | py | Python | tests/programs/lists/member_isin.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 1 | 2018-05-19T18:28:12.000Z | 2018-05-19T18:28:12.000Z | tests/programs/lists/member_isin.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 12 | 2018-04-26T00:58:11.000Z | 2018-05-13T22:03:39.000Z | tests/programs/lists/member_isin.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | null | null | null | from pylps.core import *
initialise(max_time=5)
create_actions('say(_, _)', 'say_single(_)')
create_events('member(_, _)')
create_facts('inp(_, _)')
create_variables('X', 'Y', 'F', 'Item', 'List', 'Tail')
inp([], [[]])
inp('z', ['a', 'b', 'c', 'd', 'e'])
inp('a', ['b', 'c', 'a'])
inp(['b', 'c'], ['d', ['a', 'c']])
inp(['b', 'c'], ['d', ['a', 'c'], ['b', 'c']])
reactive_rule(inp(Item, List)).then(
Item.is_in(List),
say(Item, List),
)
execute(debug=False)
show_kb_log()
| 20.25 | 55 | 0.522634 |
83ff49e0443c8a936583d9a35d43b023aa52642a | 6,663 | py | Python | fastface/dataset/base.py | mdornseif/fastface | 72772db1fae4af17e829cd5479c4848fe5eb8948 | [
"MIT"
] | 72 | 2021-01-03T05:43:56.000Z | 2021-09-17T06:09:35.000Z | fastface/dataset/base.py | mdornseif/fastface | 72772db1fae4af17e829cd5479c4848fe5eb8948 | [
"MIT"
] | 3 | 2021-09-23T22:26:57.000Z | 2021-10-31T10:11:48.000Z | fastface/dataset/base.py | mdornseif/fastface | 72772db1fae4af17e829cd5479c4848fe5eb8948 | [
"MIT"
] | 6 | 2021-02-15T19:58:57.000Z | 2021-08-19T12:46:41.000Z | import copy
import logging
import os
from typing import Dict, List, Tuple
import checksumdir
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from ..adapter import download_object
logger = logging.getLogger("fastface.dataset")
| 31.880383 | 88 | 0.559508 |
83ff592ddca67877e6b752e54e39cc96af464cdd | 1,565 | py | Python | clevr_video/params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | clevr_video/params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | null | null | null | clevr_video/params.py | jiaqi-xi/slot_attention | 8420414eb261501e5b056e4d409c338d909397ef | [
"Apache-2.0"
] | 1 | 2021-11-11T19:44:14.000Z | 2021-11-11T19:44:14.000Z | from typing import Optional
from typing import Tuple
import attr
| 34.021739 | 77 | 0.686901 |
83fff1491dc4525ae4d3d5754c54e0efcce41659 | 989 | py | Python | sources/car.py | amaurylrd/banlieu_drift | bd9e435bf5ce25e782a59de33472beb932cac9ad | [
"Apache-2.0"
] | 1 | 2021-11-26T16:57:54.000Z | 2021-11-26T16:57:54.000Z | sources/car.py | amaurylrd/banlieu_drift | bd9e435bf5ce25e782a59de33472beb932cac9ad | [
"Apache-2.0"
] | null | null | null | sources/car.py | amaurylrd/banlieu_drift | bd9e435bf5ce25e782a59de33472beb932cac9ad | [
"Apache-2.0"
] | null | null | null | import pygame
import math
coef_turn = 0.3
coef_drift = 0.07 # adhrence au sol
coef_vel = 10 | 29.088235 | 96 | 0.50455 |
8601805a4413deebe6198ae3e881b519806f6bcf | 6,014 | py | Python | test/test_static.py | fjarri/grunnur | 5eea8ec408e431f43a59780cdf8be2f441a9ebb5 | [
"MIT"
] | 1 | 2020-12-04T12:19:18.000Z | 2020-12-04T12:19:18.000Z | test/test_static.py | fjarri/grunnur | 5eea8ec408e431f43a59780cdf8be2f441a9ebb5 | [
"MIT"
] | 11 | 2021-03-11T00:20:23.000Z | 2021-03-11T01:05:54.000Z | test/test_static.py | fjarri/grunnur | 5eea8ec408e431f43a59780cdf8be2f441a9ebb5 | [
"MIT"
] | null | null | null | import pytest
import numpy
from grunnur import (
cuda_api_id, opencl_api_id,
StaticKernel, VirtualSizeError, API, Context, Queue, MultiQueue, Array, MultiArray
)
from grunnur.template import DefTemplate
from .mock_base import MockKernel, MockDefTemplate, MockDefTemplate
from .mock_pycuda import PyCUDADeviceInfo
from .mock_pyopencl import PyOpenCLDeviceInfo
from .test_program import _test_constant_memory
SRC = """
KERNEL void multiply(GLOBAL_MEM int *dest, GLOBAL_MEM int *a, GLOBAL_MEM int *b)
{
${static.begin};
const int i = ${static.global_id}(0);
const int j = ${static.global_id}(1);
const int idx = ${static.global_flat_id}();
dest[idx] = a[i] * b[j];
}
"""
| 34.365714 | 105 | 0.689225 |
86024a0f256f012bd58b4d8e9b5de4b21cc1702d | 1,024 | py | Python | stat_ip_in_hash_woker_table.py | ligang945/pyMisc | 3107c80f7f53ffc797b289ec73d1ef4db80f0b63 | [
"MIT"
] | null | null | null | stat_ip_in_hash_woker_table.py | ligang945/pyMisc | 3107c80f7f53ffc797b289ec73d1ef4db80f0b63 | [
"MIT"
] | null | null | null | stat_ip_in_hash_woker_table.py | ligang945/pyMisc | 3107c80f7f53ffc797b289ec73d1ef4db80f0b63 | [
"MIT"
] | null | null | null |
ipint2str = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
ipstr2int = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
src_ip = dict()
dst_ip = dict()
i =0
with open('hash_key_value') as f:
for line in f:
i += 1
# if i==8424720:
if i==328:
break
ip = int(line.split(',')[0], 16)
dir = int(line.split(',')[1])
if dir==1:
src_ip.setdefault(ip, dir)
elif dir ==0:
dst_ip.setdefault(ip, dir)
print len(src_ip)
for key in src_ip:
print ipint2str(key)+' ' ,
print '======='
print len(dst_ip)
for key in dst_ip:
print ipint2str(key)+' ' ,
# keys = src_ip.items()
# keys.sort()
# for key in keys:
# print ipint2str(key[0])
# keys = dst_ip.items()
# keys.sort()
# for key in keys:
# print ipint2str(key[0])
| 20.078431 | 82 | 0.512695 |
86028d3af8b32e8fcc5b56f0951579ff48885aaa | 8,212 | py | Python | platform/winrt/detect.py | bdero/godot | e7572c690a3a6792e5aa183e16d902bff77398bc | [
"CC-BY-3.0",
"MIT"
] | 24 | 2016-10-14T16:54:01.000Z | 2022-01-15T06:39:17.000Z | platform/winrt/detect.py | bdero/godot | e7572c690a3a6792e5aa183e16d902bff77398bc | [
"CC-BY-3.0",
"MIT"
] | 2 | 2021-08-17T02:04:05.000Z | 2021-09-18T13:55:13.000Z | platform/winrt/detect.py | bdero/godot | e7572c690a3a6792e5aa183e16d902bff77398bc | [
"CC-BY-3.0",
"MIT"
] | 9 | 2017-08-04T12:00:16.000Z | 2021-12-10T06:48:28.000Z |
import os
import sys
import string
#/c/Program Files (x86)/Windows Phone Kits/8.1/lib/ARM/WindowsPhoneCore.lib
| 52.305732 | 846 | 0.693132 |
8602e07af8df333a6a9bc854df324adb49b003af | 6,934 | py | Python | monasca-log-api-2.9.0/monasca_log_api/tests/test_role_middleware.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | monasca-log-api-2.9.0/monasca_log_api/tests/test_role_middleware.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | monasca-log-api-2.9.0/monasca_log_api/tests/test_role_middleware.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2015-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from webob import response
from monasca_log_api.middleware import role_middleware as rm
from monasca_log_api.tests import base
| 30.546256 | 77 | 0.657629 |
86035a85164418f81cd1bcd44a084b4bd7b49d04 | 328 | py | Python | app/LOGS/logger_.py | innovationb1ue/XMU_HealthReport | 6ee0c7830a0e30fc9730401585a303873f382bac | [
"MIT"
] | 2 | 2021-09-03T18:13:46.000Z | 2022-01-13T08:48:36.000Z | app/LOGS/logger_.py | buuuuuuug/XMU_HealthReport | cb545959eceddf676b34237c38b1ba6f797764f5 | [
"MIT"
] | null | null | null | app/LOGS/logger_.py | buuuuuuug/XMU_HealthReport | cb545959eceddf676b34237c38b1ba6f797764f5 | [
"MIT"
] | 1 | 2021-07-14T09:48:19.000Z | 2021-07-14T09:48:19.000Z | import logging
| 18.222222 | 51 | 0.664634 |
860369b74b7a50328a72400a0fc52d3fc97e9d16 | 80 | py | Python | route/link.py | moluwole/Bast_skeleton | 9e58c1c0da3085b377896aab1e3007689c328c1c | [
"MIT"
] | 3 | 2018-08-04T21:11:35.000Z | 2018-08-24T04:47:16.000Z | route/link.py | moluwole/Bast_skeleton | 9e58c1c0da3085b377896aab1e3007689c328c1c | [
"MIT"
] | 1 | 2018-08-24T20:57:36.000Z | 2018-08-24T20:57:36.000Z | route/link.py | moluwole/Bast_skeleton | 9e58c1c0da3085b377896aab1e3007689c328c1c | [
"MIT"
] | 2 | 2018-08-05T19:14:16.000Z | 2018-08-15T08:13:50.000Z | from bast import Route
route = Route()
route.get('/', 'HelloController.index')
| 16 | 39 | 0.7125 |
86040bdb269e301a7c36430ecb672c9bac61af90 | 1,626 | py | Python | paradrop/daemon/paradrop/core/config/wifi.py | VegetableChook/Paradrop | a38e1773877d5b136c3b626edd8c033a12b43e56 | [
"Apache-2.0"
] | 1 | 2018-03-22T13:04:19.000Z | 2018-03-22T13:04:19.000Z | paradrop/daemon/paradrop/core/config/wifi.py | VegetableChook/Paradrop | a38e1773877d5b136c3b626edd8c033a12b43e56 | [
"Apache-2.0"
] | null | null | null | paradrop/daemon/paradrop/core/config/wifi.py | VegetableChook/Paradrop | a38e1773877d5b136c3b626edd8c033a12b43e56 | [
"Apache-2.0"
] | null | null | null | from paradrop.base.output import out
from paradrop.lib.utils import uci
from . import configservice, uciutils
def getOSWirelessConfig(update):
"""
Read settings from networkInterfaces for wireless interfaces.
Store wireless configuration settings in osWirelessConfig.
"""
# old code under lib.internal.chs.chutelxc same function name
interfaces = update.new.getCache('networkInterfaces')
if interfaces is None:
return
wifiIfaces = list()
for iface in interfaces:
# Only look at wifi interfaces.
if iface['netType'] != "wifi":
continue
config = {'type': 'wifi-iface'}
options = {
'device': iface['device'],
'network': iface['externalIntf'],
'mode': iface.get('mode', 'ap')
}
# Required for AP and client mode but not monitor mode.
if 'ssid' in iface:
options['ssid'] = iface['ssid']
# Optional encryption settings
if 'encryption' in iface:
options['encryption'] = iface['encryption']
if 'key' in iface:
options['key'] = iface['key']
# Add extra options.
options.update(iface['options'])
wifiIfaces.append((config, options))
update.new.setCache('osWirelessConfig', wifiIfaces)
def setOSWirelessConfig(update):
"""
Write settings from osWirelessConfig out to UCI files.
"""
changed = uciutils.setConfig(update.new, update.old,
cacheKeys=['osWirelessConfig'],
filepath=uci.getSystemPath("wireless"))
| 29.563636 | 72 | 0.602706 |
86040d6a3dcd14bd0d738d5bbdbdef5ec27bd32e | 44,518 | py | Python | xixi.py | niushuqing123/final-project | 237745dd27a29c9a4b0574003c37fe4c875fde91 | [
"MIT"
] | 9 | 2022-03-10T06:53:38.000Z | 2022-03-19T08:54:51.000Z | xixi.py | niushuqing123/final-project | 237745dd27a29c9a4b0574003c37fe4c875fde91 | [
"MIT"
] | null | null | null | xixi.py | niushuqing123/final-project | 237745dd27a29c9a4b0574003c37fe4c875fde91 | [
"MIT"
] | null | null | null | import taichi as ti
import numpy as np
from functools import reduce
# from sph_base import SPHBase
# ti.init(arch=ti.cpu)
# Use GPU for higher peformance if available
ti.init(arch=ti.gpu, device_memory_GB=4, packed=True)
#
# res = (720,720)
res = (512,512)
dim = 2
assert dim > 1
screen_to_world_ratio = 50
bound = np.array(res) / screen_to_world_ratio
print(bound)
# Material
material_boundary = 0
particle_radius = 0.05 # particle radius
particle_diameter = 2 * particle_radius
support_radius = particle_radius * 4.0 # support radius
m_V = 0.8 * particle_diameter ** dim
particle_max_num = 2 ** 15
particle_max_num_per_cell = 100
particle_max_num_neighbor = 200
particle_num = ti.field(int, shape=())
# gravity = -98.0 #
viscosity = 0.05 #
density_0 = 1000.0 #
mass = m_V * density_0
dt =3e-4
exponent = 7.0
stiffness = 50.0
#
x = ti.Vector.field(dim, dtype=float)
v = ti.Vector.field(dim, dtype=float)
d_velocity = ti.Vector.field(dim, dtype=float)
density = ti.field(dtype=float)
pressure = ti.field(dtype=float)
material = ti.field(dtype=int)
color = ti.field(dtype=int)
particle_neighbors = ti.field(int)
particle_neighbors_num = ti.field(int)
particles_node = ti.root.dense(ti.i, particle_max_num)
particles_node.place(x,v,d_velocity, density, pressure, material, color,particle_neighbors_num)
# Grid related properties
grid_size = support_radius
grid_num = np.ceil(np.array(res) / grid_size).astype(int)
print(grid_num)
grid_particles_num = ti.field(int)
grid_particles = ti.field(int)
padding = grid_size
particle_node = particles_node.dense(ti.j, particle_max_num_neighbor)
particle_node.place(particle_neighbors)
index = ti.ij if dim == 2 else ti.ijk
grid_node = ti.root.dense(index, grid_num)
grid_node.place(grid_particles_num)
cell_index = ti.k if dim == 2 else ti.l
cell_node = grid_node.dense(cell_index, particle_max_num_per_cell)
cell_node.place(grid_particles)
# ========================================
#
# boundary particle
#
circular_max_num=1000
circular_num= ti.field(int, shape=())
circular_node = ti.root.dense(ti.i, circular_max_num)
c_x=ti.Vector.field(dim, dtype=float)
c_v=ti.Vector.field(dim, dtype=float)
c_f=ti.Vector.field(dim, dtype=float)
c_r=ti.field(float)
c_m=ti.field(float)
fixed = ti.field(int)
circular_node.place(c_x,c_v,c_f,c_r,c_m,fixed)
Young_modulus=2000000
#
rest_length = ti.field(dtype=float, shape=(circular_max_num, circular_max_num))
Young_modulus_spring=921000
dashpot_damping=300#
=0.2#
def substep():
grid_particles_num.fill(0)
particle_neighbors.fill(-1)
solve()
=0
=[0,0]
def ():
if [0]==0 and [1]==0 :
for i in range(,particle_num[None]):
if(material[i]==2):
material[i] = 1
else:
for i in range([0],[1]):
material[i] = 1
# @ti.kernel
def add_particle_cube(pos,size,material,color_):
li=(int)(size[0]*10)
lj=(int)(size[1]*10)
for i in range(li):
for j in range(lj):
pass
add_particle(pos[0]+i/18,pos[1]+j/18,0,0,material,color_)
if(==0):_=1
num=circular_num[None]
c_x[num] = ti.Vector([pos_x, pos_y]) # x
c_v[num]=ti.Vector([vx, vy])
fixed[num]=fix
c_r[num]=r1
c_m[num]=r1*r1
circular_num[None] += 1 #
if(spring==1):
for i in range(num): # ,
if(c_x[num]-c_x[i]).norm() < _: # 0.15
rest_length[num, i] = _ #
rest_length[i, num] = _
#,,
#p_bond
#,
= ti.field(int, shape=())
#
buff=[0,0]
def revocation_a_cirulars():
circular_num[None] -= 1
num=circular_num[None]
#
c_x[num] = ti.Vector([0, 0])
c_v[num]=ti.Vector([0, 0])
c_r[num]=0
c_m[num]=0
fixed[num]=0
for i in range(num):
#
rest_length[i, num] = 0
rest_length[num, i] = 0
#
#
if __name__ == "__main__":
main()
| 32.471189 | 207 | 0.536682 |
86048fe3a3ee21e21bf198adc5adb3af9aad7917 | 1,129 | py | Python | pexen/factory/module.py | comps/pexen | 3da6b110cf063646e5d2ae671f4408a14662fcca | [
"MIT"
] | 1 | 2019-09-06T10:41:30.000Z | 2019-09-06T10:41:30.000Z | pexen/factory/module.py | comps/pexen | 3da6b110cf063646e5d2ae671f4408a14662fcca | [
"MIT"
] | null | null | null | pexen/factory/module.py | comps/pexen | 3da6b110cf063646e5d2ae671f4408a14662fcca | [
"MIT"
] | 1 | 2020-06-24T00:31:13.000Z | 2020-06-24T00:31:13.000Z | import inspect
from fnmatch import fnmatchcase
from ..sched import meta
from .base import BaseFactory
| 28.225 | 77 | 0.639504 |
860663bf7fc7f279ff0aaf05a3df989c0b80600b | 2,431 | py | Python | Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex07_water_jugs.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex07_water_jugs.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | Python/zzz_training_challenge/Python_Challenge/solutions/ch07_recursion_advanced/solutions/ex07_water_jugs.py | Kreijeck/learning | eaffee08e61f2a34e01eb8f9f04519aac633f48c | [
"MIT"
] | null | null | null | # Beispielprogramm fr das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
if __name__ == "__main__":
main()
| 37.4 | 80 | 0.559441 |
8606af2e767f86b97fc991c40020f2652a4de91b | 9,424 | py | Python | cancat/vstruct/defs/elf.py | kimocoder/CanCat | e06f45b22db68b67b6fd93d63d826df9b5d1069c | [
"BSD-2-Clause"
] | 2 | 2020-06-07T04:05:29.000Z | 2022-02-09T00:00:49.000Z | vstruct/defs/elf.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vstruct/defs/elf.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import vstruct
from vstruct.primitives import *
EI_NIDENT = 4
EI_PADLEN = 7
| 35.969466 | 54 | 0.612585 |
8606c99e338a87761250aeed31095b32a52bc802 | 10,041 | py | Python | Acquire/Identity/_useraccount.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | 1 | 2021-10-18T17:11:47.000Z | 2021-10-18T17:11:47.000Z | Acquire/Identity/_useraccount.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | null | null | null | Acquire/Identity/_useraccount.py | openghg/acquire | 8af8701b092f7304c02fea1ee6360e53502dfd64 | [
"Apache-2.0"
] | null | null | null | __all__ = ["UserAccount"]
_user_root = "identity/users"
def _encode_username(username):
"""This function returns an encoded (sanitised) version of
the username. This will ensure that the username
is valid (must be between 3 and 50 characters).
The sanitised username is the encoded version,
meaning that a user can use a unicode (emoji)
username if they so desire
"""
if username is None:
return None
if len(username) < 3 or len(username) > 150:
from Acquire.Identity import UsernameError
raise UsernameError("The username must be between 3 and 150 characters!")
from Acquire.ObjectStore import string_to_encoded as _string_to_encoded
return _string_to_encoded(username)
def name(self):
"""Return the name of this account"""
return self._username
def username(self):
"""Synonym for 'name'"""
return self.name()
def encoded_name(self):
"""Return the encoded (sanitised) username"""
return _encode_username(self._username)
def uid(self):
"""Return the globally unique ID for this account"""
return self._uid
def login_root_url(self):
"""Return the root URL used to log into this account"""
from Acquire.Service import get_this_service as _get_this_service
return _get_this_service().canonical_url()
def is_valid(self):
"""Return whether or not this is a valid account"""
return not (self._status is None)
def is_active(self):
"""Return whether or not this is an active account"""
if self._status is None:
return False
else:
return self._status == "active"
def public_key(self):
"""Return the lines of the public key for this account"""
return self._privkey.public_key()
def private_key(self):
"""Return the lines of the private key for this account"""
return self._privkey
def status(self):
"""Return the status for this account"""
if self._status is None:
return "invalid"
return self._status
def to_data(self, passphrase, mangleFunction=None):
"""Return a data representation of this object (dictionary)"""
if self._username is None:
return None
data = {}
data["username"] = self._username
data["status"] = self._status
data["uid"] = self._uid
data["private_key"] = self._privkey.to_data(passphrase=passphrase, mangleFunction=mangleFunction)
return data
| 37.74812 | 110 | 0.66587 |
86073f9a281f5bb9b144352abdd430f4d907d4bc | 3,337 | py | Python | saharaclient/api/job_binaries.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | 34 | 2015-01-26T21:39:46.000Z | 2021-01-16T17:30:25.000Z | saharaclient/api/job_binaries.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | null | null | null | saharaclient/api/job_binaries.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | 15 | 2015-03-13T23:24:59.000Z | 2017-06-22T12:15:46.000Z | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saharaclient.api import base
# NOTE(jfreud): keep this around for backwards compatibility
JobBinariesManager = JobBinariesManagerV1
| 32.086538 | 78 | 0.631106 |
860766f5b7e396034ed70275ab698c4665993ebb | 584 | py | Python | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 1,430 | 2015-01-01T19:22:19.000Z | 2022-03-29T11:34:35.000Z | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 360 | 2015-01-02T05:27:34.000Z | 2022-03-18T14:08:27.000Z | tests/test_translator.py | Attsun1031/schematics | 90dee53fd1d5c29f2c947bec6f5ffe5f74305ab1 | [
"BSD-3-Clause"
] | 222 | 2015-01-07T20:07:02.000Z | 2022-03-22T16:12:47.000Z | # -*- coding: utf-8 -*-
import pytest
| 30.736842 | 86 | 0.72089 |
860804e29db65321937c10951cae50769822d370 | 641 | py | Python | 1014 Trie Tree/test.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | 1014 Trie Tree/test.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | 1014 Trie Tree/test.py | SLAPaper/hihoCoder | 3f64d678c5dd46db36345736eb56880fb2d2c5fe | [
"MIT"
] | null | null | null | # generate 900k word and 900k query to test the runtime
from main import TrieTree
import time
import random
vocal = list(range(26))
trie = TrieTree()
words = [[random.choice(vocal) for _ in range(random.randrange(1, 11))] for _ in range(100000)]
queries = [[random.choice(vocal) for _ in range(random.randrange(1, 11))] for _ in range(100000)]
begin = time.time()
for word in words:
trie.insert(word)
insert_end = time.time()
for query in queries:
trie.query(query)
end = time.time()
print("insert time used:", insert_end - begin, 's')
print("query time used:", end - insert_end, 's')
print("time used:", end - begin, 's')
| 22.103448 | 97 | 0.692668 |
86081d580f0b29a7dc03878e4040b5668fd409d4 | 894 | py | Python | tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 1 | 2021-10-21T03:04:16.000Z | 2021-10-21T03:04:16.000Z | tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 58 | 2020-11-06T12:13:45.000Z | 2022-03-28T13:20:11.000Z | tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py | pazamelin/openvino | b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48 | [
"Apache-2.0"
] | 2 | 2019-09-20T01:33:37.000Z | 2019-09-20T08:42:11.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.mvn import MVNOnnx
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr
| 30.827586 | 74 | 0.647651 |
f7a5670ed8d1bfbab967804b7afd49109f337bfe | 11,661 | py | Python | enemy.py | KasiaWo/Rabbit_Bobble | 89afbaa4f8b46e20ad33e9c410f50c85ddae747b | [
"MIT"
] | null | null | null | enemy.py | KasiaWo/Rabbit_Bobble | 89afbaa4f8b46e20ad33e9c410f50c85ddae747b | [
"MIT"
] | null | null | null | enemy.py | KasiaWo/Rabbit_Bobble | 89afbaa4f8b46e20ad33e9c410f50c85ddae747b | [
"MIT"
] | null | null | null | """
Module for managing enemies.
"""
import random
import constants as const
import pygame
import random
import platforms
from spritesheet_functions import SpriteSheet
| 35.769939 | 116 | 0.54738 |
f7a6e933e409ba532f518e3e1b2e619a58f1715d | 10,715 | py | Python | schicluster/_hicluster_internal.py | zhoujt1994/scHiCluster | 1f7e0cc5a56a357659a6b10b34053e6addbf30a5 | [
"MIT"
] | 27 | 2019-07-10T23:17:33.000Z | 2022-01-14T07:34:42.000Z | schicluster/_hicluster_internal.py | zhoujt1994/scHiCluster | 1f7e0cc5a56a357659a6b10b34053e6addbf30a5 | [
"MIT"
] | 4 | 2019-11-01T01:12:09.000Z | 2022-03-29T11:24:35.000Z | schicluster/_hicluster_internal.py | zhoujt1994/scHiCluster | 1f7e0cc5a56a357659a6b10b34053e6addbf30a5 | [
"MIT"
] | 8 | 2019-12-24T13:54:11.000Z | 2022-01-26T17:21:55.000Z | import argparse
import inspect
import logging
import sys
from .__main__ import setup_logging
log = logging.getLogger()
DESCRIPTION = """
hic-internal is used for automation, not intend to be used by end user.
Use hicluster instead.
"""
EPILOG = ''
| 23.653422 | 105 | 0.583481 |
f7a809bbbe91eb7260be62d0dad3baf769a4cf97 | 5,013 | py | Python | tests/test_host_resolver.py | mssaleh/aioatomapi | 2b9f00fce993153c52595e09ecc80562574af62c | [
"MIT"
] | null | null | null | tests/test_host_resolver.py | mssaleh/aioatomapi | 2b9f00fce993153c52595e09ecc80562574af62c | [
"MIT"
] | 27 | 2021-10-13T17:17:38.000Z | 2022-03-31T17:24:08.000Z | tests/test_host_resolver.py | mssaleh/aioatomapi | 2b9f00fce993153c52595e09ecc80562574af62c | [
"MIT"
] | null | null | null | import asyncio
import socket
import pytest
from mock import AsyncMock, MagicMock, patch
import aioatomapi.host_resolver as hr
from aioatomapi.core import APIConnectionError
| 32.764706 | 85 | 0.710353 |
f7a952870293e9fc90ffc7d9d4a818a5a7c4f56d | 85,241 | py | Python | azure-mgmt-web/azure/mgmt/web/operations/diagnostics_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-web/azure/mgmt/web/operations/diagnostics_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-web/azure/mgmt/web/operations/diagnostics_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
| 49.472432 | 239 | 0.66407 |
f7abc4036e6849052f1ad734c829603c8746cd22 | 237 | py | Python | data/ck/check_data.py | jorgimello/meta-learning-fer | 793610ae8471f794a6837930d8bb51866c1f7c02 | [
"MIT"
] | 4 | 2020-10-10T03:33:15.000Z | 2022-01-17T08:00:32.000Z | data/ck/check_data.py | jorgimello/meta-learning-facial-expression-recognition | 793610ae8471f794a6837930d8bb51866c1f7c02 | [
"MIT"
] | null | null | null | data/ck/check_data.py | jorgimello/meta-learning-facial-expression-recognition | 793610ae8471f794a6837930d8bb51866c1f7c02 | [
"MIT"
] | null | null | null | import numpy as np
import os, cv2
imgs = np.load('test_set_ck_extended_no_resize.npy')
lbls = np.load('test_labels_ck_extended_no_resize.npy')
for i in range(imgs.shape[0]):
print (lbls[i])
cv2.imshow('img', imgs[i])
cv2.waitKey(0)
| 21.545455 | 55 | 0.734177 |
f7add4b7f65c543a8a0fd87ede46693f7cb004d9 | 773 | py | Python | app/db/schemas/users.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | app/db/schemas/users.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | app/db/schemas/users.py | ergo-pad/paideia-api | 7ffc78366567c72722d107f06ad37aa7557b05be | [
"MIT"
] | null | null | null | from pydantic import BaseModel
import typing as t
### SCHEMAS FOR USERS ###
| 14.865385 | 46 | 0.667529 |
f7af32c0de7c050bf221c8fa53e7b8146120211e | 11,892 | py | Python | custom_components/discord_game/sensor.py | Myztillx/discord_game | d2413a41ca3918bf2836b3b577fccca86b85ff05 | [
"MIT"
] | null | null | null | custom_components/discord_game/sensor.py | Myztillx/discord_game | d2413a41ca3918bf2836b3b577fccca86b85ff05 | [
"MIT"
] | null | null | null | custom_components/discord_game/sensor.py | Myztillx/discord_game | d2413a41ca3918bf2836b3b577fccca86b85ff05 | [
"MIT"
] | null | null | null | import asyncio
import json
import logging
import re
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from discord import ActivityType, Spotify, Game, Streaming, CustomActivity, Activity, Member, User
from homeassistant.components.notify import PLATFORM_SCHEMA
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['discord.py==1.5.1']
CONF_TOKEN = 'token'
CONF_MEMBERS = 'members'
CONF_IMAGE_FORMAT = 'image_format'
DOMAIN = 'sensor'
ENTITY_ID_FORMAT = "sensor.discord_{}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_MEMBERS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_IMAGE_FORMAT, default='webp'): vol.In(['png', 'webp', 'jpeg', 'jpg']),
})
| 38.862745 | 147 | 0.632862 |
f7af40aed66aeeaae2505edaa30898f512812b45 | 329 | py | Python | Mundo 1/ex_014.py | Shock3/Python_Exercicios | 4420569e881b883728168aabe76b0e9f3a42597f | [
"MIT"
] | null | null | null | Mundo 1/ex_014.py | Shock3/Python_Exercicios | 4420569e881b883728168aabe76b0e9f3a42597f | [
"MIT"
] | null | null | null | Mundo 1/ex_014.py | Shock3/Python_Exercicios | 4420569e881b883728168aabe76b0e9f3a42597f | [
"MIT"
] | null | null | null | """
Escreva um programa que converta uma temperatura,
digitando em graus Celsius e converta para graus Fahrenheit.
"""
celsius = int(input('Digite a temperatura: '))
fahrenheit = (celsius / 5) * 9 + 32
Kelvin = celsius + 273
print(f'A temperatura {celsius}C em Fahrenheit {fahrenheit}F')
print(f'E em Kevin fica {Kelvin} K')
| 32.9 | 66 | 0.723404 |
f7af8bb0d4f3220811a9ca15ffd7c866a271a05f | 24 | py | Python | opensecrets/__init__.py | ndanielsen/py-opensecrets | b362d993fdcff6fc6a0d33ec2db75fb1da418a84 | [
"MIT"
] | 1 | 2018-02-15T03:59:13.000Z | 2018-02-15T03:59:13.000Z | opensecrets/__init__.py | ndanielsen/py-opensecrets | b362d993fdcff6fc6a0d33ec2db75fb1da418a84 | [
"MIT"
] | 11 | 2018-02-14T16:23:17.000Z | 2018-04-05T16:14:49.000Z | opensecrets/__init__.py | ndanielsen/py-opensecrets | b362d993fdcff6fc6a0d33ec2db75fb1da418a84 | [
"MIT"
] | null | null | null | from .crpapi import CRP
| 12 | 23 | 0.791667 |
f7afb1df4dc8682c54d3708fff34533b6c3286db | 2,933 | py | Python | fumblr/services/imgur.py | jonoco/fumblr | cfbbea365299b9edba05c04de77cb003d03b6186 | [
"MIT"
] | 2 | 2017-04-13T02:58:24.000Z | 2021-05-04T00:36:57.000Z | fumblr/services/imgur.py | jonoco/fumblr | cfbbea365299b9edba05c04de77cb003d03b6186 | [
"MIT"
] | 2 | 2020-09-09T22:10:40.000Z | 2021-02-08T20:22:28.000Z | fumblr/services/imgur.py | jonoco/fumblr | cfbbea365299b9edba05c04de77cb003d03b6186 | [
"MIT"
] | 4 | 2019-07-30T12:17:55.000Z | 2020-08-28T14:51:22.000Z | from fumblr.keys import IMGUR_SECRET, IMGUR_ID
from imgurpython import ImgurClient, helpers
import os
import base64
API_URL = 'https://api.imgur.com/3/'
def get_client():
"""
Get an API client for Imgur
Returns:
Imgur client if it is available
"""
try:
return ImgurClient(IMGUR_ID, IMGUR_SECRET)
except helpers.error.ImgurClientError:
print(f'Error: imgur client error - id: {IMGUR_ID} secret: {IMGUR_SECRET}')
def delete_image(deletehash):
"""
Delete image from Imgur with given deletehash
Args:
deletehash: Hash id of image to delete
Returns:
Response from Imgur of image deletion if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.delete_image(deletehash)
except:
return False
def upload_image(path):
"""
Upload image at system path to Imgur
Example of response data from Imgur upload:
{'size': 3527,
'title': None,
'animated': False,
'deletehash': 'YkK79ucEtDDn1b9',
'views': 0,
'width': 187,
'account_url': None,
'in_gallery': False,
'name': '',
'section': None,
'account_id': 0,
'type': 'image/png',
'datetime': 1473926225,
'description': None,
'height': 242,
'bandwidth': 0,
'id': 'AEvnA7h',
'favorite': False,
'nsfw': None,
'link': 'http://i.imgur.com/AEvnA7h.png',
'is_ad': False,
'vote': None}
Args:
path: System path of image
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_path = os.path.abspath(path)
upload = client.upload_from_path(image_path)
return upload
def upload(image):
"""
Upload image to Imgur from file
Args:
image: File object
Returns:
Imgur response object
"""
client = get_client()
if client:
contents = image.read()
b64 = base64.b64encode(contents)
data = {
'image': b64,
'type': 'base64'
}
return client.make_request('POST', 'upload', data, True)
def upload_from_url(url):
"""
Upload image to Imgur from url
Args:
url: URL of image
Returns:
Imgur Response object if successful, otherwise False
"""
client = get_client()
if client:
try:
return client.upload_from_url(url)
except helpers.error.ImgurClientError:
print('Error: imgur client error')
return False
def get_image(id):
"""
Return image data for image with given id
Args:
id: Imgur image id
Returns:
Response from Imgur
"""
client = get_client()
if client:
image_data = client.get_image(id)
return image_data
| 20.51049 | 83 | 0.57177 |
f7b31ccb2c831e63f0930af029ae9e690135237c | 3,584 | py | Python | source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py | awslabs/quantum-ready-solution-for-drug-discovery | a015589995dc17a56bcd0da9332f63d966d08ace | [
"Apache-2.0"
] | 10 | 2022-01-26T01:08:50.000Z | 2022-03-31T03:03:44.000Z | source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py | awslabs/quantum-ready-solution-for-drug-discovery | a015589995dc17a56bcd0da9332f63d966d08ace | [
"Apache-2.0"
] | 47 | 2022-01-26T01:27:35.000Z | 2022-03-29T04:34:51.000Z | source/src/molecular-unfolding/lambda/AthenaTableLambda/app.py | awslabs/quantum-ready-solution-for-drug-discovery | a015589995dc17a56bcd0da9332f63d966d08ace | [
"Apache-2.0"
] | 5 | 2022-02-08T02:30:11.000Z | 2022-03-25T01:59:15.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import boto3
import botocore
import os
import logging
import time
import json
import datetime
log = logging.getLogger()
log.setLevel('INFO')
bucket = os.environ['BUCKET']
region = os.environ['AWS_REGION']
solution_version = os.environ.get('SOLUTION_VERSION', 'v1.0.0')
solution_id = os.environ.get('SOLUTION_ID')
user_agent_config = {
'user_agent_extra': f'AwsSolution/{solution_id}/{solution_version}',
'region_name': region
}
default_config = botocore.config.Config(**user_agent_config)
athena_client = boto3.client('athena', config=default_config)
| 34.461538 | 265 | 0.677176 |
f7b33150fa99668b4eb5ad17455848d84b07ab75 | 14,664 | py | Python | osf/management/commands/populate_custom_taxonomies.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | osf/management/commands/populate_custom_taxonomies.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | osf/management/commands/populate_custom_taxonomies.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, Preprint, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
| 52.185053 | 228 | 0.694558 |
f7b36ab04da3147e45f62315611a09ce95152628 | 2,999 | py | Python | examples/animated_rsh.py | sophiaas/e3nn | 92351b9225df7aeaf70fdc124c7b0e566d4c0eda | [
"MIT"
] | 1 | 2021-01-11T18:34:39.000Z | 2021-01-11T18:34:39.000Z | examples/animated_rsh.py | sophiaas/e3nn | 92351b9225df7aeaf70fdc124c7b0e566d4c0eda | [
"MIT"
] | null | null | null | examples/animated_rsh.py | sophiaas/e3nn | 92351b9225df7aeaf70fdc124c7b0e566d4c0eda | [
"MIT"
] | null | null | null | # pylint: disable=not-callable, no-member, invalid-name, missing-docstring, line-too-long
import math
import os
import subprocess
import argparse
import shutil
import tqdm
import plotly.graph_objs as go
import torch
from e3nn import o3, rsh
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--lmax", type=int, default=2)
parser.add_argument("--resolution", type=int, default=500)
parser.add_argument("--steps", type=int, default=30)
args = parser.parse_args()
main(args.lmax, args.resolution, args.steps)
| 26.776786 | 118 | 0.518506 |
f7b44e0603289410fe1b212dcf1e2a0ad54c9500 | 62 | py | Python | errores.py | fbzavaleta/DS_Software_Stack | 37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf | [
"MIT"
] | null | null | null | errores.py | fbzavaleta/DS_Software_Stack | 37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf | [
"MIT"
] | null | null | null | errores.py | fbzavaleta/DS_Software_Stack | 37cb42c129a6ff4e04704b90cd5b13db3ad9dfcf | [
"MIT"
] | null | null | null | #
E_LEN = "No es posible operar vectores de diferente mdulo"
| 20.666667 | 59 | 0.758065 |
f7b491ed05c90e96397d418234149764a3bc7143 | 8,449 | py | Python | dataset.py | gzaraunitn/TA3N | d83ae5d9c8f4452ff69dd9002bb4016a695a4be8 | [
"MIT"
] | null | null | null | dataset.py | gzaraunitn/TA3N | d83ae5d9c8f4452ff69dd9002bb4016a695a4be8 | [
"MIT"
] | null | null | null | dataset.py | gzaraunitn/TA3N | d83ae5d9c8f4452ff69dd9002bb4016a695a4be8 | [
"MIT"
] | null | null | null | import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
| 34.345528 | 142 | 0.610486 |
f7b50b715b179630c9fcdafb1ce4cd54b3be0ee5 | 423 | py | Python | edm_web1/middleware/errors.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | edm_web1/middleware/errors.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | edm_web1/middleware/errors.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.http import HttpResponseForbidden
from django.template import loader
from django.utils.translation import ugettext_lazy as _
#
_msg = _(u'30s(Request too often)')
limitip_requred_forbid = _requred_forbid(_msg)
| 26.4375 | 55 | 0.754137 |
f7b55905ea97e096b70cfda1b4ce991e067b06eb | 151 | py | Python | data/windows/dr16/mask.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | 5 | 2019-04-11T13:35:24.000Z | 2019-11-14T06:12:51.000Z | data/windows/dr16/mask.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | null | null | null | data/windows/dr16/mask.py | dnidever/apogee | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | [
"BSD-3-Clause"
] | 5 | 2018-09-20T22:07:43.000Z | 2021-01-15T07:13:38.000Z | from apogee.aspcap import aspcap
from apogee.aspcap import mask
els=aspcap.elems()
for el in els[0]: mask.mkmask(el,globalmask='mask_v02_aspcap.txt')
| 25.166667 | 66 | 0.788079 |
f7b6cec7ff18c898066933b6660bdaa93907b21d | 7,368 | py | Python | dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | 1 | 2022-02-17T08:50:47.000Z | 2022-02-17T08:50:47.000Z | dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | null | null | null | dragonfly/opt/unittest_cp_random_multiobjective_optimiser.py | anonymous-submission000/mobo | 090f774d742c7155c5e5ba01c10e7db7b93b6a0a | [
"MIT"
] | null | null | null | """
Unit tests for Random CP optimiser on Cartesian product domains.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
# pylint: disable=abstract-class-little-used
import os
from . import random_multiobjective_optimiser
from ..exd.cp_domain_utils import get_raw_point_from_processed_point, \
load_config_file
from ..exd.experiment_caller import get_multifunction_caller_from_config
from ..exd.worker_manager import SyntheticWorkerManager
# Local imports
from ..test_data.multiobjective_hartmann.multiobjective_hartmann \
import objectives as moo_hartmann
from ..test_data.multiobjective_park.multiobjective_park \
import objectives as moo_park
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.reporters import get_reporter
def _test_optimiser_results(self, raw_prob_funcs, pareto_vals, pareto_points,
history, dcf):
""" Tests optimiser results. """
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
raw_pareto_points = [get_raw_point_from_processed_point(pop, config.domain,
config.domain_orderings.index_ordering,
config.domain_orderings.dim_ordering)
for pop in pareto_points]
self.report('Pareto opt point [-1]: proc=%s, raw=%s.' % (pareto_points[-1],
raw_pareto_points[-1]))
saved_in_history = [key for key, _ in list(history.__dict__.items()) if not
key.startswith('__')]
self.report('Stored in history: %s.' % (saved_in_history), 'test_result')
assert len(history.curr_pareto_vals) == len(history.curr_pareto_points)
for val in pareto_vals:
assert len(val) == multi_func_caller.num_funcs
for pt in pareto_points:
assert len(pt) == config.domain.num_domains
self.report('Pareto optimal points: %s.' % (pareto_points))
self.report('Pareto optimal values: %s.' % (pareto_vals))
def test_optimisation_single(self):
""" Test optimisation with a single worker. """
self.report('')
self.report('Testing %s with one worker.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 1 worker on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_1.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_1, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
def test_optimisation_asynchronous(self):
""" Testing random optimiser with three asynchronous workers. """
self.report('')
self.report('Testing %s with three asynchronous workers.' % (type(self)))
for idx, (dcf, (raw_prob_funcs,)) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 3 asynchronous workers on %s.' % (
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_3.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_3, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
class CPRandomMultiObjectiveOptimiserTestCase(
CPMultiObjectiveOptimiserBaseTestCase, BaseTestClass):
""" Unit tests for random multi-objective optimisation. """
if __name__ == '__main__':
execute_tests()
| 48.794702 | 120 | 0.621607 |