hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4dcb9047f54eac204a9bac1c46c12bc3341a699a | 11,237 | py | Python | Leetcode.py | SakuraSa/Leetcode_CodeDownloader | cba23e3ec85b24e14fdf856e0e7eefb2c95644eb | [
"Apache-2.0"
] | 3 | 2015-10-20T13:05:18.000Z | 2020-07-27T19:45:58.000Z | Leetcode.py | SakuraSa/Leetcode_CodeDownloader | cba23e3ec85b24e14fdf856e0e7eefb2c95644eb | [
"Apache-2.0"
] | null | null | null | Leetcode.py | SakuraSa/Leetcode_CodeDownloader | cba23e3ec85b24e14fdf856e0e7eefb2c95644eb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
import os
import re
import requests
import datetime
import BeautifulSoup
#url requests setting
# Endpoints of the (legacy) oj.leetcode.com site used by the downloader.
host_url = 'https://oj.leetcode.com'
login_url = 'https://oj.leetcode.com/accounts/login/'
question_list_url = 'https://oj.leetcode.com/problems/'
# %s is filled with a submission id, %d with a submission-list page number.
code_base_url = 'https://oj.leetcode.com/submissions/detail/%s/'
code_list_base_url = 'https://oj.leetcode.com/submissions/%d/'
github_login_url = 'https://oj.leetcode.com/accounts/github/login/'
# Extracts (language, source) pairs from the storage.put(...) calls embedded
# in a submission-detail page.
code_regex = re.compile("storage\.put\('(python|cpp|java)', '([^']+)'\);")
leetcode_request_header = {
    'Host': 'oj.leetcode.com',
    'Origin': 'https://oj.leetcode.com',
    'Referer': 'https://oj.leetcode.com/accounts/login/'
}
github_request_header = {
    'Host': 'github.com',
    'Origin': 'https://github.com',
    'Referer': 'https://github.com/'
}
#code setting
# Per-language file extension and line-comment prefix for saved submissions.
ext_dic = {'python': '.py', 'cpp': '.cpp', 'java': '.java'}
comment_char_dic = {'python': '#', 'cpp': '//', 'java': '//'}

if __name__ == '__main__':
    #login form leetcode account
    USERNAME = 'YOUR USERNAME'
    PASSWORD = 'YOUR PASSWORD'
    #login form github account
    #downloader.login_from_github(username='YOUR USERNAME', password='YOUR PASSWORD')
    from taskbar import TaskBar
    # NOTE(review): LeetcodeDownloader and `func` are not defined in this
    # chunk -- presumably provided elsewhere in the file; confirm.
    downloader = LeetcodeDownloader()
    print "Logging..."
    if downloader.login(username=USERNAME, password=PASSWORD):
        print "ok, logged in."
    else:
        print "error, logging failed."
        exit()
    task_bar = TaskBar(40)
    print "Loading submissions..."
    # Build (callable, (args, kwargs)) task tuples, one per submission row.
    task_param_list = task_bar.processing(
        task=lambda: list((func, ([table_data_list], {})) for table_data_list in downloader.page_code_all()),
        title=" Loading submissions...",
        show_total=False
    )
    print "ok, %s submissions found in %.2fs." % (len(task_param_list), task_bar.time_cost)
    print "Downloading submissions..."
    task_bar.do_task(task_param_list)
| 41.464945 | 112 | 0.568924 |
4dccb31b43009dc8e9a6ff9aaa09678332eccb6f | 1,028 | py | Python | tokenizer.py | momennaas/kalam-lp | fdf032ca71a155169f507cba40275ca38f409c87 | [
"MIT"
] | 6 | 2019-03-31T04:46:27.000Z | 2020-02-27T16:39:31.000Z | tokenizer.py | momennaas/kalam-lp | fdf032ca71a155169f507cba40275ca38f409c87 | [
"MIT"
] | null | null | null | tokenizer.py | momennaas/kalam-lp | fdf032ca71a155169f507cba40275ca38f409c87 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
##############################################################
## Author: Abdulmumen Naas
## Description: Arabic Natural Language Processor (Kalam-lp)
## Version: 0.0.1
## Copyright (c) 2014 Abdulmumen Naas
##############################################################
import re
import string
from constants import *
if __name__ == "__main__":
    main()  # NOTE(review): main() is not defined in this chunk -- confirm it exists elsewhere in the file
| 25.073171 | 72 | 0.559339 |
4dcd01eb4188987a9436e56ef1dddd73f316c897 | 1,617 | py | Python | Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py | techsparksguru/python_ci_automation | 65e66266fdf2c14f593c6f098a23770621faef41 | [
"MIT"
] | null | null | null | Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py | techsparksguru/python_ci_automation | 65e66266fdf2c14f593c6f098a23770621faef41 | [
"MIT"
] | 9 | 2020-02-13T09:14:12.000Z | 2022-01-13T03:17:03.000Z | Class4/shoppingcart_pom/features/lib/pages/summer_dresses_catalog_page.py | techsparksguru/python_ci_automation | 65e66266fdf2c14f593c6f098a23770621faef41 | [
"MIT"
] | 1 | 2021-03-10T03:27:37.000Z | 2021-03-10T03:27:37.000Z | __author__ = 'techsparksguru'
from selenium.webdriver.common.by import By
from .base_page_object import BasePage
| 37.604651 | 100 | 0.564626 |
4dcdd9abff0ad027ebd337ca976c53333922e6fc | 446 | py | Python | ch3/collatz_test.py | jakdept/pythonbook | 862e445ef1bcb36c890fe7e27e144354f6c855b5 | [
"MIT"
] | null | null | null | ch3/collatz_test.py | jakdept/pythonbook | 862e445ef1bcb36c890fe7e27e144354f6c855b5 | [
"MIT"
] | null | null | null | ch3/collatz_test.py | jakdept/pythonbook | 862e445ef1bcb36c890fe7e27e144354f6c855b5 | [
"MIT"
] | null | null | null |
import unittest
import collatz
if __name__ == "__main__":
    # Run the collatz unit tests when executed as a script.
    unittest.main()
| 21.238095 | 63 | 0.544843 |
4dceebb4aaf3cbc5f66e75e0222673f73c95b189 | 4,046 | py | Python | test/surrogate/test_sk_random_forest.py | Dee-Why/lite-bo | 804e93b950148fb98b7e52bd56c713edacdb9b6c | [
"BSD-3-Clause"
] | 184 | 2021-06-02T06:35:25.000Z | 2022-03-31T10:33:11.000Z | test/surrogate/test_sk_random_forest.py | ZongWei-HUST/open-box | 011791aba4e44b20a6544020c73601638886d143 | [
"MIT"
] | 16 | 2021-11-15T11:13:57.000Z | 2022-03-24T12:51:17.000Z | test/surrogate/test_sk_random_forest.py | ZongWei-HUST/open-box | 011791aba4e44b20a6544020c73601638886d143 | [
"MIT"
] | 24 | 2021-06-18T04:52:57.000Z | 2022-03-30T11:14:03.000Z | from sklearn.ensemble import RandomForestRegressor
from openbox.utils.config_space import ConfigurationSpace
from openbox.utils.config_space import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant, UniformIntegerHyperparameter
import numpy as np
from openbox.utils.config_space.util import convert_configurations_to_array
import threading
from joblib import Parallel, delayed
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.validation import check_is_fitted
from sklearn.ensemble._base import _partition_estimators
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
def _collect_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
out.append(prediction)
# Build a small regression problem from randomly sampled configurations.
n_obs = 50  # number of training configurations
n_new = 5   # number of configurations to predict on
# NOTE(review): get_cs() and predictmv() are not defined in this chunk --
# presumably defined elsewhere in the file; confirm.
cs = get_cs()
cs.seed(1)
configs = cs.sample_configuration(n_obs)
new_configs = cs.sample_configuration(n_new)
X = convert_configurations_to_array(configs)
Y = np.random.RandomState(47).random(size=(n_obs,))
pX = convert_configurations_to_array(new_configs)
print('shape of pX', pX.shape)

# Fit a tiny forest and compare sklearn's predict() with the per-tree
# predictions gathered by predictmv().
rf = RandomForestRegressor(random_state=np.random.RandomState(47), n_estimators=3)
rf.fit(X, Y)
preds = rf.predict(pX)
print(preds)

ppp = predictmv(rf, pX)
print('final predict', ppp)
# Empirical mean / variance over axis 0 of the collected predictions.
m = np.mean(ppp, axis=0)
v = np.var(ppp, axis=0)
print(m, v)
print(type(m), type(v))

from joblib import effective_n_jobs
print(effective_n_jobs(None))
| 32.894309 | 109 | 0.712803 |
4dcf592e4a02e009b4cb4e7b4d57ff918fb14acc | 3,258 | py | Python | cli_wrapper.py | anirbandas18/report-engine | de7d3c0caab972243a61e681abbb9a06e9c54857 | [
"MIT"
] | null | null | null | cli_wrapper.py | anirbandas18/report-engine | de7d3c0caab972243a61e681abbb9a06e9c54857 | [
"MIT"
] | null | null | null | cli_wrapper.py | anirbandas18/report-engine | de7d3c0caab972243a61e681abbb9a06e9c54857 | [
"MIT"
] | null | null | null | import subprocess, os
# constants with global scope
# CLI flag names forwarded to the report-engine jar.
INPUT = "--input"
OUTPUT = "--output"
FILTERS = "--filters"
SUPPPLEMENTS = "--supplements"  # NOTE(review): identifier spelling 'SUPPPLEMENTS' kept as-is
# Location of the executable jar relative to this script.
JAR_DIRECTORY = "target"
JAR_NAME = "report-engine.jar"

if __name__ == '__main__':
    main()  # NOTE(review): main() is not defined in this chunk -- confirm it exists elsewhere
4dcfc7f344c60db35f7d0923585dc078c2f43a3c | 11,267 | py | Python | 19-05-150_protein_ridge/inference.py | danhtaihoang/sparse-network | 763a19f5f333df5cfa9852d965a7110e813d52d5 | [
"MIT"
] | null | null | null | 19-05-150_protein_ridge/inference.py | danhtaihoang/sparse-network | 763a19f5f333df5cfa9852d965a7110e813d52d5 | [
"MIT"
] | null | null | null | 19-05-150_protein_ridge/inference.py | danhtaihoang/sparse-network | 763a19f5f333df5cfa9852d965a7110e813d52d5 | [
"MIT"
] | null | null | null | ##========================================================================================
import numpy as np
from scipy import linalg
from sklearn.preprocessing import OneHotEncoder
from scipy.spatial import distance
#=========================================================================================
#=========================================================================================
# generate coupling matrix w0: wji from j to i
#=========================================================================================
#=========================================================================================
# 2018.10.27: generate time series by MCMC
#===================================================================================================
# 2018.12.22: inverse of covariance between values of x
#=========================================================================================
# 2018.12.28: fit interaction to residues at position i
# additive update
#=========================================================================================
# 2019.02.25: fit interaction to residues at position i
# multiplicative update (new version, NOT select each pair as the old version)
#=========================================================================================
# 2019.05.15: add ridge regression term to coupling w
#===================================================================================================
| 30.125668 | 101 | 0.454158 |
4dcff13d4501aa2f3c3df9d643bf2c4ada7cfd82 | 335 | py | Python | src/test/resources/script/jython/testReturnString.py | adchilds/jythonutil | 24e6b945cf7474358be1f43e0a72f37411289e39 | [
"CNRI-Jython"
] | 5 | 2016-02-05T19:44:57.000Z | 2017-05-26T10:26:29.000Z | src/test/resources/script/jython/testReturnString.py | adchilds/jythonutil | 24e6b945cf7474358be1f43e0a72f37411289e39 | [
"CNRI-Jython"
] | 1 | 2017-02-03T06:19:21.000Z | 2017-02-11T03:55:55.000Z | src/test/resources/script/jython/testReturnString.py | adchilds/jythonutil | 24e6b945cf7474358be1f43e0a72f37411289e39 | [
"CNRI-Jython"
] | null | null | null | import sys
if __name__ == '__main__':
    # Default to empty strings when no command-line arguments are given.
    a = ''
    b = ''
    # Use the first two CLI arguments when present.  IndexError is the only
    # failure mode of the lookups below; the original blanket
    # `except Exception` could mask unrelated bugs.
    try:
        a = sys.argv[1]
        b = sys.argv[2]
    except IndexError:
        pass
    # The longer of the two strings; ties resolve to b (strict `>`).
    result = a if len(a) > len(b) else b
4dd0b98da97f43f66eaf8f6486394d5b6746b436 | 5,050 | py | Python | scraper.py | quake0day/chessreview | 1cb1aa6689f2db46546da9b1bf328da25b1b67ba | [
"Apache-2.0"
] | null | null | null | scraper.py | quake0day/chessreview | 1cb1aa6689f2db46546da9b1bf328da25b1b67ba | [
"Apache-2.0"
] | null | null | null | scraper.py | quake0day/chessreview | 1cb1aa6689f2db46546da9b1bf328da25b1b67ba | [
"Apache-2.0"
] | null | null | null | """
PGN Scraper is a small program which downloads each of a user's archived games from chess.com and stores them in a pgn file.
When running the user is asked for the account name which shall be scraped and for game types.
The scraper only downloads games of the correct type.
Supported types are: bullet, rapid, blitz
rated, unrated
standard chess, other ruless (chess960, oddchess, etc.)
"""
from datetime import datetime
import json
import urllib.request
import os
def CheckFileName(file_name):
    """
    Abort with an error message if *file_name* already exists in the current
    working directory; otherwise return None and let scraping proceed.
    """
    # os.path.join is portable; the original hand-built a '/'-separated
    # path (os.getcwd() + f"/{file_name}"), which is wrong on Windows.
    if os.path.isfile(os.path.join(os.getcwd(), file_name)):
        print(f"Error: A file named '{file_name}' already exists.")
        print("Exiting...")
        quit()
def GameTypeTrue(game, game_type, rated, rules):
    """
    Return True when *game* matches one of the allowed combinations of time
    class (*game_type*), rated flag (*rated*) and standard-chess flag
    (*rules*); otherwise return False.
    """
    # The original triple nested loop is equivalent to three independent
    # membership tests, since each condition only involves one list.
    matches_type = game["time_class"] in game_type
    matches_rated = game["rated"] in rated
    matches_rules = (game["rules"] == "chess") in rules
    return matches_type and matches_rated and matches_rules
def initScrape():
    """
    Interactively collect the scraping parameters.

    Returns a list ``[acc_name, game_type, rated, rules]`` where *game_type*
    is a list of time classes, *rated* a list of allowed rated flags and
    *rules* a list of allowed "is standard chess" flags.
    """
    # Input account name
    acc_name = input("Enter account name: ").strip()
    # Check if acc_name is empty
    if bool(acc_name) == False:
        print("Error: Empty account name!")
        quit()
    # Input game type
    #game_type_code = input("Enter game type [1] All (default), [2] Rapid, [3] Blitz, [4] Bullet, [5] Rapid and Blitz: ").strip()
    # If game_type_code is empty set to 1
    #if bool(game_type_code) == False:
    # NOTE(review): the interactive prompt above is commented out, so the
    # game-type choice is hard-wired to "1" (all time classes).
    game_type_code = "1"
    # Create dictionary for different game type options und apply input
    game_type_dict = {
        "1" : ["bullet", "blitz", "rapid"],
        "2" : ["rapid"],
        "3" : ["blitz"],
        "4" : ["bullet"],
        "5" : ["blitz", "rapid"]
    }
    game_type = game_type_dict["1"]
    # Input rated/unrated
    #rated_code = input("Consider [1] only rated games (default), [2] only unrated or [3] all games: ").strip()
    # If rated_code is empty set to 1
    #if bool(rated_code) == False:
    rated_code = "1"
    # Create dictionary for rated/unraked and apply input
    rated_dict = {
        "1" : [True],
        "2" : [False],
        "3" : [True, False]
    }
    # try:
    # NOTE(review): rated_code is forced to "1" above, yet the lookup uses
    # the literal "3" (rated AND unrated games) -- confirm this is intended.
    rated = rated_dict["3"]
    # except KeyError:
    #     print("Error: Invalid input!\nExiting...")
    #     quit()
    # Input rules ("chess"/other)
    # rules_code = input("Consider [1] only standard chess (default), [2] only other modes (oddchess, bughouse etc.) or [3] any type: ").strip()
    # If rules_code is empty set to 1
    # if bool(rules_code) == False:
    rules_code = "1"
    # Create dictionary for rules and apply input
    rules_dict = {
        "1" : [True],
        "2" : [False],
        "3" : [True, False]
    }
    #try:
    rules = rules_dict[rules_code]
    # except KeyError:
    #     print("Error: Invalid input!\nExiting...")
    #     quit()
    # Print warning if only rated and only other rules are selected
    if (rated_code == "1") and (rules_code == "2"):
        print("Warning: You selected only rated AND only other chess modes!")
        print(" Other chess modes are often unrated!")
    return [acc_name, game_type, rated, rules]
def beginScrape(params):
    """
    Download the matching games from the chess.com public API and append
    them to a single PGN file.

    The file is saved as "username_YYYY-MM-dd_<types>.pgn".
    """
    # Passing the predefined parameters
    acc_name = params[0]
    game_type = params[1]
    rated = params[2]
    rules = params[3]
    # Create name of pgn file
    now = datetime.now()
    date = now.strftime("%Y-%m-%d")
    game_type_string = "_".join(game_type)
    file_name = f"{acc_name}_{date}_{game_type_string}.pgn"
    # Check if file already exists
    CheckFileName(file_name)
    # Run the request, check games for type and write correct ones to file.
    # First fetch the per-month archive URLs, then every game in each month.
    with urllib.request.urlopen(f"https://api.chess.com/pub/player/{acc_name}/games/archives") as url:
        archives = list(dict(json.loads(url.read().decode()))["archives"])
    for archive in archives:
        with urllib.request.urlopen(archive) as url:
            games = list(dict(json.loads(url.read().decode()))["games"])
        for game in games:
            if GameTypeTrue(game,game_type,rated,rules):
                # Append mode so matching games accumulate in one file.
                with open(file_name, "a") as text_file:
                    print(game["pgn"], file=text_file)
                    print("\n", file=text_file)
def main():
    """
    Scrape PGN files from chess.com .
    """
    # Gather parameters interactively, then download the matching games.
    params = initScrape()
    beginScrape(params)


if __name__ == '__main__':
    main()
| 31.36646 | 143 | 0.60396 |
4dd0f6aca6f1e8e85ab78942074e05e47cb24566 | 2,117 | py | Python | testpro1/DB_handler_jjd.py | dongkakika/OXS | 95166365fb5e35155af3b8de6859ec87f3d9ca78 | [
"MIT"
] | 4 | 2020-04-22T08:42:01.000Z | 2021-07-31T19:28:51.000Z | testpro1/DB_handler_jjd.py | dongkakika/OXS | 95166365fb5e35155af3b8de6859ec87f3d9ca78 | [
"MIT"
] | null | null | null | testpro1/DB_handler_jjd.py | dongkakika/OXS | 95166365fb5e35155af3b8de6859ec87f3d9ca78 | [
"MIT"
] | null | null | null | import sqlite3
import codecs # for using ''
import os
#
f = codecs.open("jjd_info_title.txt", "r")
title_list = []
while True:
line = f.readline() #
if not line: break # break the loop when it's End Of File
title_list.append(line) # split the line and append it to list
f.close()
#
f = codecs.open("jjd_info_date.txt", "r")
date_list = []
while True:
line = f.readline() #
if not line: break # break the loop when it's End Of File
date_list.append(line) # split the line and append it to list
f.close()
#
f = codecs.open("jjd_info_view.txt", "r")
view_list = []
while True:
line = f.readline()
if not line: break
view_list.append(line)
f.close
# href()
f = codecs.open("jjd_info_href.txt", "r")
href_list = []
while True:
line = f.readline()
if not line: break
href_list.append(line)
f.close
################################################################################
###################################### DB ######################################
# below 'print' is for checking the data structure. Don't care.
#print("saved data(1) : ", list[0][0])
#print("saved data(2) : ", list[1])
# connect 'db.sqlite3' in the django folder and manipulate it
con = sqlite3.connect("db.sqlite3")
cur = con.cursor() # use 'cursor' to use DB
# you don't need to care the below CREATE TABLE command.
# cur.execute("CREATE TABLE if not exists website1_crawlingdata(Name text, Period text);")
total_list = []
for i in range(len(date_list)):
temp = [str(i+1), title_list[i], date_list[i], view_list[i], href_list[i]]
total_list.append(temp)
# print(total_list)
cur.execute("delete from website1_jjd_info;")
idx = 0 #
while idx < len(date_list):
cur.execute("INSERT INTO website1_jjd_info VALUES(?, ?, ?, ?, ?);", total_list[idx])
# 'INSERT' each value of the total_list to the table of DB.
idx += 1
con.commit() # The new input is gonna be saved in the DB with 'commit' command
idx = 0
con.close()
| 28.608108 | 90 | 0.600378 |
4dd104cc2e6c9e4bdd3ba911a3d5a31df0366e7f | 429 | py | Python | scripts/regression_tests.py | zhangxaochen/Opt | 7f1af802bfc84cc9ef1adb9facbe4957078f529a | [
"MIT"
] | 260 | 2017-03-02T19:57:51.000Z | 2022-01-21T03:52:03.000Z | scripts/regression_tests.py | zhangxaochen/Opt | 7f1af802bfc84cc9ef1adb9facbe4957078f529a | [
"MIT"
] | 102 | 2017-03-03T00:42:56.000Z | 2022-03-30T14:15:20.000Z | scripts/regression_tests.py | zhangxaochen/Opt | 7f1af802bfc84cc9ef1adb9facbe4957078f529a | [
"MIT"
] | 71 | 2017-03-02T20:22:33.000Z | 2022-01-02T03:49:04.000Z | from opt_utils import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip_compilation", action='store_true', help="skip compilation")
args = parser.parse_args()
if not args.skip_compilation:
compile_all_opt_examples()
for example in all_examples:
args = []
output = run_example(example, args, True).decode('ascii')
with open(example + ".log", "w") as text_file:
text_file.write(output)
| 28.6 | 93 | 0.748252 |
4dd688bf34007f2b88b7cc72d6792e3f5c02e4ad | 801 | py | Python | rec/migrations/0005_auto_20200922_1701.py | lpkyrius/rg1 | 6132ec5cd8db86088f8635f2e12ce6bf16aeff8e | [
"MIT"
] | null | null | null | rec/migrations/0005_auto_20200922_1701.py | lpkyrius/rg1 | 6132ec5cd8db86088f8635f2e12ce6bf16aeff8e | [
"MIT"
] | 2 | 2020-09-16T14:06:34.000Z | 2020-09-16T18:14:26.000Z | rec/migrations/0005_auto_20200922_1701.py | lpkyrius/rg1 | 6132ec5cd8db86088f8635f2e12ce6bf16aeff8e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-22 20:01
from django.db import migrations, models
| 27.62069 | 87 | 0.574282 |
4dd73302eae1ae2e039d31c3cb2e7f24834961a5 | 6,452 | py | Python | deeppavlov/deep.py | cclauss/DeepPavlov | 8726173c92994b3f789790b5879052d2f7953f47 | [
"Apache-2.0"
] | 3 | 2020-04-16T04:25:10.000Z | 2021-05-07T23:04:43.000Z | deeppavlov/deep.py | sachinsingh3107/Deeppavlov_Chatbot | f10b9485c118cdec69e73c89833a1a5a164404de | [
"Apache-2.0"
] | 12 | 2020-01-28T22:14:04.000Z | 2022-02-10T00:10:17.000Z | deeppavlov/deep.py | sachinsingh3107/Deeppavlov_Chatbot | f10b9485c118cdec69e73c89833a1a5a164404de | [
"Apache-2.0"
] | 1 | 2021-02-05T13:01:48.000Z | 2021-02-05T13:01:48.000Z | """
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from logging import getLogger
from deeppavlov.core.commands.infer import interact_model, predict_on_stream
from deeppavlov.core.commands.train import train_evaluate_model_from_config
from deeppavlov.core.common.cross_validation import calc_cv_score
from deeppavlov.core.common.file import find_config
from deeppavlov.download import deep_download
from deeppavlov.utils.alexa.server import run_alexa_default_agent
from deeppavlov.utils.alice import start_alice_server
from deeppavlov.utils.ms_bot_framework.server import run_ms_bf_default_agent
from deeppavlov.utils.pip_wrapper import install_from_config
from deeppavlov.utils.server.server import start_model_server
from deeppavlov.utils.telegram.telegram_ui import interact_model_by_telegram
log = getLogger(__name__)

# Command-line interface for DeepPavlov's entry point: `mode` selects the
# pipeline action, the remaining flags configure it.
parser = argparse.ArgumentParser()

parser.add_argument("mode", help="select a mode, train or interact", type=str,
                    choices={'train', 'evaluate', 'interact', 'predict', 'interactbot', 'interactmsbot',
                             'alexa', 'riseapi', 'download', 'install', 'crossval'})
parser.add_argument("config_path", help="path to a pipeline json config", type=str)

# Training options.
parser.add_argument("-e", "--start-epoch-num", dest="start_epoch_num", default=None,
                    help="Start epoch number", type=int)
parser.add_argument("--recursive", action="store_true", help="Train nested configs")

# Inference options.
parser.add_argument("-b", "--batch-size", dest="batch_size", default=1, help="inference batch size", type=int)
parser.add_argument("-f", "--input-file", dest="file_path", default=None, help="Path to the input file", type=str)

parser.add_argument("-d", "--download", action="store_true", help="download model components")

# Cross-validation options.
parser.add_argument("--folds", help="number of folds", type=int, default=5)

# Bot-integration options (Telegram / MS Bot Framework).
parser.add_argument("-t", "--token", default=None, help="telegram bot token", type=str)
parser.add_argument("-i", "--ms-id", default=None, help="microsoft bot framework app id", type=str)
parser.add_argument("-s", "--ms-secret", default=None, help="microsoft bot framework app secret", type=str)

parser.add_argument("--multi-instance", action="store_true", help="allow rising of several instances of the model")
parser.add_argument("--stateful", action="store_true", help="interact with a stateful model")
parser.add_argument("--no-default-skill", action="store_true", help="do not wrap with default skill")

# HTTPS / REST API options.
parser.add_argument("--https", action="store_true", help="run model in https mode")
parser.add_argument("--key", default=None, help="ssl key", type=str)
parser.add_argument("--cert", default=None, help="ssl certificate", type=str)
parser.add_argument("-p", "--port", default=None, help="api port", type=str)
parser.add_argument("--api-mode", help="rest api mode: 'basic' with batches or 'alice' for Yandex.Dialogs format",
                    type=str, default='basic', choices={'basic', 'alice'})


if __name__ == "__main__":
    main()  # NOTE(review): main() is not defined in this chunk -- presumably stripped from this view
| 47.094891 | 116 | 0.66739 |
4dd83f2bdedcce578bc2f4f15b92a56d3b2455a9 | 3,345 | py | Python | test/test_cfg/read_grammar.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
] | 8 | 2017-12-18T08:51:27.000Z | 2020-11-26T02:21:06.000Z | test/test_cfg/read_grammar.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
] | 1 | 2020-01-09T15:41:09.000Z | 2020-01-09T15:41:09.000Z | test/test_cfg/read_grammar.py | wannaphong/pycfg | ffa67958ed1c3deb73cadb3969ac086336fb1269 | [
"MIT"
] | 6 | 2017-06-12T16:58:40.000Z | 2019-11-27T06:55:07.000Z | '''Read grammar specifications for test cases.'''
import re
import sys
from pprint import pprint
from cfg.core import ContextFreeGrammar, Terminal, Nonterminal, Marker
from cfg.table import END_MARKER, ParseTableNormalForm
# Regexes for section labels ("== name =="), '#' comments and LR-table
# shift/reduce action strings.
label_re = re.compile('^\s*==\s*(.*?)\s*==\s*$')
comment_re = re.compile('^([^#]*)')
shift_re = re.compile('^sh(\d+)$')
reduce_re = re.compile('^re(\d+)$')


def read_test_case(finname):
    '''Read a grammar test case from a file.

    Comments are stripped from each line; "== label ==" headers switch the
    section that subsequent lines are collected into (default: "grammar").
    '''
    label = 'grammar'
    sections = {}
    with open(finname, 'r') as fin:
        for line in filter(None, map(lambda s: comment_re.match(s).group(1).strip(), fin)):
            m = label_re.match(line)
            if m:
                label = m.group(1).lower()
            else:
                sections.setdefault(label, []).append(line)
    # NOTE(review): retype(), read_grammar, retype_table, read_bool and
    # GrammarTestCase are not visible in this chunk -- presumably defined
    # elsewhere in the file; confirm before relying on this function.
    retype('grammar', read_grammar)
    retype('table', retype_table)
    retype('tablea', retype_table)
    retype('tableb', retype_table)
    retype('result', read_bool)
    return GrammarTestCase(sections, finname)


if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: read_grammar.py <file>\n')
        sys.exit(1)
    print read_test_case(sys.argv[1])
| 31.261682 | 93 | 0.564425 |
4dd85a981091c632d855dbb819f62a7e6d570ba9 | 59,286 | py | Python | pype/plugins/global/publish/extract_review.py | barklaya/pype | db3f708b1918d4f81951b36e1575eb3ecf0551c5 | [
"MIT"
] | null | null | null | pype/plugins/global/publish/extract_review.py | barklaya/pype | db3f708b1918d4f81951b36e1575eb3ecf0551c5 | [
"MIT"
] | null | null | null | pype/plugins/global/publish/extract_review.py | barklaya/pype | db3f708b1918d4f81951b36e1575eb3ecf0551c5 | [
"MIT"
] | null | null | null | import os
import re
import copy
import json
import pyblish.api
import clique
import pype.api
import pype.lib
| 38.003846 | 95 | 0.540347 |
4dd8bacf6b045e8713670a0e2435de01e5e09f0a | 6,683 | py | Python | tests/peerfinder_test.py | wusel42/PeerFinder | 35f132b45f2947902adfb6327ebcdf60bce4bdc2 | [
"MIT"
] | 49 | 2017-07-13T13:58:14.000Z | 2022-03-04T12:23:35.000Z | tests/peerfinder_test.py | wusel42/PeerFinder | 35f132b45f2947902adfb6327ebcdf60bce4bdc2 | [
"MIT"
] | 9 | 2017-07-11T13:23:15.000Z | 2021-02-06T22:25:15.000Z | tests/peerfinder_test.py | wusel42/PeerFinder | 35f132b45f2947902adfb6327ebcdf60bce4bdc2 | [
"MIT"
] | 17 | 2017-07-11T12:37:25.000Z | 2022-01-29T14:19:35.000Z | import unittest
from unittest.mock import Mock
import mock
import peerfinder.peerfinder as peerfinder
import requests
from ipaddress import IPv6Address, IPv4Address
if __name__ == "__main__":
    # Run the peerfinder unit tests when executed directly.
    unittest.main()
| 36.519126 | 87 | 0.575939 |
4dd917ca4b89b1723693aa78f18f3c1b80e9acd7 | 5,372 | py | Python | ceilometer/network/notifications.py | rackerlabs/instrumented-ceilometer | 6ac5215ac0476120d9c99adcabc9cad0d32963da | [
"Apache-2.0"
] | 3 | 2021-04-18T00:37:48.000Z | 2021-07-21T10:20:11.000Z | ceilometer/network/notifications.py | lexxito/monitoring | bec8dfb8d3610331c7ae5ec543e0b8da0948c164 | [
"Apache-2.0"
] | null | null | null | ceilometer/network/notifications.py | lexxito/monitoring | bec8dfb8d3610331c7ae5ec543e0b8da0948c164 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing network counter messages from Neutron notification
events.
"""
from oslo.config import cfg
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log
from ceilometer import plugin
from ceilometer import sample
# Config option exposing the exchange Neutron publishes notifications on.
OPTS = [
    cfg.StrOpt('neutron_control_exchange',
               default='neutron',
               help="Exchange name for Neutron notifications",
               deprecated_name='quantum_control_exchange'),
]

cfg.CONF.register_opts(OPTS)

LOG = log.getLogger(__name__)

# NOTE(review): NetworkNotificationBase is not visible in this chunk --
# these subclasses only specialise its class attributes; confirm the base
# class's contract elsewhere in the file.


class Network(NetworkNotificationBase):
    """Listen for Neutron network notifications in order to mediate with the
    metering framework.
    """
    # Resource name the base class keys notifications/samples on.
    resource_name = 'network'


class Subnet(NetworkNotificationBase):
    """Listen for Neutron notifications in order to mediate with the
    metering framework.
    """
    resource_name = 'subnet'


class Port(NetworkNotificationBase):
    """Listen for Neutron notifications in order to mediate with the
    metering framework.
    """
    resource_name = 'port'


class Router(NetworkNotificationBase):
    """Listen for Neutron notifications in order to mediate with the
    metering framework.
    """
    resource_name = 'router'


class FloatingIP(NetworkNotificationBase):
    """Listen for Neutron notifications in order to mediate with the
    metering framework.
    """
    resource_name = 'floatingip'
    # Overrides: floating IPs are counted as 'ip.floating' with unit 'ip'.
    counter_name = 'ip.floating'
    unit = 'ip'


class Bandwidth(NetworkNotificationBase):
    """Listen for Neutron notifications in order to mediate with the
    metering framework.
    """
    # Only the L3 metering event is consumed for bandwidth samples.
    event_types = ['l3.meter']
4dda7edb222a2d84997df6163df89166d292eb6b | 2,407 | py | Python | optax/_src/update_test.py | pierricklee/optax | a75dbf99ce7af05e18bb6a2c518531ddc7303d13 | [
"Apache-2.0"
] | 2 | 2021-03-13T23:25:27.000Z | 2022-03-09T09:38:27.000Z | optax/_src/update_test.py | rwightman/optax | ba0bc11d172054d65b4387ecae840c04e2bc7035 | [
"Apache-2.0"
] | null | null | null | optax/_src/update_test.py | rwightman/optax | ba0bc11d172054d65b4387ecae840c04e2bc7035 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `update.py`."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
from optax._src import update
# Standard absl test entry point: allows running this test module
# directly with `python update_test.py`.
if __name__ == '__main__':
  absltest.main()
| 33.901408 | 80 | 0.665974 |
4ddab5e3d9aa744300fde8fef5e302f340725170 | 44,868 | py | Python | scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 3 | 2015-11-20T08:44:42.000Z | 2016-12-14T01:40:03.000Z | scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | 1 | 2017-09-04T14:04:32.000Z | 2020-05-26T19:04:00.000Z | scripts/venv/lib/python2.7/site-packages/cogent/core/entity.py | sauloal/cnidaria | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Provides the entities, the building blocks of the SMCRA hierarchy
representation of a macromolecular structure.
The MultiEntity class is a special Entity class to hold multiple instances of
other entities. All Entities apart from the Atom can hold others and inherit
from the MultiEntity. The Entity is the most basic class to deal with
structural and molecular data. Do not use it directly since some functions
depend on methods provided by sub-classes. Classes inheriting from MultiEntity
have to provide some attributes during init (e.g. self.level = a valid string
inside the SMCRA hierarchy). Holders of entities are like normal MultiEntities,
but are temporary and are outside the parent-children axes.
"""
import cogent
from cogent.core.annotation import SimpleVariable
from numpy import (sqrt, arctan2, power, array, mean, sum)
from cogent.data.protein_properties import AA_NAMES, AA_ATOM_BACKBONE_ORDER, \
AA_ATOM_REMOTE_ORDER, AREAIMOL_VDW_RADII, \
DEFAULT_AREAIMOL_VDW_RADIUS, AA_NAMES_3to1
from cogent.data.ligand_properties import HOH_NAMES, LIGAND_AREAIMOL_VDW_RADII
from operator import itemgetter, gt, ge, lt, le, eq, ne, or_, and_, contains, \
is_, is_not
from collections import defaultdict
from itertools import izip
from copy import copy, deepcopy
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_ '
HIERARCHY = ['H', 'S', 'M', 'C', 'R', 'A']
AREAIMOL_VDW_RADII.update(LIGAND_AREAIMOL_VDW_RADII)
# error while creating a structure (non-recoverable error)
# warning while creating a structure
# (something wrong with the input, but recoverable)
def sort_id_list(id_list, sort_tuple):
    """Sorts lists of id tuples. The order is defined by the PDB file
    specification.

    ``sort_tuple`` holds, per SMCRA level, the index of that level's id
    within each tuple of ``id_list`` (a falsy/None entry disables that
    sort key; ``hol_loc`` is unpacked but never used below).  The list is
    sorted in place from the least- to the most-significant key, relying
    on sort stability.

    NOTE(review): Python 2 only -- ``list.sort(cmp=...)`` was removed in
    Python 3, and the ``x is 0`` identity tests depend on CPython's
    small-int caching (``== 0`` would be portable).
    """
    (hol_loc, str_loc, mod_loc, chn_loc, res_loc, at_loc) = sort_tuple
    # even a simple id is a tuple, this makes sorting general
    # this assumes that the implementation of sorting is stable.
    # does it work for others then cPython.
    # residue/space_last/atom are comparator helpers defined elsewhere in
    # this module.
    if res_loc or res_loc is 0:
        id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: residue(x[0], y[0])) # by res_name
    if at_loc or at_loc is 0:
        id_list.sort(key=itemgetter(at_loc), cmp=lambda x, y: space_last(x[1], y[1])) # by alt_loc
    if at_loc or at_loc is 0:
        id_list.sort(key=itemgetter(at_loc), cmp=lambda x, y: atom(x[0], y[0])) # by at_id
    if res_loc or res_loc is 0:
        id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: cmp(x[2], y[2])) # by res_ic
    if res_loc or res_loc is 0:
        id_list.sort(key=itemgetter(res_loc), cmp=lambda x, y: cmp(x[1], y[1])) # by res_id
    if chn_loc or chn_loc is 0:
        id_list.sort(key=itemgetter(chn_loc), cmp=space_last) # by chain
    if mod_loc or mod_loc is 0:
        id_list.sort(key=itemgetter(mod_loc)) # by model
    if str_loc or str_loc is 0:
        id_list.sort(key=itemgetter(str_loc)) # by structure
    return id_list
def merge(dicts):
    """Merge an iterable of dictionaries into a single new dictionary.

    Later dictionaries win on duplicate keys; the inputs are untouched.
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
def unique(lists):
    """Return the sorted union of several iterables as a tuple (a sorted
    set of all distinct items)."""
    return tuple(sorted(set().union(*lists)))
| 37.483709 | 99 | 0.584693 |
4ddb38d835903f3211b8436bd705a411ed81f133 | 3,381 | py | Python | venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py | janten/ESP32-Paxcounter | 212317f3800ec87aef4847e7d60971d4bb9e7d70 | [
"Apache-2.0"
] | 12 | 2019-03-06T03:44:42.000Z | 2021-07-22T03:47:24.000Z | venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py | janten/ESP32-Paxcounter | 212317f3800ec87aef4847e7d60971d4bb9e7d70 | [
"Apache-2.0"
] | 10 | 2020-10-28T10:04:58.000Z | 2021-07-21T20:47:27.000Z | venv/lib/python3.9/site-packages/ajsonrpc/tests/test_dispatcher.py | janten/ESP32-Paxcounter | 212317f3800ec87aef4847e7d60971d4bb9e7d70 | [
"Apache-2.0"
] | 4 | 2021-07-21T20:00:14.000Z | 2021-10-12T19:43:30.000Z | import unittest
from ..dispatcher import Dispatcher
class TestDispatcher(unittest.TestCase):
| 27.487805 | 79 | 0.561964 |
4ddd26506c5a2c32c298c1cac79c89b498178da9 | 7,206 | py | Python | mesh.py | msellens/pms | d175fded80087a907e8fab6ae09f6d1be69b3353 | [
"MIT"
] | null | null | null | mesh.py | msellens/pms | d175fded80087a907e8fab6ae09f6d1be69b3353 | [
"MIT"
] | null | null | null | mesh.py | msellens/pms | d175fded80087a907e8fab6ae09f6d1be69b3353 | [
"MIT"
] | null | null | null | from itertools import product
import struct
import pickle
import numpy as np
from scipy import sparse
from scipy import isnan as scipy_isnan
import numpy.matlib
# Template for one facet of an ASCII STL file; ``face`` is a sequence of
# three XYZ vertices.  The normal is written as 0 0 0 (readers commonly
# recompute it from the vertex winding).
ASCII_FACET = """facet normal 0 0 0
outer loop
vertex {face[0][0]:.4f} {face[0][1]:.4f} {face[0][2]:.4f}
vertex {face[1][0]:.4f} {face[1][1]:.4f} {face[1][2]:.4f}
vertex {face[2][0]:.4f} {face[2][1]:.4f} {face[2][2]:.4f}
endloop
endfacet
"""
# struct formats for binary STL: 80-byte header + uint32 facet count,
# then per facet 12 float32 (normal + 3 vertices) and a uint16
# attribute-byte-count field.
BINARY_HEADER ="80sI"
BINARY_FACET = "12fH"
def get_quad(center, n, side=1.):
    """Build a single square quad centred at ``center`` in the plane
    orthogonal to the normal ``n``.

    :param center: length-3 position (tuple or array) of the quad centre.
    :param n: length-3 numpy array, the face normal (must expose ``.shape``).
    :param side: edge length of the quad.
    :return: a list containing one 4-vertex loop (numpy arrays), or
        ``None`` when the normal contains NaNs or is numerically zero --
        no plane orientation can be derived in that case.
    """
    # Cleanup vs. original: removed unused locals (x/y/z unpack of center,
    # aliases for np.linalg.norm / np.sign) and dead commented-out code.
    n1, n2, n3 = np.array(n).astype('float64')
    half = side / 2.

    # Degenerate normals carry no orientation information.
    if any(np.isnan(v) for v in n):
        return None
    if np.allclose(n, np.zeros(n.shape)):
        return None

    # Pick a helper axis C guaranteed not to be (nearly) parallel to n,
    # then derive two in-plane vectors orthogonal to n and to each other,
    # each of length side/2.
    if np.abs(n2) > 0.2 or np.abs(n3) > 0.2:
        C = np.array([1, 0, 0])
    else:
        C = np.array([0, 1, 0])
    ortho1 = np.cross(n, C)
    ortho1 *= half / np.linalg.norm(ortho1)
    ortho2 = np.cross(n, ortho1)
    ortho2 *= half / np.linalg.norm(ortho2)
    # Flip the Y component of both in-plane vectors -- axis convention of
    # the surrounding mesh code (presumably image-row vs. model-space
    # handedness; TODO confirm against the STL output).
    ortho1[1] = -ortho1[1]
    ortho2[1] = -ortho2[1]
    return [[
        center + ortho1,
        center + ortho2,
        center - ortho1,
        center - ortho2,
    ]]
def surfaceFromNormals(normals):
    """Integrate a (w, h, 3) normal map into a height field.

    Builds a sparse system encoding the finite-difference relations
    ``n_z * dz/dx = n_x`` and ``n_z * dz/dy = n_y`` (two rows per pixel),
    replaces the rows at validity (NaN) boundaries with a tangential
    boundary condition, anchors outer-boundary pixels at z = 0, and
    solves for z in the least-squares sense with LSMR.

    :param normals: (w, h, 3) array of per-pixel normals; NaN entries
        mark invalid pixels.
    :return: (w, h, 3) array stacking the x index, y index and the
        recovered z values.

    NOTE(review): ``valid_indices`` and ``vectorsize`` are computed but
    never used; ``np.bool`` was removed in modern NumPy; the two print()
    calls look like leftover debug output.
    """
    valid_indices = ~np.isnan(normals)
    w, h, d = normals.shape
    # Duplicate each normal component so there is one entry per system
    # row (x-relation rows first, then y-relation rows).
    nx = np.transpose(np.hstack((
        normals[:,:,0].ravel(),
        normals[:,:,0].ravel(),
    )))
    ny = np.transpose(np.hstack((
        normals[:,:,1].ravel(),
        normals[:,:,1].ravel(),
    )))
    nz = np.transpose(np.hstack((
        normals[:,:,2].ravel(),
        normals[:,:,2].ravel(),
    )))
    vectorsize = nz.shape
    valid_idx = ~np.isnan(nz)
    M = sparse.dia_matrix((2*w*h, w*h), dtype=np.float64)
    # n_z z(x + 1, y) - n_z z(x,y) = n_x
    M.setdiag(-nz, 0)
    M.setdiag(nz, 1)
    # n_z z(x, y + 1) - n_z z(x,y) = n_y
    M.setdiag(-nz, -w*h)
    M.setdiag(np.hstack(([0] * w, nz)), -w*h + w)
    # Boundary values
    # n_y ( z(x,y) - z(x + 1, y)) = n_x ( z(x,y) - z(x, y + 1))
    # TODO: Redo for boundaries in Y-axis
    M = M.tolil()
    half_size = valid_idx.size // 2
    # Detect where the validity mask flips along x: inner_boundaries are
    # valid pixels adjacent to invalid ones (rows get the tangential
    # condition), outer_boundaries are the pixels just outside.
    bidxd = np.hstack((np.diff(valid_idx.astype('int8')[:half_size]), [0]))
    inner_boundaries = np.roll(bidxd==1, 1) | (bidxd==-1)
    outer_boundaries = (bidxd==1) | (np.roll(bidxd==-1, 1))
    # Repeat the detection on the transposed grid to catch flips along y.
    nz_t = np.transpose(valid_idx.reshape((w,h,d*2//3)), (1, 0, 2)).ravel()
    valid_idx_t = ~np.isnan(nz_t)
    bidxd = np.hstack((np.diff(valid_idx_t.astype('int8')[:half_size]), [0]))
    inner_boundaries |= np.roll(bidxd==1, 1) | (bidxd==-1)
    outer_boundaries |= (bidxd==1) | (np.roll(bidxd==-1, 1))
    bidx = np.zeros((half_size,), dtype=np.bool)
    bidx[inner_boundaries] = True
    bidx = np.indices(bidx.shape)[0][bidx]
    # Overwrite boundary rows with the tangential boundary condition.
    M[bidx, bidx] = nx[bidx]
    M[bidx, bidx + w] = -nx[bidx]
    M[bidx + half_size, bidx] = ny[bidx]
    M[bidx + half_size, bidx + 1] = -ny[bidx]
    M = M.tocsr()[valid_idx]
    # Append one weighted row per outer-boundary pixel pinning its z to 0
    # (removes the free constant offset of the integration).
    weight = 1
    OB = np.zeros((outer_boundaries.sum(), w*h,))
    OB[np.indices((outer_boundaries.sum(),))[0], np.where(outer_boundaries==True)] = weight
    M = sparse.vstack((M,OB))
    # Build [ n_x n_y ]'
    m = np.hstack((
        normals[:,:,0].ravel(),
        normals[:,:,1].ravel(),
    )).reshape(-1, 1)
    print(inner_boundaries.shape, m.shape)
    # Boundary rows have a zero right-hand side.
    i_b = np.hstack((inner_boundaries, inner_boundaries)).reshape(-1,1)
    print(i_b.shape, m.shape)
    m[i_b] = 0
    m = m[valid_idx]
    m = np.vstack((
        m,
        np.zeros((outer_boundaries.sum(), 1)),
    ))
    # Solve least squares
    assert not np.isnan(m).any()
    # x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = sparse.linalg.lsqr(M, m)
    x, istop, itn, normr, normar, norma, conda, normx = sparse.linalg.lsmr(M, m)
    # Build the surface (x, y, z) with the computed values of z
    surface = np.dstack((
        np.indices((w, h))[0],
        np.indices((w, h))[1],
        x.reshape((w, h))
    ))
    return surface
def writeMesh(surface, normals, filename):
    """Write a binary STL of the surface: one quad per sampled grid point.

    The surface/normal grids are sampled every 5th row and column; points
    whose normal yields no quad (NaN or zero normal) are skipped.
    """
    with open(filename, 'wb') as handle:
        writer = Binary_STL_Writer(handle)
        n_rows, n_cols = surface.shape[0], surface.shape[1]
        for row in range(0, n_rows, 5):
            for col in range(0, n_cols, 5):
                faces = get_quad(surface[row, col, :], normals[row, col, :], 4)
                if not faces:
                    continue
                writer.add_faces(faces)
        writer.close()
def write3dNormals(normals, filename):
    """Write a binary STL visualising the normal field itself: one quad
    per sampled normal, placed at x=0 on the (row, col) grid.
    """
    with open(filename, 'wb') as handle:
        writer = Binary_STL_Writer(handle)
        for row in range(0, normals.shape[0], 5):
            for col in range(0, normals.shape[1], 5):
                faces = get_quad((0, row, col), normals[row, col, :], 4)
                if not faces:
                    continue
                writer.add_faces(faces)
        writer.close()
def surfaceToHeight(surface):
    """Normalise the z channel of a (w, h, 3) surface to the [0, 1] range.

    Returns a (w, h) array.  A perfectly flat surface divides by zero,
    matching the original behaviour (NaN/inf output).
    """
    z_values = surface[:, :, 2]
    z_min = z_values.min()
    z_max = z_values.max()
    return (z_values - z_min) / (z_max - z_min)
def writeObj(surface, normals, filename):
    # Stub: Wavefront OBJ export is not implemented; only prints a
    # placeholder marker and writes nothing to ``filename``.
    print('obj here')
if __name__ == '__main__':
    # Load a pickled normal map and export it as an STL mesh.
    with open('data.pkl', 'rb') as fhdl:
        normals = pickle.load(fhdl)
    # NOTE(review): writeMesh takes (surface, normals, filename) -- this
    # one-argument call raises TypeError; a surface (e.g. from
    # surfaceFromNormals) and an output path appear to be missing.
    writeMesh(normals)
| 28.709163 | 96 | 0.543991 |
4ddd878eccdd7091a7bbb342e9e801e07d0428f5 | 4,759 | py | Python | vaccine.py | brannbrann/findavaccinesms | 91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b | [
"Apache-2.0"
] | null | null | null | vaccine.py | brannbrann/findavaccinesms | 91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b | [
"Apache-2.0"
] | null | null | null | vaccine.py | brannbrann/findavaccinesms | 91e21a91a25d69efed3266c2ccbb5b0e76f5ca1b | [
"Apache-2.0"
] | null | null | null | '''
This is a python script that requires you have python installed, or in a cloud environment.
This script scrapes the CVS website looking for vaccine appointments in the cities you list.
To update for your area, update the locations commented below.
If you receive an error that says something is not installed, type
pip install requests
etc.
Happy vaccination!
'''
import requests
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime, timedelta
if __name__ == '__main__':
    # findAVaccine() polls until interrupted; Ctrl-C exits cleanly.
    # NOTE(review): findAVaccine is defined elsewhere in this file (not
    # visible in this fragment).
    try:
        findAVaccine()
    except KeyboardInterrupt:
        print('Exiting...')
| 37.769841 | 128 | 0.63921 |
4ddf8f7618bc1ce4a506f069f1a4aa3da6ef6a1b | 22 | py | Python | pefile/__init__.py | 0x1F9F1/binja-msvc | be2577c22c8d37fd1e2e211f80b1c9a920705bd2 | [
"MIT"
] | 9 | 2019-02-08T10:01:39.000Z | 2021-04-29T12:27:34.000Z | pefile/__init__.py | DatBrick/binja-msvc | 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4 | [
"MIT"
] | 1 | 2019-07-04T20:09:57.000Z | 2019-07-12T11:10:15.000Z | pefile/__init__.py | DatBrick/binja-msvc | 751ffc1450c569bad23ac67a761d0f1fbd4ca4c4 | [
"MIT"
] | 2 | 2019-03-03T13:00:14.000Z | 2020-05-01T05:35:04.000Z | from .pefile import *
| 11 | 21 | 0.727273 |
4de04f66464c9444c5a3decd7af60b9026030890 | 6,643 | py | Python | examples/viewer3DVolume.py | vincefn/silx | 4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444 | [
"CC0-1.0",
"MIT"
] | null | null | null | examples/viewer3DVolume.py | vincefn/silx | 4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444 | [
"CC0-1.0",
"MIT"
] | null | null | null | examples/viewer3DVolume.py | vincefn/silx | 4b239abfc90d2fa7d6ab61425f8bfc7b83c0f444 | [
"CC0-1.0",
"MIT"
] | 1 | 2017-04-02T18:00:14.000Z | 2017-04-02T18:00:14.000Z | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This script illustrates the use of :class:`silx.gui.plot3d.ScalarFieldView`.
It loads a 3D scalar data set from a file and displays iso-surfaces and
an interactive cutting plane.
It can also be started without providing a file.
"""
from __future__ import absolute_import, division, unicode_literals
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "05/01/2017"
import argparse
import logging
import os.path
import sys
import numpy
from silx.gui import qt
from silx.gui.plot3d.ScalarFieldView import ScalarFieldView
from silx.gui.plot3d import SFViewParamTree
logging.basicConfig()
_logger = logging.getLogger(__name__)
import h5py
def load(filename):
    """Load 3D scalar field from file.

    It supports 3D stack HDF5 files and numpy (.npy) files.

    :param str filename: Name of the file to open; for HDF5 use the
        ``<filename>::<path_in_file>`` form, optionally followed by
        ``#<index>`` parts selecting a 3D subset of an nD dataset.
    :return: numpy.ndarray with 3 dimensions (C order, float32 for HDF5).
    :raises IOError: missing file or unsupported format.
    :raises ValueError: HDF5 file given without a ``::<path>`` part.
    :raises RuntimeError: loaded data is not 3-dimensional.
    """
    # The part before '::' is the on-disk filename in both branches.
    if not os.path.isfile(filename.split('::')[0]):
        raise IOError('No input file: %s' % filename)
    if h5py.is_hdf5(filename.split('::')[0]):
        if '::' not in filename:
            raise ValueError(
                'HDF5 path not provided: Use <filename>::<path> format')
        filename, path = filename.split('::')
        # '#'-suffixes select indices along the leading dimensions.
        path, indices = path.split('#')[0], path.split('#')[1:]
        with h5py.File(filename) as f:
            data = f[path]
            # Loop through indices along first dimensions
            for index in indices:
                data = data[int(index)]
            # Materialise while the file is still open.
            data = numpy.array(data, order='C', dtype='float32')
    else: # Try with numpy
        try:
            data = numpy.load(filename)
        except IOError:
            raise IOError('Unsupported file format: %s' % filename)
    if data.ndim != 3:
        raise RuntimeError(
            'Unsupported data set dimensions, only supports 3D datasets')
    return data
def default_isolevel(data):
    """Compute a default isosurface level: mean + 1 std of the finite
    values of the data set.

    :param numpy.ndarray data: The data to process
    :rtype: float (0 when the data contains no finite value)
    """
    finite_values = data[numpy.isfinite(data)]
    if finite_values.size == 0:
        return 0
    return numpy.mean(finite_values) + numpy.std(finite_values)
# Parse input arguments
# CLI: optional iso-level, per-axis scale/offset, and a filename
# (see `load` above for the accepted <filename>::<path>#<idx> syntax).
parser = argparse.ArgumentParser(
    description=__doc__)
parser.add_argument(
    '-l', '--level', nargs='?', type=float, default=float('nan'),
    help="The value at which to generate the iso-surface")
parser.add_argument(
    '-sx', '--xscale', nargs='?', type=float, default=1.,
    help="The scale of the data on the X axis")
parser.add_argument(
    '-sy', '--yscale', nargs='?', type=float, default=1.,
    help="The scale of the data on the Y axis")
parser.add_argument(
    '-sz', '--zscale', nargs='?', type=float, default=1.,
    help="The scale of the data on the Z axis")
parser.add_argument(
    '-ox', '--xoffset', nargs='?', type=float, default=0.,
    help="The offset of the data on the X axis")
parser.add_argument(
    '-oy', '--yoffset', nargs='?', type=float, default=0.,
    help="The offset of the data on the Y axis")
parser.add_argument(
    '-oz', '--zoffset', nargs='?', type=float, default=0.,
    help="The offset of the data on the Z axis")
parser.add_argument(
    'filename',
    nargs='?',
    default=None,
    help="""Filename to open.
    It supports 3D volume saved as .npy or in .h5 files.
    It also support nD data set (n>=3) stored in a HDF5 file.
    For HDF5, provide the filename and path as: <filename>::<path_in_file>.
    If the data set has more than 3 dimensions, it is possible to choose a
    3D data set as a subset by providing the indices along the first n-3
    dimensions with '#':
    <filename>::<path_in_file>#<1st_dim_index>...#<n-3th_dim_index>
    E.g.: data.h5::/data_5D#1#1
    """)
args = parser.parse_args(args=sys.argv[1:])
# Start GUI
app = qt.QApplication([])
# Create the viewer main window
window = ScalarFieldView()
# Create a parameter tree for the scalar field view
treeView = SFViewParamTree.TreeView(window)
treeView.setSfView(window)  # Attach the parameter tree to the view
# Add the parameter tree to the main window in a dock widget
dock = qt.QDockWidget()
dock.setWindowTitle('Parameters')
dock.setWidget(treeView)
window.addDockWidget(qt.Qt.RightDockWidgetArea, dock)
# Load data from file
if args.filename is not None:
    data = load(args.filename)
    _logger.info('Data:\n\tShape: %s\n\tRange: [%f, %f]',
                 str(data.shape), data.min(), data.max())
else:
    # Create dummy data
    # 3D sinc-like test pattern on a 64^3 grid when no file is given.
    _logger.warning('Not data file provided, creating dummy data')
    coords = numpy.linspace(-10, 10, 64)
    z = coords.reshape(-1, 1, 1)
    y = coords.reshape(1, -1, 1)
    x = coords.reshape(1, 1, -1)
    data = numpy.sin(x * y * z) / (x * y * z)
# Set ScalarFieldView data
window.setData(data)
# Set scale of the data
window.setScale(args.xscale, args.yscale, args.zscale)
# Set offset of the data
window.setTranslation(args.xoffset, args.yoffset, args.zoffset)
# Set axes labels
window.setAxesLabels('X', 'Y', 'Z')
# Add an iso-surface
if not numpy.isnan(args.level):
    # Add an iso-surface at the given iso-level
    window.addIsosurface(args.level, '#FF0000FF')
else:
    # Add an iso-surface from a function
    window.addIsosurface(default_isolevel, '#FF0000FF')
window.show()
app.exec_()
| 32.091787 | 79 | 0.663104 |
4de27831141702d223c7260054a467c2f0b9791f | 260 | py | Python | solentware_misc/core/__init__.py | RogerMarsh/solentware-misc | 3b031b26bc747193f25f7ffc9e6d24d7278ad30b | [
"BSD-3-Clause"
] | null | null | null | solentware_misc/core/__init__.py | RogerMarsh/solentware-misc | 3b031b26bc747193f25f7ffc9e6d24d7278ad30b | [
"BSD-3-Clause"
] | null | null | null | solentware_misc/core/__init__.py | RogerMarsh/solentware-misc | 3b031b26bc747193f25f7ffc9e6d24d7278ad30b | [
"BSD-3-Clause"
] | null | null | null | # __init__.py
# Copyright 2017 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Miscellaneous modules for applications available at solentware.co.uk.
These do not belong in the solentware_base or solentware_grid packages,
siblings of solentware_misc.
"""
| 26 | 72 | 0.792308 |
4de2f8a837d616a9960e145e5c2a45f95ecf9856 | 127 | py | Python | learn_tf/MNIST.py | pkumusic/AI | 912f1b6f12177e301c4a7efccc305bcb52e4d823 | [
"MIT"
] | 1 | 2017-05-26T15:23:03.000Z | 2017-05-26T15:23:03.000Z | learn_tf/MNIST.py | pkumusic/AI | 912f1b6f12177e301c4a7efccc305bcb52e4d823 | [
"MIT"
] | null | null | null | learn_tf/MNIST.py | pkumusic/AI | 912f1b6f12177e301c4a7efccc305bcb52e4d823 | [
"MIT"
] | null | null | null | __author__ = "Music"
# MNIST For ML Beginners
# https://www.tensorflow.org/versions/r0.9/tutorials/mnist/beginners/index.html
| 25.4 | 79 | 0.771654 |
4de340ca20d63248997dbff4ccd4dfac76793fb6 | 294 | py | Python | EXC/CW1/task7/mapper.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | null | null | null | EXC/CW1/task7/mapper.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | 1 | 2022-02-23T07:34:53.000Z | 2022-02-23T07:34:53.000Z | EXC/CW1/task7/mapper.py | easyCZ/UoE-Projects | 7651c8caf329c4f7b4562eba441bfc24124cfcfd | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# mapper.py
import sys
# Streaming map step: read "<row>\t<v0> <v1> ..." matrix rows from stdin
# and emit one "<col>\t<row>\t<value>" record per cell, so downstream
# grouping happens per column (matrix-transpose style shuffle).
for line in sys.stdin:
    row, values = line.strip().split('\t')
    row_values = values.split(' ')
    for (col, col_value) in enumerate(row_values):
        # out: <col> <row> <value>
print("{0}\t{1}\t{2}".format(col, row, col_value)) | 26.727273 | 58 | 0.585034 |
4de518de130a1d423998bfe32aad3a8e89b7b784 | 171 | py | Python | rllib/algorithms/maddpg/__init__.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | null | null | null | rllib/algorithms/maddpg/__init__.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | null | null | null | rllib/algorithms/maddpg/__init__.py | willfrey/ray | 288a81b42ef0186ab4db33b30191614a7bdb69f6 | [
"Apache-2.0"
] | 1 | 2019-09-24T16:24:49.000Z | 2019-09-24T16:24:49.000Z | from ray.rllib.algorithms.maddpg.maddpg import (
MADDPGConfig,
MADDPGTrainer,
DEFAULT_CONFIG,
)
# Names re-exported as the public API of this subpackage.
__all__ = ["MADDPGConfig", "MADDPGTrainer", "DEFAULT_CONFIG"]
| 21.375 | 61 | 0.730994 |
4de6e32302e33f5a63e0ba995f624e069fef3439 | 1,849 | py | Python | Fig8_RTM/RTM.py | GeoCode-polymtl/Seis_float16 | 5f9660cbdc37e5ab7f6054f7547df2ffb661a81d | [
"MIT"
] | null | null | null | Fig8_RTM/RTM.py | GeoCode-polymtl/Seis_float16 | 5f9660cbdc37e5ab7f6054f7547df2ffb661a81d | [
"MIT"
] | 5 | 2020-01-28T22:17:04.000Z | 2022-02-09T23:33:07.000Z | Fig8_RTM/RTM.py | GeoCode-polymtl/Seis_float16 | 5f9660cbdc37e5ab7f6054f7547df2ffb661a81d | [
"MIT"
] | 3 | 2019-11-27T06:06:04.000Z | 2020-06-05T17:18:15.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Perform RTM on marmousi
"""
import os
import numpy as np
import h5py as h5
from scipy.ndimage.filters import gaussian_filter
import sys
import shutil
from SeisCL import SeisCL
# Run the Marmousi RTM gradient once per floating-point configuration:
# csts['FP16'] values 1, 2, 3 paired with the names fp32, fp16io, fp16com.
names = ['fp32', 'fp16io', 'fp16com']
filedata = os.getcwd() + '/marmfp32'
seis = SeisCL()
# Read the acquisition constants saved next to the fp32 reference files,
# then restore the default working file name.
seis.file = os.getcwd() + '/marmfp32'
seis.read_csts(workdir="")
seis.file = 'SeisCL'
seis.file_datalist = filedata + '_din.mat'
seis.file_din = filedata + '_din.mat'
# Load the model; only vp is smoothed (migration uses a smooth
# background velocity).
file = h5.File(filedata + '_model.mat', "r")
models = {'vp': gaussian_filter(np.transpose(file['vp']), sigma=3),
          'vs': np.transpose(file['vs']),
          'rho': np.transpose(file['rho'])}
file.close()
"""
_________________Set inversion parameters for SeisCL____________________
"""
seis.csts['gradout'] = 1 # SeisCL has to output the gradient
seis.csts['scalerms'] = 0 # We don't scale each trace by the rms of the data
seis.csts['scalermsnorm'] = 0 # We don't scale each trace by its rms
seis.csts['scaleshot'] = 0 # We don't scale each shot
seis.csts['back_prop_type'] = 1
seis.csts['restype'] = 1 # Migration cost function
# NOTE(review): np.float was removed in NumPy >= 1.24; the leading 0*
# makes tmin 0 regardless, so the expression looks like a leftover.
seis.csts['tmin'] = 0*(np.float(seis.csts['NT'])-2) * seis.csts['dt']
for ii, FP16 in enumerate([1, 2, 3]):
    """
    _______________________Constants for inversion__________________________
    """
    filework = os.getcwd() + '/marmgrad_' + names[ii]
    seis.csts['FP16'] = FP16
    """
    _________________________Perform Migration______________________________
    """
    # Skip configurations whose gradient output already exists (resumable).
    if not os.path.isfile(filework + '_gout.mat'):
        seis.set_forward(seis.src_pos_all[3, :], models, withgrad=True)
        seis.execute()
        shutil.copy2(seis.workdir + "/" + seis.file_gout, filework + '_gout.mat')
    sys.stdout.write('Gradient calculation completed \n')
    sys.stdout.flush()
| 31.87931 | 81 | 0.670092 |
4de77886992362775de86d085f926f5ea3304df0 | 954 | py | Python | doc/default_issue/fix.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | null | null | null | doc/default_issue/fix.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | 1 | 2021-12-03T11:35:46.000Z | 2021-12-03T11:52:52.000Z | doc/default_issue/fix.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | 8 | 2021-12-03T11:07:55.000Z | 2022-03-23T13:35:05.000Z | #!/usr/bin/python3
from typing import List
from registry import the_registry
from param_collector import the_collector
# Sentinel marking "no default supplied" -- distinct from None so that
# None remains a legitimate default value.
# NOTE(review): ``Unique`` is not defined in this fragment (expected
# from one of the imports above or from code elided in this dump).
NO_DEFAULT = Unique()
NO_DEFAULT_TYPE = type(NO_DEFAULT)
# NOTE(review): ``Foobar`` is likewise undefined here -- this reads as
# demo/scratch code from the docs folder.
for x in Foobar.columns:
    print(x)
| 18.346154 | 73 | 0.627883 |
4de7d409e55429843384ad1f22b9b00b0eb2103a | 3,437 | py | Python | argonneV14.py | floresab/Toy-Models | 0b990563e1be903cbdcb56ead57d83bc3ca71198 | [
"MIT"
] | null | null | null | argonneV14.py | floresab/Toy-Models | 0b990563e1be903cbdcb56ead57d83bc3ca71198 | [
"MIT"
] | null | null | null | argonneV14.py | floresab/Toy-Models | 0b990563e1be903cbdcb56ead57d83bc3ca71198 | [
"MIT"
] | null | null | null | """
File : argonneV14.py
Language : Python 3.6
Created : 7/13/2018
Edited : 7/13/2018
San Diego State University
Department of Physics and Astronomy
#https://journals.aps.org/prc/pdf/10.1103/PhysRevC.51.38 --argonneV18
This code implements Argonne V14 potential outlined in ...
--CONSTANTS --
Hbar*c | 197.33 MeV fm
pion-Mass | 138.03 MeV
Wood-Saxon|
R | 0.5 fm
a | 0.2 fm
Operator | p | Ip | Sp | Index |
-----------------------------------------------------------
central | c | -4.801125 | 2061.5625 | 0 |
tao dot tao | tao | 0.798925 | -477.3125 | 1 |
sigma dot sigma| sigma | 1.189325 | -502.3125 | 2 |
(sigma)(tao) | sigma-tao | 0.182875 | 97.0625 | 3 |
Sij | t | -0.1575 | 108.75 | 4 |
Sij(tao) | t-tao | -0.7525 | 297.25 | 5 |
L dot S | b | 0.5625 | -719.75 | 6 |
L dot S (tao) | b-tao | 0.0475 | -159.25 | 7 |
L squared | q | 0.070625 | 8.625 | 8 |
L^2(tao) | q-tao | -0.148125 | 5.625 | 9 |
L^2(sigma | q-sigma | -0.040625 | 17.375 | 10 |
L^2(sigma)(tao)| q-sigma-tao | -0.001875 | -33.625 | 11 |
(L dot S)^2 | bb | -0.5425 | 391.0 | 12 |
(LS)^2(tao) | bb-tao | 0.0025 | 145.0 | 13 |
"""
import numpy as np
| 33.696078 | 84 | 0.364562 |
4de80e2e1c94dbe6762d16201a946a481593a775 | 543 | py | Python | solutions/python3/problem1556.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem1556.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem1556.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
1556. Thousand Separator
Given an integer n, add a dot (".") as the thousands separator and return it in string format.
Constraints:
0 <= n < 2^31
"""
| 19.392857 | 94 | 0.46593 |
4de95b2ae160d83f0a0fab9908a283c692256619 | 6,483 | py | Python | app/resources/base.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2019-07-25T21:15:05.000Z | 2019-07-25T21:15:05.000Z | app/resources/base.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 44 | 2019-08-05T15:24:00.000Z | 2022-01-31T23:11:31.000Z | app/resources/base.py | smartlab-br/datahub-api | 193e71172bb4891a5bbffc902da07ef57df9ab07 | [
"MIT"
] | 1 | 2021-05-11T07:49:51.000Z | 2021-05-11T07:49:51.000Z | ''' Controller para fornecer dados da CEE '''
from flask_restful import Resource
from service.qry_options_builder import QueryOptionsBuilder
from model.thematic import Thematic
def get_domain(self):
    """Return the cached domain model, building a Thematic instance on
    first access (lazy initialisation)."""
    if self.domain is not None:
        return self.domain
    self.domain = Thematic()
    return self.domain
def set_domain(self):
    ''' Setter invoked from constructor: (re)builds the Thematic domain
    model and stores it on the instance. '''
    self.domain = Thematic()
| 46.640288 | 94 | 0.608669 |
4de9705438995df854b9ebaf6e2d9530e21d53a7 | 3,155 | py | Python | tapioca_trello/resource_mapping/checklist.py | humrochagf/tapioca-trello | a7067a4c43b22e64cef67b68068580448a4cb420 | [
"MIT"
] | null | null | null | tapioca_trello/resource_mapping/checklist.py | humrochagf/tapioca-trello | a7067a4c43b22e64cef67b68068580448a4cb420 | [
"MIT"
] | null | null | null | tapioca_trello/resource_mapping/checklist.py | humrochagf/tapioca-trello | a7067a4c43b22e64cef67b68068580448a4cb420 | [
"MIT"
] | 1 | 2018-07-31T23:04:34.000Z | 2018-07-31T23:04:34.000Z | # -*- coding: utf-8 -*-
# Tapioca resource mapping for the Trello checklist endpoints.  Each key
# names an API method; each value gives the URL template ('resource',
# with {placeholders} filled at call time), a link to the official docs,
# and the allowed HTTP verbs.
CHECKLIST_MAPPING = {
    'checklist_retrieve': {
        'resource': '/checklists/{id}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsid'
        ),
        'methods': ['GET'],
    },
    'checklist_field_retrieve': {
        'resource': '/checklists/{id}/{field}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidfield'
        ),
        'methods': ['GET'],
    },
    'checklist_board_retrieve': {
        'resource': '/checklists/{id}/board',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidboard'
        ),
        'methods': ['GET'],
    },
    'checklist_card_retrieve': {
        'resource': '/checklists/{id}/cards',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcards'
        ),
        'methods': ['GET'],
    },
    'checklist_item_list': {
        'resource': '/checklists/{id}/checkItems',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcardscheckitems'
        ),
        'methods': ['GET'],
    },
    'checklist_item_retrieve': {
        'resource': '/checklists/{id}/checkItems/{idCheckItem}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcardscheckitemscheckitemid'
        ),
        'methods': ['GET'],
    },
    'checklist_update': {
        'resource': '/checklists/{id}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsid-1'
        ),
        'methods': ['PUT'],
    },
    'checklist_item_update': {
        'resource': '/checklists/{id}/checkItems/{idCheckItem}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcheckitemsidcheckitem'
        ),
        'methods': ['PUT'],
    },
    'checklist_name_update': {
        'resource': '/checklists/{id}/name',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidname'
        ),
        'methods': ['PUT'],
    },
    'checklist_create': {
        'resource': '/checklists',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklists'
        ),
        'methods': ['POST'],
    },
    'checklist_item_create': {
        'resource': '/checklists/{id}/checkItems',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcheckitems'
        ),
        'methods': ['POST'],
    },
    'checklist_delete': {
        'resource': '/checklists/{id}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsid-2'
        ),
        'methods': ['DELETE'],
    },
    'checklist_item_delete': {
        'resource': '/checklists/{id}/checkItems/{idCheckItem}',
        'docs': (
            'https://developers.trello.com/v1.0/reference'
            '#checklistsidcheckitemsid'
        ),
        'methods': ['DELETE'],
    },
}
| 28.944954 | 64 | 0.496672 |
4dea1d4995a7ebb956d68ed48040d475a502bb1f | 2,962 | py | Python | investimentos.py | isaiaspereira307/invest | ad0aa40dca4ece75fb7dad98415e73dc382f662a | [
"MIT"
] | null | null | null | investimentos.py | isaiaspereira307/invest | ad0aa40dca4ece75fb7dad98415e73dc382f662a | [
"MIT"
] | null | null | null | investimentos.py | isaiaspereira307/invest | ad0aa40dca4ece75fb7dad98415e73dc382f662a | [
"MIT"
] | null | null | null | import json
import os
acoes = ler_arquivo("acoes.json")
opcao=chamarMenu()
while opcao > 0 and opcao < 5:
if opcao == 1:
print(registrar(acoes, "acoes.json"))
elif opcao == 2:
exibir("acoes.json")
elif opcao == 3:
sair()
opcao = chamarMenu() | 27.174312 | 61 | 0.609723 |
4dea6041225ae15383493ad1d5f6078ade49cd6b | 10,718 | py | Python | lib/ipython_view.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | 2 | 2018-10-05T13:32:46.000Z | 2022-01-01T22:51:20.000Z | lib/ipython_view.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | 4 | 2021-06-08T19:33:40.000Z | 2022-03-11T23:18:06.000Z | lib/ipython_view.py | drewp/light9 | ab173a40d095051546e532962f7a33ac502943a6 | [
"MIT"
] | null | null | null | # this version is adapted from http://wiki.ipython.org/Old_Embedding/GTK
"""
Backend to the console plugin.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
"""
# this file is a modified version of source code from the Accerciser project
# http://live.gnome.org/accerciser
from gi.repository import Gtk
from gi.repository import Gdk
import re
import sys
import os
from gi.repository import Pango
from io import StringIO
from functools import reduce
try:
import IPython
except Exception as e:
raise "Error importing IPython (%s)" % str(e)
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
| 35.026144 | 90 | 0.628755 |
4deba880f54b833c42a876a0e52201d76815fdfb | 513 | py | Python | todo/urls.py | incomparable/Django | ba2f38f694b1055215559c4ca4173c245918fabf | [
"Apache-2.0"
] | null | null | null | todo/urls.py | incomparable/Django | ba2f38f694b1055215559c4ca4173c245918fabf | [
"Apache-2.0"
] | null | null | null | todo/urls.py | incomparable/Django | ba2f38f694b1055215559c4ca4173c245918fabf | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^get', views.index, name='index'),
url(r'^details/(?P<id>\w)/$', views.details, name='details'),
url(r'^add', views.add, name='add'),
url(r'^delete', views.delete, name='delete'),
url(r'^update', views.update, name='update'),
# url(r'^signup', views.signup, name='signup'),
# url(r'^login', views.login, name='login'),
# url(r'^login/$', auth_views.login),
]
| 28.5 | 65 | 0.598441 |
4debda04f4303a03d05f73d0f622731078a63cdf | 336 | py | Python | first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py | PetkoAndreev/Python-basics | a376362548380ae50c7c707551cb821547f44402 | [
"MIT"
] | null | null | null | first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py | PetkoAndreev/Python-basics | a376362548380ae50c7c707551cb821547f44402 | [
"MIT"
] | null | null | null | first_steps_in_coding_and_simple_operations_and_calculations/exercise/charity_campaign.py | PetkoAndreev/Python-basics | a376362548380ae50c7c707551cb821547f44402 | [
"MIT"
] | null | null | null | days = int(input())
sladkar = int(input())
cake = int(input())
gofreta = int(input())
pancake = int(input())
cake_price = cake*45
gofreta_price = gofreta*5.8
pancake_price = pancake*3.2
day_price = (cake_price + gofreta_price + pancake_price)*sladkar
total_price = days*day_price
campaign = total_price - (total_price/8)
print(campaign) | 28 | 64 | 0.741071 |
4dec80c0904955f695f9881970d5b2f7945e222c | 9,234 | py | Python | deepbiome/loss_and_metric.py | Young-won/deepbiome | 644bc226f1149038d0af7203a03a77ca6e931835 | [
"BSD-3-Clause"
] | 4 | 2019-10-20T15:56:19.000Z | 2021-03-17T16:48:35.000Z | deepbiome/loss_and_metric.py | Young-won/deepbiome | 644bc226f1149038d0af7203a03a77ca6e931835 | [
"BSD-3-Clause"
] | 1 | 2019-11-11T22:47:57.000Z | 2019-11-11T22:47:57.000Z | deepbiome/loss_and_metric.py | Young-won/deepbiome | 644bc226f1149038d0af7203a03a77ca6e931835 | [
"BSD-3-Clause"
] | 1 | 2019-11-11T18:17:58.000Z | 2019-11-11T18:17:58.000Z | ######################################################################
## DeepBiome
## - Loss and metrics (mse, cross-entropy)
##
## July 10. 2019
## Youngwon (youngwon08@gmail.com)
##
## Reference
## - Keras (https://github.com/keras-team/keras)
######################################################################
import numpy as np
import sklearn.metrics as skmetrics
from keras.callbacks import Callback
import tensorflow as tf
import keras.backend as K
from keras.losses import mean_squared_error, mean_absolute_error, binary_crossentropy, categorical_crossentropy, sparse_categorical_crossentropy
from keras.metrics import binary_accuracy, categorical_accuracy, sparse_categorical_accuracy
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
###############################################################################################################################
# tf loss functions
# TODO
# https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras
# def auc(y_true, y_pred):
# return NotImplementedError()
###############################################################################################################################
# helper
###############################################################################################################################
# if __name__ == "__main__":
# test_metrics = {'Accuracy':binary_accuracy, 'Precision':precision, 'Recall':recall}
# print('Test loss functions %s' % test_metrics.keys())
# y_true_set = np.array([[[0,0,0,0,0],
# [0,0,0,0,0],
# [0,1,1,0,0],
# [1,1,1,0,0],
# [0,1,0,0,0]]])
# y_pred_set = np.array([[[0,0,0,0,1],
# [0,0,0,0,0],
# [0,1,0.6,0,0],
# [0,1,1,0,0],
# [0,0.3,0,0,0]]])
# def test(acc, y_true_set, y_pred_set):
# sess = tf.Session()
# K.set_session(sess)
# with sess.as_default():
# return acc.eval(feed_dict={y_true: y_true_set, y_pred: y_pred_set})
# # tf
# y_true = tf.placeholder("float32", shape=(None,y_true_set.shape[1],y_true_set.shape[2]))
# y_pred = tf.placeholder("float32", shape=(None,y_pred_set.shape[1],y_pred_set.shape[2]))
# metric_list = [binary_accuracy(y_true, y_pred),
# precision(y_true, y_pred),
# recall(y_true, y_pred)]
# # numpy
# print('%15s %15s %15s' % tuple(test_metrics.keys()))
# print('tf : {}'.format([test(acc, y_true_set, y_pred_set) for acc in metric_list]))
# print('np : {}'.format(np.round(metric_test(y_true_set[0],y_pred_set[0]),8)))
| 42.164384 | 144 | 0.594975 |
4ded2765ebba38c75e11130b9978c0647bfd5359 | 3,177 | py | Python | Hough.py | andresgmz/Scripts-Python | 1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937 | [
"MIT"
] | null | null | null | Hough.py | andresgmz/Scripts-Python | 1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937 | [
"MIT"
] | null | null | null | Hough.py | andresgmz/Scripts-Python | 1f56e5790dc9c38d9bbf5dc040ead45a8f3ca937 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
# Ask the user to pick an image file via a (hidden) Tk file dialog.
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
img = cv2.imread(root.filename)  # BGR image; None if the path is invalid — not checked here
root.destroy()
# Convert to gray-scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image (5x5 median filter) to reduce noise before the Hough transform
img_blur = cv2.medianBlur(gray, 5)
# Apply hough transform on the image8 $$$img.shape[0]/16, param1=100, param2=11, minRadius=62, maxRadius=67
# Draw detected circles; circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/16, param1=200, param2=25, minRadius=60, maxRadius=67)
# Detect faces with a Haar cascade.
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
face_cascade = cv2.CascadeClassifier('C:/Users/andre/Desktop/NovenoSemestre/VisionArtificial/Python/haarcascade_frontalface_alt.xml')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)  # NOTE(review): recomputes the grayscale already held in `gray`
faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # scaleFactor=1.3, minNeighbors=5
for (x,y,w,h) in faces:
center = (x + w//2, y + h//2)
#circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=50, maxRadius=100)
circles = cv2.HoughCircles(img_blur, cv2.HOUGH_GRADIENT, 1, img.shape[0]/128, param1=100, param2=11, minRadius=(w//2-10), maxRadius=(w//2+10))
(h, w) = img_blur.shape[:2] #Calcular tamao de la imageb
(pointRefX,pointRefY) = center
puntoMinimo =100
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
#Definir el circulo mas cercano de la
xCercano =np.absolute(i[0]-pointRefX)
yCercano =np.absolute(i[1]-pointRefY)
puntoCercano = xCercano+yCercano
if (puntoCercano < puntoMinimo):
puntoMinimo = puntoCercano
circuloCercano = i
# Draw outer circle
#frame = cv2.ellipse(img, center, (w//2, h//2), 0, 0, 360,(100, 7, 55), 2)
cv2.ellipse(img, (circuloCercano[0], circuloCercano[1]),(circuloCercano[2],circuloCercano[2]+15),0,0,360,(0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3)
""" cv2.circle(img, (circuloCercano[0], circuloCercano[1]), circuloCercano[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (circuloCercano[0], circuloCercano[1]), 2, (0, 0, 255), 3) """
""" if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
#Definir el circulo mas cercano de la
xCercano =np.absolute(i[0]-pointRefX)
yCercano =np.absolute(i[1]-pointRefY)
puntoCercano = xCercano+yCercano
if (puntoCercano < puntoMinimo):
puntoMinimo = puntoCercano
circuloCercano = i
# Draw outer circle
cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
# Draw inner circle
cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
"""
cv2.imshow("Mascara",img)
cv2.waitKey(0) | 38.743902 | 153 | 0.645892 |
4dee15ccda1b59264009aac028177487941365ec | 3,927 | py | Python | src/SentimentAnalyzer.py | IChowdhury01/Sentiment-Analyzer | 0a566365eed00b0e76feb77c638579dd80f75068 | [
"MIT"
] | null | null | null | src/SentimentAnalyzer.py | IChowdhury01/Sentiment-Analyzer | 0a566365eed00b0e76feb77c638579dd80f75068 | [
"MIT"
] | null | null | null | src/SentimentAnalyzer.py | IChowdhury01/Sentiment-Analyzer | 0a566365eed00b0e76feb77c638579dd80f75068 | [
"MIT"
] | null | null | null | # Binary Sentiment Analysis using Recurrent Neural Networks
# Import libraries & dataset list
import tensorflow as tf
import tensorflow_datasets as dslist
# Load Dataset
print("\nLoading dataset...")
# Download dataset and dataset info
DATASET_CODE = 'imdb_reviews/subwords8k' # Using a TensorFlow binary sentiment classification dataset
dataset, dsinfo = dslist.load(DATASET_CODE,
with_info=True,
as_supervised=True)
# Separate into training and testing data.
training = dataset['train']
testing = dataset['test']
# Declare encoder (maps each word in a string to its index in the dataset's vocabulary)
encoder = dsinfo.features['text'].encoder
print("Dataset loaded.")
# Setup for training
# Prepare data. Create batches of encoded strings and zero-pad them.
BUFFER_SIZE = 10000
BATCH_SIZE = 64 # Max number of encoded strings in batch
padded_shapes = ([None], ())
training = (training
.shuffle(BUFFER_SIZE)
.padded_batch(BATCH_SIZE, padded_shapes=padded_shapes))
testing = (testing
.padded_batch(BATCH_SIZE, padded_shapes=padded_shapes))
# Setup Recurrent Neural Network (RNN)
# Create RNN model using Keras.
OUTPUT_SIZE = 64
rnn_model = tf.keras.Sequential([ # Keras Sequential model: processes sequence of encoded strings (indices), embeds each index into vector, then processes through embedding layer
tf.keras.layers.Embedding(encoder.vocab_size, OUTPUT_SIZE), # Add embedding layer: stores each word as trainable vector
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), # Make input sequence iterate both directions through LTSM layer (helps learn long-range dependencies).
# Add layers
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(1)
])
# Compile RNN model
rnn_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['accuracy'])
# Train RNN
NUM_ITERATIONS = 1
print("\nTraining neural network...")
history = rnn_model.fit(training, epochs=NUM_ITERATIONS, validation_data=testing)
print("Training complete.")
# Test RNN.
print("\nTesting on dataset...")
loss, accuracy = rnn_model.evaluate(testing) # Return test loss and test accuracy
print("Testing complete.")
# Process and print results
loss = round(loss, 3)
accuracy = round(accuracy*100, 2)
print("Test Loss: {}".format(loss))
print("Test Accuracy: {}%".format(accuracy))
# --- Interactive prediction loop ---
# NOTE(review): predict_sentiment() and interpret_prediction() are defined
# elsewhere in this file (not visible in this chunk).  Per the notes below,
# the prediction pipeline zero-pads/encodes the text and yields a score.
# Zero-pads a vector up to a target size.
# Predicts sentiment. Output will be a decimal number.
# Predictions with value over 0.5 are positive sentiments.
# Prompt repeatedly; an empty line exits the loop.
user_query = input("\nEnter a review to predict its sentiment, or enter nothing to exit the program:\n")
while(user_query != ""):
    prediction = predict_sentiment(user_query)
    sentiment = interpret_prediction(prediction)
    print("\nSentiment: {} (Value: {})".format(sentiment, prediction))
    user_query = input("\n\nEnter a review to predict its sentiment, or enter nothing to exit the program:\n")
4dee9911d375f6b557bb57e2701f998ccd07ef1c | 5,146 | py | Python | google_image_scraping_script_for_arg.py | KuoYuHong/Shihu-Cat-Image-Recognition-System | 5f184e4902fa6edb4602f01369b56ef03ad4790d | [
"MIT"
] | 1 | 2021-11-24T14:46:06.000Z | 2021-11-24T14:46:06.000Z | google_image_scraping_script_for_arg.py | KuoYuHong/Shihu-Cat-Image-Recognition-System | 5f184e4902fa6edb4602f01369b56ef03ad4790d | [
"MIT"
] | null | null | null | google_image_scraping_script_for_arg.py | KuoYuHong/Shihu-Cat-Image-Recognition-System | 5f184e4902fa6edb4602f01369b56ef03ad4790d | [
"MIT"
] | null | null | null | import selenium
from selenium import webdriver
import time
import requests
import os
from PIL import Image
import io
import hashlib
# All in same directory
DRIVER_PATH = 'chromedriver.exe'
if __name__ == '__main__':
    '''
    chromedriver.execmd
    python google_image_scraping_script_for_arg.py
    python google_image_scraping_script_for_arg.py 30
    python google_image_scraping_script_for_arg.py 50
    '''
    # CLI entry point: expects exactly two arguments after the script name,
    # i.e. sys.argv == [script, query_name, number_of_picture].
    # NOTE(review): fetch_image_urls() and persist_image() are defined
    # elsewhere in this file (not visible in this chunk).
    import sys
    import ast  # NOTE(review): unused in the code shown here
    if len(sys.argv) == 3:
        query_name = sys.argv[1]
        number_of_picture = sys.argv[2]
        print("query_name:",query_name) #str
        print("number_of_picture:",number_of_picture) #str
        # Launch Chrome with the bundled chromedriver (DRIVER_PATH above).
        wd = webdriver.Chrome(executable_path=DRIVER_PATH)
        queries = [query_name] #change your set of queries here
        for query in queries:
            wd.get('https://google.com')
            # NOTE(review): find_element_by_css_selector is removed in
            # Selenium 4 — use find_element(By.CSS_SELECTOR, ...) there.
            search_box = wd.find_element_by_css_selector('input.gLFyf')
            search_box.send_keys(query)
            links = fetch_image_urls(query,int(number_of_picture),wd) # 200 denotes no. of images you want to download
            images_path = './'
            # Download every collected image URL into the current directory.
            for i in links:
                persist_image(images_path,query,i)
        wd.quit()
    else:
        print("Error input format")
| 36.496454 | 171 | 0.621842 |
4df16cb84c883d268ef0671570a73d61fad65816 | 1,515 | py | Python | pyslowloris/utils.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 75 | 2017-06-15T05:58:02.000Z | 2022-03-31T22:59:25.000Z | pyslowloris/utils.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 8 | 2017-08-25T04:14:19.000Z | 2021-09-10T06:21:33.000Z | pyslowloris/utils.py | goasdsdkai/daas | 78ef23b254893efca22748fe619ef22648b8c1e8 | [
"MIT"
] | 32 | 2017-03-22T22:52:26.000Z | 2022-03-07T15:53:01.000Z | """
MIT License
Copyright (c) 2020 Maxim Krivich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
# Pre-compiled, case-insensitive URL validator: http/https scheme, then a
# domain name, "localhost", or a dotted-quad IP, an optional port, and an
# optional path/query.  Appears adapted from Django's URLValidator regex.
url_pattern = re.compile(
    r"^(?:http)s?://" # http:// or https://
    # domain...
    r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)"
    r"+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
    r"localhost|" # localhost...
    r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
    r"(?::\d+)?" # optional port
    r"(?:/?|[/?]\S+)$", re.IGNORECASE
)
| 36.95122 | 78 | 0.69769 |
4df1faa8f49c3cdacafcecb2f8765081676e89ad | 5,305 | py | Python | brahe/data_models/geojson.py | duncaneddy/brahe | 4a1746ef3c14211b0709de6e7e34b6f52fc0e686 | [
"MIT"
] | 14 | 2019-05-29T13:36:55.000Z | 2022-02-11T15:26:13.000Z | brahe/data_models/geojson.py | duncaneddy/brahe | 4a1746ef3c14211b0709de6e7e34b6f52fc0e686 | [
"MIT"
] | 1 | 2020-05-27T12:14:39.000Z | 2020-05-27T15:51:21.000Z | brahe/data_models/geojson.py | duncaneddy/brahe | 4a1746ef3c14211b0709de6e7e34b6f52fc0e686 | [
"MIT"
] | 2 | 2019-10-24T05:20:54.000Z | 2019-12-08T03:59:10.000Z | """The geojson module provides data model classes for initialization and storing
of GeoJSON objects.
"""
import typing
import typing_extensions
import pydantic
import numpy as np
import brahe.astro as astro
import brahe.coordinates as coords
import brahe.frames as frames
geographic_point = pydantic.conlist(float, min_items=2, max_items=3) | 35.604027 | 121 | 0.604713 |
4df2f7977ee6df4348bd5f199099edb4427af89e | 521 | py | Python | lab7/7.7.py | rikudo765/algorithms | eb78852143662bc2e42df6271e9a015cfa8ffdd1 | [
"MIT"
] | 1 | 2020-11-16T18:46:24.000Z | 2020-11-16T18:46:24.000Z | lab7/7.7.py | rikudo765/algorithms | eb78852143662bc2e42df6271e9a015cfa8ffdd1 | [
"MIT"
] | null | null | null | lab7/7.7.py | rikudo765/algorithms | eb78852143662bc2e42df6271e9a015cfa8ffdd1 | [
"MIT"
] | null | null | null | n = int(input())
lst = list(map(int, input().split()))
sort1(lst)
| 19.296296 | 39 | 0.380038 |
4df34ddd891c605f94b640242ef9b998d8ecdfb4 | 7,141 | py | Python | CORE/engines/Gudmundsson_Constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | 15 | 2017-11-08T10:03:26.000Z | 2021-12-21T07:02:44.000Z | CORE/engines/Gudmundsson_Constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | 9 | 2020-01-17T15:09:22.000Z | 2022-03-25T19:02:05.000Z | CORE/engines/Gudmundsson_Constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\Gudmundsson_Constraint.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db # type: ignore
from math import sqrt, pi
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
grossWeight = read_from_db("finalMTOW")
cruiseSpeed = read_from_db("cruiseSpeed")
ROC = read_from_db("rateOfClimb") * 3.28 * 60
vLof = read_from_db("stallSpeed") * 1.1
AR = read_from_db("AR")
cdMin = read_from_db("cdMin")
wsfromsizing = read_from_db("WS")
rhoSL = read_from_db("rhoSL")
propEff = read_from_db("propEff")
cruiseAltitude: int = 10000 # ft
gForce: float = 2.0
V_ROC: float = 80.0
groundRun: int = 900
serviceCeiling: int = 18000
wsInitial: float = 22.6 # lb/f**2
g: float = 32.174
CDto: float = 0.04
CLto: float = 0.5
groundFriction: float = 0.04
e = oswaldEff(AR)
k: float = 1 / (pi * AR * e)
write_to_db("k", k)
# dynamic pressure at altitude
rhoCruise = rhoAlt(cruiseAltitude)
# print ('air density at cruise altitude, rho = ' +str(rhoCruise))
qAltitude = 0.5 * rhoCruise * (1.688 * cruiseSpeed) ** 2
# print('dynamic pressure at altitude = ' +str(qAltitude))
# Gag Ferrar Model
def gagFerrar(bhp: float) -> float:
    """Gagg-Ferrar piston-engine altitude model.

    Divides *bhp* by the density power-lapse factor
    ``1.132 * (rhoCruise / rhoSL) - 0.132`` using the module-level densities
    ``rhoCruise`` and ``rhoSL``; presumably converts required power at the
    cruise altitude back to an equivalent sea-level rating — confirm.
    """
    normBhp = bhp / (1.132 * (rhoCruise / rhoSL) - 0.132)
    return normBhp
WS = np.arange(10, 30)
twTurn = qAltitude * ((cdMin / WS) + k * (gForce / qAltitude) ** 2 * (WS))
qROC = 0.5 * rhoSL * (V_ROC * 1.688) ** 2
Vv = ROC / 60
twROC = (Vv / (V_ROC * 1.688)) + (qROC * cdMin / WS) + (k * WS / qROC)
qVlof = 0.5 * rhoSL * (vLof * 1.688 / sqrt(2)) ** 2
twVlof = (
((vLof * 1.688) ** 2 / (2 * g * groundRun))
+ (qVlof * CDto / WS)
+ (groundFriction * (1 - (qVlof * CLto / WS)))
)
rhoCeiling = rhoAlt(serviceCeiling)
# print(rhoCeiling)
twCruise = qAltitude * cdMin * (1 / WS) + (k)
twCeiling = (1.667 / (np.sqrt((2 * WS / rhoCeiling) * sqrt(k / 3 * cdMin)))) + (
(k * cdMin / 3) * 4
)
plt.figure(1)
plt.subplot(121)
plt.plot(WS, twTurn, label="Rate of Turn")
plt.plot(WS, twROC, label="Rate of Climb")
plt.plot(WS, twVlof, label="Vlof")
plt.plot(WS, twCruise, label="Cruise")
plt.plot(WS, twCeiling, label="Ceiling")
plt.axvline(x=wsfromsizing)
plt.title(" Graph 1 \n HP/Weight ratio")
plt.legend()
# ax = plt.gca()
# ax.set_xticklabels([])
###NORMAlization
norm_twTurn = gagFerrar((grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)))
test = grossWeight * twTurn * 1.688 * cruiseSpeed / (propEff * 550)
norm_twROC = gagFerrar((grossWeight * twROC * 1.688 * V_ROC / (propEff * 550)))
norm_twVlof = gagFerrar((grossWeight * twVlof * 1.688 * vLof / (propEff * 550)))
norm_twCruise = gagFerrar(
(grossWeight * twCruise * 1.688 * cruiseSpeed / (propEff * 550))
)
norm_twCeiling = gagFerrar(
(grossWeight * twCeiling * 1.688 * cruiseSpeed / (propEff * 550))
)
plt.subplot(122)
plt.plot(WS, norm_twTurn, label="Rate of Turn")
plt.plot(WS, norm_twROC, label="Rate of Climb")
plt.plot(WS, norm_twVlof, label="Vlof")
plt.plot(WS, norm_twCruise, label="Cruise")
plt.plot(WS, norm_twCeiling, label="Ceiling")
plt.title("Graph 2 \n Normalised BHP")
plt.legend()
plt.axvline(x=wsfromsizing)
plt.tight_layout()
if __name__ == "__main__":
plt.show()
# print(find_nearest(ws, plotWS))
plotWS = read_from_db("WS")
myidx = find_nearest(WS, plotWS)
finalBHP = point()
write_to_db("finalBHP", finalBHP)
print(finalBHP, "The Final normalised BHP")
# now switch back to figure 1 and make some changes
| 37 | 88 | 0.555805 |
4df5b2217528684af4f56e2341cb113e5407f9fe | 3,988 | py | Python | libs/blocks/tests/test_variable_filter.py | dendisuhubdy/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 295 | 2015-09-25T21:15:04.000Z | 2022-01-13T01:16:18.000Z | libs/blocks/tests/test_variable_filter.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 21 | 2015-10-28T19:06:32.000Z | 2022-03-11T23:13:05.000Z | libs/blocks/tests/test_variable_filter.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 114 | 2015-09-26T21:23:02.000Z | 2021-11-19T02:36:41.000Z | from nose.tools import raises
from blocks.bricks import Bias, Linear, Logistic
from blocks.bricks.parallel import Merge
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import BIAS, FILTER, PARAMETER, OUTPUT
from theano import tensor
| 34.678261 | 73 | 0.719157 |
4df630aed0715b9f32b05663f7a43496c48ccb52 | 12,437 | py | Python | techminer/gui/comparative_analysis.py | jdvelasq/techMiner | c611d96d2f812b0890513514d9d19787a1edfe2d | [
"MIT"
] | 2 | 2020-09-25T02:42:34.000Z | 2021-08-22T11:27:58.000Z | techminer/gui/comparative_analysis.py | jdvelasq/techMiner | c611d96d2f812b0890513514d9d19787a1edfe2d | [
"MIT"
] | 1 | 2020-10-17T14:38:45.000Z | 2020-10-17T14:50:19.000Z | techminer/gui/comparative_analysis.py | jdvelasq/techMiner | c611d96d2f812b0890513514d9d19787a1edfe2d | [
"MIT"
] | 2 | 2019-10-14T18:05:25.000Z | 2021-07-17T19:28:04.000Z | from collections import Counter
import pandas as pd
import ipywidgets as widgets
import techminer.core.dashboard as dash
from techminer.core import (
CA,
Dashboard,
TF_matrix,
TFIDF_matrix,
add_counters_to_axis,
clustering,
corpus_filter,
exclude_terms,
)
# from techminer.core.params import EXCLUDE_COLS
from techminer.plots import counters_to_node_sizes, xy_clusters_plot
from techminer.core.filter_records import filter_records
###############################################################################
##
## MODEL
##
###############################################################################
###############################################################################
##
## DASHBOARD
##
###############################################################################
COLUMNS = [
"Author_Keywords_CL",
"Author_Keywords",
"Index_Keywords_CL",
"Index_Keywords",
"Keywords_CL",
]
###############################################################################
##
## EXTERNAL INTERFACE
##
###############################################################################
def comparative_analysis(
limit_to=None,
exclude=None,
years_range=None,
):
return App(
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
).run()
| 28.265909 | 88 | 0.514674 |
4df876adfaa448099ddfc3311827d0272a1fac44 | 56,425 | py | Python | WayOfTheTurtle1.0.py | BYHu-2/- | 3243d3a0ccd9144573943b00ac4364dc5c320207 | [
"MIT"
] | 2 | 2021-12-25T00:04:12.000Z | 2021-12-25T00:14:35.000Z | WayOfTheTurtle1.0.py | BYHu-2/Turtle | 3243d3a0ccd9144573943b00ac4364dc5c320207 | [
"MIT"
] | null | null | null | WayOfTheTurtle1.0.py | BYHu-2/Turtle | 3243d3a0ccd9144573943b00ac4364dc5c320207 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import qtawesome
import matplotlib.pyplot as plt
import csv
import numpy as np
import datetime
import os
def main():
    """Create the Qt application, show the main window, and block until exit.

    ``MainUI`` is the top-level widget class defined elsewhere in this file.
    ``sys.exit`` propagates the Qt event-loop return code to the shell.
    """
    app = QApplication(sys.argv)
    gui = MainUI()
    gui.show()
    sys.exit(app.exec_())
def finddatepos(date, data=None):
    """Return the index of the first row whose date column equals *date*.

    Parameters
    ----------
    date : str
        Date string to look up (as stored in column 0 of each row).
    data : list | None
        Rows to search; defaults to the module-level ``result`` table loaded
        in ``__main__`` (kept for backward compatibility with existing
        callers, which pass nothing).

    Raises
    ------
    IndexError
        If *date* is not present — the scan runs off the end of the rows,
        matching the original unbounded-scan behavior.
    """
    rows = result if data is None else data
    i = 0
    while rows[i][0] != date:
        i += 1
    return i
def calAtr(result, start_time, end_time, tr_list):
    """Average True Range (ATR) of the bars between start_time and end_time.

    Each row of ``result`` is [date, open, high, low, close].  For every bar
    inside the window the true range is also appended to ``tr_list`` as a
    ``[date, tr]`` pair (side effect kept from the original).  Returns
    ``[atr, atr_half]``: the floored mean of the window's true ranges and the
    floored half of that value.
    """
    inside = 0
    window_trs = []
    for idx in range(1, len(result) - 1):
        row, prev = result[idx], result[idx - 1]
        if row[0] == start_time:
            inside = 1
        if inside == 1:
            high, low, prev_close = float(row[2]), float(row[3]), float(prev[4])
            true_range = max(high - low, high - prev_close, prev_close - low)
            tr_list.append([row[0], true_range])
            window_trs.append(true_range)
        if row[0] == end_time:
            inside = 0
    atr = int(np.floor(np.mean(window_trs)))
    atr_half = int(np.floor(0.5 * atr))
    return [atr, atr_half]
def calDon(result, time, atr_half, Dontime=30):
    """Donchian-channel breakout levels for the bar dated ``time``.

    Scans ``result`` (rows of [date, open, high, low, close]) for ``time``,
    then takes the highest high and lowest low of the ``Dontime`` bars that
    precede it.  Returns ``[long_add_point, long_stop_loss, short_add_point,
    short_stop_loss]``, i.e. the channel edges offset by ``atr_half``;
    returns None when ``time`` is not found in the scanned range.
    """
    for pos in range(Dontime, len(result) - 1):
        if result[pos][0] != time:
            continue
        window = result[pos - Dontime:pos]
        don_open = np.max([bar[2] for bar in window])   # channel top: highest high
        don_close = np.min([bar[3] for bar in window])  # channel bottom: lowest low
        return [don_open + atr_half, don_open - atr_half,
                don_close - atr_half, don_close + atr_half]
def on_bar(date, atrtime = 10):
    """Run the turtle trading rules for the single daily bar dated ``date``.

    Looks the bar up in the module-level ``result`` table (rows of
    [date, open, high, low, close]), computes the ATR over the previous
    ``atrtime`` days and the Donchian breakout levels, then applies the
    entry / pyramiding / stop rules, updating the module-level ``cash``,
    ``position_long`` and ``position_short`` ledgers in place and printing
    each fill as "date size".

    NOTE(review): ``unit()`` and ``current_asset()`` are defined elsewhere in
    this file (not visible in this chunk); ``unit()`` appears to size one
    trading unit from the previous day's account value — confirm.
    NOTE(review): the local name ``open`` shadows the builtin.
    """
    # Locate today's row; derive yesterday's date and the ATR window start.
    i = 0
    while result[i][0] != date:
        i += 1
    yesterday = result[i-1][0]
    startatrday = result[i-atrtime][0]
    open = result[i][1]
    # NOTE(review): calAtr is invoked twice with identical arguments (and
    # appends to tr_list both times); one call would suffice.
    atr = calAtr(result, startatrday, yesterday, tr_list)[0]
    atr_half = calAtr(result, startatrday, yesterday, tr_list)[1]
    Donlst = calDon(result, date, atr_half)
    long_add_point = Donlst[0]
    long_stop_loss = Donlst[1]
    short_add_point = Donlst[2]
    short_stop_loss = Donlst[3]
    # Find today's slot in the ledgers and roll yesterday's state forward.
    date_pos = 0
    while cash[date_pos][0] != date:
        date_pos += 1
    position_long[date_pos][1] = position_long[date_pos - 1][1]
    position_short[date_pos][1] = position_short[date_pos - 1][1]
    cash[date_pos][1] = cash[date_pos - 1][1]
    if position_long[date_pos][1] == 0 and position_short[date_pos][1] == 0:
        if open > long_add_point - atr_half:
            # Flat and price opens above the Donchian upper band: enter long —
            # one unit if cash (plus commission) allows, otherwise all-in.
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday):
                position_long[date_pos][1] = unit(current_asset(yesterday),yesterday)
                print(date, '%.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
            else:
                position_long[date_pos][1] = cash[date_pos][1] / (1 + backtest_commission_ratio) / open
                print(date, '%.1f'%(cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
                cash[date_pos][1] = 0
        if open < short_add_point + atr_half:
            # Flat and price opens below the Donchian lower band: enter a short
            # of one unit (proceeds, net of commission, credited to cash).
            position_short[date_pos][1] = unit(current_asset(yesterday),yesterday)
            print(date, '%.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),yesterday)
    if position_long[date_pos][1] != 0:
        if open > long_add_point:
            # Price broke another 1/2 ATR above the upper band: pyramid the long.
            if cash[date_pos][1] >= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday):
                position_long[date_pos][1] += unit(current_asset(yesterday),yesterday)
                print(date, '%.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] -= (1 + backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
            else:
                position_long[date_pos][1] += cash[date_pos][1] / (1 + backtest_commission_ratio) / open
                print(date, '%.1f' % (cash[date_pos][1] / (1 + backtest_commission_ratio) / open))
                cash[date_pos][1] = 0
        if open < long_stop_loss:
            # Stop hit: scale the long out by one unit (or whatever is left).
            if position_long[date_pos][1] - unit(current_asset(yesterday),yesterday) >= 0:
                print(date, '%.1f'%(unit(current_asset(yesterday),yesterday)))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday),
                                                                                  yesterday)
            else:
                print(date, '%.1f' % (position_long[date_pos][1]))
                cash[date_pos][1] += (1 - backtest_commission_ratio) * position_long[date_pos][1] * open
            position_long[date_pos][1] = max(position_long[date_pos][1] - unit(current_asset(yesterday),yesterday), 0)
            # (older full-exit logic kept below, inert, for reference)
            '''print(date, '%.1f'%(position_long[date_pos][1]))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * open * position_long[date_pos][1]
            position_long[date_pos][1] = 0'''
    if position_short[date_pos][1] != 0:
        if open < short_add_point:
            # Price broke another 1/2 ATR below the lower band: pyramid the short.
            position_short[date_pos][1] += unit(current_asset(yesterday),yesterday)
            print(date, '%.1f'%(unit(current_asset(yesterday),yesterday)))
            cash[date_pos][1] += (1 - backtest_commission_ratio) * open * unit(current_asset(yesterday), yesterday)
        if open > short_stop_loss:
            # Stop hit: buy back up to one unit, limited by the open short size
            # and by the cash available after commission.
            m = min(position_short[date_pos][1] * open, open * unit(current_asset(yesterday),yesterday), cash[date_pos][1] / (1 + backtest_commission_ratio))
            print(date, '%.1f'%(m / open))
            cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            position_short[date_pos][1] = position_short[date_pos][1] - m / open
            # (older full-cover logic kept below, inert, for reference)
            '''m = position_short[date_pos][1] * open
            print(date, '%.1f'%(m / open))
            cash[date_pos][1] -= (1 + backtest_commission_ratio) * m
            position_short[date_pos][1] = position_short[date_pos][1] - m / open'''
if __name__ == '__main__':
    # Load daily OHLC bars from data.csv into `result`
    # (rows: [date, open, high, low, close]; the CSV header is skipped).
    csvFile = open("data.csv", "r")
    reader = csv.reader(csvFile)
    result = []
    for item in reader:
        # Ignore first line (CSV header)
        if reader.line_num == 1:
            continue
        result.append(
            [item[0], float(item[1]), float(item[2]), float(item[3]), float(item[4])]) # date, open, high, low, close
    csvFile.close()
    # Backtest parameters and module-level state consumed by main()/on_bar().
    initial_cash = 0
    backtest_commission_ratio = 0.0001  # proportional commission per trade
    start_time = '2021-03-01'
    end_time = '2021-04-27'
    tr_list = []  # [date, true-range] pairs appended by calAtr()
    cash = []  # per-day [date, cash] ledger indexed by on_bar()
    position_short = []  # per-day [date, size] short ledger
    position_long = []  # per-day [date, size] long ledger
    atrtime = 20  # ATR lookback window (days)
    Dontime = 30  # Donchian channel lookback window (days)
    unit_rate = 0.01
    winningRate = 0
    date = 0
    time = 0
    baseline = 0
    annualized_rate = 0
    # Plot/report accumulators; presumably populated by the GUI code — confirm.
    l_time = []
    l_asset = []
    l_index = []
    xs=[]
    l_initial = []
    main()  # launch the Qt application (blocks until the window closes)
main() | 43.504241 | 208 | 0.572955 |
4dfab55975cccc588661b8464faec98ada96eafa | 11,800 | py | Python | posthog/test/test_update_person_props.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 58 | 2020-08-26T16:26:18.000Z | 2022-03-30T05:32:23.000Z | posthog/test/test_update_person_props.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 15 | 2021-11-09T10:49:34.000Z | 2021-11-09T16:11:01.000Z | posthog/test/test_update_person_props.py | csmatar/posthog | 4587cfe18625f302726c531f06a32c18e9749e9d | [
"MIT"
] | 13 | 2020-09-08T13:27:07.000Z | 2022-03-19T17:27:10.000Z | from datetime import datetime
from django.db import connection
from posthog.models import Person
from posthog.test.base import BaseTest
# How we expect this function to behave:
# | call | value exists | call TS is ___ existing TS | previous fn | write/override
# 1| set | no | N/A | N/A | yes
# 2| set_once | no | N/A | N/A | yes
# 3| set | yes | before | set | no
# 4| set | yes | before | set_once | yes
# 5| set | yes | after | set | yes
# 6| set | yes | after | set_once | yes
# 7| set_once | yes | before | set | no
# 8| set_once | yes | before | set_once | yes
# 9| set_once | yes | after | set | no
# 10| set_once | yes | after | set_once | no
# 11| set | yes | equal | set | no
# 12| set_once | yes | equal | set | no
# 13| set | yes | equal | set_once | yes
# 14| set_once | yes | equal | set_once | no
FUTURE_TIMESTAMP = datetime(2050, 1, 1, 1, 1, 1).isoformat()
PAST_TIMESTAMP = datetime(2000, 1, 1, 1, 1, 1).isoformat()
# Refers to migration 0176_update_person_props_function
# This is a Postgres function we use in the plugin server
| 42.446043 | 107 | 0.527203 |
4dfb10a7a1f3430a5ca4e269077867482eeda87b | 762 | py | Python | setup.py | cclauss/AIF360 | 4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1 | [
"Apache-2.0"
] | null | null | null | setup.py | cclauss/AIF360 | 4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1 | [
"Apache-2.0"
] | null | null | null | setup.py | cclauss/AIF360 | 4fb4e0d3e4ed65c9b4d7a2d5238881a04cc334c1 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='aif360',
version='0.1.0',
description='IBM AI Fairness 360',
author='aif360 developers',
author_email='aif360@us.ibm.com',
url='https://github.com/IBM/AIF360',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
packages=find_packages(),
# python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <3.7',
install_requires=[
'numpy',
'scipy',
'pandas==0.23.3',
'scikit-learn',
'numba',
],
include_package_data=True,
zip_safe=False)
| 29.307692 | 83 | 0.57874 |
4dfb5ac8775c4305591fb5eb4b61c6ac65e66c47 | 390 | py | Python | src/examples/customstyle/wow_style/widgetstyle/radiobutton.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | 4 | 2021-04-12T19:30:47.000Z | 2022-02-11T18:24:16.000Z | src/examples/customstyle/wow_style/widgetstyle/radiobutton.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | src/examples/customstyle/wow_style/widgetstyle/radiobutton.py | robertkist/qtmodernredux | c7f791a1492ff855f3e4b963b8e9f20c46ba503f | [
"Apache-2.0"
] | null | null | null | radiobutton_style = '''
QRadioButton:disabled {
background: transparent;
}
QRadioButton::indicator {
background: palette(dark);
width: 8px;
height: 8px;
border: 3px solid palette(dark);
border-radius: 7px;
}
QRadioButton::indicator:checked {
background: palette(highlight);
}
QRadioButton::indicator:checked:disabled {
background: palette(midlight);
}
''' | 18.571429 | 42 | 0.697436 |
4dfbb4858f95304472fccbca8344763f96bb417e | 1,788 | py | Python | engine.py | kevioconnor/day0 | 6a72bf55dba1021850b810e647c87cb53ef86763 | [
"MIT"
] | null | null | null | engine.py | kevioconnor/day0 | 6a72bf55dba1021850b810e647c87cb53ef86763 | [
"MIT"
] | null | null | null | engine.py | kevioconnor/day0 | 6a72bf55dba1021850b810e647c87cb53ef86763 | [
"MIT"
] | null | null | null | from __future__ import annotations
import lzma, pickle
from typing import TYPE_CHECKING
from numpy import e
from tcod.console import Console
from tcod.map import compute_fov
import exceptions, render_functions
from message_log import MessageLog
if TYPE_CHECKING:
from entity import Actor
from game_map import GameMap, GameWorld
| 33.111111 | 117 | 0.657718 |
4dfbb723c6f3d56895498fae876785ec1b7ea406 | 19,132 | py | Python | pysnmp/ERI-DNX-STS1-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/ERI-DNX-STS1-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/ERI-DNX-STS1-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ERI-DNX-STS1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ERI-DNX-STS1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:51:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection")
DecisionType, LinkCmdStatus, PortStatus, LinkPortAddress, FunctionSwitch, devices, trapSequence = mibBuilder.importSymbols("ERI-DNX-SMC-MIB", "DecisionType", "LinkCmdStatus", "PortStatus", "LinkPortAddress", "FunctionSwitch", "devices", "trapSequence")
eriMibs, = mibBuilder.importSymbols("ERI-ROOT-SMI", "eriMibs")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Integer32, Gauge32, IpAddress, Counter64, ObjectIdentity, iso, Unsigned32, MibIdentifier, Counter32, Bits, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Integer32", "Gauge32", "IpAddress", "Counter64", "ObjectIdentity", "iso", "Unsigned32", "MibIdentifier", "Counter32", "Bits", "TimeTicks", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
eriDNXSts1MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 644, 3, 4))
if mibBuilder.loadTexts: eriDNXSts1MIB.setLastUpdated('200204080000Z')
if mibBuilder.loadTexts: eriDNXSts1MIB.setOrganization('Eastern Research, Inc.')
dnxSTS1 = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3))
sts1Config = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1))
sts1Diag = MibIdentifier((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2))
sts1MapperConfigTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1), )
if mibBuilder.loadTexts: sts1MapperConfigTable.setStatus('current')
sts1MapperConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1MapperAddr"))
if mibBuilder.loadTexts: sts1MapperConfigEntry.setStatus('current')
sts1MapperAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperAddr.setStatus('current')
sts1MapperResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperResource.setStatus('current')
sts1VtGroup1 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 3), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup1.setStatus('current')
sts1VtGroup2 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 4), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup2.setStatus('current')
sts1VtGroup3 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 5), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup3.setStatus('current')
sts1VtGroup4 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 6), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup4.setStatus('current')
sts1VtGroup5 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 7), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup5.setStatus('current')
sts1VtGroup6 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 8), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup6.setStatus('current')
sts1VtGroup7 = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 9), VtGroupType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtGroup7.setStatus('current')
sts1VtMapping = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("standardVT", 0), ("sequencialFrm", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1VtMapping.setStatus('current')
sts1Timing = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("internal", 0), ("ec1-Line", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1Timing.setStatus('current')
sts1ShortCable = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 12), FunctionSwitch()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1ShortCable.setStatus('current')
sts1MaprCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 1, 1, 13), LinkCmdStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1MaprCmdStatus.setStatus('current')
sts1T1E1LinkConfigTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2), )
if mibBuilder.loadTexts: sts1T1E1LinkConfigTable.setStatus('current')
sts1T1E1LinkConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1T1E1CfgLinkAddr"))
if mibBuilder.loadTexts: sts1T1E1LinkConfigEntry.setStatus('current')
sts1T1E1CfgLinkAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1CfgLinkAddr.setStatus('current')
sts1T1E1CfgResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1CfgResource.setStatus('current')
sts1T1E1CfgLinkName = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1CfgLinkName.setStatus('current')
sts1T1E1Status = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 4), PortStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Status.setStatus('current')
sts1T1E1Clear = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("disabled", 0), ("framed", 1), ("unframed", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Clear.setStatus('current')
sts1T1E1Framing = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(5, 6, 7))).clone(namedValues=NamedValues(("t1Esf", 5), ("t1D4", 6), ("t1Unframed", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1Framing.setStatus('current')
sts1T1E1NetLoop = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 7), FunctionSwitch()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1NetLoop.setStatus('current')
sts1T1E1YelAlrm = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 8), DecisionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1YelAlrm.setStatus('current')
sts1T1E1RecoverTime = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(3, 10, 15))).clone(namedValues=NamedValues(("timeout-3-secs", 3), ("timeout-10-secs", 10), ("timeout-15-secs", 15)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1RecoverTime.setStatus('current')
sts1T1E1EsfFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("att-54016", 0), ("ansi-t1-403", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1EsfFormat.setStatus('current')
sts1T1E1IdleCode = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("busy", 0), ("idle", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1IdleCode.setStatus('current')
sts1T1E1CfgCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 12), LinkCmdStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1T1E1CfgCmdStatus.setStatus('current')
sts1T1E1Gr303Facility = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 1, 2, 1, 13), DecisionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1T1E1Gr303Facility.setStatus('obsolete')
sts1MapperStatusTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1), )
if mibBuilder.loadTexts: sts1MapperStatusTable.setStatus('current')
sts1MapperStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1MapperStatusAddr"))
if mibBuilder.loadTexts: sts1MapperStatusEntry.setStatus('current')
sts1MapperStatusAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusAddr.setStatus('current')
sts1MapperStatusResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusResource.setStatus('current')
sts1MapperStatusState = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 32, 256, 512, 1024, 8192, 131072, 2147483647))).clone(namedValues=NamedValues(("ok", 0), ("lof", 32), ("lop", 256), ("oof", 512), ("ais", 1024), ("los", 8192), ("lomf", 131072), ("errors", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusState.setStatus('current')
sts1MapperStatusLOSErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOSErrs.setStatus('current')
sts1MapperStatusOOFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusOOFErrs.setStatus('current')
sts1MapperStatusLOFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOFErrs.setStatus('current')
sts1MapperStatusLOPtrErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusLOPtrErrs.setStatus('current')
sts1MapperStatusAISErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusAISErrs.setStatus('current')
sts1MapperStatusMultiFErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusMultiFErrs.setStatus('current')
sts1MapperStatusRxTraceErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusRxTraceErrs.setStatus('current')
sts1MapperStatusTotErrSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1MapperStatusTotErrSecs.setStatus('current')
sts1MapperStatusCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 14, 101, 114, 200, 206, 500, 501, 502))).clone(namedValues=NamedValues(("ready-for-command", 0), ("update", 1), ("clearErrors", 14), ("update-successful", 101), ("clear-successful", 114), ("err-general-test-error", 200), ("err-field-cannot-be-set", 206), ("err-snmp-parse-failed", 500), ("err-invalid-snmp-type", 501), ("err-invalid-snmp-var-size", 502)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1MapperStatusCmdStatus.setStatus('current')
sts1LIUTable = MibTable((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2), )
if mibBuilder.loadTexts: sts1LIUTable.setStatus('current')
sts1LIUEntry = MibTableRow((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1), ).setIndexNames((0, "ERI-DNX-STS1-MIB", "sts1LIUAddr"))
if mibBuilder.loadTexts: sts1LIUEntry.setStatus('current')
sts1LIUAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 1), LinkPortAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUAddr.setStatus('current')
sts1LIUResource = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUResource.setStatus('current')
sts1LIUBertState = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(45, 44))).clone(namedValues=NamedValues(("off", 45), ("liu-bert", 44)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIUBertState.setStatus('current')
sts1LIUBertErrSecs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUBertErrSecs.setStatus('current')
sts1LIUBertDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUBertDuration.setStatus('current')
sts1LIULoopType = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 39))).clone(namedValues=NamedValues(("off", 0), ("mapper", 1), ("liu", 39)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIULoopType.setStatus('current')
sts1LIUDigitalErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUDigitalErrs.setStatus('current')
sts1LIUAnalogErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUAnalogErrs.setStatus('current')
sts1LIUExcessZeros = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUExcessZeros.setStatus('current')
sts1LIUCodingViolationErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUCodingViolationErrs.setStatus('current')
sts1LIUPRBSErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sts1LIUPRBSErrs.setStatus('current')
sts1LIUCmdStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 2, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 14, 101, 114, 200, 202, 203, 205, 206, 500, 501, 502))).clone(namedValues=NamedValues(("ready-for-command", 0), ("update", 1), ("clearErrors", 14), ("update-successful", 101), ("clear-successful", 114), ("err-general-test-error", 200), ("err-invalid-loop-type", 202), ("err-invalid-bert-type", 203), ("err-test-in-progress", 205), ("err-field-cannot-be-set", 206), ("err-snmp-parse-failed", 500), ("err-invalid-snmp-type", 501), ("err-invalid-snmp-var-size", 502)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sts1LIUCmdStatus.setStatus('current')
dnxSTS1Enterprise = ObjectIdentity((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0))
if mibBuilder.loadTexts: dnxSTS1Enterprise.setStatus('current')
sts1MapperConfigTrap = NotificationType((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0, 1)).setObjects(("ERI-DNX-SMC-MIB", "trapSequence"), ("ERI-DNX-STS1-MIB", "sts1MapperAddr"), ("ERI-DNX-STS1-MIB", "sts1MaprCmdStatus"))
if mibBuilder.loadTexts: sts1MapperConfigTrap.setStatus('current')
sts1T1E1ConfigTrap = NotificationType((1, 3, 6, 1, 4, 1, 644, 2, 4, 2, 3, 0, 2)).setObjects(("ERI-DNX-SMC-MIB", "trapSequence"), ("ERI-DNX-STS1-MIB", "sts1T1E1CfgLinkAddr"), ("ERI-DNX-STS1-MIB", "sts1T1E1CfgCmdStatus"))
if mibBuilder.loadTexts: sts1T1E1ConfigTrap.setStatus('current')
mibBuilder.exportSymbols("ERI-DNX-STS1-MIB", sts1MapperStatusCmdStatus=sts1MapperStatusCmdStatus, sts1MapperStatusTotErrSecs=sts1MapperStatusTotErrSecs, sts1MapperStatusEntry=sts1MapperStatusEntry, PYSNMP_MODULE_ID=eriDNXSts1MIB, sts1T1E1YelAlrm=sts1T1E1YelAlrm, sts1Config=sts1Config, sts1VtGroup5=sts1VtGroup5, sts1MapperStatusState=sts1MapperStatusState, sts1LIUDigitalErrs=sts1LIUDigitalErrs, sts1Diag=sts1Diag, sts1LIUBertDuration=sts1LIUBertDuration, sts1T1E1NetLoop=sts1T1E1NetLoop, sts1MapperResource=sts1MapperResource, sts1ShortCable=sts1ShortCable, sts1MapperStatusAISErrs=sts1MapperStatusAISErrs, sts1LIUCodingViolationErrs=sts1LIUCodingViolationErrs, sts1VtGroup1=sts1VtGroup1, sts1MapperAddr=sts1MapperAddr, sts1LIUResource=sts1LIUResource, sts1LIUBertState=sts1LIUBertState, dnxSTS1=dnxSTS1, sts1T1E1CfgLinkName=sts1T1E1CfgLinkName, sts1LIULoopType=sts1LIULoopType, sts1T1E1ConfigTrap=sts1T1E1ConfigTrap, sts1T1E1CfgResource=sts1T1E1CfgResource, sts1LIUAnalogErrs=sts1LIUAnalogErrs, sts1MapperStatusLOPtrErrs=sts1MapperStatusLOPtrErrs, sts1LIUAddr=sts1LIUAddr, sts1VtGroup6=sts1VtGroup6, sts1T1E1Status=sts1T1E1Status, sts1VtMapping=sts1VtMapping, VtGroupType=VtGroupType, sts1VtGroup3=sts1VtGroup3, sts1T1E1IdleCode=sts1T1E1IdleCode, sts1LIUBertErrSecs=sts1LIUBertErrSecs, sts1VtGroup4=sts1VtGroup4, sts1MapperConfigTable=sts1MapperConfigTable, sts1MapperStatusAddr=sts1MapperStatusAddr, sts1T1E1Gr303Facility=sts1T1E1Gr303Facility, sts1Timing=sts1Timing, sts1MapperStatusOOFErrs=sts1MapperStatusOOFErrs, sts1MapperStatusResource=sts1MapperStatusResource, sts1VtGroup2=sts1VtGroup2, eriDNXSts1MIB=eriDNXSts1MIB, sts1T1E1Framing=sts1T1E1Framing, sts1MapperStatusLOFErrs=sts1MapperStatusLOFErrs, sts1LIUTable=sts1LIUTable, sts1T1E1LinkConfigTable=sts1T1E1LinkConfigTable, sts1MapperStatusMultiFErrs=sts1MapperStatusMultiFErrs, sts1LIUExcessZeros=sts1LIUExcessZeros, sts1VtGroup7=sts1VtGroup7, sts1MapperStatusLOSErrs=sts1MapperStatusLOSErrs, sts1T1E1CfgLinkAddr=sts1T1E1CfgLinkAddr, 
sts1T1E1RecoverTime=sts1T1E1RecoverTime, dnxSTS1Enterprise=dnxSTS1Enterprise, sts1MaprCmdStatus=sts1MaprCmdStatus, sts1T1E1EsfFormat=sts1T1E1EsfFormat, sts1MapperStatusRxTraceErrs=sts1MapperStatusRxTraceErrs, sts1MapperConfigEntry=sts1MapperConfigEntry, sts1T1E1LinkConfigEntry=sts1T1E1LinkConfigEntry, sts1LIUCmdStatus=sts1LIUCmdStatus, sts1MapperConfigTrap=sts1MapperConfigTrap, sts1LIUEntry=sts1LIUEntry, sts1LIUPRBSErrs=sts1LIUPRBSErrs, sts1T1E1CfgCmdStatus=sts1T1E1CfgCmdStatus, sts1MapperStatusTable=sts1MapperStatusTable, sts1T1E1Clear=sts1T1E1Clear)
| 127.546667 | 2,554 | 0.744041 |
4dfc7fdfe3108af912d30eab1c90b722d5d0ec3d | 357 | py | Python | friday/models/__init__.py | alexa-infra/friday | 297f9bfd94e88490d53e460c93727c399b2efcb2 | [
"MIT"
] | 1 | 2019-03-17T08:11:18.000Z | 2019-03-17T08:11:18.000Z | friday/models/__init__.py | alexa-infra/friday | 297f9bfd94e88490d53e460c93727c399b2efcb2 | [
"MIT"
] | null | null | null | friday/models/__init__.py | alexa-infra/friday | 297f9bfd94e88490d53e460c93727c399b2efcb2 | [
"MIT"
] | null | null | null | # flake8: noqa
# pylint: disable=cyclic-import
from .base import db, Model, metadata
from .link import Link
from .user import User
from .event import Event, Repeat
from .bookmark import Bookmark
from .tag import Tag
from .doc import Doc, DocTag
from .recipe import Recipe, RecipeImage
from .pagination import paginate, Pagination
from .todo import TodoItem
| 27.461538 | 44 | 0.792717 |
4dfd222e1995b07a6acae65ab8a9083933dc5471 | 632 | py | Python | nqs_tf/models/ffnn.py | ameya1101/neural-quantum-states | 2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af | [
"MIT"
] | null | null | null | nqs_tf/models/ffnn.py | ameya1101/neural-quantum-states | 2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af | [
"MIT"
] | null | null | null | nqs_tf/models/ffnn.py | ameya1101/neural-quantum-states | 2ab4f970e4cd7ed2a4ed3ebfdfe66bab396c11af | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from activations.activations import tan_sigmoid, exponential, ReLU
| 30.095238 | 67 | 0.683544 |
15005a003729bb6329d26f74028fc03fd8df4427 | 3,495 | py | Python | examples/other/text_frontend/test_g2p.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null | examples/other/text_frontend/test_g2p.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null | examples/other/text_frontend/test_g2p.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
from pathlib import Path
from parakeet.frontend.zh_frontend import Frontend as zhFrontend
from parakeet.utils.error_rate import word_errors
SILENCE_TOKENS = {"sp", "sil", "sp1", "spl"}
if __name__ == "__main__":
main()
| 35.30303 | 77 | 0.640343 |
15011a09f8a6b93bb0cb155a2b3d2cf4e30e89b7 | 530 | py | Python | data_split.py | DataXujing/ExtremeNet-Pytorch | fc8bf91cb748c144e85d2de271aea117ea54e808 | [
"BSD-3-Clause"
] | 9 | 2020-01-15T05:54:54.000Z | 2021-12-08T06:01:37.000Z | data_split.py | DataXujing/ExtremeNet-Pytorch | fc8bf91cb748c144e85d2de271aea117ea54e808 | [
"BSD-3-Clause"
] | 3 | 2020-12-01T10:26:19.000Z | 2021-01-20T07:51:47.000Z | data_split.py | DataXujing/ExtremeNet-Pytorch | fc8bf91cb748c144e85d2de271aea117ea54e808 | [
"BSD-3-Clause"
] | 3 | 2020-03-31T14:40:08.000Z | 2021-02-22T07:49:34.000Z |
# VOC
import os
import random
import shutil
trainval_percent = 0.1
train_percent = 0.9
imgfilepath = '../myData/JPEGImages' #
total_img = os.listdir(imgfilepath)
sample_num = len(total_img)
trains = random.sample(total_img,int(sample_num*train_percent))
for file in total_img:
if file in trains:
shutil.copy(os.path.join(imgfilepath,file),"./myData/coco/images/train/"+file)
else:
shutil.copy(os.path.join(imgfilepath,file),"./myData/coco/images/val/"+file)
print(file)
| 17.096774 | 86 | 0.711321 |
15035bfbd1a02ccbee3c988cf9c68e7e783016d5 | 3,104 | py | Python | sciunit/models/examples.py | russelljjarvis/sciun | f8f6ede84299dc700afe94b07ae4e98f87a19116 | [
"MIT"
] | 1 | 2020-05-28T00:35:23.000Z | 2020-05-28T00:35:23.000Z | sciunit/models/examples.py | ChihweiLHBird/sciunit | f5669d165fa505c3a17ac17af3d3c78aafd44ae2 | [
"MIT"
] | 1 | 2020-12-29T04:28:57.000Z | 2020-12-29T04:28:57.000Z | sciunit/models/examples.py | russelljjarvis/sciunit | f8f6ede84299dc700afe94b07ae4e98f87a19116 | [
"MIT"
] | null | null | null | """Example SciUnit model classes."""
import random
from sciunit.models import Model
from sciunit.capabilities import ProducesNumber
from sciunit.utils import class_intern, method_cache
from sciunit.utils import method_memoize # Decorator for caching of capability method results.
from typing import Union
################################################################
# Here are several examples of caching and sharing can be used
# to reduce the computational load of testing.
################################################################
| 30.732673 | 171 | 0.661727 |
1504d1248cc2e761c3fb76bb1b97319d6ca7d7fb | 140 | py | Python | semantic/semantic/model/model.py | VladimirSiv/semantic-search-system | 96b6581f191aacb1157b1408b2726e317ddc2c49 | [
"MIT"
] | 1 | 2021-07-01T08:53:46.000Z | 2021-07-01T08:53:46.000Z | semantic/semantic/model/model.py | VladimirSiv/semantic-search-system | 96b6581f191aacb1157b1408b2726e317ddc2c49 | [
"MIT"
] | null | null | null | semantic/semantic/model/model.py | VladimirSiv/semantic-search-system | 96b6581f191aacb1157b1408b2726e317ddc2c49 | [
"MIT"
] | 1 | 2021-12-29T01:18:38.000Z | 2021-12-29T01:18:38.000Z | from sentence_transformers import SentenceTransformer
from semantic.config import CONFIG
model = SentenceTransformer(CONFIG["model_name"])
| 28 | 53 | 0.857143 |
1504effc59c426c8cdd37004ed34fbfb801a2d4e | 8,619 | py | Python | utils/models.py | miladalipour99/time_series_augmentation | 3c314468df689a70e84ae6b433f9cdf5bae63400 | [
"Apache-2.0"
] | 140 | 2020-04-21T05:01:42.000Z | 2022-03-30T20:03:21.000Z | utils/models.py | miladalipour99/time_series_augmentation | 3c314468df689a70e84ae6b433f9cdf5bae63400 | [
"Apache-2.0"
] | 5 | 2021-06-08T01:43:46.000Z | 2021-12-22T11:37:28.000Z | utils/models.py | miladalipour99/time_series_augmentation | 3c314468df689a70e84ae6b433f9cdf5bae63400 | [
"Apache-2.0"
] | 32 | 2020-04-26T14:00:58.000Z | 2022-03-09T01:25:32.000Z | from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input
from tensorflow.keras.layers import MaxPooling1D, Conv1D
from tensorflow.keras.layers import LSTM, Bidirectional
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling1D, Permute, concatenate, Activation, add
import numpy as np
import math
| 31.922222 | 257 | 0.647987 |
1504fcdc48e346e97fc1b686d7489c610536fa41 | 2,468 | py | Python | ai_flow/test/util/test_sqlalchemy_db.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | ai_flow/test/util/test_sqlalchemy_db.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | ai_flow/test/util/test_sqlalchemy_db.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | # Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
import sqlalchemy
from ai_flow.store.db.base_model import base
from ai_flow.util import sqlalchemy_db
SQLITE_FILE = 'ai_flow.db'
TEST_URL = 'sqlite:///ai_flow.db'
if __name__ == '__main__':
unittest.main()
| 29.73494 | 67 | 0.724878 |
1506feffa85f0e03250b9a11fac052405432fbe0 | 628 | py | Python | test.py | blodzbyte/isEven | 18e42cfdad052d34318900fdd91167a533b52210 | [
"MIT"
] | 44 | 2020-03-11T16:44:41.000Z | 2022-03-16T07:55:24.000Z | test.py | blodzbyte/isEven | 18e42cfdad052d34318900fdd91167a533b52210 | [
"MIT"
] | 9 | 2020-03-11T21:07:01.000Z | 2021-07-08T18:49:23.000Z | test.py | blodzbyte/isEven | 18e42cfdad052d34318900fdd91167a533b52210 | [
"MIT"
] | 18 | 2020-03-11T20:03:50.000Z | 2021-07-22T21:40:00.000Z | #!/usr/bin/env python3
from isEven import isEven
if __name__ == '__main__':
main()
| 29.904762 | 80 | 0.517516 |
15073013e66266b93b368bf7d20e3350da16c0c6 | 1,139 | py | Python | comm.py | thedognexttothetrashcan/spi_tmall | 021dc9a6a23841373000a5f09ca300abd376ad15 | [
"Apache-2.0"
] | null | null | null | comm.py | thedognexttothetrashcan/spi_tmall | 021dc9a6a23841373000a5f09ca300abd376ad15 | [
"Apache-2.0"
] | null | null | null | comm.py | thedognexttothetrashcan/spi_tmall | 021dc9a6a23841373000a5f09ca300abd376ad15 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
# encoding=utf-8
import os
import datetime,time
from selenium import webdriver
import config
import threading
import numpy as np
#Create Threading Pool
| 21.903846 | 73 | 0.637401 |
15077392ea3f2519132c06a08d94b11524ea1c19 | 1,584 | py | Python | sherlockpipe/objectinfo/preparer/LightcurveBuilder.py | LuisCerdenoMota/SHERLOCK | 5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1 | [
"MIT"
] | null | null | null | sherlockpipe/objectinfo/preparer/LightcurveBuilder.py | LuisCerdenoMota/SHERLOCK | 5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1 | [
"MIT"
] | null | null | null | sherlockpipe/objectinfo/preparer/LightcurveBuilder.py | LuisCerdenoMota/SHERLOCK | 5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1 | [
"MIT"
] | null | null | null | import re
from abc import ABC, abstractmethod
from sherlockpipe.star.EpicStarCatalog import EpicStarCatalog
from sherlockpipe.star.KicStarCatalog import KicStarCatalog
from sherlockpipe.star.TicStarCatalog import TicStarCatalog
| 38.634146 | 91 | 0.674874 |
1507c96d9d4f256bc65da807cd5af86c8c25fb94 | 6,371 | py | Python | dft/dft-hartree-hydrogen.py | marvinfriede/projects | 7050cd76880c8ff0d9de17b8676e82f1929a68e0 | [
"MIT"
] | null | null | null | dft/dft-hartree-hydrogen.py | marvinfriede/projects | 7050cd76880c8ff0d9de17b8676e82f1929a68e0 | [
"MIT"
] | 3 | 2021-04-14T20:15:26.000Z | 2021-04-14T20:20:54.000Z | dft/dft-hartree-hydrogen.py | marvinfriede/projects | 7050cd76880c8ff0d9de17b8676e82f1929a68e0 | [
"MIT"
] | null | null | null | #!/bin/env python3
# coding: utf8
'''
My implementation of DFT Assignment 5.1: Hartree energy for H-atom GS
Taught by Ren Wirnata in 2019/2020.
Links:
https://tu-freiberg.de/fakultaet2/thph/lehre/density-functional-theory
https://github.com/PandaScience/teaching-resources
This script uses the last assignment's code to determine a solution of the
radial Schrdinger equation for the hydrogen ground state (n=1, l=0). After
normalizing, the Hartree potential energy w(r) = r*vh(r) is computed in a
second "integration" step and numerically integrated to the Hartree energy
(~0.3125 Ha). For hydrogen, the homogeneous solution w_hom(r) = beta * r
is not required in order to match the boundary condition (--> beta = 0).
Note, that the integration limits (tmin, tmax) and step size (h) need to be
identical for solve_rseq() and solve_poisson() or you must use interpolated
versions of the functions w(r) and u(r) when computing the Hartree energy.
Further, tmin for solve_poisson() should not be smaller than tmin for
solve_rseq(), because extrapolating u(r) beyond the computed data points may
result in errors.
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp, trapz
from scipy.interpolate import interp1d
nsteps = 10000
rmin = 0.000001
rmax = 20
def secant(f, x1=-12345, x2=6789, maxiter=10000, tol=1e-10):
"""secant method; x1 and x2 are crucial for finding the desired root"""
for itr in range(maxiter):
xnew = x2 - (x2 - x1) / (f(x2) - f(x1)) * f(x2)
if abs(xnew - x2) < tol:
break
x1 = x2
x2 = xnew
else:
print("Calculation exceeded maximum number of iterations!")
exit()
return xnew, itr
def trapezoidal(f, a, b, n=10000):
"""trapez method for numerical integration"""
s = 0.0
h = (b - a) / n
for i in range(0, n):
s += f(a + i * h)
return h * (s + 0.5 * (f(a) + f(b)))
def rad_seq(t, y, energy):
"""returns radial SEQ as system of two 1st order differential equations"""
# input: y = [y1, y2]; return y = [y1', y2']
# y1' = y2; y2' = (...)*y1
return [y[1], (- 2 * (1 / t + energy)) * y[0]]
def initValues(r):
"""getting initial values for numeric intergration from correct solution"""
u = 2 * r * np.exp(-r)
uPrime = (1 - r) * 2 * np.exp(-r)
return [u, uPrime]
def solve_rad_seq(energy):
"""wrapper for ODE integration; energy and l as parameter, integration from
rmax to rmin (inwards)"""
sol = solve_ivp(
lambda t, y: rad_seq(t, y, energy),
t_span=[rmax, rmin],
t_eval=np.linspace(rmax, rmin, nsteps),
y0=initValues(rmax))
u = sol.y[0]
r = sol.t
return u[::-1], r[::-1]
def u0(energy):
"""get first value of integrated Schrdinger equation; since the array is
reversed, u[0] corresponds to the u-value at r = 0 (y-interscetion); different
energies are passed in by secant method"""
u, r = solve_rad_seq(energy)
return u[0]
def normalize(energy):
"""integrating with calculated energy eigenvalue and normalization"""
u, r = solve_rad_seq(energy)
norm = trapz(u * u, r)
u_norm = u / np.sqrt(norm)
return u_norm, r, norm
def poisson(t, y, u):
"""returns poisson equation w''(t) = - u(t) / t as system of two 1st order
differential equations"""
# input: y = [y1, y2]; return y = [y1', y2']
# y1' = y2; y2' = - u(t) / t
return [y[1], -u(t) ** 2 / t]
def solve_poisson(f_int):
"""solve radial poisson equation; input is u(r) from interpolation"""
sol = solve_ivp(
lambda t, y: poisson(t, y, f_int),
t_span=[rmin, rmax],
t_eval=np.linspace(rmin, rmax, nsteps),
y0=[0, 1])
return sol.y[0], sol.t
if __name__ == "__main__":
main()
| 30.777778 | 86 | 0.644169 |
1507f85202e8ecdff0fe986b123a48f1bb2bac41 | 18,714 | py | Python | workflow & analyses notebooks/fukushima_telomere_methods.py | Jared-Luxton/Fukushima-Nuclear-Disaster-Humans | 1cb84f63172005f3bd8947d2bca041deaeec90e8 | [
"MIT"
] | null | null | null | workflow & analyses notebooks/fukushima_telomere_methods.py | Jared-Luxton/Fukushima-Nuclear-Disaster-Humans | 1cb84f63172005f3bd8947d2bca041deaeec90e8 | [
"MIT"
] | null | null | null | workflow & analyses notebooks/fukushima_telomere_methods.py | Jared-Luxton/Fukushima-Nuclear-Disaster-Humans | 1cb84f63172005f3bd8947d2bca041deaeec90e8 | [
"MIT"
] | 1 | 2021-05-23T22:06:17.000Z | 2021-05-23T22:06:17.000Z | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from difflib import SequenceMatcher
import seaborn as sns
from statistics import mean
from ast import literal_eval
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from pygam import LinearGAM, s, l, f
from matplotlib import lines
import six
def extract_boar_teloFISH_as_list(path):
"""
FUNCTION FOR PULLING KELLY'S TELOFISH DATA FOR 40 BOARS into a LIST.. TO BE MADE INTO A DATAFRAME & JOINED W/
MAIN DATAFRAME if possible
These excel files take forever to load.. the objective here is to synthesize all the excel files for
telomere FISH data into one dataframe, then save that dataframe to csv file to be retrieved later
loading one whole csv file containing all the data will be much, much faster than loading the parts of the whole
Along the way, we'll normalize the teloFISH data using controls internal to each excel file
"""
boar_teloFISH_list = []
for file in os.scandir(path):
if 'Hyb' in file.name:
print(f'Handling {file.name}...')
full_name = path + file.name
# making a dict of excel sheets, where KEY:VALUE pairs are SAMPLE ID:TELO DATA
telo_excel_dict = pd.read_excel(full_name, sheet_name=None, skiprows=4, usecols=[3], nrows=5000)
if 'Telomere Template' in telo_excel_dict.keys():
del telo_excel_dict['Telomere Template']
excel_file_list = []
for sample_id, telos in telo_excel_dict.items():
telos_cleaned = clean_individ_telos(telos)
if sample_id != 'Control':
excel_file_list.append([sample_id, telos_cleaned.values, np.mean(telos_cleaned)])
elif sample_id == 'Control':
control_value = np.mean(telos_cleaned)
#normalize teloFISH values by control value
for sample in excel_file_list:
sample_data = sample
#normalize individual telos
sample_data[1] = np.divide(sample_data[1], control_value)
#normalize telo means
sample_data[2] = np.divide(sample_data[2], control_value)
boar_teloFISH_list.append(sample_data)
print('Finished collecting boar teloFISH data')
return boar_teloFISH_list
# elif hue == 'Sex' and col == 'Sex':
# fig.suptitle(f'{x} vs. {y}\nper Sex in Fukushima Wild Boar', fontsize=16, weight='bold')
# fig.legend(fontsize='large')
# ax.savefig(f"../graphs/{x} vs {y} per sex.png", dpi=400)
def linear_regression_scores_X_y(df, y, y_name, dose_types):
"""
specifically for EDA
"""
for Xn in dose_types:
features_list = [[Xn], [Xn, 'Age (months)'], [Xn, 'Age (months)', 'encoded sex']]
for features in features_list:
X = df[features].values.reshape(-1, len(features))
fit_lm = LinearRegression().fit(X, y)
print(f'OLS | {features} vs. {y_name} --> R2: {fit_lm.score(X, y):.4f}')
print('')
return fit_lm
| 35.850575 | 125 | 0.589879 |
1508aa76e743b64f436cbb0a8c19cf6751c48d1b | 4,684 | py | Python | src/xia2/cli/report.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 10 | 2015-10-30T06:36:55.000Z | 2021-12-10T20:06:22.000Z | src/xia2/cli/report.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 528 | 2015-11-24T08:20:12.000Z | 2022-03-21T21:47:29.000Z | src/xia2/cli/report.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 14 | 2016-03-15T22:07:03.000Z | 2020-12-14T07:13:35.000Z | import json
import os
import sys
from collections import OrderedDict
import iotbx.phil
import xia2.Handlers.Streams
from dials.util.options import OptionParser
from jinja2 import ChoiceLoader, Environment, PackageLoader
from xia2.Modules.Report import Report
from xia2.XIA2Version import Version
phil_scope = iotbx.phil.parse(
"""\
title = 'xia2 report'
.type = str
prefix = 'xia2'
.type = str
log_include = None
.type = path
include scope xia2.Modules.Analysis.phil_scope
json {
indent = None
.type = int(value_min=0)
}
""",
process_includes=True,
)
help_message = """
"""
| 28.216867 | 87 | 0.637916 |
150bff7433b6fabe00d05feee353f17bc33f7d36 | 757 | py | Python | minoan_project/minoan_project/urls.py | mtzirkel/minoan | 3eadeb1f73acf261e2f550642432ea5c25557ecb | [
"MIT"
] | null | null | null | minoan_project/minoan_project/urls.py | mtzirkel/minoan | 3eadeb1f73acf261e2f550642432ea5c25557ecb | [
"MIT"
] | null | null | null | minoan_project/minoan_project/urls.py | mtzirkel/minoan | 3eadeb1f73acf261e2f550642432ea5c25557ecb | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
from . import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='base.html')),
url(r'^admin/', include(admin.site.urls)),
#login
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
#home
url(r'^home/$', views.home),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 27.035714 | 89 | 0.698811 |
150c07692f09dbc4c2bc2f82c96435eb48b056d8 | 324 | py | Python | algorithm/__init__.py | sirCamp/bioinformatics | 2609044c57eba1097263829f9db579cd1825b8bb | [
"MIT"
] | null | null | null | algorithm/__init__.py | sirCamp/bioinformatics | 2609044c57eba1097263829f9db579cd1825b8bb | [
"MIT"
] | null | null | null | algorithm/__init__.py | sirCamp/bioinformatics | 2609044c57eba1097263829f9db579cd1825b8bb | [
"MIT"
] | null | null | null | from algorithm.InsertionLengthAlgorithm import InsertionLengthAlgorithm
from algorithm.PhysicalCoverageAlgorithm import PhysicalCoverageAlgorithm
from algorithm.SequenceCoverageAlgorithm import SequenceCoverageAlgorithm
from algorithm.CigarAlgorithm import CigarAlgorithm
from algorithm.KmersAlgorithm import KmersAlgorithm
| 54 | 73 | 0.92284 |
150e69b2f9539045223d00d448f50c262f488903 | 1,874 | py | Python | attackMain.py | saurabhK99/substitution-cipher | dcf69cd4866ce7408eda6faf03ddd9b601bc3fec | [
"MIT"
] | null | null | null | attackMain.py | saurabhK99/substitution-cipher | dcf69cd4866ce7408eda6faf03ddd9b601bc3fec | [
"MIT"
] | null | null | null | attackMain.py | saurabhK99/substitution-cipher | dcf69cd4866ce7408eda6faf03ddd9b601bc3fec | [
"MIT"
] | null | null | null | from tkinter import *
from attack import *
#calls letter frequency attack
#defining main window
root = Tk()
root.title('Letter Frequency Attack')
root.configure(
background='#221b1b',
)
root.option_add('*Font', 'helvatica 12')
root.option_add('*Foreground', 'whitesmoke')
root.option_add('*Background', '#221b1b')
root.option_add('*Entry.HighlightColor', 'whitesmoke')
#key value pairs for radio buttons
types = [
('MONOALPHABETIC_CIPHER', 'MONOALPHABETIC_CIPHER'),
('ADDITIVE_CIPHER', 'ADDITIVE_CIPHER')
]
#variable to store current selection of radio button
attackOn= StringVar()
attackOn.set('MONOALPHABETIC_CIPHER')
Label(root, text='ATTACK ON').grid(row=0, column=0, padx=20)
#radio buttons
for i in range(2):
Radiobutton(
root,
text=types[i][0],
value=types[i][1],
variable=attackOn,
highlightthickness=0,
activebackground='#221b1b',
activeforeground='whitesmoke'
).grid(
row=0,
column=i+1,
padx=20,
pady=20
)
#label to show the result
answer = Label(root, text='ANSWER HERE', wraplength=700, justify=CENTER)
answer.grid(row=1, column=0, columnspan=3, pady=20)
#entry widget to input cipher text to crack
Label(root, text='CIPHER TXT').grid(row=6, column=0)
cipherTxt = Entry(root)
cipherTxt.grid(row=6, column=1, columnspan=2, pady=20)
#button to call attack()
Button(
root,
text='DECRYPT',
justify=CENTER,
command=lambda: attack(
attackOn.get(),
cipherTxt.get()
)
).grid(
row=7,
column=0,
columnspan=3,
pady=20
)
#mainloop of tkinter window
root.mainloop()
| 23.425 | 72 | 0.657417 |
150ef1714addd55d364456c56a5bbe4b9e5b825d | 12,703 | py | Python | eden.py | nobesio/eden | c301abdc64647fde02e8117ea137db322a804739 | [
"MIT"
] | null | null | null | eden.py | nobesio/eden | c301abdc64647fde02e8117ea137db322a804739 | [
"MIT"
] | null | null | null | eden.py | nobesio/eden | c301abdc64647fde02e8117ea137db322a804739 | [
"MIT"
] | null | null | null | from random import randint
import copy
# Auxiliary Function for rotating the DNA in each cycle.
# History is the object responsible for accounting all the organisms.
# Organism is the structure for the living organisms.
# QuantumPackages are the "food" of this simulation. The name comes from the concept used in operative systems.
# Enviroment is the class responsible for holding all the living organisms.
# Time is the class responsible for aging the living organisms.
# Death is the class responsible for killing old or starving organisms.
# Interpreter is the class that gives life to the organism. It executes the code in their DNA.
if __name__ == '__main__':
book = History()
earth = Enviroment(10)
earth.reportStatus()
earth.landscape[0][0] = QuantumPackage(10)
earth.landscape[1][1] = Organism("Eva", [8,7,0,9,7,1,10,7,2,11,7,3,12,7,4], 15)
#Poblemos Tierra
for i in range(0,4):
x = randint(0, earth.size-1)
y = randint(0, earth.size-1)
if earth.landscape[x][y] == 0:
dna = []
for a in range(1,11):
dna.append(randint(0,12))
earth.landscape[x][y] = Organism("Eva"+str(i), dna, 15)
earth.reportStatus()
chronos = Time()
parca = Death()
god = Interpreter()
for i in range(0,200):
if earth.countOrgs() > 0:
print("ciclo: ", i)
god.interprete((earth))
chronos.aging(earth)
parca.kill(earth)
earth.reportStatus()
for i in range(1,4):
x = randint(0,9)
y = randint(0,9)
if earth.landscape[x][y] == 0:
earth.landscape[x][y] = QuantumPackage(randint(5,10))
for org in earth.getOrganisms():
if not org in book.orgs:
book.addOrganism(org)
else:
print("SE MURIERON TODOS EN EL CICLO: ", i)
break
print("Living:", len(earth.getOrganisms()))
print("GENEPOOL:", book.getGenepool())
| 37.919403 | 112 | 0.492954 |
1512acbfbf9725f996d722bba323e798347b6270 | 2,407 | py | Python | examples/example_pipeline.py | madconsulting/datanectar | 7177b907c72c92de31fb136740f33c509ed5d499 | [
"Unlicense"
] | null | null | null | examples/example_pipeline.py | madconsulting/datanectar | 7177b907c72c92de31fb136740f33c509ed5d499 | [
"Unlicense"
] | null | null | null | examples/example_pipeline.py | madconsulting/datanectar | 7177b907c72c92de31fb136740f33c509ed5d499 | [
"Unlicense"
] | null | null | null | import os
import datetime
from pathlib import Path
import pandas as pd
import luigi
PROCESSED_DIR = 'processed'
ROLLUP_DIR = 'rollups'
if __name__ == '__main__':
luigi.run()
| 28.317647 | 93 | 0.617366 |
151306af1c1480903dd00ab70e45e88f683fbe48 | 2,463 | py | Python | scripts/tflite_model_tools/tflite/Metadata.py | LaudateCorpus1/edgeai-tidl-tools | d98789769a711e5a3700dfdc20d877073bd87da7 | [
"CNRI-Python"
] | 15 | 2021-09-05T03:43:54.000Z | 2022-03-29T14:17:29.000Z | scripts/tflite_model_tools/tflite/Metadata.py | LaudateCorpus1/edgeai-tidl-tools | d98789769a711e5a3700dfdc20d877073bd87da7 | [
"CNRI-Python"
] | 21 | 2021-09-01T06:58:31.000Z | 2022-03-31T06:33:15.000Z | scripts/tflite_model_tools/tflite/Metadata.py | LaudateCorpus1/edgeai-tidl-tools | d98789769a711e5a3700dfdc20d877073bd87da7 | [
"CNRI-Python"
] | 6 | 2021-09-22T06:44:19.000Z | 2022-02-07T06:28:35.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# Metadata
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Metadata
def MetadataStart(builder): builder.StartObject(2)
# MetadataT
def Pack(self, builder):
if self.name is not None:
name = builder.CreateString(self.name)
MetadataStart(builder)
if self.name is not None:
MetadataAddName(builder, name)
MetadataAddBuffer(builder, self.buffer)
metadata = MetadataEnd(builder)
return metadata
| 29.674699 | 131 | 0.657734 |
15136d40366243c73182b9f6916a6c550042f55f | 1,124 | py | Python | kukur/config.py | timeseer-ai/kukur | 28210ff0bde396d961b60828782fef56e326b319 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-09-12T08:29:30.000Z | 2022-01-19T19:06:45.000Z | kukur/config.py | timeseer-ai/kukur | 28210ff0bde396d961b60828782fef56e326b319 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2021-03-16T08:21:01.000Z | 2022-03-21T07:30:28.000Z | kukur/config.py | timeseer-ai/kukur | 28210ff0bde396d961b60828782fef56e326b319 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-12T08:29:34.000Z | 2021-09-12T08:29:34.000Z | """Read the Kukur configuration."""
# SPDX-FileCopyrightText: 2021 Timeseer.AI
#
# SPDX-License-Identifier: Apache-2.0
import glob
import toml
def from_toml(path):
"""Read the configuration from a TOML file, processing includes."""
config = toml.load(path)
for include_options in config.get("include", []):
if "glob" not in include_options:
raise InvalidIncludeException('"glob" is required')
for include_path in glob.glob(include_options["glob"]):
include_config = toml.load(include_path)
for k, v in include_config.items():
if k not in config:
config[k] = v
elif isinstance(config[k], list):
config[k].append(v)
elif isinstance(config[k], dict):
config[k].update(v)
else:
config[k] = v
return config
| 32.114286 | 71 | 0.598754 |
1514c4cab7976c14d2d2ff2686c1ed82e350d931 | 3,326 | py | Python | scheduletest.py | ambimanus/appsim | 8f60b3a736af8aa7f03435c28aef2685a3dbfbe3 | [
"MIT"
] | null | null | null | scheduletest.py | ambimanus/appsim | 8f60b3a736af8aa7f03435c28aef2685a3dbfbe3 | [
"MIT"
] | null | null | null | scheduletest.py | ambimanus/appsim | 8f60b3a736af8aa7f03435c28aef2685a3dbfbe3 | [
"MIT"
] | null | null | null | import time
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import epoch2num
import device_factory
if __name__ == '__main__':
amount = 50
devices = []
for i in range(amount):
device = device_factory.ecopower_4(i, i)
devices.append(device)
start = int(time.mktime(datetime(2010, 1, 2).timetuple()) // 60)
end = int(time.mktime(datetime(2010, 1, 3).timetuple()) // 60)
sample_time = start + 15 * 24
sample_dur = 16
P = [[] for d in devices]
T = [[] for d in devices]
Th = [[] for d in devices]
for now in range(start, sample_time):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
samples = []
for d in devices:
# d.components.sampler.setpoint_density = 0.1
samples.append(d.components.sampler.sample(100, sample_dur))
# samples = [d.components.sampler.sample(100, sample_dur) for d in devices]
schedule = np.zeros(sample_dur)
for idx, device in enumerate(devices):
# min_schedule_idx = np.argmin(np.sum(np.abs(samples[idx]), axis=1))
# device.components.scheduler.schedule = samples[idx][min_schedule_idx]
# schedule += samples[idx][min_schedule_idx]
max_schedule_idx = np.argmax(np.sum(np.abs(samples[idx]), axis=1))
device.components.scheduler.schedule = samples[idx][max_schedule_idx]
schedule += samples[idx][max_schedule_idx]
for now in range(sample_time, end):
for idx, device in enumerate(devices):
device.step(now)
P[idx].append(device.components.consumer.P)
T[idx].append(device.components.storage.T)
Th[idx].append(device.components.heatsink.in_heat)
P = np.sum(P, axis=0)
Th = np.sum(Th, axis=0)
T = np.mean(T, axis=0)
ax = plt.subplot(2, 1, 1)
ax.grid(True)
tz = 60 # timezone deviation in minutes
x = epoch2num(np.arange((start + tz) * 60, (end + tz) * 60, 60))
Th = np.reshape(Th, (len(x) // 15, 15)).mean(axis=1)
ax.plot_date(x[::15], Th, color='magenta', label='P$_{th,out}$ (kW)', ls='-',
marker=None)
ax.legend()
ax = plt.subplot(2, 1, 2, sharex=ax)
ax.grid(True)
l1 = ax.plot_date(x, P, label='P$_{el}$ (kW)', ls='-', marker=None)
sched_x = epoch2num(np.arange(
(sample_time + tz) * 60, ((sample_time + tz) + sample_dur * 15) * 60, 60))
l2 = ax.plot_date(sched_x[::15], schedule, color='r', label='Schedule',
ls='-', marker=None)
ax = plt.twinx()
l3 = ax.plot_date(x, T, color='g', label='T (\\textdegree C)', ls='-', marker=None)
lines = l1 + l2 + l3
labels = [l.get_label() for l in lines]
ax.legend(lines, labels)
plt.gcf().autofmt_xdate()
# # Samples plot
# fig, ax = plt.subplots(len(samples))
# if len(samples) == 1:
# ax = [ax]
# for i, sample in enumerate(samples):
# t = np.arange(len(sample[0]))
# for s in sample:
# ax[i].plot(t, s)
plt.show()
| 35.010526 | 88 | 0.585989 |
15165694e2716645ea22f6406f0f303943c423b8 | 329 | py | Python | src/genie/libs/parser/iosxe/tests/ShowInstallState/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowInstallState/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowInstallState/cli/equal/golden_output3_expected.py | ykoehler/genieparser | b62cf622c3d8eab77c7b69e932c214ed04a2565a | [
"Apache-2.0"
] | null | null | null | expected_output = {
"location": {
"R0 R1": {
"auto_abort_timer": "inactive",
"pkg_state": {
1: {
"filename_version": "17.08.01.0.149429",
"state": "U",
"type": "IMG",
}
},
}
}
} | 23.5 | 60 | 0.31307 |
1516d58cc828bc371a33c9b4a9ca474fdb7eba79 | 8,637 | py | Python | lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 808 | 2018-04-17T17:43:12.000Z | 2019-08-18T07:39:13.000Z | lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 728 | 2018-04-18T08:15:25.000Z | 2019-08-16T07:14:43.000Z | lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py | 714627034/Paddle-Lite | 015ba88a4d639db0b73603e37f83e47be041a4eb | [
"Apache-2.0"
] | 364 | 2018-04-18T17:05:02.000Z | 2019-08-18T03:25:38.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize, ConvTransposeOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
if __name__ == "__main__":
unittest.main(argv=[''])
| 40.359813 | 125 | 0.554706 |
151724d850402f50ae0bbd91cc2f5825d03ab2de | 22,871 | py | Python | cfn_policy_validator/tests/validation_tests/test_resource_validator.py | awslabs/aws-cloudformation-iam-policy-validator | 52c1439e4d76d2c7d45c97563cc87f8458134e0b | [
"MIT-0"
] | 41 | 2021-09-30T01:28:51.000Z | 2022-03-24T09:42:09.000Z | cfn_policy_validator/tests/validation_tests/test_resource_validator.py | awslabs/aws-cloudformation-iam-policy-validator | 52c1439e4d76d2c7d45c97563cc87f8458134e0b | [
"MIT-0"
] | 10 | 2021-09-30T08:13:11.000Z | 2022-03-22T07:34:41.000Z | cfn_policy_validator/tests/validation_tests/test_resource_validator.py | awslabs/aws-cloudformation-iam-policy-validator | 52c1439e4d76d2c7d45c97563cc87f8458134e0b | [
"MIT-0"
] | 3 | 2021-11-29T21:13:30.000Z | 2022-02-04T12:49:40.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import boto3
import copy
import unittest
from botocore.stub import ANY
from cfn_policy_validator.tests import account_config, offline_only, only_run_for_end_to_end
from cfn_policy_validator.tests.boto_mocks import mock_test_setup, BotoResponse, get_test_mode, TEST_MODE
from cfn_policy_validator.tests.validation_tests import FINDING_TYPE, mock_access_analyzer_resource_setup, \
MockAccessPreviewFinding, MockNoFindings, MockInvalidConfiguration, MockUnknownError, \
MockTimeout, MockValidateResourcePolicyFinding
from cfn_policy_validator.validation.validator import validate_parser_output, Validator
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.output import Output, Policy, Resource
resource_policy_with_no_findings = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': '*',
'Principal': {
'AWS': account_config.account_id
},
'Resource': f'arn:aws:sqs:{account_config.region}:{account_config.account_id}:resource1'
}
]
}
lambda_permissions_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "lambda:InvokeFunction",
"Resource": f"arn:aws:lambda:{account_config.region}:{account_config.account_id}:function:my-function"
}]
}
class WhenValidatingResources(BaseResourcePolicyTest):
sqs_queue_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": ["*"]
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": [f'{account_config.account_id}']
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
sqs_queue_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": {"not": "valid"},
"Principal": {
"AWS": [f'{account_config.account_id}']
},
"Action": "sqs:SendMessage",
"Resource": "*"
}]
}
kms_key_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": "kms:*",
"Resource": "*"
}]
}
kms_key_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": {"not": "valid"},
"Resource": "*"
}]
}
s3_bucket_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": [f"arn:aws:iam::{account_config.account_id}:root"]},
"Action": ["s3:PutObject", "s3:PutObjectAcl"],
"Resource": {"not": "valid"}
}]
}
secrets_manager_resource_policy_that_allows_external_access = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": f"arn:aws:iam::777888999444:root"},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_policy_with_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_policy_with_no_findings = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": "secretsmanager:GetSecretValue",
"Resource": "*"
}]
}
secrets_manager_resource_invalid_policy = {
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": f"arn:aws:iam::{account_config.account_id}:root"
},
"Action": {"not": "valid"},
"Resource": "*"
}]
}
| 33.437135 | 145 | 0.773425 |
1518a255b1570670a775245440b45ebe73fe295d | 6,672 | py | Python | HDF4_H5_NETCDF/source2.7/h5py/tests/hl/test_datatype.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | 31 | 2018-10-19T15:28:36.000Z | 2022-02-14T03:01:25.000Z | h5py/tests/hl/test_datatype.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 13 | 2020-01-28T22:20:14.000Z | 2022-03-11T23:20:14.000Z | h5py/tests/hl/test_datatype.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 10 | 2019-01-10T04:02:12.000Z | 2021-11-17T01:52:15.000Z | """
Tests for the h5py.Datatype class.
"""
from __future__ import absolute_import
from itertools import count
import numpy as np
import h5py
from ..common import ut, TestCase
| 33.527638 | 78 | 0.508243 |
151937c4e4552fde0563a4d7a5da8405bfdf819f | 2,278 | py | Python | conmon/regex.py | flashdagger/conmon | c6e75f115ad104ea7ecc7b14618efadefadad2f8 | [
"MIT"
] | null | null | null | conmon/regex.py | flashdagger/conmon | c6e75f115ad104ea7ecc7b14618efadefadad2f8 | [
"MIT"
] | null | null | null | conmon/regex.py | flashdagger/conmon | c6e75f115ad104ea7ecc7b14618efadefadad2f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
from typing import Pattern, Tuple, Iterator, Match, Union, Optional, List, Dict
from conmon.conan import storage_path
DECOLORIZE_REGEX = re.compile(r"[\u001b]\[\d{1,2}m", re.UNICODE)
CONAN_DATA_PATH = re.compile(
r"""(?x)
(?P<path>
([a-zA-Z]:)?
(?P<sep>[\\/])
(?:[\w\-.]+(?P=sep)){5,} # conservative choice of characters in path names
(?:build|package)(?P=sep)
[a-f0-9]{40}
(?P=sep)
)
"""
)
REF_PART_PATTERN = r"\w[\w\+\.\-]{1,50}"
REF_REGEX = re.compile(
rf"""(?x)
(?P<ref>
(?P<name>{REF_PART_PATTERN})/
(?P<version>{REF_PART_PATTERN})
(?:
@
(?:
(?P<user>{REF_PART_PATTERN})/
(?P<channel>{REF_PART_PATTERN})
)?
)?
)
"""
)
def compact_pattern(regex: Pattern) -> Tuple[str, int]:
"""take verbose pattern and remove all whitespace and comments"""
flags = regex.flags
# remove inline flags
pattern = re.sub(r"\(\?([aiLmsux])+\)", "", regex.pattern, flags=re.ASCII)
# remove whitespace in verbose pattern
if flags & re.VERBOSE:
pattern = re.sub(r"(?<!\\)\s+|\\(?= )|#[^\n]+\n", "", pattern, flags=re.ASCII)
flags -= re.VERBOSE
return pattern, flags
| 28.475 | 87 | 0.565847 |
15195236d745c09ce968bf6af2311b1a616e1824 | 5,089 | py | Python | src/north/cli/gscli/main.py | falcacicd/goldstone-mgmt | e7348011180e3c2dcd0558636ddc5c21779c7a3f | [
"Apache-2.0"
] | null | null | null | src/north/cli/gscli/main.py | falcacicd/goldstone-mgmt | e7348011180e3c2dcd0558636ddc5c21779c7a3f | [
"Apache-2.0"
] | null | null | null | src/north/cli/gscli/main.py | falcacicd/goldstone-mgmt | e7348011180e3c2dcd0558636ddc5c21779c7a3f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sysrepo as sr
import argparse
from prompt_toolkit import PromptSession
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.completion import Completer
import sys
import os
import logging
import asyncio
from .base import Object, InvalidInput, BreakLoop
from .onlp import Platform
from .tai import Transponder
logger = logging.getLogger(__name__)
stdout = logging.getLogger('stdout')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-c', '--command-string')
parser.add_argument('-k', '--keep-open', action='store_true')
parser.add_argument('-x', '--stdin', action='store_true')
args = parser.parse_args()
formatter = logging.Formatter('[%(asctime)s][%(levelname)-5s][%(name)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
if args.verbose:
console.setLevel(logging.DEBUG)
log = sr.Logs()
log.set_stderr(sr.SR_LL_DBG)
console.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
shf = logging.Formatter('%(message)s')
sh.setFormatter(shf)
stdout.setLevel(logging.DEBUG)
stdout.addHandler(sh)
shell = GoldstoneShell()
asyncio.run(_main())
if __name__ == '__main__':
main()
| 28.751412 | 118 | 0.592847 |
1519776f4ef0553b7494300ab7ab52a92881c3de | 350 | py | Python | InsertionSort/selectionSort/selectionsort/selectionSort.py | khaledshishani32/data-structures-and-algorithms-python | 6397ef2467958b100747ef430ddfb3e691a97a0f | [
"MIT"
] | null | null | null | InsertionSort/selectionSort/selectionsort/selectionSort.py | khaledshishani32/data-structures-and-algorithms-python | 6397ef2467958b100747ef430ddfb3e691a97a0f | [
"MIT"
] | null | null | null | InsertionSort/selectionSort/selectionsort/selectionSort.py | khaledshishani32/data-structures-and-algorithms-python | 6397ef2467958b100747ef430ddfb3e691a97a0f | [
"MIT"
] | null | null | null |
cus_list=[8,4,23,42,16,15]
selection_sort(cus_list)
| 25 | 69 | 0.611429 |
1519b725bc8e51fd74703c95a095ecb5723fb0b3 | 437 | py | Python | tests/creditcrawler_test.py | Mivinci/cqupt-piper | ce76a4334a2d7a7b75750d7bfac9efa747f968c7 | [
"MIT"
] | 3 | 2019-09-08T16:22:30.000Z | 2021-01-23T02:54:10.000Z | tests/creditcrawler_test.py | Mivinci/cqupt-piper | ce76a4334a2d7a7b75750d7bfac9efa747f968c7 | [
"MIT"
] | 1 | 2020-01-11T05:13:43.000Z | 2020-01-11T05:13:43.000Z | tests/creditcrawler_test.py | Mivinci/cqupt-piper | ce76a4334a2d7a7b75750d7bfac9efa747f968c7 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from prettytable import PrettyTable
# html = requests.get(
# 'http://jwzx.cqu.pt/student/xkxfTj.php',
# cookies={'PHPSESSID': 'o2r2fpddrj892dp1ntqddcp2hv'}).text
# soup = BeautifulSoup(html, 'html.parser')
# for tr in soup.find('table', {'id': 'AxfTjTable'}).findAll('tr')[1:]:
# tds = tr.findAll('td')
# print(tds[1:5])
table = PrettyTable(['aaa', 'bbb'])
print(table) | 24.277778 | 71 | 0.665904 |
1519c99cb202a036f7cd0c6cfb24bf58a516d62b | 602 | py | Python | ClassMethod.py | AdarshKvT/python-oop | b619226807c3a0b434fe9789952cc86dc8cde9b7 | [
"Apache-2.0"
] | null | null | null | ClassMethod.py | AdarshKvT/python-oop | b619226807c3a0b434fe9789952cc86dc8cde9b7 | [
"Apache-2.0"
] | null | null | null | ClassMethod.py | AdarshKvT/python-oop | b619226807c3a0b434fe9789952cc86dc8cde9b7 | [
"Apache-2.0"
] | null | null | null |
# create an object of person
p1 = Person("KvT")
# creating another instance
p2 = Person("Shin")
# accessing the class method directly
print(Person.num_of_people())
| 20.066667 | 43 | 0.647841 |
1519fb893e14d2984bb652c58400576b1b324256 | 1,117 | py | Python | webpack_manifest/templatetags/webpack_manifest_tags.py | temoto/python-webpack-manifest | bb10dbb718f2b41d8356c983b375b064e220d521 | [
"MIT"
] | 55 | 2015-11-02T19:50:41.000Z | 2022-03-06T21:48:36.000Z | webpack_manifest/templatetags/webpack_manifest_tags.py | temoto/python-webpack-manifest | bb10dbb718f2b41d8356c983b375b064e220d521 | [
"MIT"
] | 7 | 2015-09-16T05:24:37.000Z | 2018-07-25T23:10:30.000Z | webpack_manifest/templatetags/webpack_manifest_tags.py | temoto/python-webpack-manifest | bb10dbb718f2b41d8356c983b375b064e220d521 | [
"MIT"
] | 10 | 2016-03-06T16:30:00.000Z | 2020-08-12T01:41:51.000Z | from django import template
from django.conf import settings
from webpack_manifest import webpack_manifest
if not hasattr(settings, 'WEBPACK_MANIFEST'):
raise webpack_manifest.WebpackManifestConfigError('`WEBPACK_MANIFEST` has not been defined in settings')
if 'manifests' not in settings.WEBPACK_MANIFEST:
raise webpack_manifest.WebpackManifestConfigError(
'`WEBPACK_MANIFEST[\'manifests\']` has not been defined in settings'
)
register = template.Library()
| 34.90625 | 108 | 0.706356 |
151a77fa24452704d617da768baec7d8f8f8b186 | 2,668 | py | Python | utilities/jaccard_utilities.py | jjc2718/netreg | 292540e911cdfbe18ff6fe0f9bfe8e055053d23c | [
"BSD-3-Clause"
] | null | null | null | utilities/jaccard_utilities.py | jjc2718/netreg | 292540e911cdfbe18ff6fe0f9bfe8e055053d23c | [
"BSD-3-Clause"
] | 6 | 2019-07-12T15:52:31.000Z | 2020-01-13T18:14:41.000Z | utilities/jaccard_utilities.py | jjc2718/netreg | 292540e911cdfbe18ff6fe0f9bfe8e055053d23c | [
"BSD-3-Clause"
] | 1 | 2019-07-18T18:28:59.000Z | 2019-07-18T18:28:59.000Z | import os
import itertools as it
import pandas as pd
| 44.466667 | 110 | 0.642054 |
151aa06c987c92f779a676ea9b8988f697c25f28 | 2,600 | py | Python | CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py | cassio645/Aprendendo-python | 17a8b5a0e7abc3342d24841ed28093db13d2c130 | [
"MIT"
] | null | null | null | CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py | cassio645/Aprendendo-python | 17a8b5a0e7abc3342d24841ed28093db13d2c130 | [
"MIT"
] | null | null | null | CursoEmVideo/pythonProject/venv/Lib/site-packages/Interface/tests/unitfixtures.py | cassio645/Aprendendo-python | 17a8b5a0e7abc3342d24841ed28093db13d2c130 | [
"MIT"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from Interface import Interface
from Interface.Attribute import Attribute
# testInstancesOfClassImplements
# YAGNI IC=Interface.impliedInterface(C)
C.__implements__=IC
foo_instance = Foo()
new = Interface.__class__
FunInterface = new('FunInterface')
BarInterface = new('BarInterface', [FunInterface])
BobInterface = new('BobInterface')
BazInterface = new('BazInterface', [BobInterface, BarInterface])
| 22.033898 | 78 | 0.602308 |
151beeecee85f8f8f1854a4eb0eedf92f2702417 | 7,188 | py | Python | noise_robust_cobras/noise_robust/datastructures/cycle.py | jonassoenen/noise_robust_cobras | 0e5823dbba0263c3ccb3c2afb4267f2f542fc568 | [
"Apache-2.0"
] | 2 | 2020-07-30T15:09:53.000Z | 2020-07-31T06:33:36.000Z | noise_robust_cobras/noise_robust/datastructures/cycle.py | magicalJohn/noise_robust_cobras | 0e5823dbba0263c3ccb3c2afb4267f2f542fc568 | [
"Apache-2.0"
] | null | null | null | noise_robust_cobras/noise_robust/datastructures/cycle.py | magicalJohn/noise_robust_cobras | 0e5823dbba0263c3ccb3c2afb4267f2f542fc568 | [
"Apache-2.0"
] | 1 | 2021-12-12T11:11:25.000Z | 2021-12-12T11:11:25.000Z | from collections import defaultdict
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
from noise_robust_cobras.noise_robust.datastructures.constraint_index import (
ConstraintIndex,
)
def get_sorted_constraint_list(self):
"""
:return: a list of all constraints in the order by which they appear in the cycle with an arbitrary starting constraints
"""
all_constraints = list(self.constraints)
start_constraint = all_constraints[0]
temp_index = ConstraintIndex()
for constraint in all_constraints[1:]:
temp_index.add_constraint(constraint)
current_list = [(start_constraint.get_instance_tuple(), start_constraint)]
current_instance = start_constraint.i2
while len(temp_index.constraints) > 0:
matching_constraints = temp_index.find_constraints_for_instance(
current_instance
)
if len(matching_constraints) == 1:
matching_constraint = list(matching_constraints)[0]
else:
raise Exception("Not a valid cycle!")
other_instance = matching_constraint.get_other_instance(current_instance)
current_list.append(
((current_instance, other_instance), matching_constraint)
)
current_instance = other_instance
temp_index.remove_constraint(matching_constraint)
# check if the cycle is complete
if start_constraint.i1 != current_instance:
raise Exception("Not a valid cycle!")
return current_list
| 37.243523 | 128 | 0.657763 |
151d22605d16726325dce1205b7a8ba505f35329 | 525 | py | Python | python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | python3/hackerrank_leetcode/remove_duplicates_from_sorted_array/test.py | seLain/codesnippets | ae9a1fa05b67f4b3ac1703cc962fcf5f6de1e289 | [
"MIT"
] | null | null | null | import unittest
from main import Solution
if __name__ == '__main__':
unittest.main() | 30.882353 | 72 | 0.693333 |
12772bd26a04aaf3f825acfbb2e6f63963b94d81 | 246 | py | Python | 7KYU/word_splitter.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 4 | 2021-07-17T22:48:03.000Z | 2022-03-25T14:10:58.000Z | 7KYU/word_splitter.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | null | null | null | 7KYU/word_splitter.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 3 | 2021-06-14T14:18:16.000Z | 2022-03-16T06:02:02.000Z | SEPARATOR: list = [':', ',', '*', ';', '#', '|', '+', '%', '>', '?', '&', '=', '!']
| 35.142857 | 83 | 0.426829 |
12781452042b292ed356843d47c2a5e60478909f | 7,998 | py | Python | parsers/sales_order.py | njncalub/logistiko | 74b1d17bc76538de6f5f70c7eca927780d6b4113 | [
"MIT"
] | null | null | null | parsers/sales_order.py | njncalub/logistiko | 74b1d17bc76538de6f5f70c7eca927780d6b4113 | [
"MIT"
] | null | null | null | parsers/sales_order.py | njncalub/logistiko | 74b1d17bc76538de6f5f70c7eca927780d6b4113 | [
"MIT"
] | null | null | null | import csv
from core.exceptions import InvalidFileException
| 46.77193 | 79 | 0.542136 |
1278169f69007b0aff65ad2222788f61228ad8d6 | 8,342 | py | Python | maps.py | BouncyButton/places-simulator | a1f5fc385750af9968cc3c6216ba20f5de4719fd | [
"MIT"
] | null | null | null | maps.py | BouncyButton/places-simulator | a1f5fc385750af9968cc3c6216ba20f5de4719fd | [
"MIT"
] | null | null | null | maps.py | BouncyButton/places-simulator | a1f5fc385750af9968cc3c6216ba20f5de4719fd | [
"MIT"
] | null | null | null | import googlemaps
import secret
from datetime import datetime
import requests
import pickle
import time
gmaps = googlemaps.Client(key=secret.PLACES_API_KEY)
# lat = 45.411400
# lon = 11.887491
coordinates = [
(45.411400, 11.887491), # torre archimede
(45.409218, 11.877915), # piazza garibaldi
(45.407698, 11.873351), # piazza dei signori
(45.401403, 11.880813), # basilica di sant'antonio
]
# def find_places():
# results = gmaps.places_nearby(location=(lat, lon), type='bar', radius=500)
# print(len(results))
# return results
d = read_data()
occ = text_analysis(d)
word2vec_analysis(occ.keys(), list(occ.values()), N=12, translate=True)
| 29.167832 | 121 | 0.609686 |
12785f321ec0fa0181c3a4c19bc2048854ea35ad | 31,231 | py | Python | azure-iot-device/tests/iothub/test_sync_handler_manager.py | dt-boringtao/azure-iot-sdk-python | 35a09679bdf4d7a727391b265a8f1fbb99a30c45 | [
"MIT"
] | null | null | null | azure-iot-device/tests/iothub/test_sync_handler_manager.py | dt-boringtao/azure-iot-sdk-python | 35a09679bdf4d7a727391b265a8f1fbb99a30c45 | [
"MIT"
] | null | null | null | azure-iot-device/tests/iothub/test_sync_handler_manager.py | dt-boringtao/azure-iot-sdk-python | 35a09679bdf4d7a727391b265a8f1fbb99a30c45 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import pytest
import threading
import time
from azure.iot.device.common import handle_exceptions
from azure.iot.device.iothub import client_event
from azure.iot.device.iothub.sync_handler_manager import SyncHandlerManager, HandlerManagerException
from azure.iot.device.iothub.sync_handler_manager import MESSAGE, METHOD, TWIN_DP_PATCH
from azure.iot.device.iothub.inbox_manager import InboxManager
from azure.iot.device.iothub.sync_inbox import SyncClientInbox
logging.basicConfig(level=logging.DEBUG)
# NOTE ON TEST IMPLEMENTATION:
# Despite having significant shared implementation between the sync and async handler managers,
# there are not shared tests. This is because while both have the same set of requirements and
# APIs, the internal implementation is different to an extent that it simply isn't really possible
# to test them to an appropriate degree of correctness with a shared set of tests.
# This means we must be very careful to always change both test modules when a change is made to
# shared behavior, or when shared features are added.
# NOTE ON TIMING/DELAY
# Several tests in this module have sleeps/delays in their implementation due to needing to wait
# for things to happen in other threads.
all_internal_receiver_handlers = [MESSAGE, METHOD, TWIN_DP_PATCH]
all_internal_client_event_handlers = [
"_on_connection_state_change",
"_on_new_sastoken_required",
"_on_background_exception",
]
all_internal_handlers = all_internal_receiver_handlers + all_internal_client_event_handlers
all_receiver_handlers = [s.lstrip("_") for s in all_internal_receiver_handlers]
all_client_event_handlers = [s.lstrip("_") for s in all_internal_client_event_handlers]
all_handlers = all_receiver_handlers + all_client_event_handlers
# ##############
# # PROPERTIES #
# ##############
class SharedHandlerPropertyTests(object):
# NOTE: We use setattr() and getattr() in these tests so they're generic to all properties.
# This is functionally identical to doing explicit assignment to a property, it just
# doesn't read quite as well.
| 43.077241 | 170 | 0.700202 |
1278ee593e924b3273cd53898ff8735b235b993e | 885 | py | Python | src/python/Chameleon.Faas/demo/helloworld_grpc_client.py | sevenTiny/Seventiny.Cloud.ScriptEngine | dda66a7d2ec8c203823e07666314b9d0c8795768 | [
"Apache-2.0"
] | 2 | 2020-01-17T03:16:42.000Z | 2020-08-28T04:23:06.000Z | src/python/Chameleon.Faas/demo/helloworld_grpc_client.py | sevenTiny/Seventiny.Cloud.ScriptEngine | dda66a7d2ec8c203823e07666314b9d0c8795768 | [
"Apache-2.0"
] | null | null | null | src/python/Chameleon.Faas/demo/helloworld_grpc_client.py | sevenTiny/Seventiny.Cloud.ScriptEngine | dda66a7d2ec8c203823e07666314b9d0c8795768 | [
"Apache-2.0"
] | 1 | 2019-12-13T07:02:56.000Z | 2019-12-13T07:02:56.000Z | import grpc
import helloworld_pb2
import helloworld_pb2_grpc
from grpc.beta import implementations
if __name__ == '__main__':
run() | 34.038462 | 118 | 0.701695 |
1279a170c86c50a1d9aa504d29a7b4fbc15ef3a6 | 2,350 | py | Python | tools/pca_outcore.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 28 | 2017-03-19T12:02:22.000Z | 2021-07-08T13:49:41.000Z | tools/pca_outcore.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 2 | 2018-05-07T07:43:15.000Z | 2018-12-14T16:06:48.000Z | tools/pca_outcore.py | escorciav/deep-action-proposals | c14f512febc1abd0ec40bd3188a83e4ee3913535 | [
"MIT"
] | 7 | 2017-03-19T11:51:21.000Z | 2020-01-07T11:17:48.000Z | #!/usr/bin/env python
"""
PCA done via matrix multiplication out-of-core.
"""
import argparse
import time
import h5py
import hickle as hkl
import numpy as np
if __name__ == '__main__':
p = input_parse()
args = p.parse_args()
main(**vars(args))
| 30.519481 | 73 | 0.609362 |
127b202282fe9d7b819fac4de12d835378edbe4e | 5,680 | py | Python | azdev/params.py | marstr/azure-cli-dev-tools | 8b82b1867a425a9a017868c6c1aef2f4bb5aa62b | [
"MIT"
] | null | null | null | azdev/params.py | marstr/azure-cli-dev-tools | 8b82b1867a425a9a017868c6c1aef2f4bb5aa62b | [
"MIT"
] | null | null | null | azdev/params.py | marstr/azure-cli-dev-tools | 8b82b1867a425a9a017868c6c1aef2f4bb5aa62b | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
from knack.arguments import ArgumentsContext
from azdev.completer import get_test_completion
| 67.619048 | 214 | 0.659859 |
127b40e7a10ad49a4f232756467391a18976528f | 1,968 | py | Python | gamry_parser/cv.py | bcliang/gamry-parser | c1dfcf73d973c88ee496f0aa256d99f642ab6013 | [
"MIT"
] | 6 | 2019-03-14T21:21:13.000Z | 2022-03-04T19:21:32.000Z | gamry_parser/cv.py | bcliang/gamry-parser | c1dfcf73d973c88ee496f0aa256d99f642ab6013 | [
"MIT"
] | 34 | 2019-03-11T04:21:51.000Z | 2022-01-10T21:45:38.000Z | gamry_parser/cv.py | bcliang/gamry-parser | c1dfcf73d973c88ee496f0aa256d99f642ab6013 | [
"MIT"
] | 5 | 2019-08-11T15:38:30.000Z | 2021-04-24T20:06:09.000Z | import gamry_parser as parser
| 29.373134 | 83 | 0.571646 |
127c2b5fae2468e39370fecece20d2e64788de00 | 11,609 | py | Python | comps.py | matthewb66/bdconsole | edc9a03f93dd782d58ff274ebe5152f7eccecff7 | [
"MIT"
] | null | null | null | comps.py | matthewb66/bdconsole | edc9a03f93dd782d58ff274ebe5152f7eccecff7 | [
"MIT"
] | null | null | null | comps.py | matthewb66/bdconsole | edc9a03f93dd782d58ff274ebe5152f7eccecff7 | [
"MIT"
] | null | null | null | import json
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import dash_table
col_data_comps = [
{"name": ['Component'], "id": "componentName"},
{"name": ['Version'], "id": "componentVersionName"},
{"name": ['Ignored'], "id": "ignored"},
# {"name": ['Ignored'], "id": "ignoreIcon"},
{"name": ['Reviewed'], "id": "reviewStatus"},
{"name": ['Policy Violation'], "id": "policyStatus"},
# {"name": ['Policy Status'], "id": "polIcon"},
{"name": ['Usage'], "id": "usages"},
{"name": ['Match Types'], "id": "matchTypes"},
]
def make_comp_toast(message):
"""
Helper function for making a toast. dict id for use in pattern matching
callbacks.
"""
return dbc.Toast(
message,
id={"type": "toast", "id": "toast_comp"},
key='toast_comp',
header="Component Processing",
is_open=True,
dismissable=False,
icon="info",
duration=8000,
)
| 41.460714 | 103 | 0.450168 |
127c9e72b97842964045050d2c4c20f3d0a12a28 | 656 | py | Python | CursoemVideoPython/Desafio 35.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
] | null | null | null | CursoemVideoPython/Desafio 35.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
] | null | null | null | CursoemVideoPython/Desafio 35.py | Beebruna/Python | bdbe10ea76acca1b417f5960db0aae8be44e0af3 | [
"MIT"
] | null | null | null | '''
Desenvolva um programa que leia o comprimento de trs retas e diga ao usurio se elas
podem ou no formar um tringulo.
'''
reta1 = float(input('Digite o comprimento da primeira reta: '))
reta2 = float(input('Digite o comprimento da segunda reta: '))
reta3 = float(input('Digite o comprimento da terceira reta: '))
if reta1 < 0 or reta2 < 0 or reta3 < 0:
print('\nValor Invlido!')
print('No EXISTE medida de lado NEGATIVA!')
else:
if reta1 + reta2 > reta3 and reta1 + reta3 > reta2 and reta2 + reta3 > reta1:
print('\nAs trs retas podem formar tringulo!')
else:
print('\nAs trs retas NO podem formar tringulo!') | 38.588235 | 85 | 0.689024 |