hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ca4f326c31dc7913ff0486df63cbab12df18fbe | 888 | py | Python | LEDdebug/examples/led-demo.py | UrsaLeo/LEDdebug | 228af02468e4f3b617a50e6195931a623a4ad848 | [
"Apache-2.0"
] | null | null | null | LEDdebug/examples/led-demo.py | UrsaLeo/LEDdebug | 228af02468e4f3b617a50e6195931a623a4ad848 | [
"Apache-2.0"
] | null | null | null | LEDdebug/examples/led-demo.py | UrsaLeo/LEDdebug | 228af02468e4f3b617a50e6195931a623a4ad848 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""UrsaLeo LEDdebug board LED demo
Turn the LED's on one at a time, then all off"""
import time
ON = 1
OFF = 0
DELAY = 0.5 # seconds
try:
from LEDdebug import LEDdebug
except ImportError:
try:
import sys
import os
sys.path.append("..")
sys.path.append(os.path.join(os.path.dirname(__file__), '..',
'LEDdebug'))
from LEDdebug import LEDdebug
except ImportError:
print('LEDdebug import failed')
exit(0)
if __name__ == '__main__':
main()
| 20.651163 | 69 | 0.595721 |
1ca57a9994de049d5edebf8ee7ac8544c88a916a | 6,352 | py | Python | modules/server.py | Nitin-Mane/SARS-CoV-2-xDNN-Classifier | abb6a82b8ee89a041b0e26e14ec1e416c4561266 | [
"MIT"
] | null | null | null | modules/server.py | Nitin-Mane/SARS-CoV-2-xDNN-Classifier | abb6a82b8ee89a041b0e26e14ec1e416c4561266 | [
"MIT"
] | null | null | null | modules/server.py | Nitin-Mane/SARS-CoV-2-xDNN-Classifier | abb6a82b8ee89a041b0e26e14ec1e416c4561266 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###################################################################################
##
## Project: COVID -19 xDNN Classifier 2020
## Version: 1.0.0
## Module: Server
## Desription: The COVID -19 xDNN Classifier 2020 server.
## License: MIT
## Copyright: 2021, Asociacion De Investigacion En Inteligencia Artificial Para
## La Leucemia Peter Moss.
## Author: Nitin Mane
## Maintainer: Nitin Mane
##
## Modified: 2021-2-19
##
###################################################################################
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files(the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
###################################################################################
import cv2
import json
import jsonpickle
import os
import requests
import time
import numpy as np
import tensorflow as tf
from modules.AbstractServer import AbstractServer
from flask import Flask, request, Response
from io import BytesIO
from PIL import Image
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
| 31.60199 | 83 | 0.65318 |
1ca5ea50e3b728f56855c54f3c17bbd2fb106298 | 4,382 | py | Python | rdr_service/lib_fhir/fhirclient_3_0_0/models/allergyintolerance_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/lib_fhir/fhirclient_3_0_0/models/allergyintolerance_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/lib_fhir/fhirclient_3_0_0/models/allergyintolerance_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import allergyintolerance
from .fhirdate import FHIRDate
| 57.657895 | 153 | 0.707211 |
1ca66df25ee895df823541d354d97c61178071b8 | 4,107 | py | Python | jsparse/meijiexia/meijiexia.py | PyDee/Spiders | 6fc0a414060032b5ba4332302285e3fcc9a6113e | [
"Apache-2.0"
] | 6 | 2020-06-02T16:22:58.000Z | 2021-09-18T03:20:16.000Z | jsparse/meijiexia/meijiexia.py | PyDee/Spiders | 6fc0a414060032b5ba4332302285e3fcc9a6113e | [
"Apache-2.0"
] | 4 | 2021-03-31T19:54:37.000Z | 2022-03-12T00:33:41.000Z | jsparse/meijiexia/meijiexia.py | PyDee/Spiders | 6fc0a414060032b5ba4332302285e3fcc9a6113e | [
"Apache-2.0"
] | 5 | 2020-06-02T16:23:00.000Z | 2021-09-03T02:16:15.000Z | import time
import random
import requests
from lxml import etree
import pymongo
from .url_file import mjx_weibo, mjx_dy, mjx_ks, mjx_xhs
if __name__ == '__main__':
mjx = MJX()
mjx.run()
| 41.07 | 450 | 0.567324 |
1ca67740a4b7ba54382fd28803af944938695c13 | 2,756 | py | Python | MLModules/ABD/B_PCAQDA.py | jamster112233/ICS_IDS | dac6abc3c8d6e840a21adedcb9e8dcfaa304b499 | [
"BSD-3-Clause"
] | null | null | null | MLModules/ABD/B_PCAQDA.py | jamster112233/ICS_IDS | dac6abc3c8d6e840a21adedcb9e8dcfaa304b499 | [
"BSD-3-Clause"
] | null | null | null | MLModules/ABD/B_PCAQDA.py | jamster112233/ICS_IDS | dac6abc3c8d6e840a21adedcb9e8dcfaa304b499 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from keras.utils import np_utils
import pandas as pd
import sys
from sklearn.preprocessing import LabelEncoder
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.decomposition import PCA
import os
from sklearn.externals import joblib
from sklearn.metrics import f1_score
trainName = sys.argv[1]
testName = sys.argv[2]
# Create an object called iris with the iris Data
dftrain = pd.read_csv(filepath_or_buffer=trainName, header=None, sep=',')
dftest = pd.read_csv(filepath_or_buffer=testName, header=None, sep=',')
cols = ['Proto']
for i in range(1,dftrain.shape[1]):
cols.append('Byte' + str(i))
dftrain.columns=cols
dftrain.dropna(how="all", inplace=True)
dftrain.tail()
dftest.columns=cols
dftest.dropna(how="all", inplace=True)
dftest.tail()
Xtrain = dftrain.ix[:,1:dftrain.shape[1]].values
Ytrain = dftrain.ix[:,0].values
Xtest = dftest.ix[:,1:dftrain.shape[1]].values
Ytest = dftest.ix[:,0].values
encoder = LabelEncoder()
encoder.fit(Ytrain)
encYtrain = encoder.transform(Ytrain)
encoder = LabelEncoder()
encoder.fit(Ytest)
encYtest = encoder.transform(Ytest)
directory = "models/ABD/QDA/"
if not os.path.exists(directory):
os.makedirs(directory)
logfile = directory + "log-0.csv"
with open(logfile, "w") as file:
file.write("PCAlevel,acc,val_acc,f1\n")
fscores = []
accs = []
for q in xrange(1,151):
pca = PCA(n_components=q)
Xtrain_pca = pca.fit_transform(Xtrain)
Xtest_pca = pca.transform(Xtest)
clf = QDA(priors=None, reg_param=0.0)
clf.fit(Xtrain_pca, encYtrain)
trainPred = clf.predict(Xtrain_pca)
testPred = clf.predict(Xtest_pca)
score = 0.0
for i in xrange(0, len(trainPred)):
if trainPred[i] == encYtrain[i]:
score += 1
trainAcc = float(score) / len(trainPred)
score = 0.0
for i in xrange(0, len(testPred)):
if testPred[i] == encYtest[i]:
score += 1
testAcc = float(score) / len(testPred)
f1 = f1_score(encYtest, testPred)
accs.append(testAcc)
fscores.append(f1)
print("Train " + str(trainAcc))
print("Test " + str(testAcc))
print("F1 " + str(f1))
with open(logfile, "a") as file:
file.write(str(q) + "," + str(trainAcc) + "," + str(testAcc) + "," + str(f1) + "\n")
if q == 2:
joblib.dump(clf, 'QDA2.pkl')
print("Val Acc max" + str(max(accs)))
print("FMAX " + str(max(fscores)))
# print(str(q) + ":" + str((float(score)/len(classesPred)*100)) + "%")
#
# preds = classesPred
# if(len(preds) > 0):
# preds = np.array(list(encoder.inverse_transform(preds)))
#
# df = pd.crosstab(dftest['Proto'], preds, rownames=['Actual Protocol'], colnames=['Predicted Protocol'])
# df.to_csv('ConfusionMatrixLDA.csv')
| 26.757282 | 105 | 0.675617 |
1ca68195c840c66d0de8f1f855f4ded2b7c95a94 | 2,850 | py | Python | GR2-Save-Loader.py | 203Null/Gravity-Rush-2-Save-Loader | 40cf8a1748449c0e019a2e57ac2b8eccd50d8917 | [
"MIT"
] | 2 | 2022-02-06T10:40:22.000Z | 2022-02-06T10:45:51.000Z | GR2-Save-Loader.py | 203Null/Gravity-Rush-2-Save-Loader | 40cf8a1748449c0e019a2e57ac2b8eccd50d8917 | [
"MIT"
] | null | null | null | GR2-Save-Loader.py | 203Null/Gravity-Rush-2-Save-Loader | 40cf8a1748449c0e019a2e57ac2b8eccd50d8917 | [
"MIT"
] | null | null | null | import struct
import json
from collections import OrderedDict
file_path = "data0002.bin"
show_offset = True
show_hash = False
loaded_data = 0
file = open(file_path, mode='rb')
data = file.read()
data_set = OrderedDict()
if len(data) > 0x40 and data[0:4] == b'ggdL':
file.seek(0x0c, 0)
numOfData = int.from_bytes(file.read(4), byteorder='little')
while loaded_data < numOfData:
unpack(data_set)
print()
print(data_set)
print()
print("Complete with %i/%i data" % (loaded_data, numOfData))
with open(r"%s.txt" % (file_path.split('.')[0]), 'w', encoding='utf-8') as json_file:
json.dump(data_set, json_file, indent=4, ensure_ascii=False)
else:
print("File Incorrect") | 36.075949 | 115 | 0.587719 |
1ca867b22f2e4c2942595bca95ab919246220f6f | 342 | py | Python | python/Recursion.py | itzsoumyadip/vs | acf32cd0bacb26e62854060e0acf5eb41b7a68c8 | [
"Unlicense"
] | 1 | 2019-07-05T04:27:05.000Z | 2019-07-05T04:27:05.000Z | python/Recursion.py | itzsoumyadip/vs | acf32cd0bacb26e62854060e0acf5eb41b7a68c8 | [
"Unlicense"
] | null | null | null | python/Recursion.py | itzsoumyadip/vs | acf32cd0bacb26e62854060e0acf5eb41b7a68c8 | [
"Unlicense"
] | null | null | null | ## to change recursion limit
import sys
print(sys.getrecursionlimit()) #Return the current value of the recursion limit
#1000
## change the limit
sys.setrecursionlimit(2000) # change value of the recursion limit
#2000
i=0
greet() # hellow 1996 then error
| 18 | 80 | 0.678363 |
1ca91ede49b4b76cb020ec83f9b1603af4b3c7c0 | 1,406 | py | Python | pages/tests/test_views.py | andywar65/starter-fullstack | 683d6282eb02a9b967d15cd254976e67549672e9 | [
"BSD-2-Clause"
] | null | null | null | pages/tests/test_views.py | andywar65/starter-fullstack | 683d6282eb02a9b967d15cd254976e67549672e9 | [
"BSD-2-Clause"
] | null | null | null | pages/tests/test_views.py | andywar65/starter-fullstack | 683d6282eb02a9b967d15cd254976e67549672e9 | [
"BSD-2-Clause"
] | null | null | null | from django.test import TestCase, override_settings
from django.urls import reverse
from pages.models import Article, HomePage
| 35.15 | 77 | 0.647226 |
1caa879917346512e7a2dc23a9df954e997c28d0 | 26,030 | py | Python | poco/services/batch/server.py | sunliwen/poco | a4b8c4ede63711eea42a444fb9d922c350855364 | [
"MIT"
] | null | null | null | poco/services/batch/server.py | sunliwen/poco | a4b8c4ede63711eea42a444fb9d922c350855364 | [
"MIT"
] | 7 | 2019-03-22T06:26:39.000Z | 2021-06-10T19:36:06.000Z | poco/services/batch/server.py | sunliwen/poco | a4b8c4ede63711eea42a444fb9d922c350855364 | [
"MIT"
] | 1 | 2017-10-25T03:43:51.000Z | 2017-10-25T03:43:51.000Z | #!/usr/bin/env python
import logging
import sys
sys.path.append("../../")
sys.path.append("pylib")
import time
import datetime
import pymongo
import uuid
import os
import subprocess
import os.path
import settings
from common.utils import getSiteDBCollection
sys.path.insert(0, "../../")
logging_manager = LoggingManager()
connection = getConnection()
# TODO: removed items' similarities should also be removed.
begin_flow = BeginFlow()
preprocessing_flow = PreprocessingFlow()
preprocessing_flow.dependOn(begin_flow)
hive_based_statistics_flow = HiveBasedStatisticsFlow()
hive_based_statistics_flow.dependOn(preprocessing_flow)
v_similarity_calc_flow = VSimiliarityCalcFlow()
v_similarity_calc_flow.dependOn(preprocessing_flow)
plo_similarity_calc_flow = PLOSimilarityCalcFlow()
plo_similarity_calc_flow.dependOn(preprocessing_flow)
buy_together_similarity_flow = BuyTogetherSimilarityFlow()
buy_together_similarity_flow.dependOn(preprocessing_flow)
viewed_ultimately_buy_flow = ViewedUltimatelyBuyFlow()
viewed_ultimately_buy_flow.dependOn(preprocessing_flow)
#edm_related_preprocessing_flow = EDMRelatedPreprocessingFlow()
# edm_related_preprocessing_flow.dependOn(preprocessing_flow)
if __name__ == "__main__":
os.environ["PATH"] = "%s:%s" % (getattr(settings, "extra_shell_path", ""), os.environ["PATH"])
while True:
#site_ids = ["test_with_gdian_data"]
for site in loadSites(connection):
for site in getManualCalculationSites():
workOnSiteWithRetries(site, is_manual_calculation=True)
workOnSiteWithRetries(site)
sleep_seconds = 1
time.sleep(sleep_seconds)
| 39.142857 | 106 | 0.678871 |
1caaa79685649df41865169e49ad903c14174dcc | 4,488 | py | Python | tests/integration/basket/model_tests.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | null | null | null | tests/integration/basket/model_tests.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | null | null | null | tests/integration/basket/model_tests.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | [
"BSD-3-Clause"
] | null | null | null | from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.basket.models import Basket
from oscar.apps.partner import strategy
from oscar.test import factories
from oscar.apps.catalogue.models import Option
| 35.619048 | 93 | 0.694296 |
1caab3990fb21bf24a942c5cae050f1ff9f8b143 | 305 | py | Python | tests/fixtures/db/sqlite.py | code-watch/meltano | 2afff73ed43669b5134dacfce61814f7f4e77a13 | [
"MIT"
] | 8 | 2020-06-16T22:29:54.000Z | 2021-06-04T11:57:57.000Z | tests/fixtures/db/sqlite.py | dotmesh-io/meltano | 4616d44ded9dff4e9ad19a9004349e9baa16ddd5 | [
"MIT"
] | 38 | 2019-12-09T06:53:33.000Z | 2022-03-29T22:29:19.000Z | tests/fixtures/db/sqlite.py | aroder/meltano | b8d1d812f4051b6334986fc6b447d23c4d0d5043 | [
"MIT"
] | 2 | 2020-06-16T22:29:59.000Z | 2020-11-04T05:47:50.000Z | import pytest
import os
import sqlalchemy
import contextlib
| 17.941176 | 58 | 0.714754 |
1cab057f92135b745b2c22597acdb2d7401a8e30 | 11,134 | py | Python | experiments/render-tests-avg.py | piotr-karon/realworld-starter-kit | 6285e4b5913fe5e99d72e9178eb4b1db246d02c9 | [
"MIT"
] | null | null | null | experiments/render-tests-avg.py | piotr-karon/realworld-starter-kit | 6285e4b5913fe5e99d72e9178eb4b1db246d02c9 | [
"MIT"
] | null | null | null | experiments/render-tests-avg.py | piotr-karon/realworld-starter-kit | 6285e4b5913fe5e99d72e9178eb4b1db246d02c9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import os
from pathlib import Path
import numpy as np
from natsort import natsorted
try:
from docopt import docopt
from marko.ext.gfm import gfm
import pygal
from pygal.style import Style, DefaultStyle
except ImportError as e:
raise Exception('Some external dependencies not found, install them using: pip install -r requirements.txt') from e
FIGURE_FUNCS = []
def figure(func):
"""Simple decorator to mark a function as a figure generator."""
FIGURE_FUNCS.append(func)
return func
def latency_vs_connections_figure(percentile, names, suites, config):
all_vals = [[s[f'latency_{percentile}p_ms_avg'] for s in suites[name]['stats'][0:]] for name in names]
mx = np.max(all_vals)
mn = np.min(all_vals)
config.range = (mn - mn * .5, mx + mx * .5)
chart = pygal.Line(config, logarithmic=True, value_formatter=lambda x: "{:0.0f}".format(x))
chart.title = f'{percentile}. centyl czasu odpowiedzi wzgldem liczby pocze (ms)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name, [s[f'latency_{percentile}p_ms_avg']
for s in suites[name]['stats'][0:]])
return chart
def connections_x_labels(chart, suites, skip=0):
chart.x_labels = [f"{s['connections']} conn's" if s['connections'] else s['message']
for s in next(iter(suites.values()))['stats']][skip:]
chart.x_label_rotation = -30
def div_or_none(numerator, denominator, scale=1):
if not denominator:
return None
return scale * numerator / denominator
HTML_PREFIX = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Benchmark Report</title>
</head>
<body>
'''
HTML_SUFFIX = ''' </body>
</html>
'''
if __name__ == '__main__':
# args = docopt(__doc__)
render()
| 36.032362 | 157 | 0.647566 |
1cab32916328111ed29e8c7581e89b8013c63586 | 9,839 | py | Python | litex/build/altera/quartus.py | osterwood/litex | db20cb172dc982c5879aa8080ec7aa18de181cc5 | [
"ADSL"
] | 1,501 | 2016-04-19T18:16:21.000Z | 2022-03-31T17:46:31.000Z | litex/build/altera/quartus.py | osterwood/litex | db20cb172dc982c5879aa8080ec7aa18de181cc5 | [
"ADSL"
] | 1,135 | 2016-04-19T05:49:14.000Z | 2022-03-31T15:21:19.000Z | litex/build/altera/quartus.py | osterwood/litex | db20cb172dc982c5879aa8080ec7aa18de181cc5 | [
"ADSL"
] | 357 | 2016-04-19T05:00:24.000Z | 2022-03-31T11:28:32.000Z | #
# This file is part of LiteX.
#
# Copyright (c) 2014-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2019 msloniewski <marcin.sloniewski@gmail.com>
# Copyright (c) 2019 vytautasb <v.buitvydas@limemicro.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
import subprocess
import sys
import math
from shutil import which
from migen.fhdl.structure import _Fragment
from litex.build.generic_platform import Pins, IOStandard, Misc
from litex.build import tools
# IO/Placement Constraints (.qsf) ------------------------------------------------------------------
# Timing Constraints (.sdc) ------------------------------------------------------------------------
# Project (.qsf) -----------------------------------------------------------------------------------
# Script -------------------------------------------------------------------------------------------
# AlteraQuartusToolchain ---------------------------------------------------------------------------
| 38.433594 | 107 | 0.598536 |
1cab4f72005e2a4605f4cdeb62be5961ecba1542 | 336 | py | Python | arxiv/canonical/util.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | [
"MIT"
] | 5 | 2019-05-26T22:52:54.000Z | 2021-11-05T12:27:11.000Z | arxiv/canonical/util.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | [
"MIT"
] | 31 | 2019-06-24T13:51:25.000Z | 2021-11-12T22:27:10.000Z | arxiv/canonical/util.py | arXiv/arxiv-canonical | a758ed88a568f23a834288aed4dcf7039c1340cf | [
"MIT"
] | 4 | 2019-01-10T22:01:54.000Z | 2021-11-05T12:26:58.000Z | """Various helpers and utilities that don't belong anywhere else."""
from typing import Dict, Generic, TypeVar
KeyType = TypeVar('KeyType')
ValueType = TypeVar('ValueType')
| 28 | 68 | 0.720238 |
1cac1152c0bc42f93be158e0a7b59715a3e05f13 | 198 | py | Python | records/urls.py | Glucemy/Glucemy-back | c9fcf7996b3f13c67697aadd449e3e32afb1fa1b | [
"MIT"
] | null | null | null | records/urls.py | Glucemy/Glucemy-back | c9fcf7996b3f13c67697aadd449e3e32afb1fa1b | [
"MIT"
] | null | null | null | records/urls.py | Glucemy/Glucemy-back | c9fcf7996b3f13c67697aadd449e3e32afb1fa1b | [
"MIT"
] | null | null | null | from rest_framework.routers import DefaultRouter
from records.views import RecordViewSet
router = DefaultRouter()
router.register('', RecordViewSet, basename='records')
urlpatterns = router.urls
| 22 | 54 | 0.813131 |
1cac38aa4a5a8e636d6285190a3fb18a56c06114 | 10,831 | py | Python | polystores/stores/azure_store.py | polyaxon/polystores | 141789ef75622c80d1f3875cec6952ad3c2d5ec7 | [
"MIT"
] | 50 | 2018-12-10T14:46:12.000Z | 2021-11-03T16:38:58.000Z | polystores/stores/azure_store.py | polyaxon/polystores | 141789ef75622c80d1f3875cec6952ad3c2d5ec7 | [
"MIT"
] | 17 | 2019-01-21T14:14:30.000Z | 2019-08-23T20:39:07.000Z | polystores/stores/azure_store.py | polyaxon/polystores | 141789ef75622c80d1f3875cec6952ad3c2d5ec7 | [
"MIT"
] | 8 | 2019-01-21T14:52:37.000Z | 2019-07-29T19:53:12.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from rhea import RheaError
from rhea import parser as rhea_parser
from azure.common import AzureHttpError
from azure.storage.blob.models import BlobPrefix
from polystores.clients.azure_client import get_blob_service_connection
from polystores.exceptions import PolyaxonStoresException
from polystores.stores.base_store import BaseStore
from polystores.utils import append_basename, check_dirname_exists, get_files_in_current_directory
# pylint:disable=arguments-differ
| 35.864238 | 98 | 0.587942 |
1cad3cf72fd9e55c370708003b5cfc6962c4bf8e | 22,217 | py | Python | analysis/webservice/NexusHandler.py | dataplumber/nexus | f25a89e85eba098da9c6db1ff3d408dae8a6b310 | [
"Apache-2.0"
] | 23 | 2016-08-09T22:45:14.000Z | 2020-02-17T08:18:29.000Z | analysis/webservice/NexusHandler.py | lewismc/incubator-sdap-nexus | ff98fa346303431542b8391cc2a1bf7561d1bd03 | [
"Apache-2.0"
] | 6 | 2017-04-27T21:22:17.000Z | 2021-06-01T21:45:52.000Z | analysis/webservice/NexusHandler.py | dataplumber/nexus | f25a89e85eba098da9c6db1ff3d408dae8a6b310 | [
"Apache-2.0"
] | 5 | 2016-08-31T13:47:29.000Z | 2017-11-14T21:45:22.000Z | """
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import sys
import numpy as np
import logging
import time
import types
from datetime import datetime
from netCDF4 import Dataset
from nexustiles.nexustiles import NexusTileService
from webservice.webmodel import NexusProcessingException
AVAILABLE_HANDLERS = []
AVAILABLE_INITIALIZERS = []
DEFAULT_PARAMETERS_SPEC = {
"ds": {
"name": "Dataset",
"type": "string",
"description": "One or more comma-separated dataset shortnames"
},
"minLat": {
"name": "Minimum Latitude",
"type": "float",
"description": "Minimum (Southern) bounding box Latitude"
},
"maxLat": {
"name": "Maximum Latitude",
"type": "float",
"description": "Maximum (Northern) bounding box Latitude"
},
"minLon": {
"name": "Minimum Longitude",
"type": "float",
"description": "Minimum (Western) bounding box Longitude"
},
"maxLon": {
"name": "Maximum Longitude",
"type": "float",
"description": "Maximum (Eastern) bounding box Longitude"
},
"startTime": {
"name": "Start Time",
"type": "long integer",
"description": "Starting time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"endTime": {
"name": "End Time",
"type": "long integer",
"description": "Ending time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"lowPassFilter": {
"name": "Apply Low Pass Filter",
"type": "boolean",
"description": "Specifies whether to apply a low pass filter on the analytics results"
},
"seasonalFilter": {
"name": "Apply Seasonal Filter",
"type": "boolean",
"description": "Specified whether to apply a seasonal cycle filter on the analytics results"
}
}
def _lon2ind(self,lon):
return int((lon-self._minLonCent)/self._lonRes)
def _ind2lat(self,y):
return self._minLatCent+y*self._latRes
def _ind2lon(self,x):
return self._minLonCent+x*self._lonRes
def _create_nc_file_time1d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 1
time_dim = len(a)
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("time", time_dim)
vals = rootgrp.createVariable(varname, "f4", dimensions=("time",),
fill_value=fill)
times = rootgrp.createVariable("time", "f4", dimensions=("time",))
vals[:] = [d['mean'] for d in a]
times[:] = [d['time'] for d in a]
if varunits is not None:
vals.units = varunits
times.units = 'seconds since 1970-01-01 00:00:00'
rootgrp.close()
def _create_nc_file_latlon2d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 2
lat_dim, lon_dim = a.shape
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("lat", lat_dim)
rootgrp.createDimension("lon", lon_dim)
vals = rootgrp.createVariable(varname, "f4",
dimensions=("lat","lon",),
fill_value=fill)
lats = rootgrp.createVariable("lat", "f4", dimensions=("lat",))
lons = rootgrp.createVariable("lon", "f4", dimensions=("lon",))
vals[:,:] = a
lats[:] = np.linspace(self._minLatCent,
self._maxLatCent, lat_dim)
lons[:] = np.linspace(self._minLonCent,
self._maxLonCent, lon_dim)
if varunits is not None:
vals.units = varunits
lats.units = "degrees north"
lons.units = "degrees east"
rootgrp.close()
def _create_nc_file(self, a, fname, varname, **kwargs):
self._create_nc_file_latlon2d(a, fname, varname, **kwargs)
def executeInitializers(config):
[wrapper.init(config) for wrapper in AVAILABLE_INITIALIZERS]
| 40.030631 | 165 | 0.508755 |
1cade1c54a41deec5844621516e8934dad9ba6ed | 2,602 | py | Python | utils/box/metric.py | ming71/SLA | 7024b093bc0d456b274314ebeae3bc500c2db65a | [
"MIT"
] | 9 | 2021-05-26T05:51:19.000Z | 2021-12-25T02:31:55.000Z | utils/box/metric.py | ming71/SLA | 7024b093bc0d456b274314ebeae3bc500c2db65a | [
"MIT"
] | 4 | 2021-09-17T11:24:20.000Z | 2022-03-16T02:07:33.000Z | utils/box/metric.py | ming71/SLA | 7024b093bc0d456b274314ebeae3bc500c2db65a | [
"MIT"
] | null | null | null | import numpy as np
from collections import defaultdict, Counter
from .rbbox_np import rbbox_iou
| 37.171429 | 117 | 0.563028 |
1caee980c9d28fcb7768f3cf4259dd89c12fcb4a | 5,186 | py | Python | app.py | winstonschroeder/setlistmanager | 3c177a8da4bd56049964076f6ead51e3fffff5fa | [
"MIT"
] | null | null | null | app.py | winstonschroeder/setlistmanager | 3c177a8da4bd56049964076f6ead51e3fffff5fa | [
"MIT"
] | null | null | null | app.py | winstonschroeder/setlistmanager | 3c177a8da4bd56049964076f6ead51e3fffff5fa | [
"MIT"
] | null | null | null |
import logging
import pygame
from app import *
from pygame.locals import *
from werkzeug.serving import run_simple
from web import webapp as w
import data_access as da
logging.basicConfig(filename='setlistmanager.log', level=logging.DEBUG)
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 128
| 35.040541 | 122 | 0.571732 |
1caf08d291951db640773cc4547ec6df82e53a36 | 4,488 | py | Python | sim_keypoints.py | Praznat/annotationmodeling | 014b8b94b2225f947691c18b26eb8a4b148d2c8a | [
"BSD-3-Clause"
] | 8 | 2020-05-03T20:01:03.000Z | 2021-12-20T12:24:34.000Z | sim_keypoints.py | Praznat/annotationmodeling | 014b8b94b2225f947691c18b26eb8a4b148d2c8a | [
"BSD-3-Clause"
] | 1 | 2021-11-19T02:33:19.000Z | 2021-12-28T03:22:33.000Z | sim_keypoints.py | Praznat/annotationmodeling | 014b8b94b2225f947691c18b26eb8a4b148d2c8a | [
"BSD-3-Clause"
] | 4 | 2020-05-04T15:04:57.000Z | 2021-11-04T18:14:26.000Z | import json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import simulation
from eval_functions import oks_score_multi
import utils | 43.153846 | 143 | 0.64951 |
1cb1d08525c852f3c763a5bfd0e70b7e85abb9c4 | 6,592 | py | Python | local/controller.py | Loptt/home-automation-system | f1878596905e022d1d626d485d1a29dc7212f480 | [
"MIT"
] | null | null | null | local/controller.py | Loptt/home-automation-system | f1878596905e022d1d626d485d1a29dc7212f480 | [
"MIT"
] | null | null | null | local/controller.py | Loptt/home-automation-system | f1878596905e022d1d626d485d1a29dc7212f480 | [
"MIT"
] | null | null | null | import requests
import time
import os
import sys
import json
import threading
from getpass import getpass
import schedule
import event as e
import configuration as c
import RPi.GPIO as GPIO
#SERVER_URL = "https://home-automation-289621.uc.r.appspot.com"
#SERVER_URL = "http://127.0.0.1:4747"
SERVER_URL = "http://192.168.11.117:4747"
pins = [2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 22, 23, 24, 27]
if __name__ == "__main__":
main()
| 29.168142 | 226 | 0.618174 |
1cb40b16f030cc0fc491e5ff712cd9ba3b6fe9c3 | 1,640 | py | Python | src/graphql_sqlalchemy/graphql_types.py | gzzo/graphql-sqlalchemy | 54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4 | [
"MIT"
] | 12 | 2020-06-11T18:17:46.000Z | 2021-11-23T04:23:59.000Z | src/graphql_sqlalchemy/graphql_types.py | gzzo/graphql-sqlalchemy | 54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4 | [
"MIT"
] | 9 | 2020-06-03T21:34:50.000Z | 2021-05-23T16:48:01.000Z | src/graphql_sqlalchemy/graphql_types.py | gzzo/graphql-sqlalchemy | 54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4 | [
"MIT"
] | 2 | 2020-07-02T09:59:30.000Z | 2021-04-13T19:28:48.000Z | from typing import Dict, Union
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLInputField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
GraphQLString,
)
from sqlalchemy import ARRAY, Boolean, Float, Integer
from sqlalchemy.dialects.postgresql import ARRAY as PGARRAY
from sqlalchemy.types import TypeEngine
| 32.8 | 116 | 0.739024 |
1cb410c38e7b086fc006f0a9169efd98fc6fc76d | 3,223 | py | Python | Knapsack.py | byterubpay/mininero1 | ea6b8017cdbab82011d7f329e7726cc52d1ef431 | [
"BSD-3-Clause"
] | 182 | 2016-02-05T18:33:09.000Z | 2022-03-23T12:31:54.000Z | Knapsack.py | byterubpay/mininero1 | ea6b8017cdbab82011d7f329e7726cc52d1ef431 | [
"BSD-3-Clause"
] | 81 | 2016-09-04T14:00:24.000Z | 2022-03-28T17:22:52.000Z | Knapsack.py | byterubpay/mininero1 | ea6b8017cdbab82011d7f329e7726cc52d1ef431 | [
"BSD-3-Clause"
] | 63 | 2016-02-05T19:38:06.000Z | 2022-03-07T06:07:46.000Z | import Crypto.Random.random as rand
import itertools
import math #for log
import sys
if len(sys.argv) > 2:
kk = 2
parts = 7
kk = rand.randint(1, int(parts / 4)) #how many sends to demand
fuzz = 1
decideAmounts(float(sys.argv[1]), float(sys.argv[2]), parts, kk, fuzz)
| 29.036036 | 99 | 0.559727 |
1cb4f5278643eda7e6d9e305ee74cda8346049cd | 14,601 | py | Python | drought_impact_forecasting/models/model_parts/Conv_Transformer.py | rudolfwilliam/satellite_image_forecasting | 164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc | [
"MIT"
] | 4 | 2021-12-16T18:32:01.000Z | 2021-12-28T15:57:27.000Z | drought_impact_forecasting/models/model_parts/Conv_Transformer.py | rudolfwilliam/satellite_image_forecasting | 164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc | [
"MIT"
] | null | null | null | drought_impact_forecasting/models/model_parts/Conv_Transformer.py | rudolfwilliam/satellite_image_forecasting | 164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc | [
"MIT"
] | 2 | 2021-10-05T15:01:47.000Z | 2021-12-28T15:57:14.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from .shared import Conv_Block
from ..utils.utils import zeros, mean_cube, last_frame, ENS
| 48.996644 | 184 | 0.617423 |
1cb668eb96e3db81034b5b4b90591cfcdc750510 | 2,325 | py | Python | tests/test_clients.py | rodrigoapereira/python-hydra-sdk | ea3d61ed6f7ef1bc1990c442548d21b10155d075 | [
"MIT"
] | null | null | null | tests/test_clients.py | rodrigoapereira/python-hydra-sdk | ea3d61ed6f7ef1bc1990c442548d21b10155d075 | [
"MIT"
] | null | null | null | tests/test_clients.py | rodrigoapereira/python-hydra-sdk | ea3d61ed6f7ef1bc1990c442548d21b10155d075 | [
"MIT"
] | null | null | null | # Copyright (C) 2017 O.S. Systems Software LTDA.
# This software is released under the MIT License
import unittest
from hydra import Hydra, Client
| 40.086207 | 77 | 0.67871 |
1cb6b746023f10a214f482d1a5a600bbf6962f4e | 4,097 | py | Python | test/PR_test/unit_test/backend/test_binary_crossentropy.py | Phillistan16/fastestimator | 54c9254098aee89520814ed54b6e6016b821424f | [
"Apache-2.0"
] | null | null | null | test/PR_test/unit_test/backend/test_binary_crossentropy.py | Phillistan16/fastestimator | 54c9254098aee89520814ed54b6e6016b821424f | [
"Apache-2.0"
] | null | null | null | test/PR_test/unit_test/backend/test_binary_crossentropy.py | Phillistan16/fastestimator | 54c9254098aee89520814ed54b6e6016b821424f | [
"Apache-2.0"
] | 1 | 2020-04-28T12:16:10.000Z | 2020-04-28T12:16:10.000Z | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
import tensorflow as tf
import torch
import fastestimator as fe
| 48.77381 | 115 | 0.608494 |
1cb6d29da4f211a81a8c27dc9b9e2bda5b85f6c6 | 619 | py | Python | ats_hex.py | kyeser/scTools | c4c7dee0c41c8afe1da6350243df5f9d9b929c7f | [
"MIT"
] | null | null | null | ats_hex.py | kyeser/scTools | c4c7dee0c41c8afe1da6350243df5f9d9b929c7f | [
"MIT"
] | null | null | null | ats_hex.py | kyeser/scTools | c4c7dee0c41c8afe1da6350243df5f9d9b929c7f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from scTools import interval, primeForm
from scTools.rowData import ats
from scTools.scData import *
# NOTE: Python 2 syntax (print statements with trailing commas for
# same-line output). Prints one table row per twelve-tone row in `ats`.
count = 1  # 1-based row counter shown in the first column
for w in ats:
    # Prime form of the first hexachord — assumes each row `w` has at
    # least 6 elements; TODO confirm against rowData.ats.
    prime = primeForm(w[0:6])
    print '%3d\t' % count,
    # Each pitch class of the row as a single uppercase hex digit.
    for x in w:
        print '%X' % x,
    print ' ',
    # Interval sequence of the row, also printed in hex.
    intervals = interval(w)
    for y in intervals:
        print '%X' % y,
    # Index of the hexachord's prime form within the sc6 set-class list.
    print '\t%2d\t' % sc6.index(prime),
    # Tag specific set classes; presumably AC/AT are combinatorial
    # properties of the hexachord — TODO confirm the intended labels.
    if prime == sc6[1] or prime == sc6[7] or prime == sc6[8] or \
            prime == sc6[20] or prime == sc6[32] or prime == sc6[35]:
        print 'AC'
    elif prime == sc6[17]:
        print 'AT'
    else:
        print
    count += 1
1cb7e53b2c17e731b27a68b654287de75f6d7775 | 1,042 | py | Python | src/precon/commands.py | Albert-91/precon | aaded1d6a5f743b3539ea46b19a37a7bf9930e05 | [
"MIT"
] | null | null | null | src/precon/commands.py | Albert-91/precon | aaded1d6a5f743b3539ea46b19a37a7bf9930e05 | [
"MIT"
] | null | null | null | src/precon/commands.py | Albert-91/precon | aaded1d6a5f743b3539ea46b19a37a7bf9930e05 | [
"MIT"
] | null | null | null | import asyncio
import click
from precon.devices_handlers.distance_sensor import show_distance as show_distance_func
from precon.remote_control import steer_vehicle, Screen
try:
import RPi.GPIO as GPIO
except (RuntimeError, ModuleNotFoundError):
import fake_rpi
GPIO = fake_rpi.RPi.GPIO
| 25.414634 | 87 | 0.690019 |
1cb813fdb41b3152ecad7b90bfbabd5c02323b45 | 57,607 | py | Python | midway.py | sjtichenor/midway-ford | 43bf8770f2edd483d7c27dede8b9ac1fb8f10152 | [
"MIT"
] | null | null | null | midway.py | sjtichenor/midway-ford | 43bf8770f2edd483d7c27dede8b9ac1fb8f10152 | [
"MIT"
] | null | null | null | midway.py | sjtichenor/midway-ford | 43bf8770f2edd483d7c27dede8b9ac1fb8f10152 | [
"MIT"
] | null | null | null | import csv
import string
import ftplib
import math
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import sqlite3
from lxml import html
import requests
import sys
import midwords
import facebook
import hd_images
import adwords_feeds
import sheets
import random
import sales_specials
import scrape
from pprint import pprint
from pyvirtualdisplay import Display
import locale
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
# Misc stuff
# FMC Dealer Scrapes
# Data stuff
if __name__ == '__main__':
main()
| 39.894044 | 537 | 0.591282 |
1cb871009f40d73e438998df7547b42738178c54 | 3,932 | py | Python | monolithe/generators/sdkgenerator.py | edwinfeener/monolithe | 0f024b2ec7d4c5a2229612280e5e559bf2667ba5 | [
"BSD-3-Clause"
] | 18 | 2015-06-24T18:35:20.000Z | 2022-01-19T19:04:00.000Z | monolithe/generators/sdkgenerator.py | edwinfeener/monolithe | 0f024b2ec7d4c5a2229612280e5e559bf2667ba5 | [
"BSD-3-Clause"
] | 63 | 2015-11-03T18:57:12.000Z | 2020-09-30T02:54:49.000Z | monolithe/generators/sdkgenerator.py | edwinfeener/monolithe | 0f024b2ec7d4c5a2229612280e5e559bf2667ba5 | [
"BSD-3-Clause"
] | 38 | 2015-10-23T19:04:44.000Z | 2021-06-04T08:13:33.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import os
import shutil
from monolithe.lib import Printer
from monolithe.generators.lib import Generator
from monolithe.generators.managers import MainManager, CLIManager, VanillaManager
from .sdkapiversiongenerator import SDKAPIVersionGenerator
| 44.179775 | 115 | 0.721261 |
1cb99804e820098ccccda4d6284924e807ceb66e | 1,787 | py | Python | rllab-taewoo/rllab/plotter/plotter.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | rllab-taewoo/rllab/plotter/plotter.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | rllab-taewoo/rllab/plotter/plotter.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | import atexit
import sys
if sys.version_info[0] == 2:
from Queue import Empty
else:
from queue import Empty
from multiprocessing import Process, Queue
from rllab.sampler.utils import rollout
import numpy as np
__all__ = [
'init_worker',
'init_plot',
'update_plot'
]
process = None
queue = None
| 24.479452 | 94 | 0.564633 |
1cbc8c259914408b1b3a8b596c9f92062c17a6d8 | 1,902 | py | Python | OpenCV/bookIntroCV_008_binarizacao.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | OpenCV/bookIntroCV_008_binarizacao.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | OpenCV/bookIntroCV_008_binarizacao.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | '''
Livro-Introduo-a-Viso-Computacional-com-Python-e-OpenCV-3
Repositrio de imagens
https://github.com/opencv/opencv/tree/master/samples/data
'''
import cv2
import numpy as np
from matplotlib import pyplot as plt
#import mahotas
# BGR color constants (OpenCV uses BGR channel order, not RGB).
VERMELHO = (0, 0, 255)   # red
VERDE = (0, 255, 0)      # green
AZUL = (255, 0, 0)       # blue
AMARELO = (0, 255, 255)  # yellow
BRANCO = (255,255,255)   # white
CIANO = (255, 255, 0)    # cyan
PRETO = (0, 0, 0)        # black

img = cv2.imread('ponte2.jpg') # flags: 1 = color, 0 = grayscale, -1 = unchanged
img = img[::2,::2] # downscale: keep every other row/column (quarter the pixels)

# --- Fixed-threshold binarization ---
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
suave = cv2.GaussianBlur(img, (7, 7), 0) # smooth first to suppress noise before thresholding
# NOTE: `bin` shadows the builtin of the same name; left unchanged here.
(T, bin) = cv2.threshold(suave, 160, 255, cv2.THRESH_BINARY)
(T, binI) = cv2.threshold(suave, 160, 255, cv2.THRESH_BINARY_INV)
'''
resultado = np.vstack([
np.hstack([suave, bin]),
np.hstack([binI, cv2.bitwise_and(img, img, mask = binI)])
])
'''
# 2x2 mosaic: grayscale, blurred, binary, inverted binary.
resultado = np.vstack([
    np.hstack([img, suave]),
    np.hstack([bin, binI])
])
cv2.imshow("Binarizao da imagem", resultado)
cv2.waitKey(0)

# --- Adaptive thresholding: mean vs. Gaussian neighborhood,
# 21x21 window, constant C=5 subtracted from the local statistic ---
bin1 = cv2.adaptiveThreshold(suave, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 21, 5)
bin2 = cv2.adaptiveThreshold(suave, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 21, 5)
resultado = np.vstack([
    np.hstack([img, suave]),
    np.hstack([bin1, bin2])
])
cv2.imshow("Binarizao adaptativa da imagem", resultado)
cv2.waitKey(0)

# --- Otsu and Riddler-Calvard thresholds (disabled: requires mahotas) ---
'''
T = mahotas.thresholding.otsu(suave)
temp = img.copy()
temp[temp > T] = 255
temp[temp < 255] = 0
temp = cv2.bitwise_not(temp)
T = mahotas.thresholding.rc(suave)
temp2 = img.copy()
temp2[temp2 > T] = 255
temp2[temp2 < 255] = 0
temp2 = cv2.bitwise_not(temp2)
resultado = np.vstack([
np.hstack([img, suave]),
np.hstack([temp, temp2])
])
cv2.imshow("Binarizao com mtodo Otsu e Riddler-Calvard", resultado)
cv2.waitKey(0)
'''
| 24.384615 | 102 | 0.684017 |
1cbebb0ca1313739b0fe47f6d54aaa9f17675ecf | 1,949 | py | Python | djangito/backends.py | mechanicbuddy/djangito | 07c08a83c57577cbf945bba461219bc0ef2a7695 | [
"Apache-2.0"
] | null | null | null | djangito/backends.py | mechanicbuddy/djangito | 07c08a83c57577cbf945bba461219bc0ef2a7695 | [
"Apache-2.0"
] | null | null | null | djangito/backends.py | mechanicbuddy/djangito | 07c08a83c57577cbf945bba461219bc0ef2a7695 | [
"Apache-2.0"
] | null | null | null | import base64
import json
import jwt
import requests
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
USER_MODEL = get_user_model()
| 29.984615 | 75 | 0.626988 |
1cbf5ae6b77e5700645e93821c03cc92778db151 | 11,306 | py | Python | data_profiler/labelers/regex_model.py | gme5078/data-profiler | 602cc5e4f4463f9b807000abf3893815918d0723 | [
"Apache-2.0"
] | null | null | null | data_profiler/labelers/regex_model.py | gme5078/data-profiler | 602cc5e4f4463f9b807000abf3893815918d0723 | [
"Apache-2.0"
] | null | null | null | data_profiler/labelers/regex_model.py | gme5078/data-profiler | 602cc5e4f4463f9b807000abf3893815918d0723 | [
"Apache-2.0"
] | null | null | null | import json
import os
import sys
import re
import copy
import numpy as np
from data_profiler.labelers.base_model import BaseModel
from data_profiler.labelers.base_model import AutoSubRegistrationMeta
_file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_file_dir)
| 38.719178 | 81 | 0.523527 |
1cbf73995d2a6d71959f99c6cb216fdecd75b4e3 | 1,693 | py | Python | taller_estructuras_de_control_selectivas/ejercicio_13.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | taller_estructuras_de_control_selectivas/ejercicio_13.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | taller_estructuras_de_control_selectivas/ejercicio_13.py | JMosqueraM/algoritmos_y_programacion | 30dc179b976f1db24401b110496250fbcb98938e | [
"MIT"
] | null | null | null | # Desarrolle un un programa que reciba la fecha de nacimiento
# de una persona, y como salida, indique el nombre del signo del
# zodiaco correspondiente, ademas de su edad
# Read a birth date, report the zodiac sign (via `zodiaco`, defined
# elsewhere in this file) and the person's current age in years.
from datetime import date

fecha_str = input("Ingrese la fecha de nacimiento (DD/MM/AAAA): ")

# Split "DD/MM/AAAA" into its three integer components.
dia, mes, ano = (int(parte) for parte in fecha_str.split("/"))

signo = zodiaco(dia, mes)

# Age relative to today's date, subtracting one year when this year's
# birthday has not happened yet. Replaces the previous hard-coded
# `abs(ano - 2021)`, which went stale after 2021 and ignored the
# birthday month/day.
hoy = date.today()
edad = hoy.year - ano - ((hoy.month, hoy.day) < (mes, dia))

print(f"Siendo que su fecha de nacimiento es {fecha_str}, su signo zodiacal corresponde a {signo} y tiene {edad} aos")
1cc05adcc568b6fb2373878d0e0ebc62065ed391 | 5,110 | py | Python | assignment3/crawler/spiders/benchmark_spider.py | vhazali/cs5331 | 3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f | [
"MIT"
] | 8 | 2020-02-22T12:47:12.000Z | 2021-12-03T11:39:19.000Z | assignment3/crawler/spiders/benchmark_spider.py | vhazali/cs5331 | 3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f | [
"MIT"
] | null | null | null | assignment3/crawler/spiders/benchmark_spider.py | vhazali/cs5331 | 3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f | [
"MIT"
] | 4 | 2018-08-15T12:58:36.000Z | 2021-12-29T07:06:29.000Z | import re, scrapy
from crawler.items import * | 33.181818 | 218 | 0.520939 |
1cc17ff4c766e10f3ad5dd61384738c26b148c2c | 22,005 | py | Python | octavia_tempest_plugin/services/load_balancer/v2/listener_client.py | NeCTAR-RC/octavia-tempest-plugin | 5506c00b8d8972e6223499dd5a5da4c85c1ff836 | [
"Apache-2.0"
] | null | null | null | octavia_tempest_plugin/services/load_balancer/v2/listener_client.py | NeCTAR-RC/octavia-tempest-plugin | 5506c00b8d8972e6223499dd5a5da4c85c1ff836 | [
"Apache-2.0"
] | null | null | null | octavia_tempest_plugin/services/load_balancer/v2/listener_client.py | NeCTAR-RC/octavia-tempest-plugin | 5506c00b8d8972e6223499dd5a5da4c85c1ff836 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_serialization import jsonutils
from tempest import config
from octavia_tempest_plugin.services.load_balancer.v2 import base_client
CONF = config.CONF
Unset = base_client.Unset
| 58.836898 | 79 | 0.578278 |
1cc2858f2edc96485c6ae59c62afeb79423f9cef | 1,180 | py | Python | ryu/gui/views/router_address_delete.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | 3 | 2019-04-23T11:11:46.000Z | 2020-11-04T20:14:17.000Z | ryu/gui/views/router_address_delete.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | null | null | null | ryu/gui/views/router_address_delete.py | isams1/Thesis | dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3 | [
"Apache-2.0"
] | 3 | 2019-10-03T09:31:42.000Z | 2021-05-15T04:41:12.000Z | import re
import logging
import httplib
import view_base
from models import rt_proxy
LOG = logging.getLogger('ryu.gui') | 26.818182 | 79 | 0.594915 |
1cc4d424336a53cdc71bcc2c504f86ad318db561 | 361 | py | Python | tests/util/test_helper.py | TobiasRasbold/pywrangler | 3f4ba5891a75430e0882b223bda4c6c7f55dbd51 | [
"MIT"
] | 14 | 2019-04-08T21:14:50.000Z | 2022-02-12T18:58:48.000Z | tests/util/test_helper.py | TobiasRasbold/pywrangler | 3f4ba5891a75430e0882b223bda4c6c7f55dbd51 | [
"MIT"
] | 27 | 2019-03-15T12:35:29.000Z | 2020-07-10T10:31:38.000Z | tests/util/test_helper.py | TobiasRasbold/pywrangler | 3f4ba5891a75430e0882b223bda4c6c7f55dbd51 | [
"MIT"
] | 3 | 2019-11-20T11:18:06.000Z | 2021-07-26T04:50:00.000Z | """This module contains tests for the helper module.
"""
from pywrangler.util.helper import get_param_names
| 18.05 | 54 | 0.609418 |
1cc5fd243fb313db4d6da43f6b24a969983fb154 | 964 | py | Python | Python-Files/model_conversion/convert_to_tflite.py | jcgeo9/ML-For-Fish-Recognition | 0b5faba77d0b2c5452950637f047882c80fa6fb7 | [
"Apache-2.0"
] | null | null | null | Python-Files/model_conversion/convert_to_tflite.py | jcgeo9/ML-For-Fish-Recognition | 0b5faba77d0b2c5452950637f047882c80fa6fb7 | [
"Apache-2.0"
] | null | null | null | Python-Files/model_conversion/convert_to_tflite.py | jcgeo9/ML-For-Fish-Recognition | 0b5faba77d0b2c5452950637f047882c80fa6fb7 | [
"Apache-2.0"
] | null | null | null | # =============================================================================
# Created By : Giannis Kostas Georgiou
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File in order to convert saved models to .tflite instances.
# To be used after the desired model are trained and saved
# How to use : Replace variables in CAPS according to needs of the dataset
# =============================================================================
import tensorflow as tf

# Placeholders — replace with the real SavedModel directory and the
# base name (no extension) for the generated .tflite file.
model_path = 'PATH TO SAVED MODEL'
tflite_model_name = 'NAME OF THE NEWLY CREATED TFLITE MODEL'

# Load the SavedModel into a TFLite converter and run the conversion,
# producing the serialized flatbuffer bytes.
saved_model_converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
converted_flatbuffer = saved_model_converter.convert()

# Persist the converted model as "<name>.tflite".
with open(tflite_model_name + '.tflite', 'wb') as output_file:
    output_file.write(converted_flatbuffer)
| 43.818182 | 79 | 0.572614 |
1cc629a630efb8f26ff269373c402c157da69af1 | 2,283 | py | Python | python3/sparkts/test/test_datetimeindex.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | null | null | null | python3/sparkts/test/test_datetimeindex.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | null | null | null | python3/sparkts/test/test_datetimeindex.py | hedibejaoui/spark-timeseries | 9112dcbbba4e095b5eb46c568e1c72e13e1f251a | [
"Apache-2.0"
] | 1 | 2021-09-05T15:05:53.000Z | 2021-09-05T15:05:53.000Z | from .test_utils import PySparkTestCase
from sparkts.datetimeindex import *
import pandas as pd
| 45.66 | 90 | 0.654402 |
1cc943b894a8b8ca43a398705ed5a7c52cece87e | 492 | py | Python | src/listIntersect/inter.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/listIntersect/inter.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | null | null | null | src/listIntersect/inter.py | rajitbanerjee/leetcode | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | [
"CC0-1.0"
] | 1 | 2021-04-28T18:17:55.000Z | 2021-04-28T18:17:55.000Z | # Definition for singly-linked list.
| 21.391304 | 80 | 0.530488 |
1ccaa4cf179ca9984d4a2effe3502e46bd80d7d5 | 1,214 | py | Python | photon_stream_production/tests/test_drs_run_assignment.py | fact-project/photon_stream_production | ca2f946976c9a9717cfcd9364f2361ef385b45aa | [
"MIT"
] | null | null | null | photon_stream_production/tests/test_drs_run_assignment.py | fact-project/photon_stream_production | ca2f946976c9a9717cfcd9364f2361ef385b45aa | [
"MIT"
] | 2 | 2019-01-17T12:11:27.000Z | 2019-02-27T14:51:05.000Z | photon_stream_production/tests/test_drs_run_assignment.py | fact-project/photon_stream_production | ca2f946976c9a9717cfcd9364f2361ef385b45aa | [
"MIT"
] | null | null | null | import numpy as np
import photon_stream as ps
import photon_stream_production as psp
import pkg_resources
import os
runinfo_path = pkg_resources.resource_filename(
'photon_stream_production',
os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)
drs_fRunID_for_obs_run = psp.drs_run._drs_fRunID_for_obs_run
| 30.35 | 74 | 0.660626 |
1ccbfd84ed556c53c45b775bad63d6e98f029035 | 2,444 | py | Python | accounts/migrations/0001_initial.py | vikifox/CMDB | bac9b7da204c3eee344f55bb2187df38ef3b3d4c | [
"Apache-2.0"
] | 16 | 2020-08-13T04:28:50.000Z | 2021-06-10T06:24:51.000Z | accounts/migrations/0001_initial.py | vikifox/CMDB | bac9b7da204c3eee344f55bb2187df38ef3b3d4c | [
"Apache-2.0"
] | 1 | 2019-04-15T07:01:42.000Z | 2019-04-15T07:01:42.000Z | accounts/migrations/0001_initial.py | vikifox/CMDB | bac9b7da204c3eee344f55bb2187df38ef3b3d4c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 40.733333 | 128 | 0.576105 |
1ccca623d7f5e702eea65074c02fe6486e238208 | 10,450 | py | Python | autoscaler/azure.py | gabrieladt/kops-ec2-autoscaler | 8b90fa23caaacf9cf0a4310b65667769906af777 | [
"MIT"
] | null | null | null | autoscaler/azure.py | gabrieladt/kops-ec2-autoscaler | 8b90fa23caaacf9cf0a4310b65667769906af777 | [
"MIT"
] | null | null | null | autoscaler/azure.py | gabrieladt/kops-ec2-autoscaler | 8b90fa23caaacf9cf0a4310b65667769906af777 | [
"MIT"
] | 1 | 2019-07-08T07:06:27.000Z | 2019-07-08T07:06:27.000Z | import http
import logging
from typing import List, Tuple, MutableMapping
from datetime import datetime
import re
from requests.packages.urllib3 import Retry
import autoscaler.utils as utils
from autoscaler.autoscaling_groups import AutoScalingGroup
from autoscaler.azure_api import AzureApi, AzureScaleSet, AzureScaleSetInstance
from autoscaler.utils import TransformingFuture, AllCompletedFuture, CompletedFuture
logger = logging.getLogger(__name__)
_RETRY_TIME_LIMIT = 30
_CLASS_PAT = re.compile(r'\w+_(?P<class>[A-Z]+).+')
_SCALE_SET_SIZE_LIMIT = 100
# Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts.
| 40.980392 | 150 | 0.655598 |
1cce392f19d05213758223c7be8ae0c890defc93 | 772 | py | Python | sort_insertion.py | rachitmishra/45 | c38650f4fa2ea1857848b95320cdc37929b39197 | [
"MIT"
] | null | null | null | sort_insertion.py | rachitmishra/45 | c38650f4fa2ea1857848b95320cdc37929b39197 | [
"MIT"
] | null | null | null | sort_insertion.py | rachitmishra/45 | c38650f4fa2ea1857848b95320cdc37929b39197 | [
"MIT"
] | null | null | null | """
Insertion Sort
Approach: Loop
Complexity: O(n2)
"""
if __name__ == '__main__':
    # Demo entry point: sort a fixed sample list in place using the
    # insertion sort defined elsewhere in this file.
    arr = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
    sort_insertion(arr)
| 22.057143 | 56 | 0.443005 |
1ccedf375dff61d5b7747bbbaf81aa8a41e6f3f6 | 1,780 | py | Python | Python2/tareas/tarea_7.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | null | null | null | Python2/tareas/tarea_7.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | null | null | null | Python2/tareas/tarea_7.py | eveiramirez/python_class | 7a3830cc92dc842b853b243c6b01e06993faa97e | [
"MIT"
] | 3 | 2021-04-09T19:12:15.000Z | 2021-08-24T18:24:58.000Z | """
NAME
tarea_7.py
VERSION
[1.0]
AUTHOR
Ignacio Emmanuel Ramirez Bernabe
CONTACT
iramirez@lcg.unam.mx
GITHUB
https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py
DESCRIPTION
Este programa contiene arrays estructurados para los arrays
creados en el ejercicio 1, los cuales son:
Produccion
Costos
Costos por g/L
CATEGORY
Numpy
"""
import numpy as np
# Structured array: per-gene production under the two temperature
# conditions (name, condition 1, condition 2).
production = np.array(
    [("Gen1", 5, 3), ("Gen2", 11, 7), ("Gen3", 4, 9), ("Gen4", 2, 6)],
    dtype=[
        ("name", (np.str_, 10)),
        ("production_cond1", np.int32),
        ("production_cond2", np.int32),
    ],
)

# Structured array: induction cost for each gene.
costs = np.array(
    [("Gen1", 3.5), ("Gen2", 5), ("Gen3", 7), ("Gen4", 4.3)],
    dtype=[("name", (np.str_, 10)), ("cost", np.float64)],
)

# Production per unit cost (g/L per cost) for each condition.
pc_cond1 = production["production_cond1"] / costs["cost"]
pc_cond2 = production["production_cond2"] / costs["cost"]

# One (name, cost-ratio condition 1, cost-ratio condition 2) tuple
# per gene, built with a comprehension instead of an explicit loop.
gene_list = [
    (f"Gen{idx + 1}", pc_cond1[idx], pc_cond2[idx]) for idx in range(4)
]

# Structured array combining both per-cost ratios.
prod_costs = np.array(
    gene_list,
    dtype=[
        ("name", (np.str_, 10)),
        ("pc_cond1", np.float64),
        ("pc_cond2", np.float64),
    ],
)

# Show the resulting cost-ratio table.
print(prod_costs)
| 29.180328 | 89 | 0.567978 |
1ccf03aa9b400d7a3b6f76334d043ce47040c33d | 11,857 | py | Python | iguanas/pipeline/_base_pipeline.py | paypal/Iguanas | 166ea81b7d370eb4281a27aa449719ed1d38a74a | [
"Apache-2.0"
] | 20 | 2021-12-22T14:15:03.000Z | 2022-03-31T22:46:42.000Z | iguanas/pipeline/_base_pipeline.py | paypal/Iguanas | 166ea81b7d370eb4281a27aa449719ed1d38a74a | [
"Apache-2.0"
] | 12 | 2022-01-18T16:55:56.000Z | 2022-03-10T11:39:39.000Z | iguanas/pipeline/_base_pipeline.py | paypal/Iguanas | 166ea81b7d370eb4281a27aa449719ed1d38a74a | [
"Apache-2.0"
] | 5 | 2021-12-25T07:28:29.000Z | 2022-02-23T09:40:03.000Z | """
Base pipeline class. Main rule generator classes inherit from this one.
"""
from copy import deepcopy
from typing import List, Tuple, Union, Dict
from iguanas.pipeline.class_accessor import ClassAccessor
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
import iguanas.utils.utils as utils
from iguanas.exceptions import DataFrameSizeError
| 40.606164 | 112 | 0.580417 |
1cd008f314f201433a589af299e0dc00308ca8c5 | 6,306 | py | Python | test_activity_merger.py | AlexanderMakarov/activitywatch-ets | 36e5ac92c7834b9515a54c5d633ae5e45d6928bc | [
"MIT"
] | null | null | null | test_activity_merger.py | AlexanderMakarov/activitywatch-ets | 36e5ac92c7834b9515a54c5d633ae5e45d6928bc | [
"MIT"
] | null | null | null | test_activity_merger.py | AlexanderMakarov/activitywatch-ets | 36e5ac92c7834b9515a54c5d633ae5e45d6928bc | [
"MIT"
] | null | null | null | import unittest
import datetime
from parameterized import parameterized
from activity_merger import Interval
from aw_core.models import Event
from typing import List, Tuple
def build_intervals_linked_list(data: List[Tuple[int, bool, int]]) -> Interval:
    """
    Builds a doubly-referenced linked list of `Interval`-s from tuples.
    Parameters are not validated.
    :param data: List of tuples (day of start, flag marking the interval to
        return from the function, duration).
    :return: Chosen interval.
    """
    chosen = None
    tail = None
    for seed, is_target, duration in data:
        start = _build_datetime(seed)
        end = _build_datetime(seed + duration)
        if not tail:
            node = Interval(start, end)
        else:
            node = Interval(start, end, tail)
            tail.next = node
        tail = node
        if is_target:
            assert chosen is None, f"Wrong parameters - '{seed}' interval is marked as result but is not first."
            chosen = tail
    return chosen
if __name__ == '__main__':
unittest.main()
| 30.172249 | 112 | 0.482873 |
1cd0df6aa8a1e2d70124b017898c86056e7b29dd | 4,526 | py | Python | pommerman/agents/player_agent.py | alekseynp/playground | 523cc924fe9fd269a8eb3e29c45ace1c5c85b12c | [
"Apache-2.0"
] | 8 | 2019-06-11T16:08:25.000Z | 2020-10-28T09:03:53.000Z | pommerman/agents/player_agent.py | alekseynp/playground | 523cc924fe9fd269a8eb3e29c45ace1c5c85b12c | [
"Apache-2.0"
] | 1 | 2019-06-21T03:57:35.000Z | 2019-06-21T03:57:35.000Z | pommerman/agents/player_agent.py | alekseynp/playground | 523cc924fe9fd269a8eb3e29c45ace1c5c85b12c | [
"Apache-2.0"
] | 1 | 2018-03-21T15:21:52.000Z | 2018-03-21T15:21:52.000Z | """
NOTE:
There are a few minor complications to fluid human control which make this
code a little more involved than trivial.
1. Key press-release cycles can be, and often are, faster than one tick of
the game/simulation, but the player still wants that cycle to count, i.e.
to lay a bomb!
2. When holding down a key, the player expects that action to be repeated,
at least after a slight delay.
3. But when holding a key down (say, move left) and simultaneously doing a
quick press-release cycle (put a bomb), we want the held-down key to keep
being executed, but the cycle should have happened in-between.
The way we solve this problem is by separating key-state and actions-to-do.
We hold the actions that need be executed in a queue (`self._action_q`) and
a state for all considered keys.
1. When a key is pressed down, we note the time and mark it as down.
2. If it is released quickly thereafter, before a game tick could happen,
we add its action into the queue. This often happens when putting bombs.
3. If it's still pressed down as we enter a game tick, we do some math to see
if it's time for a "repeat" event and, if so, push an action to the queue.
4. Just work off one item from the queue each tick.
This way, the input is "natural" and things like dropping a bomb while doing
a diagonal walk from one end to the other "just work".
"""
from time import time
from . import BaseAgent
from .. import characters
REPEAT_DELAY = 0.2 # seconds
REPEAT_INTERVAL = 0.1
| 33.776119 | 98 | 0.614671 |
1cd0eac7f0e61913c8d825507589abb58c69759a | 14,852 | py | Python | tests/rest/test_rest.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 93 | 2015-02-11T01:41:22.000Z | 2022-02-03T22:55:57.000Z | tests/rest/test_rest.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 112 | 2015-02-23T22:20:29.000Z | 2022-03-22T21:46:52.000Z | tests/rest/test_rest.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 61 | 2015-02-22T01:34:01.000Z | 2022-01-19T09:50:21.000Z | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import http.client
import os
import pytest
import random
import string
import time
import xml.etree.ElementTree as ET
import logging
from cobra.internal.codec.jsoncodec import toJSONStr, fromJSONStr
from cobra.internal.codec.xmlcodec import toXMLStr, fromXMLStr
import cobra.mit.access
import cobra.mit.request
import cobra.mit.session
cobra = pytest.importorskip("cobra")
cobra.model = pytest.importorskip("cobra.model")
cobra.model.fv = pytest.importorskip("cobra.model.fv")
import cobra.model.pol
import cobra.model.infra
import cobra.services
pytestmark = pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="You must specify at least one --apic " +
"option on the CLI")
slow = pytest.mark.slow
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig(level=logging.DEBUG)
fakeDevicePackageZip = 'Archive.zip'
realDevicePackageZip = 'asa-device-pkg.zip'
| 36.581281 | 99 | 0.606181 |
1cd0fd2e907a405a13689ee31a56a04909e02b9c | 555 | py | Python | spanglish/tests/fixtures/models/language.py | omaraljazairy/FedalAPI | 2be0a19bb2629be9e2a0477f99477e4bfbd8901e | [
"MIT"
] | null | null | null | spanglish/tests/fixtures/models/language.py | omaraljazairy/FedalAPI | 2be0a19bb2629be9e2a0477f99477e4bfbd8901e | [
"MIT"
] | null | null | null | spanglish/tests/fixtures/models/language.py | omaraljazairy/FedalAPI | 2be0a19bb2629be9e2a0477f99477e4bfbd8901e | [
"MIT"
] | null | null | null | """ fixtures that return an sql statement with a list of values to be inserted."""
def load_language():
""" return the sql and values of the insert queuery."""
sql = """
INSERT INTO Spanglish_Test.Language
(
`name`, `iso-639-1`
)
VALUES (%s, %s)
"""
values = [
(
'English', 'EN'
),
(
'Spanish', 'ES'
),
(
'Dutch', 'NL'
)
]
return {
'sql': sql,
'values': values
}
| 19.137931 | 82 | 0.405405 |
1cd1aa4b57039ede6d30d90d5b70dc7281d0f585 | 9,693 | py | Python | main-hs2.py | tradewartracker/phase-one-product-hs2 | 38dd328a8211695c31f09a34832535dc2c82a5c2 | [
"MIT"
] | null | null | null | main-hs2.py | tradewartracker/phase-one-product-hs2 | 38dd328a8211695c31f09a34832535dc2c82a5c2 | [
"MIT"
] | null | null | null | main-hs2.py | tradewartracker/phase-one-product-hs2 | 38dd328a8211695c31f09a34832535dc2c82a5c2 | [
"MIT"
] | null | null | null | import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# Alot of this was built of this "cross-fire demo"
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
#################################################################################
# Then this makes the simple plots:
# This part is still not clear to me. but it tells it what to update and where to put it
# so it updates the layout and [0] is the first option (see below there is a row with the
# first entry the plot, then the controls)
level_select = Select(value=level, title='Tranformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates teh selection object
product_select.on_change('value', update_plot)
# Change the value upone selection via the update plot
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
| 37.280769 | 400 | 0.613123 |
1cd3b57ef203189fa0937ba41bdb1a37dbdad462 | 2,223 | py | Python | aiohttp_middlewares/https.py | alxpy/aiohttp-middlewares | 377740d21cdaf3142523eb81b0cee4c6dd01f6b5 | [
"BSD-3-Clause"
] | 34 | 2017-05-14T11:31:41.000Z | 2022-03-24T06:07:31.000Z | aiohttp_middlewares/https.py | alxpy/aiohttp-middlewares | 377740d21cdaf3142523eb81b0cee4c6dd01f6b5 | [
"BSD-3-Clause"
] | 77 | 2017-10-20T19:40:59.000Z | 2022-03-01T05:07:36.000Z | aiohttp_middlewares/https.py | alxpy/aiohttp-middlewares | 377740d21cdaf3142523eb81b0cee4c6dd01f6b5 | [
"BSD-3-Clause"
] | 2 | 2019-11-06T12:45:33.000Z | 2021-11-24T14:55:28.000Z | """
================
HTTPS Middleware
================
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import https_middleware
# Basic usage
app = web.Application(middlewares=[https_middleware()])
# Specify custom headers to match, not `X-Forwarded-Proto: https`
app = web.Application(
middlewares=https_middleware({"Forwarded": "https"})
)
"""
import logging
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrStr
DEFAULT_MATCH_HEADERS = {"X-Forwarded-Proto": "https"}
logger = logging.getLogger(__name__)
def https_middleware(match_headers: DictStrStr = None) -> _Middleware:
"""
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
This middleware is required to use, when your aiohttp app deployed behind
nginx with HTTPS enabled, after aiohttp discounted
``secure_proxy_ssl_header`` keyword argument in
https://github.com/aio-libs/aiohttp/pull/2299.
:param match_headers:
Dict of header(s) from reverse proxy to specify that aiohttp run behind
HTTPS. By default:
.. code-block:: python
{"X-Forwarded-Proto": "https"}
"""
return middleware
| 25.848837 | 79 | 0.645524 |
1cd41a80f04199f3be841ce38a8ac4428c343606 | 6,620 | py | Python | show/drawing.py | nohamanona/poke-auto-fuka | 9d355694efa0168738795afb403fc89264dcaeae | [
"Apache-2.0"
] | 5 | 2019-12-31T18:38:52.000Z | 2021-01-07T08:57:17.000Z | show/drawing.py | nohamanona/poke-auto-fuka | 9d355694efa0168738795afb403fc89264dcaeae | [
"Apache-2.0"
] | null | null | null | show/drawing.py | nohamanona/poke-auto-fuka | 9d355694efa0168738795afb403fc89264dcaeae | [
"Apache-2.0"
] | 1 | 2020-03-03T08:14:47.000Z | 2020-03-03T08:14:47.000Z | import cv2
import numpy as np
| 51.317829 | 109 | 0.540483 |
1cd5217ab9022ac6fb992de8575b10b6f886806f | 1,452 | py | Python | backtest.py | YangTaoCN/IntroNeuralNetworks | 45b0311f85c9cdd9d3f0806e0059201e2655697f | [
"MIT"
] | null | null | null | backtest.py | YangTaoCN/IntroNeuralNetworks | 45b0311f85c9cdd9d3f0806e0059201e2655697f | [
"MIT"
] | null | null | null | backtest.py | YangTaoCN/IntroNeuralNetworks | 45b0311f85c9cdd9d3f0806e0059201e2655697f | [
"MIT"
] | null | null | null | import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
:param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
x = np.array(stock_data.iloc[i: i + seq_len, 1]).reshape(dim) / 200
y = np.array(stock_data.iloc[i + seq_len + 1, 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
# print(errors)
| 40.333333 | 111 | 0.669421 |
1cd64e7eef2ac9aae41c0784aa1ab81588c6d2ef | 2,278 | py | Python | src/tespy/components/subsystems.py | jbueck/tespy | dd7a2633ce12f33b4936ae902f4fe5df29191690 | [
"MIT"
] | null | null | null | src/tespy/components/subsystems.py | jbueck/tespy | dd7a2633ce12f33b4936ae902f4fe5df29191690 | [
"MIT"
] | null | null | null | src/tespy/components/subsystems.py | jbueck/tespy | dd7a2633ce12f33b4936ae902f4fe5df29191690 | [
"MIT"
] | null | null | null | # -*- coding: utf-8
"""Module for custom component groups.
It is possible to create subsystems of component groups in tespy. The subsystem
class is the base class for custom subsystems.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/subsystems.py
SPDX-License-Identifier: MIT
"""
import logging
# %%
| 25.311111 | 79 | 0.579017 |
1cd7fdf07b75be54fc81ee90365afd1023ab4167 | 7,940 | py | Python | fairscale/optim/oss.py | blefaudeux/fairscale | aa5850107a37c7d5644b6079516e7ae1079ff5e8 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T22:30:36.000Z | 2020-07-23T22:30:36.000Z | fairscale/optim/oss.py | blefaudeux/fairscale | aa5850107a37c7d5644b6079516e7ae1079ff5e8 | [
"BSD-3-Clause"
] | null | null | null | fairscale/optim/oss.py | blefaudeux/fairscale | aa5850107a37c7d5644b6079516e7ae1079ff5e8 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
import torch
import torch.distributed as dist
from torch.optim import SGD, Optimizer
from .utils import broadcast_object, recursive_copy_to_device
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
| 37.990431 | 116 | 0.614987 |
1cd87eec694df1d23bb94dc59dabf36d48ef6f7d | 445 | py | Python | setup.py | ninezerozeronine/raytracing-one-weekend | 22ca36dcec679cbd78a7711734ca22e01ef06ef2 | [
"MIT"
] | null | null | null | setup.py | ninezerozeronine/raytracing-one-weekend | 22ca36dcec679cbd78a7711734ca22e01ef06ef2 | [
"MIT"
] | null | null | null | setup.py | ninezerozeronine/raytracing-one-weekend | 22ca36dcec679cbd78a7711734ca22e01ef06ef2 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="raytracing-one-weekend",
version="0.0.0",
author="Andy Palmer",
author_email="contactninezerozeronine@gmail.com",
description="A raytracer achievable in a weekend.",
url="https://github.com/ninezerozeronine/raytracing-one-weekend",
install_requires=[
"Pillow",
"numpy",
],
packages=find_packages('src'),
package_dir={'': 'src'},
)
| 26.176471 | 69 | 0.662921 |
1cd93bcec91ffc966a787c6dda07671b2cad8b23 | 603 | py | Python | homepage/urls.py | r0kym/SNI-backend | 5fdc25df21846fadb313d439acba73782a6248c3 | [
"MIT"
] | 1 | 2021-06-03T22:07:24.000Z | 2021-06-03T22:07:24.000Z | homepage/urls.py | r0kym/SNI-backend | 5fdc25df21846fadb313d439acba73782a6248c3 | [
"MIT"
] | 1 | 2020-07-19T11:10:22.000Z | 2020-07-19T11:10:22.000Z | homepage/urls.py | r0kym/SNI-backend | 5fdc25df21846fadb313d439acba73782a6248c3 | [
"MIT"
] | 2 | 2020-07-02T12:05:03.000Z | 2020-07-02T18:34:39.000Z | """
URLconf of the homepage
"""
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('auth', views.auth, name='auth'),
path('auth/public', views.auth_public, name='auth-public'),
path('auth/full', views.auth_full, name='auth-full'),
path('auth/invite', views.auth_invite, name='auth-invite'),
path('callback/sni', views.sni_callback, name='sni_callback'),
path('logout', views.logout, name='logout'),
path('403', views.no_perm, name='no-permission'),
path('404', views.not_found, name='not-found'),
]
| 27.409091 | 66 | 0.656716 |
1cd940fc315fde5b1737f292edb3bdacd8fa4aa7 | 3,058 | py | Python | srcflib/email/__init__.py | mas90/srcf-python | 09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0 | [
"MIT"
] | null | null | null | srcflib/email/__init__.py | mas90/srcf-python | 09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0 | [
"MIT"
] | null | null | null | srcflib/email/__init__.py | mas90/srcf-python | 09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0 | [
"MIT"
] | null | null | null | """
Notification email machinery, for tasks to send credentials and instructions to users.
Email templates placed inside the `templates` directory of this module should:
- extend from `layout`
- provide `subject` and `body` blocks
"""
from enum import Enum
import os.path
from jinja2 import Environment, FileSystemLoader
from sqlalchemy.orm import Session as SQLASession
from srcf.database import Member, Society
from srcf.mail import send_mail
from ..plumbing import Owner, owner_desc, owner_name, owner_website
ENV = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
trim_blocks=True, lstrip_blocks=True)
ENV.filters.update({"is_member": lambda mem: isinstance(mem, Member),
"is_society": lambda soc: isinstance(soc, Society),
"owner_name": owner_name,
"owner_desc": owner_desc,
"owner_website": owner_website})
CURRENT_WRAPPER = None
DEFAULT_WRAPPER = EmailWrapper(subject="[SRCF] {}")
def send(target: Owner, template: str, context: dict = None, session: SQLASession = None):
"""
Render and send an email to the target member or society.
"""
wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER
subject = wrapper.render(template, Layout.SUBJECT, target, context)
body = wrapper.render(template, Layout.BODY, target, context)
recipient = (owner_desc(target, True), target.email)
send_mail(recipient, subject, body, copy_sysadmins=False, session=session)
| 30.888889 | 96 | 0.657292 |
1cd9be86a01ac85db863f60ec2922ba01db45a75 | 348 | py | Python | nose2_example/my_package/myapp.py | dolfandringa/PythonProjectStructureDemo | 8bdd72b94d3b830e9e9dce548cca1cdb16601d0d | [
"CC-BY-4.0"
] | 2 | 2017-02-03T00:15:27.000Z | 2017-02-03T02:26:25.000Z | nose2_example/my_package/myapp.py | dolfandringa/unittesting_example | 8bdd72b94d3b830e9e9dce548cca1cdb16601d0d | [
"CC-BY-4.0"
] | null | null | null | nose2_example/my_package/myapp.py | dolfandringa/unittesting_example | 8bdd72b94d3b830e9e9dce548cca1cdb16601d0d | [
"CC-BY-4.0"
] | null | null | null | from .operations import Multiply, Add, Substract
| 29 | 70 | 0.594828 |
1cd9cb84780ce4068a648d1e9469d9570121c655 | 5,852 | py | Python | src/train_nn.py | anirudhbhashyam/911-Calls-Seattle-Predictions | 8c975ab6c6a85d514ad74388778e1b635ed3e63d | [
"MIT"
] | null | null | null | src/train_nn.py | anirudhbhashyam/911-Calls-Seattle-Predictions | 8c975ab6c6a85d514ad74388778e1b635ed3e63d | [
"MIT"
] | null | null | null | src/train_nn.py | anirudhbhashyam/911-Calls-Seattle-Predictions | 8c975ab6c6a85d514ad74388778e1b635ed3e63d | [
"MIT"
] | null | null | null | import os
from typing import Union
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import utility as ut
from variables import *
# Read the data.
train_data = pd.read_csv(os.path.join(DATA_PATH, ".".join([DATA_TRAIN, DATA_EXT])), header = 0)
# Get the labels.
Y = train_data.pop(LABEL)
sample_weights = np.ones(Y.shape[0])
for i in range(10, 24):
sample_weights[train_data["_".join(("hour", str(i)))] == 1] = 1.5
# -- For classification -- #
# CLASSES = np.unique(Y)
# N_CLASSES = len(CLASSES)
# Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES)))))
# Data shape parameters.
N_FEATURES = train_data.shape[1]
N_SAMPLES = train_data.shape[0]
# Split the training data.
X_train, X_val, Y_train, Y_val = train_test_split(train_data, Y, shuffle = True, random_state = 7919)
def build_and_compile(input_: tuple = (WB_SIZE, N_FEATURES),
loss_func: str = "mae") -> tf.keras.Model:
"""
Build and compile a TensorFLow LSTM network.
Parameters
----------
input_ :
Shape of the trainining data. Should specify
`(batch_size` or `window_size, n_features)`
loss_func :
Loss function to use for training.
Returns
-------
`tf.keras.Model` :
A compiled TensorFlow model.
"""
# Seqential keras model.
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, input_shape = input_, return_sequences = True),
tf.keras.layers.LSTM(50, return_sequences = False),
tf.keras.layers.GaussianNoise(1.0),
tf.keras.layers.Dense(1024, activation = "relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(128, activation = "relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(64, activation = "relu"),
tf.keras.layers.GaussianNoise(0.2),
# tf.keras.layers.Dense(32, activation = "relu"),
# tf.keras.layers.GaussianNoise(0.7),
tf.keras.layers.Dense(1, activation = "relu")
])
# Compile the model.
model.compile(
loss = loss_func,
optimizer = "adam"
)
return model
def train(model: tf.keras.Model,
train_data: np.ndarray,
train_labels: np.ndarray,
val_data: np.ndarray,
val_labels: np.ndarray,
epochs: int = 200,
sample_weights: np.array = None,
cross_val = False) -> pd.DataFrame:
"""
Trains the TensorFlow `model`.
Parameters
----------
model :
A TensorFlow compiled model.
train_data :
The data to be trained. Shape must be consistent with what is passed during model compilation.
train_labels :
The ground truth predictions.
val_data :
The data to be used as validation.
val_labels :
The ground truth validation predictions.
epochs :
Total number of epochs to train.
sample_weights :
Weights for `train_data` to use during training.
Returns
-------
pd.DataFrame:
Training information.
"""
# Check for overfitting.
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor = "val_loss",
min_delta = 0.001,
patience = 100,
restore_best_weights = False)
history = model.fit(
train_data.reshape(-1, WB_SIZE, N_FEATURES),
train_labels,
sample_weight = sample_weights,
validation_data = (val_data.reshape(-1, WB_SIZE, N_FEATURES), val_labels),
verbose = 1,
epochs = epochs,
callbacks = early_stopping)
return pd.DataFrame(history.history)
# def cross_validate(train_data: pd.DataFrame,
# train_labels: pd.DataFrame,
# epochs: int = 50,
# sample_weights: np.array = None,
# folds: int = 2) -> pd.DataFrame:
# splits = KFold(n_splits = folds, shuffle = True)
# print("Starting cross validation.")
# accuracy = list()
# val_loss = list()
# models = list()
# for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)):
# print(f"Iteration {i}\n")
# X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index]
# model = build_and_compile((WB_SIZE, N_FEATURES), "mae")
# history_df = train(model, X_train, Y_train, epochs)
# # train_stats(history_df, i)
# scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val)
# print(f"Validation loss: {scores}\n")
# #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%")
# # accuracy.append(scores[1] * 100)
# val_loss.append(scores)
# models.append(model)
# return models[np.argmin(val_loss)]
def train_stats(history_df: pd.DataFrame, it: int = None) -> None:
"""
Produces training statistics once training has run its course.
Parameters
----------
history_df :
The history as returned by Keras `fit` method.
it :
To be used with cross validation. Specifies the name of the learning curve based on the cross validation itertation `it`.
Returns
-------
`None`
"""
# Learning curve.
plt.rcParams["figure.dpi"] = 160
history_df.loc[:, ["loss", "val_loss"]].plot()
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
name = TRAIN_FIG_SAVE_NAME
if it is not None:
name = "_".join([name, str(it)])
plt.savefig(os.path.join(TRAIN_FIG_SAVE_PATH, ".".join([name, FIG_EXT])))
# Stats
print(f"Minimum validation loss: {history_df['val_loss'].min()}")
# plt.plot(f"Accuracy: {history_df['train_accuracy']}")
# plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}")
return None
if __name__ == "__main__":
main()
| 27.866667 | 135 | 0.681647 |
1cd9fdc42b14ec8f2d6ab3af8d353bbdb853608c | 1,971 | py | Python | pdserver/objects.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | pdserver/objects.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | pdserver/objects.py | Gustavo6046/polydung | e8626c67b0f59e00a2400b5a5c644e3f6b925e00 | [
"MIT"
] | null | null | null | import base64
import random
import string
import netbyte
import numpy as np
try:
import simplejson as json
except ImportError:
import json
kinds = {} | 27.375 | 115 | 0.559614 |
1cdaebcf2a2178841183e0647850aae12465877f | 1,859 | py | Python | football/football_test.py | EEdwardsA/DS-OOP-Review | 2352866c5d0ea6a09802c29c17366450f35c75ae | [
"MIT"
] | null | null | null | football/football_test.py | EEdwardsA/DS-OOP-Review | 2352866c5d0ea6a09802c29c17366450f35c75ae | [
"MIT"
] | null | null | null | football/football_test.py | EEdwardsA/DS-OOP-Review | 2352866c5d0ea6a09802c29c17366450f35c75ae | [
"MIT"
] | null | null | null | import unittest
from players import Player, Quarterback
from possible_values import *
from game import Game
from random import randint, uniform, sample
from season import *
# TODO - some things you can add...
if __name__ == '__main__':
unittest.main()
| 28.166667 | 64 | 0.652501 |
1cdc48fef2a5dcb4bffb7cadff760f5a6da8ed72 | 2,486 | py | Python | preprocessor/base.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | 3 | 2022-02-21T09:40:00.000Z | 2022-02-27T13:52:19.000Z | preprocessor/base.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | null | null | null | preprocessor/base.py | shayanthrn/AGAIN-VC | 41934f710d117d524b4a0bfdee7e9b845a56d422 | [
"MIT"
] | 1 | 2022-02-21T09:40:02.000Z | 2022-02-21T09:40:02.000Z | import os
import logging
import numpy as np
from tqdm import tqdm
from functools import partial
from multiprocessing.pool import ThreadPool
import pyworld as pw
from util.dsp import Dsp
logger = logging.getLogger(__name__)
| 37.666667 | 108 | 0.666935 |
1cdc6e1e4c787b21a5dbe8f394976972f434c199 | 3,025 | py | Python | divsum_stats.py | fjruizruano/SatIntExt | 90b39971ee6ea3d7cfa63fbb906df3df714a5012 | [
"MIT"
] | null | null | null | divsum_stats.py | fjruizruano/SatIntExt | 90b39971ee6ea3d7cfa63fbb906df3df714a5012 | [
"MIT"
] | null | null | null | divsum_stats.py | fjruizruano/SatIntExt | 90b39971ee6ea3d7cfa63fbb906df3df714a5012 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
from subprocess import call
print "divsum_count.py ListOfDivsumFiles\n"
try:
files = sys.argv[1]
except:
files = raw_input("Introduce RepeatMasker's list of Divsum files with library size (tab separated): ")
files = open(files).readlines()
to_join = []
header = "Coverage for each repeat class and divergence (Kimura)\n"
results = {}
for line in files:
line = line.split("\t")
file = line[0]
size = int(line[1])
data = open(file).readlines()
matrix_start = data.index(header)
matrix = data[matrix_start+1:]
li= []
names_line = matrix[0]
info = names_line.split()
for fam in info:
li.append([fam])
info_len = len(li)
for line in matrix[1:]:
info = line.split()
for i in range(0,info_len):
li[i].append(info[i])
out = open(file+".counts","w")
out.write("Sequence\tAbundance\n")
stats = open(file+".stats","w")
stats.write("Sequence\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n")
for el in li[1:]:
numbers = el[1:]
numbers = [int(x) for x in numbers]
numbers_prop = [1.0*x/size for x in numbers]
prop_dict = {}
prop_li = []
for prop in range(0,len(numbers_prop)):
prop_dict[prop] = numbers_prop[prop]
prop_li.append(numbers_prop[prop])
prop_dict_sorted = sorted(prop_dict.items(), key=lambda x: x[1], reverse=True)
total = sum(numbers_prop)
top = prop_dict_sorted[0]
top_div = top[0]
top_ab = top[1]
peak = []
if top_div >= 2:
for div in range(top_div-2,top_div+3):
peak.append(prop_dict[div])
else:
for div in range(0,5):
peak.append(prop_dict[div])
sum_peak = sum(peak)
rps = sum_peak/total
divpeak = top_div
out.write(el[0]+"\t"+str(sum(numbers))+"\n")
all_divs = []
for d in li[0][1:]:
all_divs.append(int(d)+0.5)
div_sumproduct = 0
for x,y in zip(all_divs,prop_li):
div_sumproduct += x * y
divergence = div_sumproduct/total
data = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (el[0],str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
stats.write(data)
data2 = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (file, str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
if el[0] in results:
results[el[0]].append(data2)
else:
results[el[0]] = [data2]
out.close()
stats.close()
to_join.append(file+".counts")
out = open("results.txt", "w")
for el in sorted(results):
info = results[el]
out.write("%s\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n" % (el))
for i in info:
out.write(i)
out.write("\n\n\n")
out.close()
call("join_multiple_lists.py %s" % (" ".join(to_join)), shell=True)
| 27.752294 | 131 | 0.57686 |
1cdc98744b311e2367992861b764dff14f24294c | 201 | py | Python | agatecharts/charts/__init__.py | onyxfish/fever | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | [
"MIT"
] | 4 | 2015-09-05T04:47:27.000Z | 2015-09-16T15:14:43.000Z | agatecharts/charts/__init__.py | onyxfish/fever | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | [
"MIT"
] | 18 | 2015-09-05T01:17:30.000Z | 2015-09-23T13:08:27.000Z | agatecharts/charts/__init__.py | onyxfish/way | 8aef0cd4adff7fdde1f5950ffb1d01db9137e3b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from agatecharts.charts.bars import Bars
from agatecharts.charts.columns import Columns
from agatecharts.charts.lines import Lines
from agatecharts.charts.scatter import Scatter
| 28.714286 | 46 | 0.840796 |
1cdce77473b836e98d4d4044b2d6d581603e5972 | 1,930 | py | Python | users/views.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 11 | 2021-01-23T01:09:54.000Z | 2021-01-25T07:16:30.000Z | users/views.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 7 | 2021-04-06T18:19:10.000Z | 2021-09-22T19:45:03.000Z | users/views.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 3 | 2021-01-23T18:55:32.000Z | 2021-02-16T17:47:59.000Z | from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
def unlock(request, pk):
if request.method == "POST":
lock = Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
| 33.275862 | 103 | 0.748705 |
1cde121b7cc2a3e5e4fa33ad8b2f5852ba028e54 | 2,970 | py | Python | test/core/s3_table_test_base.py | adidas/m3d-api | 755d676452e4b10075fa65f9acfdbf30a6ee828e | [
"Apache-2.0"
] | 24 | 2019-09-26T13:15:14.000Z | 2021-11-10T11:10:04.000Z | test/core/s3_table_test_base.py | adidas/m3d-api | 755d676452e4b10075fa65f9acfdbf30a6ee828e | [
"Apache-2.0"
] | null | null | null | test/core/s3_table_test_base.py | adidas/m3d-api | 755d676452e4b10075fa65f9acfdbf30a6ee828e | [
"Apache-2.0"
] | 11 | 2019-09-26T13:27:10.000Z | 2020-11-04T03:13:20.000Z | import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
| 37.594937 | 108 | 0.662626 |
1cdeb5c9ff7c16810e652dbe520cbde408b27771 | 939 | py | Python | metrics/serializers.py | BrianWaganerSTL/RocketDBaaS | d924589188411371842513060a5e08b1be3cdccf | [
"MIT"
] | 1 | 2018-11-04T09:36:35.000Z | 2018-11-04T09:36:35.000Z | metrics/serializers.py | BrianWaganerSTL/RocketDBaaS_api | d924589188411371842513060a5e08b1be3cdccf | [
"MIT"
] | null | null | null | metrics/serializers.py | BrianWaganerSTL/RocketDBaaS_api | d924589188411371842513060a5e08b1be3cdccf | [
"MIT"
] | null | null | null | from rest_framework import serializers
from metrics.models import Metrics_Cpu, Metrics_PingServer, Metrics_MountPoint, \
Metrics_CpuLoad, Metrics_PingDb
| 27.617647 | 81 | 0.698616 |
1ce01d2d1af3efb76606596d816ab61448b4bddc | 2,911 | bzl | Python | sqlc/private/sqlc_toolchain.bzl | dmayle/rules_sqlc | c465542827a086994e9427e2c792bbc4355c3e70 | [
"Apache-2.0"
] | 2 | 2020-12-09T16:01:14.000Z | 2021-02-15T09:24:27.000Z | sqlc/private/sqlc_toolchain.bzl | dmayle/rules_sqlc | c465542827a086994e9427e2c792bbc4355c3e70 | [
"Apache-2.0"
] | 2 | 2020-12-08T16:46:25.000Z | 2020-12-09T16:17:55.000Z | sqlc/private/sqlc_toolchain.bzl | dmayle/rules_sqlc | c465542827a086994e9427e2c792bbc4355c3e70 | [
"Apache-2.0"
] | 3 | 2021-07-28T20:39:10.000Z | 2022-01-26T19:33:28.000Z | # Copyright 2020 Plezentek, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//sqlc/private:providers.bzl",
"SQLCRelease",
)
load(
"//sqlc/private/rules_go/lib:platforms.bzl",
"PLATFORMS",
)
sqlc_toolchain = rule(
_sqlc_toolchain_impl,
attrs = {
"goos": attr.string(
mandatory = True,
doc = "Default target OS",
),
"goarch": attr.string(
mandatory = True,
doc = "Default target architecture",
),
"release": attr.label(
mandatory = True,
providers = [SQLCRelease],
cfg = "exec",
doc = "The SQLC release this toolchain is based on",
),
},
doc = "Defines a SQLC toolchain based on a release",
provides = [platform_common.ToolchainInfo],
)
| 32.707865 | 86 | 0.61113 |
1ce0a4f2656bca31c4698766977c076b08c6dfcd | 4,041 | py | Python | configs/tracker_configs/new_test_20e_cam_1_new_short.py | nolanzzz/mtmct | 8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0 | [
"MIT"
] | 17 | 2021-09-01T23:13:14.000Z | 2022-03-28T11:12:37.000Z | configs/tracker_configs/new_test_20e_cam_1_new_short.py | nolanzzz/MTMCT | 8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0 | [
"MIT"
] | 4 | 2022-01-21T05:47:09.000Z | 2022-03-31T04:44:01.000Z | configs/tracker_configs/new_test_20e_cam_1_new_short.py | nolanzzz/MTMCT | 8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0 | [
"MIT"
] | 6 | 2021-12-16T02:08:43.000Z | 2022-03-09T06:18:32.000Z | root = {
"general" : {
"display_viewer" : False,
#The visible GPUS will be restricted to the numbers listed here. The pytorch (cuda:0) numeration will start at 0
#This is a trick to get everything onto the wanted gpus because just setting cuda:4 in the function calls will
#not work for mmdetection. There will still be things on gpu cuda:0.
"cuda_visible_devices" : "1",
"save_track_results" : True
},
"data" : {
# To increase the speed while developing an specific interval of all frames can be set.
"selection_interval" : [0,10000],
"source" : {
"base_folder" : "/u40/zhanr110/MTA_ext_short/test",
# "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",
"cam_ids" : [1]
}
},
"detector" : {
# "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py",
"mmdetection_config" : "detectors/mmdetection/configs/mta/faster_rcnn_r50_mta.py",
# "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth",
"mmdetection_checkpoint_file" : "detectors/mmdetection/work_dirs/GtaDataset_30e/epoch_20.pth",
"device" : "cuda:0",
#Remove all detections with a confidence less than min_confidence
"min_confidence" : 0.8,
},
"feature_extractor" : {
"feature_extractor_name" : "abd_net_extractor"
,"reid_strong_extractor": {
"reid_strong_baseline_config": "feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml",
"checkpoint_file": "work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth",
"device": "cuda:0,1"
,"visible_device" : "0,1"}
,"abd_net_extractor" : dict(abd_dan=['cam', 'pam'], abd_dan_no_head=False, abd_dim=1024, abd_np=2, adam_beta1=0.9,
adam_beta2=0.999, arch='resnet50', branches=['global', 'abd'], compatibility=False, criterion='htri',
cuhk03_classic_split=False, cuhk03_labeled=False, dan_dan=[], dan_dan_no_head=False, dan_dim=1024,
data_augment=['crop,random-erase'], day_only=False, dropout=0.5, eval_freq=5, evaluate=False,
fixbase=False, fixbase_epoch=10, flip_eval=False, gamma=0.1, global_dim=1024,
global_max_pooling=False, gpu_devices='1', height=384, htri_only=False, label_smooth=True,
lambda_htri=0.1, lambda_xent=1, lr=0.0003, margin=1.2, max_epoch=80, min_height=-1,
momentum=0.9, night_only=False, np_dim=1024, np_max_pooling=False, np_np=2, np_with_global=False,
num_instances=4, of_beta=1e-06, of_position=['before', 'after', 'cam', 'pam', 'intermediate'],
of_start_epoch=23, open_layers=['classifier'], optim='adam', ow_beta=0.001,
pool_tracklet_features='avg', print_freq=10, resume='', rmsprop_alpha=0.99
, load_weights='work_dirs/feature_extractor/abd-net/checkpoint_ep30_non_clean.pth.tar'
# , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth'
, root='work_dirs/datasets'
, sample_method='evenly'
, save_dir='work_dirs/feature_extractor/abd-net/log/eval-resnet50'
, seed=1, seq_len=15,
sgd_dampening=0, sgd_nesterov=False, shallow_cam=True, source_names=['mta_ext'], split_id=0,
start_epoch=0, start_eval=0, stepsize=[20, 40], target_names=['market1501'],
test_batch_size=100, train_batch_size=64, train_sampler='', use_avai_gpus=False, use_cpu=False,
use_metric_cuhk03=False, use_of=True, use_ow=True, visualize_ranks=False, weight_decay=0.0005,
width=128, workers=4)
},
"tracker" : {
"type" : "DeepSort",
"nn_budget" : 100
}
}
| 48.686747 | 130 | 0.632022 |
1ce1d1ce742bc81665dc46ca2940199356484a9f | 1,010 | py | Python | tests/structures/test_generator.py | cherub96/voc | 2692d56059e4d4a52768270feaf5179b23609b04 | [
"BSD-3-Clause"
] | 1 | 2021-01-03T00:59:50.000Z | 2021-01-03T00:59:50.000Z | tests/structures/test_generator.py | cherub96/voc | 2692d56059e4d4a52768270feaf5179b23609b04 | [
"BSD-3-Clause"
] | null | null | null | tests/structures/test_generator.py | cherub96/voc | 2692d56059e4d4a52768270feaf5179b23609b04 | [
"BSD-3-Clause"
] | null | null | null | from ..utils import TranspileTestCase
| 28.055556 | 44 | 0.40099 |
1ce29cc9381fd7dde956750ac0935a544001e2ba | 22,057 | py | Python | ogusa/tax.py | hdoupe/OG-USA | f7e4d600b7a2993c7d1b53e23bfe29cfccaea770 | [
"CC0-1.0"
] | null | null | null | ogusa/tax.py | hdoupe/OG-USA | f7e4d600b7a2993c7d1b53e23bfe29cfccaea770 | [
"CC0-1.0"
] | 2 | 2020-09-02T22:58:36.000Z | 2020-09-03T19:29:46.000Z | ogusa/tax.py | prrathi/OG-USA | 2e5c116bb8656ab190a59e431a8d57415fe26b08 | [
"CC0-1.0"
] | null | null | null | '''
------------------------------------------------------------------------
Functions for taxes in the steady state and along the transition path.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def replacement_rate_vals(nssmat, wss, factor_ss, j, p):
    '''
    Calculates replacement rate values for the social security system.
    Args:
        nssmat (Numpy array): initial guess at labor supply, size = SxJ
        wss (scalar): steady state real wage rate
        factor_ss (scalar): scaling factor converting model units to
            dollars
        j (int): index of lifetime income group (None computes values
            for all groups at once)
        p (OG-USA Specifications object): model parameters
    Returns:
        theta (Numpy array): social security replacement rate value for
            lifetime income group j
    '''
    if j is not None:
        e = p.e[:, j]
    else:
        e = p.e
    # adjust number of calendar years AIME computed from int model periods
    equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1
    if e.ndim == 2:
        dim2 = e.shape[1]
    else:
        dim2 = 1
    # Annual labor earnings in dollars: model periods on rows, income
    # groups on columns.
    earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)
    # get highest earning years for number of years AIME computed from
    # (negate + np.sort gives a descending sort; keep the top rows)
    highest_earn =\
        (-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],
         axis=0))[:equiv_periods]
    # AIME is a *monthly* average (hence the 12.0); S/80 converts model
    # periods to calendar years.
    AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)
    PIA = np.zeros(dim2)
    # Compute level of replacement using AIME brackets and PIA rates
    # NOTE(review): the loop variable below shadows the function
    # argument j (which is no longer needed at this point).
    for j in range(dim2):
        if AIME[j] < p.AIME_bkt_1:
            PIA[j] = p.PIA_rate_bkt_1 * AIME[j]
        elif AIME[j] < p.AIME_bkt_2:
            PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
                      p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))
        else:
            PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
                      p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +
                      p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))
    # Set the maximum monthly replacment rate from SS benefits tables
    PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment
    if p.PIA_minpayment != 0.0:
        PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment
    # Convert the monthly dollar benefit back into model units relative
    # to the steady-state wage.
    theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)
    return theta
def ETR_wealth(b, h_wealth, m_wealth, p_wealth):
    r'''
    Compute the effective tax rate on wealth.

    .. math::
        T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}

    Args:
        b (Numpy array): savings
        h_wealth (scalar): parameter of wealth tax function
        m_wealth (scalar): parameter of wealth tax function
        p_wealth (scalar): parameter of wealth tax function

    Returns:
        tau_w (Numpy array): effective tax rate on wealth, size = SxJ
    '''
    # Ratio form of the wealth tax schedule above.
    numerator = p_wealth * h_wealth * b
    denominator = h_wealth * b + m_wealth
    return numerator / denominator
def MTR_wealth(b, h_wealth, m_wealth, p_wealth):
    r'''
    Compute the marginal tax rate on wealth implied by the wealth tax.

    .. math::
        \frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}m^{w})^{2}}

    Args:
        b (Numpy array): savings
        h_wealth (scalar): parameter of wealth tax function
        m_wealth (scalar): parameter of wealth tax function
        p_wealth (scalar): parameter of wealth tax function

    Returns:
        tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ
    '''
    # MTR = b * d(ETR)/db + ETR, with the derivative written out
    # analytically.
    derivative_term = ((b * h_wealth * m_wealth * p_wealth) /
                       ((b * h_wealth + m_wealth) ** 2))
    average_rate = ETR_wealth(b, h_wealth, m_wealth, p_wealth)
    return derivative_term + average_rate
def ETR_income(r, w, b, n, factor, e, etr_params, p):
    '''
    Calculates effective personal income tax rate.
    Args:
        r (array_like): real interest rate
        w (array_like): real wage rate
        b (Numpy array): savings
        n (Numpy array): labor supply
        factor (scalar): scaling factor converting model units to
            dollars
        e (Numpy array): effective labor units
        etr_params (Numpy array): effective tax rate function parameters
            (last axis indexes the parameters; which entries are used
            depends on p.tax_func_type)
        p (OG-USA Specifications object): model parameters
    Returns:
        tau (Numpy array): effective tax rate on total income
    '''
    # Labor income (X) and capital income (Y) in dollar terms.
    X = (w * e * n) * factor
    Y = (r * b) * factor
    X2 = X ** 2
    Y2 = Y ** 2
    income = X + Y
    income2 = income ** 2
    if p.tax_func_type == 'GS':
        # Gouveia-Strauss style functional form on total income.
        phi0 = np.squeeze(etr_params[..., 0])
        phi1 = np.squeeze(etr_params[..., 1])
        phi2 = np.squeeze(etr_params[..., 2])
        tau = ((phi0 * (income - ((income ** -phi1) + phi2) **
                        (-1 / phi1))) / income)
    elif p.tax_func_type == 'DEP_totalinc':
        # Ratio-of-polynomials form applied to total income only.
        A = np.squeeze(etr_params[..., 0])
        B = np.squeeze(etr_params[..., 1])
        max_income = np.squeeze(etr_params[..., 4])
        min_income = np.squeeze(etr_params[..., 5])
        shift_income = np.squeeze(etr_params[..., 8])
        shift = np.squeeze(etr_params[..., 10])
        tau_income = (((max_income - min_income) *
                       (A * income2 + B * income) /
                       (A * income2 + B * income + 1)) + min_income)
        tau = tau_income + shift_income + shift
    else:  # DEP or linear
        # Separate ratio-of-polynomials pieces in labor income (x) and
        # capital income (y), combined via a Cobb-Douglas share.
        A = np.squeeze(etr_params[..., 0])
        B = np.squeeze(etr_params[..., 1])
        C = np.squeeze(etr_params[..., 2])
        D = np.squeeze(etr_params[..., 3])
        max_x = np.squeeze(etr_params[..., 4])
        min_x = np.squeeze(etr_params[..., 5])
        max_y = np.squeeze(etr_params[..., 6])
        min_y = np.squeeze(etr_params[..., 7])
        shift_x = np.squeeze(etr_params[..., 8])
        shift_y = np.squeeze(etr_params[..., 9])
        shift = np.squeeze(etr_params[..., 10])
        share = np.squeeze(etr_params[..., 11])
        tau_x = ((max_x - min_x) * (A * X2 + B * X) /
                 (A * X2 + B * X + 1) + min_x)
        tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
                 (C * Y2 + D * Y + 1) + min_y)
        tau = (((tau_x + shift_x) ** share) *
               ((tau_y + shift_y) ** (1 - share))) + shift
    return tau
def MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,
               mtr_params, p):
    r'''
    Generates the marginal tax rate on labor (or capital) income for
    households.
    Args:
        r (array_like): real interest rate
        w (array_like): real wage rate
        b (Numpy array): savings
        n (Numpy array): labor supply
        factor (scalar): scaling factor converting model units to
            dollars
        mtr_capital (bool): whether to compute the marginal tax rate on
            capital income or labor income
        e (Numpy array): effective labor units
        etr_params (Numpy array): effective tax rate function parameters
            (used when p.analytical_mtrs is True)
        mtr_params (Numpy array): marginal tax rate function parameters
            (used when p.analytical_mtrs is False)
        p (OG-USA Specifications object): model parameters
    Returns:
        tau (Numpy array): marginal tax rate on income source
    '''
    # Labor income (X) and capital income (Y) in dollar terms.
    X = (w * e * n) * factor
    Y = (r * b) * factor
    X2 = X ** 2
    Y2 = Y ** 2
    income = X + Y
    income2 = income ** 2
    if p.tax_func_type == 'GS':
        # Gouveia-Strauss form: closed-form derivative of the ETR *
        # income expression, so the same parameters serve both cases.
        if p.analytical_mtrs:
            phi0 = np.squeeze(etr_params[..., 0])
            phi1 = np.squeeze(etr_params[..., 1])
            phi2 = np.squeeze(etr_params[..., 2])
        else:
            phi0 = np.squeeze(mtr_params[..., 0])
            phi1 = np.squeeze(mtr_params[..., 1])
            phi2 = np.squeeze(mtr_params[..., 2])
        tau = (phi0*(1 - (income ** (-phi1 - 1) *
                          ((income ** -phi1) + phi2) **
                          ((-1 - phi1) / phi1))))
    elif p.tax_func_type == 'DEP_totalinc':
        if p.analytical_mtrs:
            # MTR = d(ETR)/d(income) * income + ETR (product rule on
            # ETR(income) * income).
            A = np.squeeze(etr_params[..., 0])
            B = np.squeeze(etr_params[..., 1])
            max_income = np.squeeze(etr_params[..., 4])
            min_income = np.squeeze(etr_params[..., 5])
            shift_income = np.squeeze(etr_params[..., 8])
            shift = np.squeeze(etr_params[..., 10])
            d_etr = ((max_income - min_income) * ((2 * A * income + B) /
                     ((A * income2 + B * income + 1) ** 2)))
            etr = (((max_income - min_income) *
                    ((A * income2 + B * income) /
                     (A * income2 + B * income + 1)) + min_income) +
                   shift_income + shift)
            tau = (d_etr * income) + (etr)
        else:
            # Separately estimated MTR function, same functional form
            # as the ETR.
            A = np.squeeze(mtr_params[..., 0])
            B = np.squeeze(mtr_params[..., 1])
            max_income = np.squeeze(mtr_params[..., 4])
            min_income = np.squeeze(mtr_params[..., 5])
            shift_income = np.squeeze(mtr_params[..., 8])
            shift = np.squeeze(mtr_params[..., 10])
            tau_income = (((max_income - min_income) *
                           (A * income2 + B * income) /
                           (A * income2 + B * income + 1)) + min_income)
            tau = tau_income + shift_income + shift
    else:  # DEP or linear
        if p.analytical_mtrs:
            # Differentiate the bivariate Cobb-Douglas ETR with respect
            # to Y (capital) or X (labor), then apply the product rule.
            A = np.squeeze(etr_params[..., 0])
            B = np.squeeze(etr_params[..., 1])
            C = np.squeeze(etr_params[..., 2])
            D = np.squeeze(etr_params[..., 3])
            max_x = np.squeeze(etr_params[..., 4])
            min_x = np.squeeze(etr_params[..., 5])
            max_y = np.squeeze(etr_params[..., 6])
            min_y = np.squeeze(etr_params[..., 7])
            shift_x = np.squeeze(etr_params[..., 8])
            shift_y = np.squeeze(etr_params[..., 9])
            shift = np.squeeze(etr_params[..., 10])
            share = np.squeeze(etr_params[..., 11])
            tau_x = ((max_x - min_x) * (A * X2 + B * X) /
                     (A * X2 + B * X + 1) + min_x)
            tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
                     (C * Y2 + D * Y + 1) + min_y)
            etr = (((tau_x + shift_x) ** share) *
                   ((tau_y + shift_y) ** (1 - share))) + shift
            if mtr_capital:
                d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *
                         (max_y - min_y) * ((2 * C * Y + D) /
                                            ((C * Y2 + D * Y + 1)
                                             ** 2)) *
                         ((tau_x + shift_x) ** share))
                tau = d_etr * income + etr
            else:
                d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *
                         (max_x - min_x) * ((2 * A * X + B) /
                                            ((A * X2 + B * X + 1)
                                             ** 2)) *
                         ((tau_y + shift_y) ** (1 - share)))
                tau = d_etr * income + etr
        else:
            # Separately estimated MTR function, same bivariate form as
            # the ETR.
            A = np.squeeze(mtr_params[..., 0])
            B = np.squeeze(mtr_params[..., 1])
            C = np.squeeze(mtr_params[..., 2])
            D = np.squeeze(mtr_params[..., 3])
            max_x = np.squeeze(mtr_params[..., 4])
            min_x = np.squeeze(mtr_params[..., 5])
            max_y = np.squeeze(mtr_params[..., 6])
            min_y = np.squeeze(mtr_params[..., 7])
            shift_x = np.squeeze(mtr_params[..., 8])
            shift_y = np.squeeze(mtr_params[..., 9])
            shift = np.squeeze(mtr_params[..., 10])
            share = np.squeeze(mtr_params[..., 11])
            tau_x = ((max_x - min_x) * (A * X2 + B * X) /
                     (A * X2 + B * X + 1) + min_x)
            tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
                     (C * Y2 + D * Y + 1) + min_y)
            tau = (((tau_x + shift_x) ** share) *
                   ((tau_y + shift_y) ** (1 - share))) + shift
    return tau
def get_biz_tax(w, Y, L, K, p, method):
    r'''
    Finds total business income tax revenue.

    .. math::
        R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau}

    Args:
        w (array_like): real wage rate
        Y (array_like): aggregate output
        L (array_like): aggregate labor demand
        K (array_like): aggregate capital demand
        p (OG-USA Specifications object): model parameters
        method (str): 'SS' uses the terminal (steady-state) parameter
            values; any other value uses the first T periods of the
            parameter paths

    Returns:
        business_revenue (array_like): aggregate business tax revenue
    '''
    if method == 'SS':
        delta_tau = p.delta_tau[-1]
        tau_b = p.tau_b[-1]
    else:
        delta_tau = p.delta_tau[:p.T]
        tau_b = p.tau_b[:p.T]
    # Tax on profits (output net of labor costs) minus the deduction for
    # depreciation of the tax-basis capital stock.
    business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K
    return business_revenue
def net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,
              e, etr_params, p):
    '''
    Compute net taxes paid by each household: income/payroll, bequest
    and wealth tax liabilities, net of pension benefits and government
    transfers.

    Args:
        r (array_like): real interest rate
        w (array_like): real wage rate
        b (Numpy array): savings
        n (Numpy array): labor supply
        bq (Numpy array): bequests received
        factor (scalar): scaling factor converting model units to
            dollars
        tr (Numpy array): government transfers to the household
        theta (Numpy array): social security replacement rate value for
            lifetime income group j
        t (int): time period
        j (int): index of lifetime income group
        shift (bool): whether computing for periods 0--s or 1--(s+1),
            =True for 1--(s+1)
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
        e (Numpy array): effective labor units
        etr_params (Numpy array): effective tax rate function parameters
        p (OG-USA Specifications object): model parameters

    Returns:
        net_tax (Numpy array): net taxes paid for each household
    '''
    # Gross liabilities from each tax instrument.
    income_payroll = income_tax_liab(r, w, b, n, factor, t, j, method,
                                     e, etr_params, p)
    wealth_tax = wealth_tax_liab(r, b, t, j, method, p)
    bequest_tax = bequest_tax_liab(r, b, bq, t, j, method, p)
    # Benefits received reduce the net bill.
    benefits = pension_amount(w, n, theta, t, j, shift, method, e, p)
    return income_payroll - benefits + bequest_tax + wealth_tax - tr
def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):
    '''
    Calculate income and payroll tax liability for each household
    Args:
        r (array_like): real interest rate
        w (array_like): real wage rate
        b (Numpy array): savings
        n (Numpy array): labor supply
        factor (scalar): scaling factor converting model units to
            dollars
        t (int): time period
        j (int): index of lifetime income group (None means all groups)
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
        e (Numpy array): effective labor units
        etr_params (Numpy array): effective tax rate function parameters
        p (OG-USA Specifications object): model parameters
    Returns:
        T_I (Numpy array): total income and payroll taxes paid for each
             household
    '''
    # Reshape price paths so they broadcast against the household arrays
    # along the time dimension.
    if j is not None:
        if method == 'TPI':
            if b.ndim == 2:
                r = r.reshape(r.shape[0], 1)
                w = w.reshape(w.shape[0], 1)
    else:
        if method == 'TPI':
            r = utils.to_timepath_shape(r)
            w = utils.to_timepath_shape(w)
    income = r * b + w * e * n
    labor_income = w * e * n
    # Income tax: effective rate applied to total (capital + labor) income.
    T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income
    # Payroll tax applies to labor income only; pick the tau_payroll
    # entries matching the method/dimension of the inputs.
    if method == 'SS':
        T_P = p.tau_payroll[-1] * labor_income
    elif method == 'TPI':
        length = w.shape[0]
        if len(b.shape) == 1:
            T_P = p.tau_payroll[t: t + length] * labor_income
        elif len(b.shape) == 2:
            T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *
                   labor_income)
        else:
            T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *
                   labor_income)
    elif method == 'TPI_scalar':
        T_P = p.tau_payroll[0] * labor_income
    income_payroll_tax_liab = T_I + T_P
    return income_payroll_tax_liab
def pension_amount(w, n, theta, t, j, shift, method, e, p):
    '''
    Calculate public pension benefit amounts for each household.
    Args:
        w (array_like): real wage rate
        n (Numpy array): labor supply
        theta (Numpy array): social security replacement rate value for
            lifetime income group j
        t (int): time period
        j (int): index of lifetime income group (None means all groups)
        shift (bool): whether computing for periods 0--s or 1--(s+1),
            =True for 1--(s+1)
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
        e (Numpy array): effective labor units
        p (OG-USA Specifications object): model parameters
    Returns:
        pension (Numpy array): pension amount for each household
    '''
    # Reshape the wage path so it broadcasts against the household arrays.
    if j is not None:
        if method == 'TPI':
            if n.ndim == 2:
                w = w.reshape(w.shape[0], 1)
    else:
        if method == 'TPI':
            w = utils.to_timepath_shape(w)
    # Benefits are zero before retirement; only retired ages are filled in.
    pension = np.zeros_like(n)
    if method == 'SS':
        # Depending on if we are looking at b_s or b_s+1, the
        # entry for retirement will change (it shifts back one).
        # The shift boolean makes sure we start replacement rates
        # at the correct age.
        if shift is False:
            pension[p.retire[-1]:] = theta * w
        else:
            pension[p.retire[-1] - 1:] = theta * w
    elif method == 'TPI':
        length = w.shape[0]
        if not shift:
            # retireTPI is different from retire, because in TP income
            # we are counting backwards with different length lists.
            # This will always be the correct location of retirement,
            # depending on the shape of the lists.
            retireTPI = (p.retire[t: t + length] - p.S)
        else:
            retireTPI = (p.retire[t: t + length] - 1 - p.S)
        if len(n.shape) == 1:
            # Single household lifetime path: a scalar retirement index.
            if not shift:
                retireTPI = p.retire[t] - p.S
            else:
                retireTPI = p.retire[t] - 1 - p.S
            pension[retireTPI:] = (
                theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])
        elif len(n.shape) == 2:
            # Time x age: fill retired ages row by row along the path.
            for tt in range(pension.shape[0]):
                pension[tt, retireTPI[tt]:] = (
                    theta * p.replacement_rate_adjust[t + tt] * w[tt])
        else:
            # Time x age x ability type.
            for tt in range(pension.shape[0]):
                pension[tt, retireTPI[tt]:, :] = (
                    theta.reshape(1, p.J) *
                    p.replacement_rate_adjust[t + tt] * w[tt])
    elif method == 'TPI_scalar':
        # The above methods won't work if scalars are used. This option
        # is only called by the SS_TPI_firstdoughnutring function in TPI.
        pension = theta * p.replacement_rate_adjust[0] * w
    return pension
def wealth_tax_liab(r, b, t, j, method, p):
    '''
    Calculate wealth tax liability for each household.
    Args:
        r (array_like): real interest rate
        b (Numpy array): savings
        t (int): time period
        j (int): index of lifetime income group (None means all groups)
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
        p (OG-USA Specifications object): model parameters
    Returns:
        T_W (Numpy array): wealth tax liability for each household
    '''
    # Reshape the interest rate path so it broadcasts against savings.
    if j is not None:
        if method == 'TPI':
            if b.ndim == 2:
                r = r.reshape(r.shape[0], 1)
    else:
        if method == 'TPI':
            r = utils.to_timepath_shape(r)
    # Liability = effective wealth tax rate times the wealth stock; pick
    # the tax function parameters matching the method/period.
    if method == 'SS':
        T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],
                          p.p_wealth[-1]) * b)
    elif method == 'TPI':
        length = r.shape[0]
        # NOTE(review): the 1-D and 2-D branches below are identical;
        # broadcasting handles both shapes the same way.
        if len(b.shape) == 1:
            T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
                              p.m_wealth[t:t + length],
                              p.p_wealth[t:t + length]) * b)
        elif len(b.shape) == 2:
            T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
                              p.m_wealth[t:t + length],
                              p.p_wealth[t:t + length]) * b)
        else:
            # 3-D case: reshape parameters to broadcast over age and
            # ability dimensions.
            T_W = (ETR_wealth(
                b, p.h_wealth[t:t + length].reshape(length, 1, 1),
                p.m_wealth[t:t + length].reshape(length, 1, 1),
                p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)
    elif method == 'TPI_scalar':
        T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],
                          p.p_wealth[0]) * b)
    return T_W
def bequest_tax_liab(r, b, bq, t, j, method, p):
    '''
    Calculate liability due from taxes on bequests for each household.
    Args:
        r (array_like): real interest rate
        b (Numpy array): savings
        bq (Numpy array): bequests received
        t (int): time period
        j (int): index of lifetime income group (None means all groups)
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
        p (OG-USA Specifications object): model parameters
    Returns:
        T_BQ (Numpy array): bequest tax liability for each household
    '''
    if j is not None:
        lambdas = p.lambdas[j]
        if method == 'TPI':
            if b.ndim == 2:
                r = r.reshape(r.shape[0], 1)
    else:
        lambdas = np.transpose(p.lambdas)
        if method == 'TPI':
            r = utils.to_timepath_shape(r)
    # Liability = bequest tax rate times bequests received, with the
    # rate entries selected to match the method/period.
    if method == 'SS':
        T_BQ = p.tau_bq[-1] * bq
    elif method == 'TPI':
        length = r.shape[0]
        if len(b.shape) == 1:
            T_BQ = p.tau_bq[t:t + length] * bq
        elif len(b.shape) == 2:
            # NOTE(review): only this branch scales bq by the population
            # weight lambdas; confirm with callers whether the 1-D and
            # 3-D cases receive bq already divided by lambdas.
            T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas
        else:
            T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq
    elif method == 'TPI_scalar':
        # The above methods won't work if scalars are used. This option
        # is only called by the SS_TPI_firstdoughnutring function in TPI.
        T_BQ = p.tau_bq[0] * bq
    return T_BQ
1ce2efac56c23c6a39d717edb12824108fd3d153 | 35,293 | py | Python | muse_for_anything/api/v1_api/taxonomy_items.py | baireutherjonas/muse-for-anything | a625b4fc6468d74fa12886dc465d5694eed86e04 | [
"MIT"
] | null | null | null | muse_for_anything/api/v1_api/taxonomy_items.py | baireutherjonas/muse-for-anything | a625b4fc6468d74fa12886dc465d5694eed86e04 | [
"MIT"
] | 1 | 2021-11-14T18:55:44.000Z | 2021-11-14T18:55:44.000Z | muse_for_anything/api/v1_api/taxonomy_items.py | baireutherjonas/muse-for-anything | a625b4fc6468d74fa12886dc465d5694eed86e04 | [
"MIT"
] | 1 | 2021-09-08T13:49:52.000Z | 2021-09-08T13:49:52.000Z | """Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
action_links_for_taxonomy_item_relation,
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
nav_links_for_taxonomy_item_relation,
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
| 39.43352 | 123 | 0.606041 |
1ce4e6e88e3b37747a733ee2057c09e983742a39 | 478 | py | Python | PythonDAdata/3358OS_06_Code/code6/pd_plotting.py | shijiale0609/Python_Data_Analysis | c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e | [
"MIT"
] | 1 | 2020-02-22T18:55:54.000Z | 2020-02-22T18:55:54.000Z | PythonDAdata/3358OS_06_Code/code6/pd_plotting.py | shijiale0609/Python_Data_Analysis | c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e | [
"MIT"
] | null | null | null | PythonDAdata/3358OS_06_Code/code6/pd_plotting.py | shijiale0609/Python_Data_Analysis | c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e | [
"MIT"
] | 1 | 2020-02-22T18:55:57.000Z | 2020-02-22T18:55:57.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| 26.555556 | 101 | 0.717573 |
1ce550dcd34ad1e54a6bb3af57029219d257f4d1 | 742 | py | Python | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 96122b36486f7e874c013e50d939732a43db309f | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.5 on 2018-07-02 19:46
from django.db import migrations, models
import django.db.models.deletion
| 30.916667 | 115 | 0.58221 |
1ce6c087a65ed77b98463ac3f530b83170cfd6d6 | 241 | py | Python | submissions/aising2019/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/aising2019/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/aising2019/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
h = int(readline())
w = int(readline())
print((n - h + 1) * (n - w + 1))
| 21.909091 | 38 | 0.676349 |
1ce7603f33584c5aefec4359d0957617e3a28159 | 5,106 | py | Python | CreateHalo.py | yoyoberenguer/MultiplayerGameEngine | 1d1a4c0ab40d636322c4e3299cbc84fb57965b31 | [
"MIT"
] | 4 | 2019-09-08T13:54:14.000Z | 2021-12-18T11:46:59.000Z | CreateHalo.py | yoyoberenguer/MultiplayerGameEngine | 1d1a4c0ab40d636322c4e3299cbc84fb57965b31 | [
"MIT"
] | 1 | 2019-09-01T11:21:39.000Z | 2019-09-01T15:01:21.000Z | CreateHalo.py | yoyoberenguer/MultiplayerGameEngine | 1d1a4c0ab40d636322c4e3299cbc84fb57965b31 | [
"MIT"
] | 1 | 2019-08-23T07:00:20.000Z | 2019-08-23T07:00:20.000Z |
import pygame
from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand
from Textures import HALO_SPRITE12, HALO_SPRITE14, HALO_SPRITE13
# Module authorship / packaging metadata.
__author__ = "Yoann Berenguer"
__credits__ = ["Yoann Berenguer"]
__version__ = "1.0.0"
__maintainer__ = "Yoann Berenguer"
__email__ = "yoyoberenguer@hotmail.com"
| 31.9125 | 99 | 0.570897 |
1ce783ade7ec4e76f4c0abea82bc09661b19e042 | 29,965 | py | Python | src/dataops/pandas_db.py | ShizhuZhang/ontask_b | acbf05ff9b18dae0a41c67d1e41774e54a890c40 | [
"MIT"
] | null | null | null | src/dataops/pandas_db.py | ShizhuZhang/ontask_b | acbf05ff9b18dae0a41c67d1e41774e54a890c40 | [
"MIT"
] | null | null | null | src/dataops/pandas_db.py | ShizhuZhang/ontask_b | acbf05ff9b18dae0a41c67d1e41774e54a890c40 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Prefix shared by every OnTask-managed table; the workflow data-frame
# and upload table names below are derived from it.
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
    'object': 'string',
    'int64': 'integer',
    'float64': 'double',
    'bool': 'boolean',
    'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
    'text': 'string',
    'bigint': 'integer',
    'double precision': 'double',
    'boolean': 'boolean',
    'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql / read_sql); set up
# elsewhere at application start, hence the None placeholder.
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
    """
    Build the SQLAlchemy engine object used by the pandas to_sql /
    read_sql functions to talk to the database.

    :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
    :param driver: DBAPI driver (psycopg2, ...)
    :param username: Username to connect with the database
    :param password: Password to connect with the database
    :param host: Host to connect with the database
    :param dbname: database name
    :return: the engine
    """
    # Standard SQLAlchemy URL: dialect+driver://user:password@host/dbname
    database_url = '%s%s://%s:%s@%s/%s' % (
        dialect, driver, username, password, host, dbname
    )
    return create_engine(database_url, echo=False, paramstyle='format')
def create_db_engine(dialect, driver, username, password, host, dbname):
    """
    Create the engine object to connect to the database. The object is
    required by the pandas functions to_sql and from_sql.

    :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
    :param driver: DBAPI driver (psycopg2, ...)
    :param username: Username to connect with the database
    :param password: Password to connect with the database
    :param host: Host to connect with the database
    :param dbname: database name
    :return: the engine
    """
    # Delegate to create_db_connection so the URL template lives in a
    # single place (it used to be duplicated here only to feed the
    # debug message below). Local renamed to avoid shadowing the
    # module-level ``engine`` variable.
    db_engine = create_db_connection(dialect, driver, username, password,
                                     host, dbname)
    if settings.DEBUG:
        # Mask the password so credentials are not leaked to the console.
        print('Creating engine with ',
              '{0}{1}://{2}:***@{3}/{4}'.format(dialect, driver, username,
                                                host, dbname))
    return db_engine
def destroy_db_engine(db_engine):
    """
    Dispose of the given engine so that no pooled DB connections remain
    available.

    :param db_engine: Engine to destroy
    :return: Nothing
    """
    db_engine.dispose()
def pg_restore_table(filename):
    """
    Load the content of a file produced with pg_dump into the existing
    database by running it through psql.

    :param filename: File in pg_dump format to restore
    :return:
    """
    # -q keeps psql quiet; the dump is applied to the default database.
    psql_command = [
        'psql',
        '-d', settings.DATABASES['default']['NAME'],
        '-q',
        '-f', filename,
    ]
    process = subprocess.Popen(psql_command)
    process.wait()
def delete_all_tables():
    """
    Drop every DB table belonging to an OnTask workflow (those whose
    name starts with the OnTask table prefix).

    :return:
    """
    cursor = connection.cursor()
    for table_info in connection.introspection.get_table_list(cursor):
        if table_info.name.startswith(table_prefix):
            cursor.execute('DROP TABLE "{0}";'.format(table_info.name))
    # To make sure the tables are dropped.
    connection.commit()
    return
def create_table_name(pk):
    """
    Name of the DB table that stores the data frame of one workflow.

    :param pk: Primary Key of a workflow
    :return: The unique table name to use to store a workflow data frame
    """
    table_name = df_table_prefix.format(pk)
    return table_name
def create_upload_table_name(pk):
    """
    Build the name of the DB table used to upload/merge a new data frame.

    :param pk: Primary key of a workflow
    :return: The unique table to use to upload a new data frame
    """
    return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
    """
    Load the data frame stored for the workflow with the given primary key.

    :param pk: Primary key of the workflow
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: JSON expression to filter a subset of rows
    :return: data frame
    """
    table_name = create_table_name(pk)
    return load_table(table_name, columns=columns, filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
    """
    Load a data frame from the SQL DB.
    FUTURE WORK:
    Consider to store the dataframes in Redis to reduce load/store time.
    The trick is to use a compressed format:
    SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
    GET: pd.read_msgpack(redisConn.get("key"))
    Need to agree on a sensible item name that does not collide with anything
    else and a policy to detect a cached dataframe and remove it when the data
    changes (difficult to detect? Perhaps df_new.equals(df_current))
    If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the db in to data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: Optional JSON expression to filter a subset of rows
    :return: data_frame or None if it does not exist.
    """
    if table_name not in connection.introspection.table_names():
        return None
    if settings.DEBUG:
        print('Loading table ', table_name)
    if columns or filter_exp:
        # A list of columns or a filter exp is given
        query, params = get_filter_query(table_name, columns, filter_exp)
        result = pd.read_sql_query(query, engine, params=params)
    else:
        # No columns or filter given, so simply get the whole table
        result = pd.read_sql(table_name, engine)
    # After reading from the DB, turn all None into NaN
    result.fillna(value=np.nan, inplace=True)
    return result
def load_query(query):
    """
    Run the given query in the SQL DB and return the result as a data frame.

    :param query: Query to run in the DB
    :return: data_frame or None if it does not exist.
    """
    if settings.DEBUG:
        print('Loading query ', query)
    data_frame = pd.read_sql_query(query, engine)
    # Normalise: replace any None coming from the DB with NaN
    data_frame.fillna(value=np.nan, inplace=True)
    return data_frame
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
    """
    Given a file object, try to read the content as a CSV file and transform
    into a data frame. The skiprows and skipfooter are number of lines to skip
    from the top and bottom of the file (see read_csv in pandas).
    It also tries to convert as many columns as possible to date/time format
    (testing the conversion on every string column).
    :param file: File object to read the CSV content
    :param skiprows: Number of lines to skip at the top of the document
    :param skipfooter: Number of lines to skip at the bottom of the document
    :return: Resulting data frame, or an Exception.
    """
    data_frame = pd.read_csv(
        file,
        index_col=False,
        infer_datetime_format=True,
        quotechar='"',
        skiprows=skiprows,
        skipfooter=skipfooter
    )
    # Strip white space from all string columns and try to convert to
    # datetime just in case
    for x in list(data_frame.columns):
        if data_frame[x].dtype.name == 'object':
            # Column is a string! Remove the leading and trailing white
            # space (non-string cells become NaN after .str.strip, so they
            # are restored from the original column with fillna)
            data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
            # Try the datetime conversion
            try:
                series = pd.to_datetime(data_frame[x],
                                        infer_datetime_format=True)
                # Datetime conversion worked! Update the data_frame
                data_frame[x] = series
            except (ValueError, TypeError):
                # Not a datetime column: leave the stripped strings as-is
                pass
    return data_frame
def load_df_from_sqlconnection(conn_item, pwd=None):
    """
    Load a data frame through a DB connection described by conn_item.

    :param conn_item: SQLConnection object with the connection parameters.
    :param pwd: Optional password to use for the connection.
    :return: Data frame or raise an exception.
    """
    db_connection = create_db_connection(
        conn_item.conn_type,
        conn_item.conn_driver,
        conn_item.db_user,
        pwd,
        conn_item.db_host,
        conn_item.db_name,
    )
    # Fetch the whole table and normalise None values to NaN.
    data_frame = pd.read_sql(conn_item.db_table, db_connection)
    data_frame.fillna(value=np.nan, inplace=True)
    return data_frame
def store_table(data_frame, table_name):
    """
    Store a data frame in the DB
    :param data_frame: The data frame to store
    :param table_name: The name of the table in the DB
    :return: Nothing. Side effect in the DB
    """
    # Lock on the table name so concurrent writers do not interleave.
    with cache.lock(table_name):
        # Overwrite any previous content; no index column is created.
        data_frame.to_sql(table_name, engine, if_exists='replace', index=False)
    return
def delete_table(pk):
    """Delete the table representing the workflow with the given PK. Due to
    the dual use of the database, the command has to be executed directly on
    the DB.

    :param pk: Primary key of the workflow whose table is dropped
    :return: Nothing. Errors are logged, not raised.
    """
    # Compute the table name once instead of re-deriving it in the error path.
    table_name = create_table_name(pk)
    try:
        cursor = connection.cursor()
        cursor.execute('DROP TABLE "{0}";'.format(table_name))
        connection.commit()
    except Exception:
        # Best effort: a failure to drop (e.g. missing table) is logged but
        # does not abort the caller.
        logger.error(
            'Error while dropping table {0}'.format(table_name)
        )
def delete_upload_table(pk):
    """Delete the table used to merge data into the workflow with the given
    PK. Due to the dual use of the database, the command has to be executed
    directly on the DB.
    """
    table_name = create_upload_table_name(pk)
    cursor = connection.cursor()
    cursor.execute('DROP TABLE "{0}"'.format(table_name))
    connection.commit()
def get_table_column_types(table_name):
    """
    Obtain the SQL data types of the columns in the given table.

    :param table_name: Table name
    :return: List of pairs (column name, SQL type)
    """
    cursor = connection.cursor()
    # Pass the table name as a bound parameter instead of interpolating it
    # into the SQL string, avoiding injection through the name.
    cursor.execute(
        """select column_name, data_type from
        INFORMATION_SCHEMA.COLUMNS where table_name = %s""",
        [table_name]
    )
    return cursor.fetchall()
def df_column_types_rename(table_name):
    """
    Obtain the SQL data types of the columns of a table, translated to the
    internal data type names.

    :param table_name: Name of the table storing the data frame
    :return: List of data type strings translated to the proper values
    """
    # Fetch the (column name, SQL type) pairs once; the original queried the
    # information schema twice for the same data.
    column_types = get_table_column_types(table_name)
    return [sql_datatype_names[dtype] for __, dtype in column_types]
def df_drop_column(pk, column_name):
    """
    Drop a column from the DB table storing a data frame
    :param pk: Workflow primary key to obtain table name
    :param column_name: Column name
    :return: Drops the column from the corresponding DB table
    """
    statement = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
        create_table_name(pk), column_name)
    cursor = connection.cursor()
    cursor.execute(statement)
def get_subframe(pk, cond_filter, column_names=None):
    """
    Execute a select query to extract a subset of the dataframe and turn the
    resulting query set into a data frame.

    :param pk: Workflow primary key
    :param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of column names to fetch
    :return: Data frame with the selected rows/columns
    """
    cursor = get_table_cursor(pk, cond_filter, column_names)
    # Build the data frame from the raw records and label the columns after
    # the cursor description.
    data_frame = pd.DataFrame.from_records(cursor.fetchall(),
                                           coerce_float=True)
    data_frame.columns = [col.name for col in cursor.description]
    return data_frame
def get_table_cursor(pk, cond_filter, column_names=None):
    """
    Execute a select query in the database with an optional filter obtained
    from the jquery QueryBuilder.

    :param pk: Primary key of the workflow storing the data
    :param cond_filter: Condition object to filter the data (or None)
    :param column_names: optional list of columns to select
    :return: Cursor with the executed query
    """
    table_name = create_table_name(pk)
    if column_names:
        sanitised = [fix_pctg_in_name(name) for name in column_names]
        query = 'SELECT "{0}" from "{1}"'.format('", "'.join(sanitised),
                                                 table_name)
    else:
        query = 'SELECT * from "{0}"'.format(table_name)

    fields = []
    if cond_filter is not None:
        cond_filter, fields = evaluate_node_sql(cond_filter.formula)
        if cond_filter:
            # An empty condition contributes no WHERE clause.
            query += ' WHERE ' + cond_filter

    cursor = connection.cursor()
    cursor.execute(query, fields)
    return cursor
def execute_select_on_table(pk, fields, values, column_names=None):
    """
    Execute a select query in the database with an optional filter obtained
    from the jquery QueryBuilder.

    :param pk: Primary key of the workflow storing the data
    :param fields: List of fields to add to the WHERE clause
    :param values: parameters to match the previous fields
    :param column_names: optional list of columns to select
    :return: QuerySet with the data rows
    """
    if column_names:
        selected = ','.join(
            '"' + fix_pctg_in_name(name) + '"' for name in column_names
        )
        query = 'SELECT {0}'.format(selected)
    else:
        query = 'SELECT *'
    query += ' FROM "{0}"'.format(create_table_name(pk))

    cursor = connection.cursor()
    if fields:
        # Conjunction of field = value conditions, parameterised.
        conditions = ['"{0}" = %s'.format(fix_pctg_in_name(name))
                      for name in fields]
        query += ' WHERE ' + ' AND '.join(conditions)
        cursor.execute(query, values)
    else:
        cursor.execute(query)
    return cursor.fetchall()
def update_row(pk, set_fields, set_values, where_fields, where_values):
    """
    Update a single row in the table of the workflow with the given primary
    key: every (set_field, set_value) assignment is applied to the row
    selected by the conjunction of (where_field = where_value) pairs.

    :param pk: Primary key to detect workflow
    :param set_fields: List of field names to be updated
    :param set_values: List of values to update the fields of the previous list
    :param where_fields: List of fields used to filter the row in the table
    :param where_values: List of values of the previous fields to filter the row
    :return: The table in the workflow pointed by PK is modified.
    """
    assignments = ', '.join(
        '"{0}" = %s'.format(fix_pctg_in_name(name)) for name in set_fields
    )
    conditions = ' AND '.join(
        '"{0}" = %s'.format(fix_pctg_in_name(name)) for name in where_fields
    )
    query = 'UPDATE "{0}" SET {1} WHERE {2}'.format(
        create_table_name(pk), assignments, conditions
    )
    cursor = connection.cursor()
    # SET parameters are bound first, followed by the WHERE parameters.
    cursor.execute(query, set_values + where_values)
    connection.commit()
def increase_row_integer(pk, set_field, where_field, where_value):
    """
    Add one to an integer field in the row identified by the pair
    (where_field, where_value) in the table of the given workflow.

    :param pk: Primary key to detect workflow
    :param set_field: name of the field to be increased
    :param where_field: Field used to filter the row in the table
    :param where_value: Value of the previous field to filter the row
    :return: The table in the workflow pointed by PK is modified.
    """
    query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
        create_table_name(pk), set_field, where_field)
    cursor = connection.cursor()
    cursor.execute(query, [where_value])
    connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
    """
    Select the set of elements after filtering and with the key=value pair

    :param workflow: workflow object to get to the table
    :param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
           be unique.
    :param column_names: Optional list of column names to select
    :return: A dictionary with the (column_name, value) data or None if the
     row has not been found
    """
    if column_names:
        sanitised = [fix_pctg_in_name(name) for name in column_names]
        query = 'SELECT "{0}"'.format('", "'.join(sanitised))
    else:
        query = 'SELECT *'
    query += ' FROM "{0}"'.format(create_table_name(workflow.id))
    # Restrict to the row where the key column equals the given value.
    query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
    fields = [kv_pair[1]]

    if cond_filter is not None:
        cond_filter, filter_fields = \
            evaluate_node_sql(cond_filter.formula)
        query += ' AND (' + cond_filter + ')'
        fields = fields + filter_fields

    cursor = connection.cursor()
    cursor.execute(query, fields)
    rows = cursor.fetchall()
    # The key is expected to be unique: anything other than exactly one row
    # means the lookup failed.
    if len(rows) != 1:
        return None
    return OrderedDict(zip(workflow.get_column_names(), rows[0]))
def get_column_stats_from_df(df_column):
    """
    Given a data frame with a single column, return a set of statistics
    depending on its type.

    :param df_column: data frame with a single column
    :return: A dictionary with keys depending on the type of column
      {'min': minimum value (integer, double an datetime),
       'q1': Q1 value (0.25) (integer, double),
       'mean': mean value (integer, double),
       'median': median value (integer, double),
       'q3': Q3 value (0.75) (integer, double),
       'max': maximum value (integer, double an datetime),
       'std': standard deviation (integer, double),
       'counts': (integer, double, string, datetime, Boolean),
       'mode': (integer, double, string, datetime, Boolean)},
      or None if the column has all its values to NaN
    """
    if len(df_column.loc[df_column.notnull()]) == 0:
        # The column has no data
        return None

    # Dictionary to return
    result = {
        'min': 0,
        'q1': 0,
        'mean': 0,
        'median': 0,
        'q3': 0,
        'max': 0,
        'std': 0,
        'mode': None,
        'counts': {},
    }

    data_type = pandas_datatype_names[df_column.dtype.name]

    if data_type == 'integer' or data_type == 'double':
        quantiles = df_column.quantile([0, .25, .5, .75, 1])
        result['min'] = '{0:g}'.format(quantiles[0])
        result['q1'] = '{0:g}'.format(quantiles[.25])
        result['mean'] = '{0:g}'.format(df_column.mean())
        result['median'] = '{0:g}'.format(quantiles[.5])
        result['q3'] = '{0:g}'.format(quantiles[.75])
        result['max'] = '{0:g}'.format(quantiles[1])
        result['std'] = '{0:g}'.format(df_column.std())

    result['counts'] = df_column.value_counts().to_dict()

    mode = df_column.mode()
    if len(mode) == 0:
        # BUG FIX: the original assigned mode = '--' and then indexed it,
        # which produced '-' instead of the intended '--' marker.
        result['mode'] = '--'
    else:
        result['mode'] = mode[0]

    return result
def get_filter_query(table_name, column_names, filter_exp):
    """
    Given a set of columns and a filter expression, return a pair of SQL query
    and params to be executed

    :param table_name: Table to query
    :param column_names: list of columns to consider or None to consider all
    :param filter_exp: Text filter expression
    :return: (sql query, sql params)
    """
    if column_names:
        sanitised = [fix_pctg_in_name(name) for name in column_names]
        query = 'SELECT "{0}"'.format('", "'.join(sanitised))
    else:
        query = 'SELECT *'
    query += ' FROM "{0}"'.format(table_name)

    # Translate the optional filter expression into a SQL suffix plus the
    # parameters it needs.
    filter_txt = ''
    filter_fields = []
    if filter_exp:
        filter_txt, filter_fields = evaluate_node_sql(filter_exp)

    fields = []
    if filter_txt:
        query += ' WHERE ' + filter_txt
    if filter_fields:
        fields.extend(filter_fields)
    return (query, fields)
def search_table_rows(workflow_id,
                      cv_tuples=None,
                      any_join=True,
                      order_col_name=None,
                      order_asc=True,
                      column_names=None,
                      pre_filter=None):
    """
    Select rows where for every (column, value) pair, column contains value (
    as in LIKE %value%, these are combined with OR if any is TRUE, or AND if
    any is false, and the result is ordered by the given column and type (if
    given)
    :param workflow_id: Primary key of the workflow storing the table
    :param cv_tuples: A column, value, type tuple to search the value in the
    column
    :param any_join: Boolean encoding if values should be combined with OR (or
    AND)
    :param order_col_name: Order results by this column
    :param order_asc: Order results in ascending values (or descending)
    :param column_names: Optional list of column names to select
    :param pre_filter: Optional filter condition to pre filter the query set.
    the query is built with these terms as requirement AND the cv_tuples.
    :return: The resulting query set
    """
    # Create the query
    if column_names:
        safe_column_names = [fix_pctg_in_name(x) for x in column_names]
        query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
    else:
        query = 'SELECT *'
    # Add the table
    query += ' FROM "{0}"'.format(create_table_name(workflow_id))
    # Calculate the first suffix to add to the query
    filter_txt = ''
    filter_fields = []
    if pre_filter:
        filter_txt, filter_fields = evaluate_node_sql(pre_filter)
    if cv_tuples:
        likes = []
        tuple_fields = []
        # data_type is intentionally unused: every column is CAST to TEXT so
        # the LIKE search works uniformly across types.
        for name, value, data_type in cv_tuples:
            # Make sure we escape the name and search as text
            name = fix_pctg_in_name(name)
            mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
            # Create the second part of the query setting column LIKE '%value%'
            likes.append(mod_name)
            tuple_fields.append('%' + value + '%')
        # Combine the search subqueries
        if any_join:
            tuple_txt = '(' + ' OR '.join(likes) + ')'
        else:
            tuple_txt = '(' + ' AND '.join(likes) + ')'
    # Build the query so far appending the filter and/or the cv_tuples
    if filter_txt or cv_tuples:
        query += ' WHERE '
    fields = []
    # If there has been a suffix from the filter, add it.
    if filter_txt:
        query += filter_txt
        fields.extend(filter_fields)
    # If there is a pre-filter, the suffix needs to be "AND" with the ones
    # just calculated
    if filter_txt and cv_tuples:
        query += ' AND '
    if cv_tuples:
        query += tuple_txt
        fields.extend(tuple_fields)
    # Add the order if needed
    if order_col_name:
        query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
    if not order_asc:
        query += ' DESC'
    # Execute the query
    cursor = connection.cursor()
    cursor.execute(query, fields)
    # Get the data
    return cursor.fetchall()
def delete_table_row_by_key(workflow_id, kv_pair):
    """
    Delete the row in the table attached to a workflow with the given key,
    value pairs

    :param workflow_id: Primary key of the workflow owning the table
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
           be unique.
    :return: Drops that row from the table in the DB
    """
    # Parameterised delete restricted to the row where key equals the value.
    query = 'DELETE FROM "{0}" WHERE ("{1}" = %s)'.format(
        create_table_name(workflow_id),
        fix_pctg_in_name(kv_pair[0])
    )
    cursor = connection.cursor()
    cursor.execute(query, [kv_pair[1]])
def num_rows(pk, cond_filter=None):
    """
    Obtain the number of rows of the table storing workflow with given pk

    :param pk: Primary key of the table storing the data frame
    :param cond_filter: Condition element to filter the query
    :return: Number of rows in the table (after applying the filter)
    """
    return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
    """
    Given a table name, get its number of rows

    :param table_name: Table name
    :param cond_filter: Condition element used to filter the query
    :return: integer
    """
    # Initial query with the table name
    query = query_count_rows.format(table_name)
    fields = []
    if cond_filter is not None:
        # Translate the condition into a SQL suffix and its parameters.
        cond_filter, fields = evaluate_node_sql(cond_filter)
        query += ' WHERE ' + cond_filter
    cursor = connection.cursor()
    cursor.execute(query, fields)
    # A COUNT query returns a single row with a single value.
    return cursor.fetchone()[0]
def check_wf_df(workflow):
    """
    Check the consistency between the information stored in the workflow
    and the structure of the underlying dataframe

    :param workflow: Workflow object
    :return: Boolean stating the result of the check. True: Correct.
    """
    df = load_from_db(workflow.id)

    # Dimensions and column names of the stored frame (defaults when absent).
    if df is None:
        dfnrows, dfncols, df_col_names = 0, 0, []
    else:
        dfnrows, dfncols = df.shape[0], df.shape[1]
        df_col_names = list(df.columns)

    # Check 1: number of rows and columns must match.
    if workflow.nrows != dfnrows:
        return False
    if workflow.ncols != dfncols:
        return False

    # Check 2: identical sets of columns, in the same order.
    wf_cols = workflow.columns.all()
    if [col.name for col in wf_cols] != df_col_names:
        return False

    # Check 3: identical data types.
    for wf_col, df_col_name in zip(wf_cols, df_col_names):
        df_dt = pandas_datatype_names[df[df_col_name].dtype.name]
        if wf_col.data_type == 'boolean' and df_dt == 'string':
            # A Boolean column containing nulls is stored as string: accept.
            continue
        if wf_col.data_type != df_dt:
            return False

    return True
| 31.776246 | 86 | 0.64275 |
1ce813d53ecf60bcfa1c5a10f665cbdcffd14f05 | 1,579 | py | Python | config/cf.py | rbsdev/config-client | 761f39cd8839daba10bf21b98ccdd44d33eaebe8 | [
"Apache-2.0"
] | null | null | null | config/cf.py | rbsdev/config-client | 761f39cd8839daba10bf21b98ccdd44d33eaebe8 | [
"Apache-2.0"
] | null | null | null | config/cf.py | rbsdev/config-client | 761f39cd8839daba10bf21b98ccdd44d33eaebe8 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, KeysView
import attr
from config.auth import OAuth2
from config.cfenv import CFenv
from config.spring import ConfigClient
| 28.709091 | 80 | 0.644712 |
1ce82884bd68028c036284e33b78a44ed716634f | 3,881 | py | Python | ducktape/template.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | ducktape/template.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | ducktape/template.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import package_is_installed
from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment
import os.path
import inspect
| 42.184783 | 113 | 0.670188 |
1ce98f8cbd7283e38faf8437d2c92e51357a9597 | 93 | py | Python | day4/homework/q7.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | day4/homework/q7.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | day4/homework/q7.py | AkshayManchanda/Python_Training | 5a50472d118ac6d40145bf1dd60f26864bf9fb6c | [
"MIT"
] | null | null | null | i=input("Enter a string: ")
list = i.split()
list.sort()
for i in list:
print(i,end=' ')
| 15.5 | 27 | 0.591398 |
1cea0e3fced4fc9fe2a48efd4c3e8de95165a2da | 948 | py | Python | src/git_portfolio/use_cases/config_repos.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | src/git_portfolio/use_cases/config_repos.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | src/git_portfolio/use_cases/config_repos.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | """Config repositories use case."""
from __future__ import annotations
import git_portfolio.config_manager as cm
import git_portfolio.domain.gh_connection_settings as cs
import git_portfolio.responses as res
| 37.92 | 83 | 0.74789 |
1cea0f437c7a9f8ccbc1159b25612a99704a7170 | 943 | py | Python | test/test_logic.py | mateuszkowalke/sudoku_game | 800e33a6fe755b493d8e9c3c9a20204af5865148 | [
"MIT"
] | null | null | null | test/test_logic.py | mateuszkowalke/sudoku_game | 800e33a6fe755b493d8e9c3c9a20204af5865148 | [
"MIT"
] | null | null | null | test/test_logic.py | mateuszkowalke/sudoku_game | 800e33a6fe755b493d8e9c3c9a20204af5865148 | [
"MIT"
] | null | null | null | import pytest
from ..logic import Board, empty_board, example_board, solved_board
| 27.735294 | 67 | 0.66702 |
1ceb3eafc161d9fd9d9f5411f96898dcc0d87036 | 8,111 | py | Python | src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | [
"MIT"
] | 2 | 2021-03-17T18:14:22.000Z | 2021-09-19T13:50:02.000Z | src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | [
"MIT"
] | null | null | null | src/compas_rhino/objects/_select.py | jf---/compas | cd878ece933013b8ac34e9d42cf6d5c62a5396ee | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| 27.494915 | 94 | 0.53298 |
1ceb4e48c8b6f66fc03698755dae7d3610a03921 | 1,258 | py | Python | handlers/product_add.py | MuchkoM/CalorieMatchBot | ca26a1f6195079e10dd798ca9e77968438f2aa01 | [
"MIT"
] | null | null | null | handlers/product_add.py | MuchkoM/CalorieMatchBot | ca26a1f6195079e10dd798ca9e77968438f2aa01 | [
"MIT"
] | null | null | null | handlers/product_add.py | MuchkoM/CalorieMatchBot | ca26a1f6195079e10dd798ca9e77968438f2aa01 | [
"MIT"
] | null | null | null | from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
ADD_1 = 0
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
| 32.25641 | 116 | 0.67806 |
1cebb0fff2532d5f8a3a2e41a74346938730be3d | 1,298 | py | Python | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2017-08-13T14:09:32.000Z | 2018-07-16T23:39:00.000Z | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | null | null | null | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | ee45bee6f96cdb6d91184abc16f41bba1546c943 | [
"BSD-3-Clause"
] | 2 | 2018-04-02T06:45:11.000Z | 2018-07-16T23:39:02.000Z | from mock import patch
import numpy as np
| 24.961538 | 65 | 0.682589 |
1cec0b60edcd31e7b741951f8b76edad6144ee56 | 1,345 | py | Python | src/Cipher/MultiLevelCaesarDecrypt.py | EpicTofuu/Assignment | 293f99d20e8fa7d688c16a56c48a554bcd3c9e7d | [
"Apache-2.0"
] | null | null | null | src/Cipher/MultiLevelCaesarDecrypt.py | EpicTofuu/Assignment | 293f99d20e8fa7d688c16a56c48a554bcd3c9e7d | [
"Apache-2.0"
] | null | null | null | src/Cipher/MultiLevelCaesarDecrypt.py | EpicTofuu/Assignment | 293f99d20e8fa7d688c16a56c48a554bcd3c9e7d | [
"Apache-2.0"
] | null | null | null | import Cipher.tk
from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode
'''
# testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231
''' | 32.804878 | 150 | 0.584387 |
1cecb0baeee1d541b67de121aac28491961e0c43 | 2,234 | py | Python | scripts/vcf_filter.py | bunop/cyvcf | f58860dd06b215b9d9ae80e2b46337fb6ab59139 | [
"MIT"
] | 46 | 2015-01-31T17:24:34.000Z | 2021-01-15T01:29:07.000Z | scripts/vcf_filter.py | arq5x/cyvcf | f58860dd06b215b9d9ae80e2b46337fb6ab59139 | [
"MIT"
] | 11 | 2015-01-13T17:59:32.000Z | 2016-09-23T21:50:00.000Z | scripts/vcf_filter.py | mandawilson/PyVCF | d23ab476237aced75635e543c061c1bf80a7c2a4 | [
"MIT"
] | 7 | 2015-02-10T09:12:00.000Z | 2016-06-30T03:37:37.000Z | #!/usr/bin/env python
import sys
import argparse
import pkg_resources
import vcf
from vcf.parser import _Filter
parser = argparse.ArgumentParser(description='Filter a VCF file',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('input', metavar='input', type=str, nargs=1,
help='File to process (use - for STDIN)')
parser.add_argument('filters', metavar='filter', type=str, nargs='+',
help='Filters to use')
parser.add_argument('--no-short-circuit', action='store_true',
help='Do not stop filter processing on a site if a single filter fails.')
parser.add_argument('--output', action='store', default=sys.stdout,
help='Filename to output (default stdout)')
parser.add_argument('--no-filtered', action='store_true',
help='Remove failed sites')
if __name__ == '__main__':
# TODO: allow filter specification by short name
# TODO: flag that writes filter output into INFO column
# TODO: argument use implies filter use
# TODO: parallelize
# TODO: prevent plugins raising an exception from crashing the script
# dynamically build the list of available filters
filters = {}
filter_help = '\n\navailable filters:'
for p in pkg_resources.iter_entry_points('vcf.filters'):
filt = p.load()
filters[filt.name] = filt
filt.customize_parser(parser)
filter_help += '\n %s:\t%s' % (filt.name, filt.description)
parser.description += filter_help
# parse command line args
args = parser.parse_args()
inp = vcf.Reader(file(args.input[0]))
# build filter chain
chain = []
for name in args.filters:
f = filters[name](args)
chain.append(f)
inp.filters[f.filter_name()] = _Filter(f.filter_name(), f.description)
oup = vcf.Writer(args.output, inp)
# apply filters
short_circuit = not args.no_short_circuit
for record in inp:
for filt in chain:
result = filt(record)
if result:
record.add_filter(filt.filter_name())
if short_circuit:
break
if (not args.no_filtered) or (record.FILTER == '.'):
oup.write_record(record)
| 30.60274 | 81 | 0.651746 |
1cecb4c2f3b6f24c919644faa0e058b12f679c06 | 273 | py | Python | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 85bd06873174b9c5c6276160988c19b460370db8 | [
"MIT"
] | null | null | null | import os
from flask import Blueprint, render_template | 22.75 | 58 | 0.652015 |
1ced85b293ca7dbd18aca02752e3ef9bf70663c2 | 4,125 | py | Python | alphacoders/__init__.py | whoiscc/alphacoders | 685d1e7e02a7276ae0518114b0c6aab58914aab7 | [
"MIT"
] | 7 | 2019-09-22T16:16:15.000Z | 2020-08-27T23:53:07.000Z | alphacoders/__init__.py | whoiscc/alphacoders | 685d1e7e02a7276ae0518114b0c6aab58914aab7 | [
"MIT"
] | 1 | 2020-08-27T23:53:02.000Z | 2020-08-28T06:10:10.000Z | alphacoders/__init__.py | whoiscc/alphacoders | 685d1e7e02a7276ae0518114b0c6aab58914aab7 | [
"MIT"
] | null | null | null | #
from aiohttp.client_exceptions import ClientError
from lxml import html
from pathlib import Path
from asyncio import create_task
from functools import wraps
def download_search(client, keyword, page):
safe_keyword = keyword.replace(" ", "+")
# url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}"
url = f"https://wall.alphacoders.com/search.php?search={safe_keyword}&page={page}"
return download_page(client, url)
class SingleTask:
def __init__(self, keyword, limit=None):
self.keyword = keyword
self.limit = limit
self.complete_count = 0
self.triggered = False
| 30.555556 | 95 | 0.629333 |
1ceddd105ecb3e0dcae569f584b7c20f28eab09e | 553 | py | Python | Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py | PeriscopeData/analytics-toolbox | 83effdee380c33e5eecea29528acf5375fd496fb | [
"MIT"
] | 2 | 2019-09-27T22:19:09.000Z | 2019-12-02T23:12:18.000Z | Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py | PeriscopeData/analytics-toolbox | 83effdee380c33e5eecea29528acf5375fd496fb | [
"MIT"
] | 1 | 2019-10-03T17:46:23.000Z | 2019-10-03T17:46:23.000Z | Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py | PeriscopeData/analytics-toolbox | 83effdee380c33e5eecea29528acf5375fd496fb | [
"MIT"
] | 2 | 2021-07-17T18:23:50.000Z | 2022-03-03T04:53:03.000Z | # SQL output is imported as a pandas dataframe variable called "df"
# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import tmean, scoreatpercentile
import numpy as np
my_result = trimmean(df["amt_paid"].values,10) | 39.5 | 100 | 0.779385 |
1cef547e153ff6ac5a327c151e5950b2c7563ac2 | 1,298 | py | Python | scripts/data_extract.py | amichalski2/WBC-SHAP | b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0 | [
"MIT"
] | null | null | null | scripts/data_extract.py | amichalski2/WBC-SHAP | b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0 | [
"MIT"
] | null | null | null | scripts/data_extract.py | amichalski2/WBC-SHAP | b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0 | [
"MIT"
] | null | null | null | import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
| 25.45098 | 79 | 0.617874 |
1cf00fc10b36c1bb5b56b4af86d43c0bd17b8dff | 33,478 | py | Python | ironic/tests/unit/drivers/test_base.py | tzumainn/ironic | 91680bd450a4b2259d153b6a995a9436a5f82694 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/drivers/test_base.py | tzumainn/ironic | 91680bd450a4b2259d153b6a995a9436a5f82694 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/drivers/test_base.py | tzumainn/ironic | 91680bd450a4b2259d153b6a995a9436a5f82694 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from ironic.common import exception
from ironic.common import raid
from ironic.common import states
from ironic.drivers import base as driver_base
from ironic.drivers.modules import fake
from ironic.tests import base
class PassthruDecoratorTestCase(base.TestCase):
def test_passthru_shared_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.shared_task._vendor_metadata[1])
self.assertFalse(
self.fvi.shared_task._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_exclusive_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.noexception._vendor_metadata[1])
self.assertTrue(
self.fvi.noexception._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_check_func_references(self):
inst1 = FakeVendorInterface()
inst2 = FakeVendorInterface()
self.assertNotEqual(inst1.vendor_routes['noexception']['func'],
inst2.vendor_routes['noexception']['func'])
self.assertNotEqual(inst1.driver_routes['driver_noexception']['func'],
inst2.driver_routes['driver_noexception']['func'])
class CleanStepDecoratorTestCase(base.TestCase):
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method', obj.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method', obj.get_clean_steps(
task_mock)[1]['step'])
# Ensure the second obj get different clean steps
self.assertEqual(2, len(obj2.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj2.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method2', obj2.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj2.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method2', obj2.get_clean_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_clean_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different clean steps
self.assertEqual(2, len(obj3.get_clean_steps(task_mock)))
self.assertEqual(15, obj3.get_clean_steps(task_mock)[0]['priority'])
self.assertFalse(obj3.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test3', obj3.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method3', obj3.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_clean_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_clean_steps(task_mock)[1]['priority'])
self.assertTrue(obj3.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual(obj3.interface_type, obj3.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method3', obj3.get_clean_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_clean_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_clean_step(task_mock, obj.get_clean_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
clean_step = {'interface': 'test3', 'step': 'manual_method3',
'args': args}
obj3.execute_clean_step(task_mock, clean_step)
method_args_mock.assert_called_once_with(task_mock, **args)
class DeployStepDecoratorTestCase(base.TestCase):
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_ten', obj.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero', obj.get_deploy_steps(
task_mock)[1]['step'])
# Ensure the second obj has different deploy steps
self.assertEqual(2, len(obj2.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_twenty', obj2.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero2', obj2.get_deploy_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_deploy_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different deploy steps
self.assertEqual(2, len(obj3.get_deploy_steps(task_mock)))
self.assertEqual(15, obj3.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test3', obj3.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_fifteen', obj3.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_deploy_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual(obj3.interface_type, obj3.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero3', obj3.get_deploy_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_deploy_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_deploy_step(task_mock, obj.get_deploy_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
deploy_step = {'interface': 'test3', 'step': 'deploy_zero3',
'args': args}
obj3.execute_deploy_step(task_mock, deploy_step)
method_args_mock.assert_called_once_with(task_mock, **args)
class MyRAIDInterface(driver_base.RAIDInterface):
class RAIDInterfaceTestCase(base.TestCase):
class TestBIOSInterface(base.TestCase):
| 42.057789 | 79 | 0.642183 |
1cf117501c6990cccaec0505efbf96de4aa8d218 | 299 | py | Python | opentimesheet/profiles/tests/test_models.py | valerymelou/opentimesheet-server | 0da97ebb3c3e59962132d1bc5e83e1d727f7331b | [
"MIT"
] | null | null | null | opentimesheet/profiles/tests/test_models.py | valerymelou/opentimesheet-server | 0da97ebb3c3e59962132d1bc5e83e1d727f7331b | [
"MIT"
] | 95 | 2021-02-20T21:53:29.000Z | 2022-01-14T17:24:50.000Z | opentimesheet/profiles/tests/test_models.py | valerymelou/opentimesheet-server | 0da97ebb3c3e59962132d1bc5e83e1d727f7331b | [
"MIT"
] | null | null | null | import pytest
from opentimesheet.core.tests import TenantTestCase
| 23 | 66 | 0.665552 |
1cf1510ac46bda476c715d01c64fd6ef223f7da4 | 10,434 | py | Python | ami/flowchart/library/Display.py | chuckie82/ami | 7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3 | [
"BSD-3-Clause-LBNL"
] | 6 | 2018-05-31T21:37:15.000Z | 2022-01-24T15:22:46.000Z | ami/flowchart/library/Display.py | chuckie82/ami | 7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3 | [
"BSD-3-Clause-LBNL"
] | 68 | 2019-06-06T21:00:49.000Z | 2022-03-14T22:35:29.000Z | ami/flowchart/library/Display.py | chuckie82/ami | 7adb72c709afe4c1af53ef7f0d2b0e3639c63bf3 | [
"BSD-3-Clause-LBNL"
] | 2 | 2020-12-13T01:53:05.000Z | 2021-07-19T04:56:51.000Z | from ami.flowchart.library.DisplayWidgets import ScalarWidget, ScatterWidget, WaveformWidget, \
ImageWidget, ObjectWidget, LineWidget, TimeWidget, HistogramWidget, \
Histogram2DWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array1d, Array2d
from typing import Any
import ami.graph_nodes as gn
| 33.986971 | 96 | 0.57236 |
1cf1add35a6f5a301f98fac454ddd82a0c4fd197 | 1,435 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''OpenGL extension ARB.transform_feedback_instanced
This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API
Overview (from the spec)
Multiple instances of geometry may be specified to the GL by calling
functions such as DrawArraysInstanced and DrawElementsInstanced. Further,
the results of a transform feedback operation may be returned to the GL
by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However,
it is not presently possible to draw multiple instances of data
transform feedback without using a query and the resulting round trip from
server to client.
This extension adds functionality to draw multiple instances of the result
of a transform feedback operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | 39.861111 | 75 | 0.822997 |
1cf2c5ea382bc1bc6087303216c79dc6b5f0dc2a | 2,681 | py | Python | features/cpp/simple/test.py | xbabka01/retdec-regression-tests | 1ac40cca5165740364e6f7fb72b20820eac9bc7c | [
"MIT"
] | 8 | 2017-12-14T14:25:17.000Z | 2019-03-09T03:29:12.000Z | features/cpp/simple/test.py | xbabka01/retdec-regression-tests | 1ac40cca5165740364e6f7fb72b20820eac9bc7c | [
"MIT"
] | 10 | 2019-06-14T09:12:55.000Z | 2021-10-01T12:15:43.000Z | features/cpp/simple/test.py | xbabka01/retdec-regression-tests | 1ac40cca5165740364e6f7fb72b20820eac9bc7c | [
"MIT"
] | 8 | 2019-05-10T14:59:48.000Z | 2022-03-07T16:34:23.000Z | from regression_tests import *
| 31.174419 | 82 | 0.638941 |