hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce80fb5cf7cc8de8662abff81c0c30df6c3722fb | 91 | py | Python | gmailbox/apps.py | megatron0000/ces22-xadrez-web | 10032f81952e1f816925f5af9c27f235dfbf360c | [
"MIT"
] | null | null | null | gmailbox/apps.py | megatron0000/ces22-xadrez-web | 10032f81952e1f816925f5af9c27f235dfbf360c | [
"MIT"
] | 8 | 2020-02-11T22:28:16.000Z | 2021-06-10T20:16:42.000Z | gmailbox/apps.py | megatron0000/ces22-xadrez-web | 10032f81952e1f816925f5af9c27f235dfbf360c | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15.166667 | 33 | 0.758242 |
ce815d6c526703afd41758b750594101532e6d14 | 724 | py | Python | warmup/sock_merchant.py | franloza/hackerrank | e66f5f5c4c1c7c0fe93146d29140692cd71625b7 | [
"MIT"
] | null | null | null | warmup/sock_merchant.py | franloza/hackerrank | e66f5f5c4c1c7c0fe93146d29140692cd71625b7 | [
"MIT"
] | null | null | null | warmup/sock_merchant.py | franloza/hackerrank | e66f5f5c4c1c7c0fe93146d29140692cd71625b7 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the sockMerchant function below.
# Read from input
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
#
# n = int(input())
#
# ar = list(map(int, input().rstrip().split()))
#
# result = sockMerchant(n, ar)
#
# fptr.write(str(result) + '\n')
#
# fptr.close()
# Toy case
if __name__ == '__main__':
n, ar = 9, [10, 20, 20, 10, 10, 30, 50, 10, 20]
print(sockMerchant(n, ar))
| 19.567568 | 52 | 0.56768 |
ce8168357ebbc2da0dca74cb00c849cf14fe0b44 | 5,468 | py | Python | sentiment/twitter/2stepTestValidation.py | ekedziora/twinty | 5f0ba01e9e1b93f686569ebd907517a4f41d5cf7 | [
"Apache-2.0"
] | null | null | null | sentiment/twitter/2stepTestValidation.py | ekedziora/twinty | 5f0ba01e9e1b93f686569ebd907517a4f41d5cf7 | [
"Apache-2.0"
] | null | null | null | sentiment/twitter/2stepTestValidation.py | ekedziora/twinty | 5f0ba01e9e1b93f686569ebd907517a4f41d5cf7 | [
"Apache-2.0"
] | null | null | null | from nltk.corpus.reader import CategorizedPlaintextCorpusReader
from nltk.tokenize.casual import TweetTokenizer
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.svm import SVC, LinearSVC, NuSVC, LinearSVR, NuSVR
from sklearn.linear_model import LogisticRegression, LinearRegression, Perceptron
from featureExtractors import unigramsFeatures, bigramsFeatures, mpqaSubjectivityWordsCountFeatures, \
extraTwitterFeaturesCount, mpqaSentimentWordsCountFeatures
from lexicons.mpqa.mpqaDictionary import MpqaDictionaryWrapper
from normalization import normalizeTwitterWordsWithNegationHandle, normalizeTwitterWordsWithExtraFeatures
from utils import precision_recall_2step
import nltk, pickle
import csv
from itertools import product
tweetTokenizer = TweetTokenizer(reduce_len=True, preserve_case=True, strip_handles=False)
testcorpus = CategorizedPlaintextCorpusReader('corpus/standford/test', r'(pos|neg|neu)-tweet[0-9]+\.txt', cat_pattern=r'(\w+)-tweet[0-9]+\.txt', word_tokenizer=tweetTokenizer)
mpqaDictionary = MpqaDictionaryWrapper()
normalizationFunction = normalizeTwitterWordsWithNegationHandle
testfeatureset = []
with open("dumps/2step/polar/logreg/70pct/uni-bi-extra-mpqa-senti", 'rb') as fileout:
polarClassifier = pickle.load(fileout)
with open("dumps/2step/sentiment/logreg/80pct/uni-bi-extra-mpqa-senti", 'rb') as fileout:
sentiClassifier = pickle.load(fileout)
for category in testcorpus.categories():
for fileid in testcorpus.fileids(category):
words = testcorpus.words(fileids=[fileid])
normalizedWords = normalizationFunction(words)
extraNormalizedWords = normalizeTwitterWordsWithExtraFeatures(words)
testfeatures = getfeaturesTest(normalizedWords, extraNormalizedWords=extraNormalizedWords)
dec = polarClassifier.classify(testfeatures)
if dec == 'polar':
observed = sentiClassifier.classify(testfeatures)
else:
observed = 'neu'
real = testcorpus.categories(fileids=[fileid])
if real[0] != observed:
print(testcorpus.raw(fileids=[fileid]))
print("REAL: {}".format(real))
print("PREDICTED: {}".format(observed))
# performTestValidation(testfeatureset, "dumps/2step/polar/multiNB/uni-bi-extra-mpqa-subj", "dumps/2step/sentiment/multiNB/uni-bi-extra-mpqa-subj")
# performTestValidation(testfeatureset, "dumps/2step/polar/multiNB/uni-bi-extra-mpqa-subj", "dumps/2step/sentiment/logreg/uni-bi-extra-mpqa-subj")
# performTestValidation(testfeatureset, "dumps/2step/polar/logreg/uni-bi-extra-mpqa-subj", "dumps/2step/sentiment/multiNB/uni-bi-extra-mpqa-subj")
# performTestValidation(testfeatureset, "dumps/2step/polar/logreg/uni-bi-extra-mpqa-subj", "dumps/2step/sentiment/logreg/uni-bi-extra-mpqa-subj")
# with open("results2.csv", 'w') as csvfile:
# csvwriter = csv.writer(csvfile)
# csvwriter.writerow(('features', 'polar classifier', 'sentiment classifier', 'test accuracy', 'f score for pos', 'f score for neg', 'f score for neu'))
#
# polarDir = "dumps/2step/polar/"
# sentimentDir = "dumps/2step/sentiment"
# versionsSet = ['multiNB/60pct', 'multiNB/70pct', 'multiNB/80pct', 'logreg/60pct', 'logreg/70pct', 'logreg/80pct']
# for featuresVersion in ['uni-bi-extra-mpqa-senti']:
#
# tuples = product(versionsSet, versionsSet)
# for tuple in tuples:
# # print("CLASSIFIERS:")
# # print("Polar: " + tuple[0])
# # print("Sentiment: " + tuple[1])
# polarClassifierPath = polarDir + '/' + tuple[0] + '/' + featuresVersion
# sentimentClassifierPath = sentimentDir + '/' + tuple[1] + '/' + featuresVersion
# fscores, accuracy = performTestValidation(testfeatureset, polarClassifierPath, sentimentClassifierPath)
# csvwriter.writerow((featuresVersion, tuple[0], tuple[1], accuracy, fscores[0], fscores[1], fscores[2]))
# # print("\n\n") | 53.087379 | 175 | 0.72586 |
ce81ee19f31ff5409d3bf88154d88299544b821e | 10,712 | py | Python | mango/group.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | null | null | null | mango/group.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | null | null | null | mango/group.py | mschneider/mango-explorer | ed50880ef80b31b679c9c89fa9bf0579391d71c9 | [
"MIT"
] | 1 | 2021-09-02T17:06:09.000Z | 2021-09-02T17:06:09.000Z | # # Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [ Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import construct
import time
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .aggregator import Aggregator
from .baskettoken import BasketToken
from .context import Context
from .index import Index
from .layouts import layouts
from .mangoaccountflags import MangoAccountFlags
from .marketmetadata import MarketMetadata
from .market import MarketLookup
from .token import SolToken, Token, TokenLookup
from .tokenvalue import TokenValue
from .version import Version
# # Group class
#
# The `Group` class encapsulates the data for the Mango Group - the cross-margined basket
# of tokens with lending.
| 47.821429 | 169 | 0.684279 |
ce8209792059f11dd2ce00622a3448e62cd81437 | 639 | py | Python | office_test_interface/hede_interface_test/encryption/md5_hede.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | office_test_interface/hede_interface_test/encryption/md5_hede.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | office_test_interface/hede_interface_test/encryption/md5_hede.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : yag8009
# @FileName : md5_hede
# @Time : 2020/3/18
import hashlib
import time
if __name__ == '__main__':
data = "brand=&channelType=227&city=&color=&imei=123456789012345&model=mate9&name=&phone=17066668888&price=3999&saleman=A&store=×tamp=1496566760204&key=b0a44d2ac9bb4196b8977360554f91bb"
md = md5_hede(data).upper()
print(md)
timestamp = int(time.time()*1000)
print(timestamp)
| 27.782609 | 207 | 0.693271 |
ce827ac31d171b94c54c1e52af75d3c49aac0651 | 2,270 | py | Python | src/tables.py | tallywiesenberg/dating-data-dividend | 0a4f3d7784e9902631a79dce1efb12fc39e74963 | [
"MIT"
] | null | null | null | src/tables.py | tallywiesenberg/dating-data-dividend | 0a4f3d7784e9902631a79dce1efb12fc39e74963 | [
"MIT"
] | null | null | null | src/tables.py | tallywiesenberg/dating-data-dividend | 0a4f3d7784e9902631a79dce1efb12fc39e74963 | [
"MIT"
] | null | null | null | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from .extensions import db
# class UserData(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# left_swipes_given = db.Column(db.Integer, nullable=False)
# right_swipes_given = db.Column(db.Integer, nullable=False)
# matches = db.Column(db.Integer, nullable=False)
# bio = db.Column(db.String(240))
# path_to_photos = db.Column(db.String(20), nullable=False)
# user_id = db.Column(db.String(44),
# # db.ForeignKey('user_login.id'),
# nullable=False) | 40.535714 | 84 | 0.697797 |
ce849a188316eb44b68f6012ce73ef84e1631ac2 | 9,586 | py | Python | satyr/proxies/scheduler.py | usheth/satyr | 01fadbe2f9c294b9a9719a85d5bd032925453ee6 | [
"Apache-2.0"
] | null | null | null | satyr/proxies/scheduler.py | usheth/satyr | 01fadbe2f9c294b9a9719a85d5bd032925453ee6 | [
"Apache-2.0"
] | null | null | null | satyr/proxies/scheduler.py | usheth/satyr | 01fadbe2f9c294b9a9719a85d5bd032925453ee6 | [
"Apache-2.0"
] | 1 | 2018-10-10T18:57:54.000Z | 2018-10-10T18:57:54.000Z | from __future__ import absolute_import, division, print_function
import logging
import sys
from mesos.interface import Scheduler
from .messages import Filters, decode, encode
| 41.860262 | 80 | 0.628312 |
ce852eac1ab8df3025b403cd06d435faa196ae79 | 6,759 | py | Python | NeuralNetworks/neuralnetworks/NeuralNetwork.py | AlmCoding/MachineLearningPlayground | b0316367fc269c4fbb4a8035a59058b731e01839 | [
"Apache-2.0"
] | null | null | null | NeuralNetworks/neuralnetworks/NeuralNetwork.py | AlmCoding/MachineLearningPlayground | b0316367fc269c4fbb4a8035a59058b731e01839 | [
"Apache-2.0"
] | null | null | null | NeuralNetworks/neuralnetworks/NeuralNetwork.py | AlmCoding/MachineLearningPlayground | b0316367fc269c4fbb4a8035a59058b731e01839 | [
"Apache-2.0"
] | null | null | null | import numpy as np
| 43.326923 | 113 | 0.568131 |
ce86369c63bb6fc50980df6a3068e5a13c86663c | 3,350 | py | Python | xen/xen-4.2.2/tools/python/xen/xm/console.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2018-02-02T00:15:26.000Z | 2018-02-02T00:15:26.000Z | xen/xen-4.2.2/tools/python/xen/xm/console.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | null | null | null | xen/xen-4.2.2/tools/python/xen/xm/console.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2019-05-27T09:47:18.000Z | 2019-05-27T09:47:18.000Z | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 XenSource Ltd
#============================================================================
import xen.util.auxbin
import xen.lowlevel.xs
import os
import sys
import signal
from xen.util import utils
XENCONSOLE = "xenconsole"
| 37.640449 | 77 | 0.589851 |
ce86edf8e6652b33bfc429b6779f16cab7aabc38 | 591 | py | Python | Solutions/luhn.py | matthijskrul/PythonExercism | 7f0fbd340cf32f7bb88b0c32287729968720ee1c | [
"MIT"
] | null | null | null | Solutions/luhn.py | matthijskrul/PythonExercism | 7f0fbd340cf32f7bb88b0c32287729968720ee1c | [
"MIT"
] | null | null | null | Solutions/luhn.py | matthijskrul/PythonExercism | 7f0fbd340cf32f7bb88b0c32287729968720ee1c | [
"MIT"
] | null | null | null | import re
| 28.142857 | 91 | 0.445008 |
ce882279c49c7c6dbe430df86a631892b7154111 | 3,342 | py | Python | tests/zzz_deprecated_unmaintained/obsmodel/TestZeroMeanGaussLocalStepSpeed.py | HongminWu/bnpy | 04c918cc1150ca8d9694c093633d539d9286a1b6 | [
"BSD-3-Clause"
] | 3 | 2018-07-02T03:50:23.000Z | 2019-05-16T03:23:55.000Z | tests/zzz_deprecated_unmaintained/obsmodel/TestZeroMeanGaussLocalStepSpeed.py | HongminWu/bnpy | 04c918cc1150ca8d9694c093633d539d9286a1b6 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T01:33:06.000Z | 2021-01-07T01:33:06.000Z | tests/zzz_deprecated_unmaintained/obsmodel/TestZeroMeanGaussLocalStepSpeed.py | birlrobotics/bnpy | 8f297d8f3e4a56088d7755134c329f63a550be9e | [
"BSD-3-Clause"
] | 1 | 2020-09-01T13:21:18.000Z | 2020-09-01T13:21:18.000Z | import numpy as np
import scipy.linalg
import argparse
import time
from contextlib import contextmanager
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--N', type=int, default=1e5)
parser.add_argument('--D', type=int, default=64)
args = parser.parse_args()
N = args.N
D = args.D
print "TIMING TEST: N=%d D=%d" % (N, D)
X = np.random.randn(N, D)
R = np.random.randn(D, D)
B = np.dot(R.T, R) + np.eye(D, D)
cholB = np.linalg.cholesky(B)
mahalDist_np_solve(X=X, cholB=cholB)
mahalDist_scipy_solve(X=X, cholB=cholB)
mahalDist_scipy_solve_triangular(X=X, cholB=cholB)
mahalDist_scipy_solve_triangular_nocheck(X=X, cholB=cholB)
"""
In [41]: Qs = scipy.linalg.solve_triangular(cholB, X.T, lower=True, check_finite=False)
In [42]: %timeit -n1 -r1 Q = scipy.linalg.solve_triangular(cholB, X.T, lower=True, check_finite=False)
1 loops, best of 1: 625 ms per loop
In [43]: %timeit -n1 -r1 Q = scipy.linalg.solve_triangular(cholB, X.T, lower=True, check_finite=False)
1 loops, best of 1: 623 ms per loop
In [44]: %timeit -n1 -r1 Q = scipy.linalg.solve_triangular(cholB, X.T, lower=True)
1 loops, best of 1: 790 ms per loop
In [45]: %timeit -n1 -r1 Q = scipy.linalg.solve_triangular(cholB, X.T, lower=True)
1 loops, best of 1: 799 ms per loop
In [46]: %timeit -n1 -r1 Q = scipy.linalg.solve(cholB, X.T)
1 loops, best of 1: 1.26 s per loop
In [47]: %timeit -n1 -r1 Q = scipy.linalg.solve(cholB, X.T)
1 loops, best of 1: 1.26 s per loop
"""
| 30.66055 | 102 | 0.620586 |
ce8992e05fc290077147eefaf26bedd1f77bd977 | 1,450 | py | Python | actions/set_status.py | Bedrock-OSS/server-updater | b4329563f9af5525a69ffd1f461c53bf09e52c2b | [
"MIT"
] | 1 | 2022-01-02T13:24:01.000Z | 2022-01-02T13:24:01.000Z | actions/set_status.py | Bedrock-OSS/server-updater | b4329563f9af5525a69ffd1f461c53bf09e52c2b | [
"MIT"
] | 3 | 2021-11-15T23:17:25.000Z | 2022-01-02T13:26:34.000Z | actions/set_status.py | Bedrock-OSS/server-updater | b4329563f9af5525a69ffd1f461c53bf09e52c2b | [
"MIT"
] | null | null | null | from subprocess import run
from actions.common import get_name_and_org, get_process_config
from flask import request
| 35.365854 | 92 | 0.611034 |
ce8c6b49bd95348ca5598200de74bc4909b5e91e | 4,067 | py | Python | amo2kinto/generator.py | Mozilla-GitHub-Standards/9856eb0fa59d2f9b39dc3c83d0f31961057b7f0ddf57ba213fd01b9b0f99c4cc | abbd2d8874b79fc12a0d8b6570593c8a6dbc2e68 | [
"Apache-2.0"
] | 5 | 2016-07-06T21:35:06.000Z | 2019-05-15T22:36:24.000Z | amo2kinto/generator.py | Mozilla-GitHub-Standards/9856eb0fa59d2f9b39dc3c83d0f31961057b7f0ddf57ba213fd01b9b0f99c4cc | abbd2d8874b79fc12a0d8b6570593c8a6dbc2e68 | [
"Apache-2.0"
] | 63 | 2016-05-11T09:15:16.000Z | 2021-10-15T16:44:23.000Z | amo2kinto/generator.py | mozilla-services/kinto2xml | ae8aee28c9a2b15e19a21553eb031ae32f673de2 | [
"Apache-2.0"
] | 6 | 2018-02-01T14:13:04.000Z | 2020-07-13T05:36:57.000Z | import os
import datetime
from dateutil.parser import parse as dateutil_parser
from jinja2 import Environment, PackageLoader
from kinto_http import cli_utils
from . import constants
from .logger import logger
JSON_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
COLLECTION_FORMAT = '/buckets/{bucket_id}/collections/{collection_id}'
| 33.336066 | 88 | 0.668798 |
ce8e42b2a35ed5fd98c1fefc1db9f29031a082bc | 2,270 | py | Python | migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | 1 | 2021-10-06T13:48:36.000Z | 2021-10-06T13:48:36.000Z | migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | 116 | 2018-11-02T17:20:47.000Z | 2022-02-09T11:06:22.000Z | migrations/versions/2019_03_04_optional_chart_and_table_classifications.py.py | racedisparityaudit/rd_cms | a12f0e3f5461cc41eed0077ed02e11efafc5dd76 | [
"MIT"
] | 2 | 2018-11-09T16:47:35.000Z | 2020-04-09T13:06:48.000Z | """Make some fields on Chart and Table nullable
We want to copy chart and table data across to these tables but have no way to add a
classification for each one, so we'll have to live with some nulls in here.
Revision ID: 2019_03_04_make_fields_nullable
Revises: 2019_03_04_chart_table_settings
Create Date: 2019-03-05 16:38:12.835894
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "2019_03_04_make_fields_nullable"
down_revision = "2019_03_04_chart_table_settings"
branch_labels = None
depends_on = None
| 54.047619 | 113 | 0.781498 |
ce8ebc8223eab95eca97cd475849f6774b863014 | 962 | py | Python | open_discussions/middleware/channel_api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 12 | 2017-09-27T21:23:27.000Z | 2020-12-25T04:31:30.000Z | open_discussions/middleware/channel_api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 3,293 | 2017-06-30T18:16:01.000Z | 2022-03-31T18:01:34.000Z | open_discussions/middleware/channel_api_test.py | mitodl/open-discussions | ab6e9fac70b8a1222a84e78ba778a7a065c20541 | [
"BSD-3-Clause"
] | 1 | 2020-04-13T12:19:57.000Z | 2020-04-13T12:19:57.000Z | """Tests for channel API middleware"""
from django.utils.functional import SimpleLazyObject
from open_discussions.middleware.channel_api import ChannelApiMiddleware
def test_channel_api_middleware(
mocker, jwt_token, rf, user
): # pylint: disable=unused-argument
"""Tests that the middleware makes a channel API object available on the request"""
api_mock_obj = mocker.Mock(some_api_method=mocker.Mock(return_value="result"))
patched_api_cls = mocker.patch(
"open_discussions.middleware.channel_api.Api", return_value=api_mock_obj
)
request = rf.get("/")
request.user = user
get_request_mock = mocker.Mock()
middleware = ChannelApiMiddleware(get_request_mock)
middleware(request)
assert hasattr(request, "channel_api")
assert isinstance(request.channel_api, SimpleLazyObject)
result = request.channel_api.some_api_method()
patched_api_cls.assert_called_with(user)
assert result == "result"
| 38.48 | 87 | 0.759875 |
ce8f967c1a3e2320cb9057b9e55d32dfed9aae91 | 2,019 | py | Python | app/python/query_strings.py | ProfessorUdGuru/toykinter | 66c0a9877df6b4b3034125566e687b7361085d2b | [
"Unlicense"
] | null | null | null | app/python/query_strings.py | ProfessorUdGuru/toykinter | 66c0a9877df6b4b3034125566e687b7361085d2b | [
"Unlicense"
] | null | null | null | app/python/query_strings.py | ProfessorUdGuru/toykinter | 66c0a9877df6b4b3034125566e687b7361085d2b | [
"Unlicense"
] | null | null | null | # query_strings.py
'''
Since Sqlite queries are inserted as string in Python code,
the queries can be stored here to save space in the modules
where they are used.
'''
delete_color_scheme = '''
DELETE FROM color_scheme
WHERE color_scheme_id = ?
'''
insert_color_scheme = '''
INSERT INTO color_scheme
VALUES (null, ?, ?, ?, ?, 0, 0)
'''
select_all_color_schemes = '''
SELECT bg, highlight_bg, head_bg, fg
FROM color_scheme
'''
select_all_color_schemes_plus = '''
SELECT bg, highlight_bg, head_bg, fg, built_in, color_scheme_id
FROM color_scheme
'''
select_color_scheme_current = '''
SELECT bg, highlight_bg, head_bg, fg
FROM format
WHERE format_id = 1
'''
select_current_database = '''
SELECT current_database
FROM closing_state
WHERE closing_state_id = 1
'''
select_font_scheme = '''
SELECT font_size, output_font, input_font
FROM format
WHERE format_id = 1
'''
select_opening_settings = '''
SELECT
bg,
highlight_bg,
head_bg,
fg,
output_font,
input_font,
font_size,
default_bg,
default_highlight_bg,
default_head_bg,
default_fg,
default_output_font,
default_input_font,
default_font_size
FROM format
WHERE format_id = 1
'''
update_color_scheme_null = '''
UPDATE format
SET (bg, highlight_bg, head_bg, fg) =
(null, null, null, null)
WHERE format_id = 1
'''
update_current_database = '''
UPDATE closing_state
SET current_database = ?
WHERE closing_state_id = 1
'''
update_format_color_scheme = '''
UPDATE format
SET (bg, highlight_bg, head_bg, fg) = (?, ?, ?, ?)
WHERE format_id = 1
'''
update_format_fonts = '''
UPDATE format
SET (font_size, output_font, input_font) = (?, ?, ?)
WHERE format_id = 1
'''
| 20.393939 | 69 | 0.595344 |
ce90ad08ae1e89a4b497c7dcbd24f5d92a0ba879 | 428 | py | Python | travel/migrations/0029_auto_20190514_2108.py | sausage-team/travel-notes | 3c2454ebad7764906c5ff30cbdfe296cb7c64eb4 | [
"MIT"
] | null | null | null | travel/migrations/0029_auto_20190514_2108.py | sausage-team/travel-notes | 3c2454ebad7764906c5ff30cbdfe296cb7c64eb4 | [
"MIT"
] | null | null | null | travel/migrations/0029_auto_20190514_2108.py | sausage-team/travel-notes | 3c2454ebad7764906c5ff30cbdfe296cb7c64eb4 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-14 13:08
from django.db import migrations, models
| 22.526316 | 98 | 0.616822 |
ce9204471f1cea2c7a2d1ce2b0a13ce9e7a84406 | 2,027 | py | Python | ci/mocks/sensu/sensu_verify.py | infrawatch/collectd-sensubility | 5bea37045137ab88c879159ba1e1792a70eb3717 | [
"Apache-2.0"
] | null | null | null | ci/mocks/sensu/sensu_verify.py | infrawatch/collectd-sensubility | 5bea37045137ab88c879159ba1e1792a70eb3717 | [
"Apache-2.0"
] | 7 | 2020-06-15T22:08:33.000Z | 2021-01-08T16:03:01.000Z | ci/mocks/sensu/sensu_verify.py | paramite/collectd-sensubility | 4baca0cd657b4f9d2edeb19e82b2b052c45c972c | [
"Apache-2.0"
] | 1 | 2020-02-18T21:03:10.000Z | 2020-02-18T21:03:10.000Z | #!/usr/bin/env python
""",
Mocked Ruby based Sensu server. Connects to the RabbitMQ instance and sends
check requests as Sensu server would and waits for appropriate response.
Fails the response is not received at all or in invalid format.
"""
import click
import json
import pika
import signal
import sys
CI_TEST_RESULTS = {
'test1': {'output': 'foo\n', 'status': 0},
'test2': {'output': 'bar\n', 'status': 1},
'test3': {'output': 'baz\n', 'status': 2},
'standalone_check': {'output': 'foobar\n', 'status': 2}
}
if __name__ == '__main__':
main()
| 29.808824 | 95 | 0.624075 |
ce92edbc18e65275ac6f2bfc1a972e464010433f | 205 | py | Python | django_ecommerce/contact/admin.py | marshallhumble/Python_Web | d0d31647c882f30422f31aa62ecd61ae7ad87123 | [
"MIT"
] | null | null | null | django_ecommerce/contact/admin.py | marshallhumble/Python_Web | d0d31647c882f30422f31aa62ecd61ae7ad87123 | [
"MIT"
] | null | null | null | django_ecommerce/contact/admin.py | marshallhumble/Python_Web | d0d31647c882f30422f31aa62ecd61ae7ad87123 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import ContactForm
admin.site.register(ContactForm, ContactFormAdmin)
| 20.5 | 50 | 0.780488 |
ce943e66efc0e44702391aac0684e3625f5526e9 | 718 | py | Python | ds/practice/daily_practice/20-07/assets/code/reverse_sll.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | null | null | null | ds/practice/daily_practice/20-07/assets/code/reverse_sll.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | 8 | 2020-03-24T17:47:23.000Z | 2022-03-12T00:33:21.000Z | ds/practice/daily_practice/20-07/assets/code/reverse_sll.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | null | null | null | """
HackerRank :: Reverse a singly-linked list
https://www.hackerrank.com/challenges/reverse-a-linked-list/problem
Complete the reverse function below.
For your reference:
SinglyLinkedListNode:
int data
SinglyLinkedListNode next
"""
| 23.16129 | 67 | 0.683844 |
ce9550e5fc7912aecc7ac103430d2e2845e818b7 | 12,566 | gyp | Python | ui/gfx/gfx.gyp | cvsuser-chromium/chromium | acb8e8e4a7157005f527905b48dd48ddaa3b863a | [
"BSD-3-Clause"
] | 4 | 2017-04-05T01:51:34.000Z | 2018-02-15T03:11:54.000Z | ui/gfx/gfx.gyp | cvsuser-chromium/chromium | acb8e8e4a7157005f527905b48dd48ddaa3b863a | [
"BSD-3-Clause"
] | 1 | 2021-12-13T19:44:12.000Z | 2021-12-13T19:44:12.000Z | ui/gfx/gfx.gyp | cvsuser-chromium/chromium | acb8e8e4a7157005f527905b48dd48ddaa3b863a | [
"BSD-3-Clause"
] | 4 | 2017-04-05T01:52:03.000Z | 2022-02-13T17:58:45.000Z | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'gfx',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/base.gyp:base_i18n',
'<(DEPTH)/base/base.gyp:base_static',
'<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/skia/skia.gyp:skia',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'<(DEPTH)/third_party/libpng/libpng.gyp:libpng',
'<(DEPTH)/third_party/zlib/zlib.gyp:zlib',
'<(DEPTH)/url/url.gyp:url_lib',
],
# text_elider.h includes ICU headers.
'export_dependent_settings': [
'<(DEPTH)/skia/skia.gyp:skia',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
],
'defines': [
'GFX_IMPLEMENTATION',
],
'sources': [
'android/device_display_info.cc',
'android/device_display_info.h',
'android/gfx_jni_registrar.cc',
'android/gfx_jni_registrar.h',
'android/java_bitmap.cc',
'android/java_bitmap.h',
'android/shared_device_display_info.cc',
'android/shared_device_display_info.h',
'animation/animation.cc',
'animation/animation.h',
'animation/animation_container.cc',
'animation/animation_container.h',
'animation/animation_container_element.h',
'animation/animation_container_observer.h',
'animation/animation_delegate.h',
'animation/linear_animation.cc',
'animation/linear_animation.h',
'animation/multi_animation.cc',
'animation/multi_animation.h',
'animation/slide_animation.cc',
'animation/slide_animation.h',
'animation/throb_animation.cc',
'animation/throb_animation.h',
'animation/tween.cc',
'animation/tween.h',
'blit.cc',
'blit.h',
'box_f.cc',
'box_f.h',
'break_list.h',
'canvas.cc',
'canvas.h',
'canvas_android.cc',
'canvas_paint_gtk.cc',
'canvas_paint_gtk.h',
'canvas_paint_mac.h',
'canvas_paint_mac.mm',
'canvas_paint_win.cc',
'canvas_paint_win.h',
'canvas_skia.cc',
'canvas_skia_paint.h',
'codec/jpeg_codec.cc',
'codec/jpeg_codec.h',
'codec/png_codec.cc',
'codec/png_codec.h',
'color_analysis.cc',
'color_analysis.h',
'color_profile.cc',
'color_profile.h',
'color_profile_mac.cc',
'color_profile_win.cc',
'color_utils.cc',
'color_utils.h',
'display.cc',
'display.h',
'display_observer.cc',
'display_observer.h',
'favicon_size.cc',
'favicon_size.h',
'frame_time.h',
'font.cc',
'font.h',
'font_fallback_win.cc',
'font_fallback_win.h',
'font_list.cc',
'font_list.h',
'font_render_params_android.cc',
'font_render_params_linux.cc',
'font_render_params_linux.h',
'font_smoothing_win.cc',
'font_smoothing_win.h',
'gfx_export.h',
'gfx_paths.cc',
'gfx_paths.h',
'gpu_memory_buffer.cc',
'gpu_memory_buffer.h',
'image/canvas_image_source.cc',
'image/canvas_image_source.h',
'image/image.cc',
'image/image.h',
'image/image_family.cc',
'image/image_family.h',
'image/image_ios.mm',
'image/image_mac.mm',
'image/image_png_rep.cc',
'image/image_png_rep.h',
'image/image_skia.cc',
'image/image_skia.h',
'image/image_skia_operations.cc',
'image/image_skia_operations.h',
'image/image_skia_rep.cc',
'image/image_skia_rep.h',
'image/image_skia_source.h',
'image/image_skia_util_ios.h',
'image/image_skia_util_ios.mm',
'image/image_skia_util_mac.h',
'image/image_skia_util_mac.mm',
'image/image_util.cc',
'image/image_util.h',
'image/image_util_ios.mm',
'insets.cc',
'insets.h',
'insets_base.h',
'insets_f.cc',
'insets_f.h',
'interpolated_transform.cc',
'interpolated_transform.h',
'mac/scoped_ns_disable_screen_updates.h',
'matrix3_f.cc',
'matrix3_f.h',
'native_widget_types.h',
'ozone/dri/dri_skbitmap.cc',
'ozone/dri/dri_skbitmap.h',
'ozone/dri/dri_surface.cc',
'ozone/dri/dri_surface.h',
'ozone/dri/dri_surface_factory.cc',
'ozone/dri/dri_surface_factory.h',
'ozone/dri/dri_wrapper.cc',
'ozone/dri/dri_wrapper.h',
'ozone/dri/hardware_display_controller.cc',
'ozone/dri/hardware_display_controller.h',
'ozone/impl/file_surface_factory.cc',
'ozone/impl/file_surface_factory.h',
'ozone/surface_factory_ozone.cc',
'ozone/surface_factory_ozone.h',
'pango_util.cc',
'pango_util.h',
'path.cc',
'path.h',
'path_aura.cc',
'path_gtk.cc',
'path_win.cc',
'path_win.h',
'path_x11.cc',
'path_x11.h',
'platform_font.h',
'platform_font_android.cc',
'platform_font_ios.h',
'platform_font_ios.mm',
'platform_font_mac.h',
'platform_font_mac.mm',
'platform_font_ozone.cc',
'platform_font_pango.cc',
'platform_font_pango.h',
'platform_font_win.cc',
'platform_font_win.h',
'point.cc',
'point.h',
'point3_f.cc',
'point3_f.h',
'point_base.h',
'point_conversions.cc',
'point_conversions.h',
'point_f.cc',
'point_f.h',
'quad_f.cc',
'quad_f.h',
'range/range.cc',
'range/range.h',
'range/range_mac.mm',
'range/range_win.cc',
'rect.cc',
'rect.h',
'rect_base.h',
'rect_base_impl.h',
'rect_conversions.cc',
'rect_conversions.h',
'rect_f.cc',
'rect_f.h',
'render_text.cc',
'render_text.h',
'render_text_mac.cc',
'render_text_mac.h',
'render_text_ozone.cc',
'render_text_pango.cc',
'render_text_pango.h',
'render_text_win.cc',
'render_text_win.h',
'safe_integer_conversions.h',
'scoped_canvas.h',
'scoped_cg_context_save_gstate_mac.h',
'scoped_ns_graphics_context_save_gstate_mac.h',
'scoped_ns_graphics_context_save_gstate_mac.mm',
'scoped_ui_graphics_push_context_ios.h',
'scoped_ui_graphics_push_context_ios.mm',
'screen.cc',
'screen.h',
'screen_android.cc',
'screen_aura.cc',
'screen_gtk.cc',
'screen_ios.mm',
'screen_mac.mm',
'screen_win.cc',
'screen_win.h',
'scrollbar_size.cc',
'scrollbar_size.h',
'selection_model.cc',
'selection_model.h',
'sequential_id_generator.cc',
'sequential_id_generator.h',
'shadow_value.cc',
'shadow_value.h',
'size.cc',
'size.h',
'size_base.h',
'size_conversions.cc',
'size_conversions.h',
'size_f.cc',
'size_f.h',
'skbitmap_operations.cc',
'skbitmap_operations.h',
'skia_util.cc',
'skia_util.h',
'skia_utils_gtk.cc',
'skia_utils_gtk.h',
'switches.cc',
'switches.h',
'sys_color_change_listener.cc',
'sys_color_change_listener.h',
'text_constants.h',
'text_elider.cc',
'text_elider.h',
'text_utils.cc',
'text_utils.h',
'text_utils_android.cc',
'text_utils_ios.mm',
'text_utils_skia.cc',
'transform.cc',
'transform.h',
'transform_util.cc',
'transform_util.h',
'utf16_indexing.cc',
'utf16_indexing.h',
'vector2d.cc',
'vector2d.h',
'vector2d_conversions.cc',
'vector2d_conversions.h',
'vector2d_f.cc',
'vector2d_f.h',
'vector3d_f.cc',
'vector3d_f.h',
'win/dpi.cc',
'win/dpi.h',
'win/hwnd_util.cc',
'win/hwnd_util.h',
'win/scoped_set_map_mode.h',
'win/singleton_hwnd.cc',
'win/singleton_hwnd.h',
'win/window_impl.cc',
'win/window_impl.h',
'x/x11_atom_cache.cc',
'x/x11_atom_cache.h',
'x/x11_types.cc',
'x/x11_types.h',
],
'conditions': [
['OS=="ios"', {
# iOS only uses a subset of UI.
'sources/': [
['exclude', '^codec/jpeg_codec\\.cc$'],
],
}, {
'dependencies': [
'<(libjpeg_gyp_path):libjpeg',
],
}],
# TODO(asvitkine): Switch all platforms to use canvas_skia.cc.
# http://crbug.com/105550
['use_canvas_skia==1', {
'sources!': [
'canvas_android.cc',
],
}, { # use_canvas_skia!=1
'sources!': [
'canvas_skia.cc',
],
}],
['toolkit_uses_gtk == 1', {
'dependencies': [
'<(DEPTH)/build/linux/system.gyp:gtk',
],
'sources': [
'gtk_native_view_id_manager.cc',
'gtk_native_view_id_manager.h',
'gtk_preserve_window.cc',
'gtk_preserve_window.h',
'gdk_compat.h',
'gtk_compat.h',
'gtk_util.cc',
'gtk_util.h',
'image/cairo_cached_surface.cc',
'image/cairo_cached_surface.h',
'scoped_gobject.h',
],
}],
['OS=="win"', {
'sources': [
'gdi_util.cc',
'gdi_util.h',
'icon_util.cc',
'icon_util.h',
],
# TODO(jschuh): C4267: http://crbug.com/167187 size_t -> int
# C4324 is structure was padded due to __declspec(align()), which is
# uninteresting.
'msvs_disabled_warnings': [ 4267, 4324 ],
}],
['OS=="android"', {
'sources!': [
'animation/throb_animation.cc',
'display_observer.cc',
'path.cc',
'selection_model.cc',
],
'dependencies': [
'gfx_jni_headers',
],
'link_settings': {
'libraries': [
'-landroid',
'-ljnigraphics',
],
},
}],
['OS=="android" and android_webview_build==0', {
'dependencies': [
'<(DEPTH)/base/base.gyp:base_java',
],
}],
['OS=="android" or OS=="ios"', {
'sources!': [
'render_text.cc',
'render_text.h',
'text_utils_skia.cc',
],
}],
['use_pango==1', {
'dependencies': [
'<(DEPTH)/build/linux/system.gyp:pangocairo',
],
}],
['ozone_platform_dri==1', {
'dependencies': [
'<(DEPTH)/build/linux/system.gyp:dridrm',
],
}],
],
'target_conditions': [
# Need 'target_conditions' to override default filename_rules to include
# the file on iOS.
['OS == "ios"', {
'sources/': [
['include', '^scoped_cg_context_save_gstate_mac\\.h$'],
],
}],
],
}
],
'conditions': [
['OS=="android"' , {
'targets': [
{
'target_name': 'gfx_jni_headers',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/ui/gfx',
],
},
'sources': [
'../android/java/src/org/chromium/ui/gfx/BitmapHelper.java',
'../android/java/src/org/chromium/ui/gfx/DeviceDisplayInfo.java',
],
'variables': {
'jni_gen_package': 'ui/gfx',
'jni_generator_ptr_type': 'long',
},
'includes': [ '../../build/jni_generator.gypi' ],
},
],
}],
],
}
| 29.990453 | 100 | 0.524988 |
ce9579f634d692007205ef460ecf1c49f7c087d2 | 2,191 | py | Python | preprocess/python/h5py_utils.py | fredericsun/seq2seq-ChinesePoetryGenerater | 2d3aab3807f99c99046f53a4d5cc045ae733386f | [
"MIT"
] | null | null | null | preprocess/python/h5py_utils.py | fredericsun/seq2seq-ChinesePoetryGenerater | 2d3aab3807f99c99046f53a4d5cc045ae733386f | [
"MIT"
] | null | null | null | preprocess/python/h5py_utils.py | fredericsun/seq2seq-ChinesePoetryGenerater | 2d3aab3807f99c99046f53a4d5cc045ae733386f | [
"MIT"
] | null | null | null | import numpy as np
import h5py
import os
import sys
from copy import deepcopy
#handle .(period) and slash specially since it is part of path
#replace with \period or \slash-forward when store, recover later
#not using '\forward-slash' is because \f is a special character
PERIOD='\period'
SLASH='\slash-forward'
'''
This function will save a python dictionary with format {'key':vector} to hdf5 format
target_dict is the target dictionary
f_name is the HDF5 file name including path to it
mode is file open mode, 'a' for append is the default setting
output file is a HDF5 file which the keys of HDF5 are keys of target dict
dataset is corresponding array of each key in target dict
'''
'''
This function will convert saved hdf5 file from previous function back to {'key':vector} dictionary
f_name is the HDF5 file name including path to it
'''
if __name__ == '__main__':
main()
| 19.918182 | 99 | 0.682337 |
ce96c92938a6d505e9a78e6bd30465ac609c886c | 2,238 | py | Python | libs/dataset/pipeline.py | arnasRad/vits | c0e0689d8c24049a2531dd5d52fc96a594389db2 | [
"MIT"
] | null | null | null | libs/dataset/pipeline.py | arnasRad/vits | c0e0689d8c24049a2531dd5d52fc96a594389db2 | [
"MIT"
] | null | null | null | libs/dataset/pipeline.py | arnasRad/vits | c0e0689d8c24049a2531dd5d52fc96a594389db2 | [
"MIT"
] | null | null | null | import re
import string
from libs.dataset.core import SampleEntry, oov_replacement_vocabulary, letter_replacements
__word_start_regex = f'[ \t\n]|^|[{string.punctuation}]'
__word_end_regex = f'[ \t\n]|$|[{string.punctuation}]'
| 34.430769 | 130 | 0.662645 |
ce97329b4f1ef6afddfe7243f66fd55019aff8be | 733 | py | Python | examples/rfid_lookup.py | CMU-Robotics-Club/pyrc | bda1113a75ff9b67b518016b8b8aceeb9971d50b | [
"MIT"
] | null | null | null | examples/rfid_lookup.py | CMU-Robotics-Club/pyrc | bda1113a75ff9b67b518016b8b8aceeb9971d50b | [
"MIT"
] | 2 | 2015-01-14T01:48:01.000Z | 2015-04-06T05:16:23.000Z | examples/rfid_lookup.py | CMU-Robotics-Club/pyrc | bda1113a75ff9b67b518016b8b8aceeb9971d50b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from rc.clients import APIClient
if __name__ == '__main__':
    """
    Given a valid RFID attempts to lookup
    the User's ID. Then using that User ID,
    if it exists, get more information about the User.
    """
    # If the bash variables RC_PUBLIC_KEY and RC_PRIVATE_KEY
    # are not set you need to pass the public and private key
    # values into this constructor
    client = APIClient()
    # Read the RFID value to look up from stdin
    rfid = input("RFID: ")
    # Ask the API who this RFID corresponds to
    # (api_request_id is also returned but is unused here)
    (user_id, api_request_id) = client.rfid(rfid, "Some Meta String")
    if user_id is None:
        print("No such User")
    else:
        # Get more information about the user
        user = client.user(user_id)
        print("Username: {}".format(user['username']))
ce9737971517d349e0d99c904be7b6abc01113e4 | 1,015 | py | Python | LeetCode/0417. Pacific Atlantic Water Flow/solution.py | InnoFang/oh-my-algorithms | f559dba371ce725a926725ad28d5e1c2facd0ab2 | [
"Apache-2.0"
] | 1 | 2017-03-31T15:24:01.000Z | 2017-03-31T15:24:01.000Z | LeetCode/0417. Pacific Atlantic Water Flow/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | null | null | null | LeetCode/0417. Pacific Atlantic Water Flow/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | null | null | null | """
113 / 113 test cases passed.
Runtime: 92 ms
Memory Usage: 16.4 MB
"""
| 36.25 | 105 | 0.469951 |
ce983be2f0cd8dfbb73fd808cea94fd76f198a4e | 456 | py | Python | cellardoor/api/methods.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | null | null | null | cellardoor/api/methods.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | 3 | 2015-01-31T14:53:06.000Z | 2015-02-01T19:04:30.000Z | cellardoor/api/methods.py | movermeyer/cellardoor | 25192b07224ff7bd33fd29ebac07340bef53a2ed | [
"MIT"
] | 2 | 2015-01-31T14:54:28.000Z | 2018-03-05T17:33:42.000Z | __all__ = (
'LIST',
'GET',
'CREATE',
'UPDATE',
'REPLACE',
'DELETE',
'ALL',
'get_http_methods'
)
# Canonical names for the CRUD-style API methods.
LIST = 'list'
GET = 'get'
CREATE = 'create'
REPLACE = 'replace'
UPDATE = 'update'
DELETE = 'delete'
# Every supported method, in declaration order.
ALL = (LIST, GET, CREATE, UPDATE, REPLACE, DELETE)
# HTTP verb(s) corresponding to each API method; tuples allow a method
# to map to more than one verb.
_http_methods = {
    LIST: ('get',),
    GET: ('get',),
    CREATE: ('post',),
    REPLACE: ('put',),
    UPDATE: ('patch',),
    DELETE: ('delete',)
}
| 13.818182 | 50 | 0.605263 |
ce9b85f5702f1c6dc74c0af21d1a6d33b4e3768c | 617 | py | Python | tests/test007_check_instant_ach.py | xuru/Sila-Python | 12fef8886580327779d32cf7596dae4516b36c11 | [
"Apache-2.0"
] | 9 | 2019-05-14T14:19:00.000Z | 2021-11-10T05:07:39.000Z | tests/test007_check_instant_ach.py | xuru/Sila-Python | 12fef8886580327779d32cf7596dae4516b36c11 | [
"Apache-2.0"
] | 8 | 2019-05-13T16:27:54.000Z | 2021-09-14T17:31:51.000Z | tests/test007_check_instant_ach.py | xuru/Sila-Python | 12fef8886580327779d32cf7596dae4516b36c11 | [
"Apache-2.0"
] | 8 | 2019-02-13T18:53:58.000Z | 2021-09-03T15:49:31.000Z | import unittest
import silasdk
from tests.test_config import (
app, eth_private_key, eth_private_key_4, instant_ach_handle, user_handle)
if __name__ == '__main__':
unittest.main()
| 26.826087 | 77 | 0.670989 |
ce9e402973e2cfeb24237745cb68e27f8bb1a25f | 72 | py | Python | lxml2json/__init__.py | meteozond/lxml2json | 502b3512dcb0ae034469a2cedd7661a4801c4425 | [
"MIT"
] | null | null | null | lxml2json/__init__.py | meteozond/lxml2json | 502b3512dcb0ae034469a2cedd7661a4801c4425 | [
"MIT"
] | null | null | null | lxml2json/__init__.py | meteozond/lxml2json | 502b3512dcb0ae034469a2cedd7661a4801c4425 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from functions import convert
name = "convert"
| 18 | 30 | 0.722222 |
ce9f112137fa8c8ee27bbea19fe1b8d402e47a00 | 1,758 | py | Python | src/day10.py | nlasheras/aoc-2021 | 17af9108e2f907747c9aca784e52c80e81949845 | [
"MIT"
] | null | null | null | src/day10.py | nlasheras/aoc-2021 | 17af9108e2f907747c9aca784e52c80e81949845 | [
"MIT"
] | null | null | null | src/day10.py | nlasheras/aoc-2021 | 17af9108e2f907747c9aca784e52c80e81949845 | [
"MIT"
] | null | null | null | """ https://adventofcode.com/2021/day/10 """
from functools import reduce
opening_map = { ")": "(", "]": "[", "}": "{", ">": "<" }
closing_map = {v: k for k,v in opening_map.items()}
if __name__ == '__main__':
    # sanity-check both parts against the sample input first
    input_lines = read_input("input10_test.txt")
    part1_syntax_error(input_lines)
    part2_autocompletion(input_lines)
    # then solve the real puzzle input
    input_lines = read_input("input10.txt")
    part1_syntax_error(input_lines)
    part2_autocompletion(input_lines)
| 33.807692 | 103 | 0.608077 |
ce9f2c870786c7bb3405fa82c2066dccfd69d84d | 2,318 | py | Python | bigbench/models/dummy_model.py | sebschu/BIG-bench | b04e56e3a9c0b6488fe154370de0b9697f991b67 | [
"Apache-2.0"
] | 4 | 2021-05-19T01:07:37.000Z | 2021-11-02T01:31:56.000Z | bigbench/models/dummy_model.py | sebschu/BIG-bench | b04e56e3a9c0b6488fe154370de0b9697f991b67 | [
"Apache-2.0"
] | 6 | 2021-05-26T09:08:17.000Z | 2021-05-31T15:27:14.000Z | bigbench/models/dummy_model.py | sebschu/BIG-bench | b04e56e3a9c0b6488fe154370de0b9697f991b67 | [
"Apache-2.0"
] | 3 | 2021-04-21T00:10:43.000Z | 2021-06-01T17:22:54.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bigbench.api.model as model
import bigbench.api.util as util
import numpy as np
import scipy.special
| 37.387097 | 88 | 0.654443 |
ce9f408bce05df804432eb2ae09a3fb7b0a734bb | 67 | py | Python | strategy/__init__.py | mmmaaaggg/QABAT | d6f20d926de047af6857e466cf28084d0ba69993 | [
"MIT"
] | 3 | 2019-08-31T18:01:10.000Z | 2021-04-04T09:51:17.000Z | strategy/__init__.py | mmmaaaggg/QABAT | d6f20d926de047af6857e466cf28084d0ba69993 | [
"MIT"
] | null | null | null | strategy/__init__.py | mmmaaaggg/QABAT | d6f20d926de047af6857e466cf28084d0ba69993 | [
"MIT"
] | 1 | 2020-08-15T17:04:14.000Z | 2020-08-15T17:04:14.000Z | # -*- coding: utf-8 -*-
"""
Created on 2017/11/18
@author: MG
"""
| 9.571429 | 23 | 0.522388 |
ce9f508775c39825fac04c419be3ad975e03c872 | 1,099 | py | Python | js/data/getcytoband.py | ManuelLecaro/ucsc-xena-client | 67611408800d227172b27d97fcba267ddb93c06e | [
"Apache-2.0"
] | 60 | 2017-01-18T00:51:24.000Z | 2022-02-16T11:06:08.000Z | js/data/getcytoband.py | ManuelLecaro/ucsc-xena-client | 67611408800d227172b27d97fcba267ddb93c06e | [
"Apache-2.0"
] | 407 | 2016-03-04T23:11:29.000Z | 2022-03-18T07:27:10.000Z | js/data/getcytoband.py | ManuelLecaro/ucsc-xena-client | 67611408800d227172b27d97fcba267ddb93c06e | [
"Apache-2.0"
] | 60 | 2017-03-02T15:19:48.000Z | 2021-01-19T09:39:46.000Z | #!/usr/bin/env python
from itertools import groupby, tee
import json, os
files = [
('hg38', 'http://hgdownload.soe.ucsc.edu/goldenPath/hg38/database/cytoBand.txt.gz'),
('hg19', 'http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/cytoBand.txt.gz'),
('hg18', 'http://hgdownload.soe.ucsc.edu/goldenPath/hg18/database/cytoBand.txt.gz')]
chroms = ['chr' + str(i + 1) for i in range(22)] + ['chrX', 'chrY']
Kchrom = 0
for (name, url) in files:
# fetch(name, url)
write(name)
| 33.30303 | 94 | 0.624204 |
cea21f7323fd769b736524d144d28f5a8974c4ed | 6,973 | py | Python | osxphotos/cli/param_types.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | osxphotos/cli/param_types.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | osxphotos/cli/param_types.py | oPromessa/osxphotos | 0d7e324f0262093727147b9f22ed275e962e8725 | [
"MIT"
] | null | null | null | """Click parameter types for osxphotos CLI"""
import datetime
import os
import pathlib
import re
import bitmath
import click
import pytimeparse2
from osxphotos.export_db_utils import export_db_get_version
from osxphotos.photoinfo import PhotoInfoNone
from osxphotos.phototemplate import PhotoTemplate, RenderOptions
from osxphotos.timeutils import time_string_to_datetime, utc_offset_string_to_seconds
from osxphotos.timezones import Timezone
from osxphotos.utils import expand_and_validate_filepath, load_function
__all__ = [
"BitMathSize",
"DateOffset",
"DateTimeISO8601",
"ExportDBType",
"FunctionCall",
"TemplateString",
"TimeISO8601",
"TimeOffset",
"TimeString",
"UTCOffset",
]
| 31.695455 | 153 | 0.582819 |
cea24139f5562c36b7de46c6bc9e387fa1a316e6 | 26,229 | py | Python | old/ms/django_rg/views.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 16 | 2015-01-02T15:39:04.000Z | 2016-03-17T06:38:46.000Z | old/ms/django_rg/views.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 37 | 2015-01-28T20:58:05.000Z | 2016-03-22T04:01:32.000Z | old/ms/django_rg/views.py | jcnelson/syndicate | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | [
"Apache-2.0"
] | 8 | 2015-04-08T02:26:03.000Z | 2016-03-04T05:56:24.000Z | '''
All of these views are predicated on the user already being logged in to
a valid session.
djago_ag/views.py
John Whelchel
Summer 2013
These are the views for the Replica Gateway section of the administration
site. They are all decorated with @authenticate to make sure that a user is
logged in; if not, they are redirected to the login page. Some are decorated
with precheck, a decorator that makes sure the passed g_id and passwords
are valid.
'''
import logging
import json
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import Context, loader, RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.forms.formsets import formset_factory
from django_lib.auth import authenticate
from django_lib.decorators import precheck
from django_lib import gatewayforms
from django_lib import forms as libforms
from storage.storagetypes import transactional
import storage.storage as db
from MS.volume import Volume
from MS.user import SyndicateUser as User
from MS.gateway import ReplicaGateway as RG
# This is the view to be redirected to when precheck fails; i.e.
# the given password or g_id is wrong.
PRECHECK_REDIRECT = 'django_rg.views.viewgateway'
# Doesn't use precheck() because doesn't use Password() form, just ChangePassword() form.
| 39.089419 | 126 | 0.618895 |
cea36ec679e178237a093bbac8b36da547e73bca | 46 | py | Python | cloudbackup/tests/__init__.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 4 | 2015-02-10T14:28:12.000Z | 2016-12-26T22:52:07.000Z | cloudbackup/tests/__init__.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 17 | 2015-01-22T21:58:36.000Z | 2018-01-25T19:47:43.000Z | cloudbackup/tests/__init__.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 9 | 2015-01-26T19:25:45.000Z | 2018-11-01T20:14:12.000Z | """
Rackspace Cloud Backup API
Test Suite
"""
| 9.2 | 26 | 0.695652 |
cea3c68b56eb3e56f0d54948bd1517206a13b963 | 5,647 | py | Python | gym_locm/toolbox/predictor.py | dfpetrin/gym-locm | af843f2508dd6c5ce96740390ef67b89c77c34ad | [
"MIT"
] | 12 | 2019-11-16T23:11:08.000Z | 2022-03-24T03:31:59.000Z | gym_locm/toolbox/predictor.py | dfpetrin/gym-locm | af843f2508dd6c5ce96740390ef67b89c77c34ad | [
"MIT"
] | 6 | 2021-01-21T15:33:40.000Z | 2022-03-18T18:06:25.000Z | gym_locm/toolbox/predictor.py | dfpetrin/gym-locm | af843f2508dd6c5ce96740390ef67b89c77c34ad | [
"MIT"
] | 2 | 2019-07-18T20:34:42.000Z | 2022-03-01T19:56:46.000Z | import argparse
import json
import os
import pathlib
import numpy as np
import pexpect
from scipy.special import softmax
base_path = str(pathlib.Path(__file__).parent.absolute())
if __name__ == '__main__':
run()
| 27.412621 | 80 | 0.607756 |
cea540d8d7c6742e25322196c14ce8e5fffdddeb | 57,014 | py | Python | src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py | xliiv/ralph_assets | 73e5e46db380c9a8dafb9ca1bd5abe47d5733385 | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py | xliiv/ralph_assets | 73e5e46db380c9a8dafb9ca1bd5abe47d5733385 | [
"Apache-2.0"
] | null | null | null | src/ralph_assets/migrations/0012_auto__add_transitionshistory__add_attachment__add_coaoemos__add_action.py | xliiv/ralph_assets | 73e5e46db380c9a8dafb9ca1bd5abe47d5733385 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 74.236979 | 224 | 0.60522 |
cea61c55844715882bbb20ff9d1e7b4ef98a8044 | 3,981 | py | Python | ansible/modules/network/nxos/nxos_pim.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | ansible/modules/network/nxos/nxos_pim.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ansible/modules/network/nxos/nxos_pim.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of a PIM instance.
description:
- Manages configuration of a Protocol Independent Multicast (PIM) instance.
author: Gabriele Gerbino (@GGabriele)
options:
ssm_range:
description:
- Configure group ranges for Source Specific Multicast (SSM).
Valid values are multicast addresses or the keyword 'none'.
required: true
'''
EXAMPLES = '''
- nxos_pim:
ssm_range: "232.0.0.0/8"
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip pim ssm range 232.0.0.0/8"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'ssm_range': 'ip pim ssm range'
}
if __name__ == '__main__':
main()
| 28.035211 | 86 | 0.672444 |
cea86a1503f29caac9cdfd11f755050fd0f5e54c | 3,506 | py | Python | server/openapi_server/controllers/variable_controller.py | mintproject/MINT-ModelCatalogIngestionAPI | 026d3495483a3e48ea3c1364d0dda09beeea69e4 | [
"Apache-2.0"
] | 2 | 2019-05-30T21:33:43.000Z | 2019-09-27T21:04:38.000Z | server/openapi_server/controllers/variable_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | 82 | 2019-10-08T16:35:34.000Z | 2022-03-15T18:25:27.000Z | server/openapi_server/controllers/variable_controller.py | mintproject/model-catalog-api | 2ad7016691891497bba37afe8ceb0fea8fe769e5 | [
"Apache-2.0"
] | null | null | null | import connexion
import six
from openapi_server import query_manager
from openapi_server.utils.vars import VARIABLE_TYPE_NAME, VARIABLE_TYPE_URI
from openapi_server.models.variable import Variable # noqa: E501
from openapi_server import util
def variables_get(username=None, label=None, page=None, per_page=None):  # noqa: E501
    """Retrieve every instance of Variable.

    Queries the user graph for all Variable resources
    (see https://w3id.org/okn/o/sd#Variable), with optional label
    filtering and pagination.

    :param username: name of the user graph to query
    :type username: str
    :param label: restrict results to resources with this label
    :type label: str
    :param page: page number to return
    :type page: int
    :param per_page: number of items returned per page
    :type per_page: int

    :rtype: List[Variable]
    """
    query_kwargs = {
        "username": username,
        "label": label,
        "page": page,
        "per_page": per_page,
        "rdf_type_uri": VARIABLE_TYPE_URI,
        "rdf_type_name": VARIABLE_TYPE_NAME,
        "kls": Variable,
    }
    return query_manager.get_resource(**query_kwargs)
def variables_id_delete(id, user=None):  # noqa: E501
    """Delete an existing Variable.

    Removes the Variable identified by ``id``
    (see https://w3id.org/okn/o/sd#Variable).

    :param id: the ID of the Variable to be deleted
    :type id: str
    :param user: username performing the deletion
    :type user: str

    :rtype: None
    """
    delete_kwargs = {
        "id": id,
        "user": user,
        "rdf_type_uri": VARIABLE_TYPE_URI,
        "rdf_type_name": VARIABLE_TYPE_NAME,
        "kls": Variable,
    }
    return query_manager.delete_resource(**delete_kwargs)
def variables_id_get(id, username=None):  # noqa: E501
    """Fetch a single Variable by its id.

    Returns the details of the Variable identified by ``id``
    (see https://w3id.org/okn/o/sd#Variable).

    :param id: the ID of the Variable to be retrieved
    :type id: str
    :param username: name of the user graph to query
    :type username: str

    :rtype: Variable
    """
    return query_manager.get_resource(
        id=id,
        username=username,
        kls=Variable,
        rdf_type_name=VARIABLE_TYPE_NAME,
        rdf_type_uri=VARIABLE_TYPE_URI,
    )
def variables_id_put(id, user=None, variable=None):  # noqa: E501
    """Update an existing Variable.

    Replaces the stored Variable identified by ``id`` with the payload
    from the request body (see https://w3id.org/okn/o/sd#Variable).

    :param id: the ID of the Variable to be updated
    :type id: str
    :param user: username performing the update
    :type user: str
    :param variable: the updated Variable resource
    :type variable: dict | bytes

    :rtype: Variable
    """
    # Prefer the JSON payload from the request body when one is present.
    if connexion.request.is_json:
        payload = connexion.request.get_json()
        variable = Variable.from_dict(payload)  # noqa: E501
    return query_manager.put_resource(
        id=id,
        user=user,
        body=variable,
        kls=Variable,
        rdf_type_name=VARIABLE_TYPE_NAME,
        rdf_type_uri=VARIABLE_TYPE_URI,
    )
def variables_post(user=None, variable=None):  # noqa: E501
    """Create one Variable.

    Creates a new instance of Variable from the request payload
    (see https://w3id.org/okn/o/sd#Variable).

    :param user: username performing the creation
    :type user: str
    :param variable: the Variable resource to be created
    :type variable: dict | bytes

    :rtype: Variable
    """
    # Prefer the JSON payload from the request body when one is present.
    if connexion.request.is_json:
        payload = connexion.request.get_json()
        variable = Variable.from_dict(payload)  # noqa: E501
    post_kwargs = {
        "user": user,
        "body": variable,
        "rdf_type_uri": VARIABLE_TYPE_URI,
        "rdf_type_name": VARIABLE_TYPE_NAME,
        "kls": Variable,
    }
    return query_manager.post_resource(**post_kwargs)
| 28.504065 | 114 | 0.683685 |
ceab7daa2ddbacd632939bc43e04875d9fe01ee3 | 3,482 | py | Python | lhorizon/_request_formatters.py | arfon/lhorizon | de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb | [
"BSD-3-Clause"
] | null | null | null | lhorizon/_request_formatters.py | arfon/lhorizon | de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb | [
"BSD-3-Clause"
] | null | null | null | lhorizon/_request_formatters.py | arfon/lhorizon | de2fa9c8121b27da87a4e0613a7dd5ec0647d9fb | [
"BSD-3-Clause"
] | null | null | null | """
formatters to translate various parameters and options into URL parameters
that can be parsed by JPL Horizons' CGI. These are mostly intended to be used
by LHorizon methods and should probably not be called directly.
"""
from collections.abc import Mapping, Sequence
from typing import Union
import numpy as np
import pandas as pd
def format_geodetic_origin(location: Mapping) -> dict:
    """
    translate a geodetic coordinate origin (observer location) into the
    equivalent dict of Horizons URL parameters
    """
    center = f"coord@{str(location['body'])}"
    lon, lat, alt = (
        float(location[key]) for key in ("lon", "lat", "elevation")
    )
    return {
        "CENTER": center,
        "COORD_TYPE": "GEODETIC",
        "SITE_COORD": f"'{lon:f},{lat:f},{alt:f}'",
    }
def format_geodetic_target(location: Mapping) -> str:
    """render the Horizons command string for a geodetic target"""
    lon = location["lon"]
    lat = location["lat"]
    elevation = location["elevation"]
    body = location["body"]
    return f"g:{lon},{lat},{elevation}@{body}"
def format_epoch_params(epochs: Union[Sequence, Mapping]) -> dict:
    """translate epochs into the equivalent dict of URL parameters"""
    if isinstance(epochs, (pd.Series, list, tuple, np.ndarray)):
        # explicit sequence of epochs: newline-delimited TLIST
        return {"TLIST": "\n".join(map(str, epochs))}
    if isinstance(epochs, dict):
        # range specification: all three of start/stop/step are required
        if not {"start", "stop", "step"}.issubset(epochs.keys()):
            raise ValueError("'epochs' must contain start, stop, step")
        start, stop, step = (
            str(epochs[key]) for key in ("start", "stop", "step")
        )
        return {
            "START_TIME": f'"{start}"',
            "STOP_TIME": f'"{stop}"',
            "STEP_SIZE": f'"{step}"',
        }
    # otherwise, treat epochs as a single scalar epoch
    return {"TLIST": str(epochs)}
def make_commandline(
    target: Union[str, int, Mapping],
    closest_apparition: Union[bool, str],
    no_fragments: bool,
):
    """build the 'primary' COMMAND string for a Horizons CGI request"""
    if isinstance(target, Mapping):
        # geodetic targets are rendered as a coordinate command string
        target = format_geodetic_target(target)
    parts = [str(target)]
    if isinstance(closest_apparition, bool):
        # boolean flag: a bare CAP directive, or nothing at all
        if closest_apparition:
            parts.append("CAP;")
    else:
        # string value: CAP qualified by an apparition specifier
        parts.append(f"CAP{closest_apparition:s};")
    if no_fragments:
        parts.append("NOFRAG;")
    return " ".join(parts)
# TODO: add REF_PLANE parameters
def assemble_request_params(
    commandline: str,
    query_type: str,
    extra_precision: bool,
    max_hour_angle: float,
    quantities: str,
    refraction: bool,
    refsystem: str,
    solar_elongation: Sequence[float],
    vec_corr: str,
    vec_table: int,
    ref_plane: str
) -> dict[str, Union[str, int]]:
    """
    final-stage assembler for Horizons CGI URL parameters.

    commandline: primary COMMAND string (see make_commandline())
    query_type: Horizons table type, e.g. "OBSERVER" or "VECTORS"
    extra_precision: whether to request extra output precision
    max_hour_angle: local hour angle cutoff passed as LHA_CUTOFF
    quantities: comma-separated Horizons quantity codes
    refraction: if True, request refracted ("apparent") rather than
        airless coordinates; must be a bool (or 0/1), anything else
        raises KeyError
    refsystem: reference system, e.g. "J2000"
    solar_elongation: (min, max) solar elongation cutoffs in degrees
    vec_corr: vector correction: NONE, LT, or LT + s
    vec_table: Horizons vector table format code
    ref_plane: reference plane; accepted but currently unused --
        REF_PLANE is not yet passed to the CGI (see TODO above)

    returns a dict of URL query parameters ready to be encoded into a
    Horizons CGI request.
    """
    return {
        "batch": 1,
        "TABLE_TYPE": query_type,
        "QUANTITIES": "'" + str(quantities) + "'",
        "COMMAND": '"' + commandline + '"',
        "SOLAR_ELONG": '"'
        + str(solar_elongation[0])
        + ","
        + str(solar_elongation[1])
        + '"',
        "LHA_CUTOFF": str(max_hour_angle),
        "CSV_FORMAT": "YES",
        "CAL_FORMAT": "BOTH",
        "ANG_FORMAT": "DEG",
        "APPARENT": {False: "AIRLESS", True: "REFRACTED"}[refraction],
        "REF_SYSTEM": refsystem,
        "EXTRA_PREC": {True: "YES", False: "NO"}[extra_precision],
        # NONE, LT, LT + s
        "VEC_CORR": "'" + vec_corr + "'",
        "VEC_TABLE": "'" + str(vec_table) + "'",
    }
ceac15e5add44827ffdab3055b716cb3256a3e2a | 1,327 | py | Python | src/aws_lambda_typing/events/kinesis_stream.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 29 | 2021-01-07T13:35:16.000Z | 2022-03-25T07:20:54.000Z | src/aws_lambda_typing/events/kinesis_stream.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 13 | 2021-02-28T00:31:00.000Z | 2022-03-29T15:24:01.000Z | src/aws_lambda_typing/events/kinesis_stream.py | chuckwondo/aws-lambda-typing | 8417ab67f2492be1508fe38b2c34bc106619a56d | [
"MIT"
] | 5 | 2021-02-27T13:50:42.000Z | 2022-01-13T15:05:44.000Z | #!/usr/bin/env python
import sys
if sys.version_info >= (3, 8):
from typing import List, TypedDict
else:
from typing import List
from typing_extensions import TypedDict
| 16.382716 | 66 | 0.666164 |
ceac7c7a86d7f596354c9e7181c0d362a2bc878a | 1,478 | py | Python | tests/test_mangling.py | ecoinvent/brightway2-parameters | 0b42466bf33655087e231364a7d677c6c114a046 | [
"BSD-3-Clause"
] | null | null | null | tests/test_mangling.py | ecoinvent/brightway2-parameters | 0b42466bf33655087e231364a7d677c6c114a046 | [
"BSD-3-Clause"
] | 1 | 2019-12-26T15:18:49.000Z | 2019-12-26T15:18:49.000Z | tests/test_mangling.py | ecoinvent/brightway2-parameters | 0b42466bf33655087e231364a7d677c6c114a046 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T12:14:49.000Z | 2021-07-05T12:14:49.000Z | from bw2parameters import *
| 35.190476 | 95 | 0.525034 |
cead398064b594593f3430fbc788b9476bf86da6 | 150 | py | Python | venv/Lib/site-packages/clyent/errors.py | GiovanniConserva/TestDeploy | 7a8242df6fe996b1029497d2d87295d1531b6139 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/clyent/errors.py | GiovanniConserva/TestDeploy | 7a8242df6fe996b1029497d2d87295d1531b6139 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/clyent/errors.py | GiovanniConserva/TestDeploy | 7a8242df6fe996b1029497d2d87295d1531b6139 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
| 18.75 | 72 | 0.8 |
ceaeab5eb737a59c0b5e6c14b392452d4ffec67b | 2,482 | py | Python | abm-predator-prey.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 176 | 2019-12-18T11:44:28.000Z | 2022-03-27T09:09:33.000Z | abm-predator-prey.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2020-03-29T00:51:25.000Z | 2020-07-19T11:08:32.000Z | abm-predator-prey.py | RachidStat/PyCX | a1a597e61d03b25cf138dd11ab136db8202e1243 | [
"BSD-2-Clause-FreeBSD"
] | 56 | 2019-12-18T19:04:12.000Z | 2022-03-22T09:35:33.000Z | import pycxsimulator
from pylab import *
import copy as cp
nr = 500. # carrying capacity of rabbits
r_init = 100 # initial rabbit population
mr = 0.03 # magnitude of movement of rabbits
dr = 1.0 # death rate of rabbits when it faces foxes
rr = 0.1 # reproduction rate of rabbits
f_init = 30 # initial fox population
mf = 0.05 # magnitude of movement of foxes
df = 0.1 # death rate of foxes when there is no food
rf = 0.5 # reproduction rate of foxes
cd = 0.02 # radius for collision detection
cdsq = cd ** 2
pycxsimulator.GUI().start(func=[initialize, observe, update])
| 26.978261 | 74 | 0.553183 |
ceaf4faaf34ba6494575ecceb758062b6bcf8486 | 4,832 | py | Python | models/base_ae.py | christopher-beckham/amr | 1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0 | [
"BSD-3-Clause"
] | 35 | 2019-08-27T08:59:53.000Z | 2021-09-19T15:55:34.000Z | models/base_ae.py | christopher-beckham/amr | 1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0 | [
"BSD-3-Clause"
] | 4 | 2020-01-14T05:09:31.000Z | 2020-05-25T20:39:55.000Z | models/base_ae.py | christopher-beckham/amr | 1bd67b9b4fb2fcf07cc8faba3c863f5ad5d4c4c0 | [
"BSD-3-Clause"
] | 3 | 2019-12-24T01:29:49.000Z | 2020-12-06T01:56:19.000Z | import torch
import numpy as np
from collections import OrderedDict
from torch import optim
from itertools import chain
from .base import Base
from torch import nn
| 31.174194 | 97 | 0.528353 |
ceafef1d012a2252b4736fc5912d1fe98bb743cd | 10,608 | py | Python | psdet/models/point_detector/utils.py | Jiaolong/gcn-parking-slot | f8c3b445b186e3a7fd13af1f17fa5ba0336027c7 | [
"MIT"
] | 56 | 2021-03-24T08:24:27.000Z | 2022-03-26T13:56:36.000Z | psdet/models/point_detector/utils.py | Jiaolong/gcn-parking-slot | f8c3b445b186e3a7fd13af1f17fa5ba0336027c7 | [
"MIT"
] | 7 | 2021-04-05T03:55:05.000Z | 2022-03-08T03:12:20.000Z | psdet/models/point_detector/utils.py | Jiaolong/gcn-parking-slot | f8c3b445b186e3a7fd13af1f17fa5ba0336027c7 | [
"MIT"
] | 17 | 2021-04-04T02:42:09.000Z | 2022-03-31T01:48:06.000Z | """Universal network struture unit definition."""
import torch
import math
from torch import nn
import torchvision
from torch.utils import model_zoo
from torchvision.models.resnet import BasicBlock, model_urls, Bottleneck
def define_squeeze_unit(basic_channel_size):
"""Define a 1x1 squeeze convolution with norm and activation."""
conv = nn.Conv2d(2 * basic_channel_size, basic_channel_size, kernel_size=1,
stride=1, padding=0, bias=False)
norm = nn.BatchNorm2d(basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_expand_unit(basic_channel_size):
"""Define a 3x3 expand convolution with norm and activation."""
conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=3,
stride=1, padding=1, bias=False)
norm = nn.BatchNorm2d(2 * basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_halve_unit(basic_channel_size):
"""Define a 4x4 stride 2 expand convolution with norm and activation."""
conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=4,
stride=2, padding=1, bias=False)
norm = nn.BatchNorm2d(2 * basic_channel_size)
relu = nn.LeakyReLU(0.1)
layers = [conv, norm, relu]
return layers
def define_depthwise_expand_unit(basic_channel_size):
"""Define a 3x3 expand convolution with norm and activation."""
conv1 = nn.Conv2d(basic_channel_size, 2 * basic_channel_size,
kernel_size=1, stride=1, padding=0, bias=False)
norm1 = nn.BatchNorm2d(2 * basic_channel_size)
relu1 = nn.LeakyReLU(0.1)
conv2 = nn.Conv2d(2 * basic_channel_size, 2 * basic_channel_size, kernel_size=3,
stride=1, padding=1, bias=False, groups=2 * basic_channel_size)
norm2 = nn.BatchNorm2d(2 * basic_channel_size)
relu2 = nn.LeakyReLU(0.1)
layers = [conv1, norm1, relu1, conv2, norm2, relu2]
return layers
def define_detector_block(basic_channel_size):
"""Define a unit composite of a squeeze and expand unit."""
layers = []
layers += define_squeeze_unit(basic_channel_size)
layers += define_expand_unit(basic_channel_size)
return layers
# vgg backbone
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 1024, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
return model
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet18(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet50(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
| 36.961672 | 113 | 0.607089 |
ceb07de7a23c054666d87af2b849db00c172593e | 819 | py | Python | Config/Texts/NFSW/NFSW.py | amiralirj/DarkHelper | 386eea58eb6b9766d6f900a83f87eeac0b8f09c2 | [
"MIT"
] | 34 | 2021-08-05T12:41:18.000Z | 2021-11-30T22:23:20.000Z | Config/Texts/NFSW/NFSW.py | amiralirj/DarkHelper | 386eea58eb6b9766d6f900a83f87eeac0b8f09c2 | [
"MIT"
] | 2 | 2021-08-29T10:32:02.000Z | 2021-08-31T12:10:29.000Z | Config/Texts/NFSW/NFSW.py | amiralirj/DarkHelper | 386eea58eb6b9766d6f900a83f87eeac0b8f09c2 | [
"MIT"
] | 5 | 2021-08-07T07:41:44.000Z | 2021-08-20T13:52:36.000Z | NFSW_Texts = [
''
,''
,' '
,''
,''
,'jnde'
,'jende'
,'kos'
,'pussy'
,'kir'
,'lashi'
,''
,'jakesh'
,''
,' '
,'madar kharab'
,'mde kharab'
,'khar kose'
,'fuck'
,'bitch'
,'haroomzade'
,''
,''
,''
,''
,' '
]
NFSW_Names=[
''
,''
,""
,""
,""
,""
,""
,""
,""
,""
,""
,""
]
Porn={'dick':'Male Genitalia - Exposed',
'pussy':'Female Genitalia - Exposed',
'coveredpossy':'Female Genitalia - Covered',
'fboobs':'Female Breast - Exposed',
'mboobs':'Male Breast - Exposed',
'coveredboobs':'Female Breast - Covered',
'stomack':'Male Breast - Covered',
'baghal':'Male Breast - Exposed',
'ass':'Buttocks - Exposed',
'feet':'404NotFound',
'coveredass':'Buttocks - Covered'} | 14.625 | 45 | 0.577534 |
ceb45da6d8703038dcca50551f842da1aba2b40e | 14,784 | py | Python | ml_collections/config_dict/tests/frozen_config_dict_test.py | wyddmw/ViT-pytorch-1 | 81dd3c43880d0f641ec8e15d8226035a358e78fc | [
"Apache-2.0"
] | 311 | 2020-08-25T14:44:55.000Z | 2022-03-30T17:19:45.000Z | ml_collections/config_dict/tests/frozen_config_dict_test.py | wyddmw/ViT-pytorch-1 | 81dd3c43880d0f641ec8e15d8226035a358e78fc | [
"Apache-2.0"
] | 9 | 2020-11-14T04:00:23.000Z | 2022-02-18T21:03:33.000Z | ml_collections/config_dict/tests/frozen_config_dict_test.py | wyddmw/ViT-pytorch-1 | 81dd3c43880d0f641ec8e15d8226035a358e78fc | [
"Apache-2.0"
] | 19 | 2020-08-25T21:52:30.000Z | 2022-03-13T22:25:20.000Z | # Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FrozenConfigDict."""
from collections import abc as collections_abc
import copy
import pickle
from absl.testing import absltest
import ml_collections
_TEST_DICT = {
'int': 2,
'list': [1, 2],
'nested_list': [[1, [2]]],
'set': {1, 2},
'tuple': (1, 2),
'frozenset': frozenset({1, 2}),
'dict': {
'float': -1.23,
'list': [1, 2],
'dict': {},
'tuple_containing_list': (1, 2, (3, [4, 5], (6, 7))),
'list_containing_tuple': [1, 2, [3, 4], (5, 6)],
},
'ref': ml_collections.FieldReference({'int': 0})
}
if __name__ == '__main__':
absltest.main()
| 37.810742 | 80 | 0.718277 |
ceb543c431720b3b36051ad70d948b85d8942aeb | 3,829 | py | Python | driver.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | driver.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | driver.py | kavj/npmd | 742fcb271e695b24bb062cdc66d455c0f397116d | [
"Apache-2.0"
] | null | null | null | import numbers
import os
import sys
import typing
import numpy as np
from dataclasses import dataclass
from pathlib import Path
import ir
import type_interface as ti
import type_resolution as tr
from ASTTransform import build_module_ir_and_symbols
from ccodegen import codegen
from canonicalize import NormalizePaths
from errors import error_context, CompilerError
from lowering import loop_lowering
from pretty_printing import pretty_printer
from reaching_check import ReachingCheck
from utils import wrap_input
version = sys.version_info
# Python 2 can't parse a significant
# amount of this code, so error messages ignore it.
if sys.version_info.minor < 8:
raise RuntimeError(f"Python 3.8 or above is required.")
# stub for now, since we may need to remake typed passes later
# per function or incorporate context management
| 31.130081 | 81 | 0.655524 |
ceb62f0313de2d8b1490179cab386a903cdaa203 | 20,552 | py | Python | ipt/ipt_hough_circles_detector.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | 1 | 2020-06-30T06:53:36.000Z | 2020-06-30T06:53:36.000Z | ipt/ipt_hough_circles_detector.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | ipt/ipt_hough_circles_detector.py | tpmp-inra/ipapi | b0f6be8960a20dbf95ef9df96efdd22bd6e031c5 | [
"MIT"
] | null | null | null | import os
import pickle
import logging
logger = logging.getLogger(__name__)
import cv2
import numpy as np
from skimage.transform import hough_circle, hough_circle_peaks
import ipso_phen.ipapi.base.ip_common as ipc
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.ipt.ipt_edge_detector import IptEdgeDetector
from ipso_phen.ipapi.tools.regions import (
RectangleRegion,
CircleRegion,
AnnulusRegion,
Point,
)
from ipso_phen.ipapi.tools.folders import ipso_folders
| 39.371648 | 116 | 0.476109 |
ceb677a0c09c58bfcd7c3dccc2e5b38736bebbf5 | 1,278 | py | Python | python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py | alexcourouble/automotive-iot-samples | ead3549e052968b7f2c0a30c3787f34e15e373fd | [
"Apache-2.0"
] | 11 | 2019-09-02T12:38:05.000Z | 2021-01-03T17:52:32.000Z | python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py | alexcourouble/automotive-iot-samples | ead3549e052968b7f2c0a30c3787f34e15e373fd | [
"Apache-2.0"
] | 34 | 2019-12-29T21:31:35.000Z | 2021-10-06T03:08:21.000Z | python_sample/cloud_API_endpoint/my_driving/edge_endpoint/db.py | alexcourouble/automotive-iot-samples | ead3549e052968b7f2c0a30c3787f34e15e373fd | [
"Apache-2.0"
] | 7 | 2019-04-28T22:14:26.000Z | 2021-02-17T16:58:34.000Z | import sqlite3
from flask import current_app, g
def get_db():
"""
Connect to the application's configured database. The connection
is unique for each request and will be reused if this is called
again.
"""
if 'db' not in g:
g.db = sqlite3.connect(
'../data/cloud_db.db',
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
initialize_DB(g.db)
return g.db
def initialize_DB(db):
"""
Creating event table if it doesn't already exist.
The event table has two keys:
1-A key generated on the edge gateway when an event detected.
2-The sqlite3 rowid: http://www.sqlitetutorial.net/sqlite-autoincrement/
"""
db.execute( """CREATE TABLE IF NOT EXISTS events (client_side_id TEXT, user TEXT, event_timestamp INTEGER, distance TEXT, fuel TEXT);""")
def write_event(json_data):
"""
Inserts data passed in argument.
"""
db = get_db()
row_to_insert = [
json_data["client_side_id"],
json_data["user"],
int(json_data["event_timestamp"]),
json_data["distance"],
json_data["fuel"]
]
db.execute("""INSERT OR REPLACE INTO events VALUES(?,?,?,?,?)""",row_to_insert)
db.commit()
| 26.081633 | 141 | 0.628326 |
ceb6903422d5b1eae99326a4376ddaace1e94411 | 8,467 | py | Python | tests/core/test_app.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | 1 | 2019-06-16T01:16:56.000Z | 2019-06-16T01:16:56.000Z | tests/core/test_app.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | 1 | 2019-05-14T04:08:43.000Z | 2019-05-14T04:08:43.000Z | tests/core/test_app.py | ShepardZhao/rancher | a747ac408ca34fb0bf465276f07557ec43bf9c89 | [
"Apache-2.0"
] | null | null | null | from .common import random_str
from .test_catalog import wait_for_template_to_be_created
import time
| 36.49569 | 77 | 0.591709 |
ceb7ed5b699f0ffa10c4137af6c73aac0a124844 | 1,455 | py | Python | iotPub.py | norikokt/serverless-language-translation | 51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb | [
"Apache-2.0"
] | null | null | null | iotPub.py | norikokt/serverless-language-translation | 51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb | [
"Apache-2.0"
] | null | null | null | iotPub.py | norikokt/serverless-language-translation | 51c0fdbf3b8c0c15d5e2208ad9cff147adc59efb | [
"Apache-2.0"
] | 1 | 2020-07-30T09:25:53.000Z | 2020-07-30T09:25:53.000Z | # Copyright 2018 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
HOST = 'messaging.internetofthings.ibmcloud.com'
PORT = 1883
PATH = '.' + HOST + ':' + PORT + '/api/v0002/application/types/'
| 39.324324 | 76 | 0.651546 |
ceb89dab258e22f4133b8db37425e38785178a38 | 6,177 | py | Python | awsshell/autocomplete.py | bdharang/AWS_SHELL | 4e84552f367f4da647e10be05795b870c112e3bb | [
"Apache-2.0"
] | null | null | null | awsshell/autocomplete.py | bdharang/AWS_SHELL | 4e84552f367f4da647e10be05795b870c112e3bb | [
"Apache-2.0"
] | null | null | null | awsshell/autocomplete.py | bdharang/AWS_SHELL | 4e84552f367f4da647e10be05795b870c112e3bb | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from awsshell.fuzzy import fuzzy_search
from awsshell.substring import substring_search
| 41.736486 | 79 | 0.610329 |
ceba8a2b9daea8892048e636439497932e869b2b | 3,809 | py | Python | IM920.py | Momijinn/IM920MHz_Module | 1f70a0021a82ba7bf3bf0fd461b01d921be27ea7 | [
"MIT"
] | 1 | 2021-06-06T03:54:18.000Z | 2021-06-06T03:54:18.000Z | IM920.py | Momijinn/IM920MHz_Module | 1f70a0021a82ba7bf3bf0fd461b01d921be27ea7 | [
"MIT"
] | null | null | null | IM920.py | Momijinn/IM920MHz_Module | 1f70a0021a82ba7bf3bf0fd461b01d921be27ea7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
pyserial
pytho2.x(python3.*)
CreaterKaname Takano
'''
import serial
import binascii
import signal
import sys
import platform
from serial.tools import list_ports
#platform
if platform.system() == 'Windows': #windows
ports = list_ports.comports()
portnumber = None
for port in ports:
if (port.vid == 1027) and (port.pid == 24597): #IDID
portnumber = port.device
print("connect to " + portnumber)
break
if portnumber == None:
print("not connetc to im920!")
sys.exit(1)
elif platform.system() == 'Linux': #Linux
portnumber = '/dev/ttyUSB0'
'''
ctrl+c
'''
'''
serial.Serial
mybaudrate:
'''
'''
ID
mybaudrate:
'''
'''
mybaudrate:
setbaudrate:()
0 1200bps
1 2400bps
2 4800bps
3 9600bps
4 19200bps
5 38400bps
'''
'''
mybaudrate:
args:ID()
'''
'''
!
mybaudrate:
'''
'''
mybaudrate:
args: ()
'''
'''
mybaudrate:
'''
'''
mybaudrate:
''' | 20.368984 | 78 | 0.608034 |
cebb72569f74c340b49b55e56cd5cfb94ded36d4 | 229 | py | Python | test/webdnn_test/graph_test/operators_test/sigmoid_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/webdnn_test/graph_test/operators_test/sigmoid_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/webdnn_test/graph_test/operators_test/sigmoid_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | from test.webdnn_test.graph_test.operators_test.util import template_test_unary_operator
from webdnn.graph.operators.sigmoid import Sigmoid
| 20.818182 | 88 | 0.812227 |
cebbaac4650f5e836f24ada37e7051b81fcb685c | 262 | py | Python | hw_asr/metric/__init__.py | kostyayatsok/asr_project_template | ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb | [
"MIT"
] | null | null | null | hw_asr/metric/__init__.py | kostyayatsok/asr_project_template | ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb | [
"MIT"
] | null | null | null | hw_asr/metric/__init__.py | kostyayatsok/asr_project_template | ee5fb8006fa4e4f5d4a2e5c6e9f6352c22ad5bbb | [
"MIT"
] | null | null | null | from hw_asr.metric.cer_metric import ArgmaxCERMetric, BeamsearchCERMetric
from hw_asr.metric.wer_metric import ArgmaxWERMetric, BeamsearchWERMetric
__all__ = [
"ArgmaxWERMetric",
"ArgmaxCERMetric",
"BeamsearchCERMetric",
"BeamsearchWERMetric"
]
| 26.2 | 73 | 0.790076 |
cebbe59000886920359408b216f69573eb72a0fb | 6,746 | py | Python | src/main_window.py | serenafr/My2048 | ea96f8c8d9651ad86308f02f35474a51dc3be531 | [
"MIT"
] | null | null | null | src/main_window.py | serenafr/My2048 | ea96f8c8d9651ad86308f02f35474a51dc3be531 | [
"MIT"
] | 1 | 2015-04-25T00:36:44.000Z | 2015-04-25T00:36:44.000Z | src/main_window.py | serenafr/My2048 | ea96f8c8d9651ad86308f02f35474a51dc3be531 | [
"MIT"
] | null | null | null | import wx
import wx.lib.stattext as ST
import board
import score_board
from os.path import expanduser
SCORE_FILE_PATH = expanduser('~/.config/my2048/scores.conf')
if __name__ == "__main__":
app = wx.App()
board_object = board.Board(2)
score_board_object = score_board.Score_Board()
frame = My2048_wx(None, -1, '2048', (400, 300), board_object, score_board_object)
app.MainLoop()
| 36.464865 | 119 | 0.725912 |
cebc0e5ca2b65cfcf3eb7b5e9ccc9a733e94b3e5 | 19,153 | py | Python | tests/base/test_maps.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | 1 | 2022-02-18T16:31:27.000Z | 2022-02-18T16:31:27.000Z | tests/base/test_maps.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | tests/base/test_maps.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | import numpy as np
import unittest
import discretize
from SimPEG import maps, models, utils
from discretize.utils import mesh_builder_xyz, refine_tree_xyz
import inspect
TOL = 1e-14
np.random.seed(121)
REMOVED_IGNORE = [
"FullMap",
"CircleMap",
"Map2Dto3D",
"Vertical1DMap",
"ActiveCells",
]
MAPS_TO_EXCLUDE_2D = [
"ComboMap",
"ActiveCells",
"InjectActiveCells",
"LogMap",
"ReciprocalMap",
"PolynomialPetroClusterMap",
"Surject2Dto3D",
"Map2Dto3D",
"Mesh2Mesh",
"ParametricPolyMap",
"PolyMap",
"ParametricSplineMap",
"SplineMap",
"BaseParametric",
"ParametricBlock",
"ParametricEllipsoid",
"ParametricCasingAndLayer",
"ParametricLayer",
"ParametricBlockInLayer",
"Projection",
"SelfConsistentEffectiveMedium",
"SumMap",
"SurjectUnits",
"TileMap",
] + REMOVED_IGNORE
MAPS_TO_EXCLUDE_3D = [
"ComboMap",
"ActiveCells",
"InjectActiveCells",
"LogMap",
"ReciprocalMap",
"PolynomialPetroClusterMap",
"CircleMap",
"ParametricCircleMap",
"Mesh2Mesh",
"BaseParametric",
"ParametricBlock",
"ParametricEllipsoid",
"ParametricPolyMap",
"PolyMap",
"ParametricSplineMap",
"SplineMap",
"ParametricCasingAndLayer",
"ParametricLayer",
"ParametricBlockInLayer",
"Projection",
"SelfConsistentEffectiveMedium",
"SumMap",
"SurjectUnits",
"TileMap",
] + REMOVED_IGNORE
if __name__ == "__main__":
unittest.main()
| 32.573129 | 88 | 0.532293 |
cebc19ad6f58417864244da0cd384e9b8241025b | 11,569 | py | Python | src/panda_env.py | irom-lab/PAC-Imitation | 1b9c203b02551895613b6710da33e1bebe4a0f11 | [
"MIT"
] | 12 | 2020-08-11T03:26:36.000Z | 2022-02-10T01:14:08.000Z | grasp/src/panda_env.py | irom-lab/Task_Relevant_OOD_Detection | c49d04f88a3e155bec9abb5ac5529dff8ea2c449 | [
"MIT"
] | null | null | null | grasp/src/panda_env.py | irom-lab/Task_Relevant_OOD_Detection | c49d04f88a3e155bec9abb5ac5529dff8ea2c449 | [
"MIT"
] | 1 | 2021-03-08T10:46:06.000Z | 2021-03-08T10:46:06.000Z | import pybullet_data
import pybullet as p
import time
import numpy as np
from src.utils_geom import *
from src.utils_depth import *
from src.panda import Panda
| 31.183288 | 227 | 0.684675 |
cebc72a58b425fb7f7cd7143c3625b862489e1f9 | 178 | py | Python | desafios/desafio 021.py | juaoantonio/curso_video_python | 7520223d8647929530a1cd96f7c7d8c8f264ba1e | [
"MIT"
] | null | null | null | desafios/desafio 021.py | juaoantonio/curso_video_python | 7520223d8647929530a1cd96f7c7d8c8f264ba1e | [
"MIT"
] | null | null | null | desafios/desafio 021.py | juaoantonio/curso_video_python | 7520223d8647929530a1cd96f7c7d8c8f264ba1e | [
"MIT"
] | null | null | null | import pygame
pygame.mixer.init()
pygame.init()
pygame.mixer.music.load('/home/jaab/Msica/bach_1.wav')
pygame.mixer.music.play()
input()
pygame.event.wait()
pygame.mixer.stop()
| 19.777778 | 55 | 0.758427 |
cebdd561ae5cf73cc61b02a50a7e42a495c58927 | 67 | py | Python | deem/pytorch/layers/__init__.py | xxaxtt/TwoTowers | 206c6b38a2f72486906d391c5176e4508036aac0 | [
"Apache-2.0"
] | 14 | 2021-09-22T02:24:16.000Z | 2021-12-11T11:59:02.000Z | deem/pytorch/layers/__init__.py | xxaxtt/TwoTowers | 206c6b38a2f72486906d391c5176e4508036aac0 | [
"Apache-2.0"
] | 2 | 2021-10-16T04:39:21.000Z | 2021-12-01T08:04:46.000Z | deem/pytorch/layers/__init__.py | xxaxtt/TwoTowers | 206c6b38a2f72486906d391c5176e4508036aac0 | [
"Apache-2.0"
] | 5 | 2021-10-09T11:47:53.000Z | 2021-11-25T04:41:24.000Z | from .embedding import *
from .sequence import *
from .mlp import * | 22.333333 | 24 | 0.746269 |
cebf0a2899cd29ab2ab60478658090adf649c895 | 1,765 | py | Python | common.py | braindatalab/scrutinizing-xai | fb24fed7ae3adc10e3c35d7f477a5db322b48f4f | [
"MIT"
] | null | null | null | common.py | braindatalab/scrutinizing-xai | fb24fed7ae3adc10e3c35d7f477a5db322b48f4f | [
"MIT"
] | null | null | null | common.py | braindatalab/scrutinizing-xai | fb24fed7ae3adc10e3c35d7f477a5db322b48f4f | [
"MIT"
] | null | null | null | import json
import os
import pickle
from dataclasses import dataclass, field
from os.path import join
from typing import Dict, Any, ClassVar
| 25.955882 | 62 | 0.628329 |
cebff2caa1f50e0a7946513e1fed0db5898382ad | 1,623 | py | Python | src/spn/tests/prometheus_tests/test.py | AmurG/SPFlow | ab28dd4af9ed722ace69c6b290cf0a279bbda39e | [
"Apache-2.0"
] | null | null | null | src/spn/tests/prometheus_tests/test.py | AmurG/SPFlow | ab28dd4af9ed722ace69c6b290cf0a279bbda39e | [
"Apache-2.0"
] | null | null | null | src/spn/tests/prometheus_tests/test.py | AmurG/SPFlow | ab28dd4af9ed722ace69c6b290cf0a279bbda39e | [
"Apache-2.0"
] | null | null | null | import spn
#from spn.structure.leaves.mvgauss.MVG import *
from spn.io.Text import *
import sys
from spn.structure.leaves.parametric.Parametric import *
from spn.structure.leaves.parametric.MLE import *
from spn.algorithms.MPE import mpe
from spn.structure.prometheus.disc import *
from scipy.stats import multivariate_normal as mn
#from spn.structure.prometheus.disc import *
node = MultivariateGaussian(np.inf, np.inf)
data = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 3, 3, 6, 2]).reshape(-1, 2)
update_parametric_parameters_mle(node, data)
print(node.mean, node.sigma)
print(node.scope)
dummydata = np.asarray([[1, 2, 4, 8], [2.1, 4.1, 8.1, 16.1], [
4.1, 8.1, 16.1, 32.1], [8.8, 16.5, 32.3, 64.2]])
dummyscope = list([0, 1, 2, 3])
spn = MultivariateGaussian(np.inf, np.inf)
update_parametric_parameters_mle(spn, dummydata)
print(spn.mean)
print(spn.sigma)
spn.scope = dummyscope
#print(mn.pdf(spn.mean, spn.mean, spn.cov))
print(spn.scope)
dummydata = np.asarray([[np.nan, 2.0, np.nan, np.nan],
[np.nan, np.nan, np.nan, 64.3]])
print(np.shape(dummydata))
print(np.shape(np.asarray(spn.mean)))
print(np.shape(np.asarray(spn.sigma)))
print(mpe(spn, dummydata))
print(spn_to_str_equation(spn))
recreate = (str_to_spn(spn_to_str_equation(spn)))
print(spn_to_str_equation(recreate))
print(recreate.mean)
print(recreate.sigma)
arr = np.load('./test.npy')
teststruct = prometheus(arr, 1, itermult=0, leafsize=4, maxsize=6)
testspn = str_to_spn(teststruct)
recreate = spn_to_str_equation(testspn)
file = open('./ca.txt', 'w')
file.write(teststruct)
file.close()
| 24.969231 | 74 | 0.701787 |
cec12b95a353d8bc6ee20816f32cfee16d9c8d60 | 5,592 | py | Python | tests/test_converters.py | stabacco/graphene-pydantic | 41d62e1879b1f6ebd75319c39b0a872ec6594cc5 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-04-22T06:13:16.000Z | 2021-04-22T06:13:16.000Z | tests/test_converters.py | stabacco/graphene-pydantic | 41d62e1879b1f6ebd75319c39b0a872ec6594cc5 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-11-10T18:40:44.000Z | 2020-11-10T18:40:44.000Z | tests/test_converters.py | stabacco/graphene-pydantic | 41d62e1879b1f6ebd75319c39b0a872ec6594cc5 | [
"Apache-2.0",
"MIT"
] | null | null | null | import datetime
import decimal
import enum
import typing as T
import uuid
import graphene
import graphene.types
import pydantic
import pytest
from pydantic import BaseModel, create_model
import graphene_pydantic.converters as converters
from graphene_pydantic.converters import ConversionError, convert_pydantic_field
from graphene_pydantic.objecttype import PydanticObjectType
from graphene_pydantic.registry import get_global_registry, Placeholder
| 31.954286 | 87 | 0.696531 |
cec1de2a6639546d17fb7dd7eb09653aa22c391e | 3,158 | py | Python | src/freesound.py | lRomul/argus-birdsong | 2290bd78f462cedc2ae143ec0b5e6e0782cd2b19 | [
"MIT"
] | null | null | null | src/freesound.py | lRomul/argus-birdsong | 2290bd78f462cedc2ae143ec0b5e6e0782cd2b19 | [
"MIT"
] | null | null | null | src/freesound.py | lRomul/argus-birdsong | 2290bd78f462cedc2ae143ec0b5e6e0782cd2b19 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from pathlib import Path
import multiprocessing as mp
from functools import partial
from src.audio import read_as_melspectrogram
from src.utils import get_params_hash
from src import config
NOISE_SOUNDS = [
'Buzz',
'Car_passing_by',
'Crackle',
'Cricket',
'Hiss',
'Mechanical_fan',
'Stream',
'Traffic_noise_and_roadway_noise',
'Walk_and_footsteps',
'Waves_and_surf',
'Crowd',
'Run',
'Female_speech_and_woman_speaking',
'Male_speech_and_man_speaking',
'Raindrop',
'Sink_(filling_or_washing)',
'Gurgling',
'Frying_(food)',
]
if __name__ == "__main__":
check_prepared_freesound_data(audio_params=config.audio)
| 30.07619 | 82 | 0.674478 |
cec2cea789e2b7e248e443d7b1eaf90f326de9fc | 5,338 | py | Python | especifico/json_schema.py | athenianco/especifico | af8b97868390ba23a2c5e3e8506bd5215ee0084a | [
"Apache-2.0"
] | null | null | null | especifico/json_schema.py | athenianco/especifico | af8b97868390ba23a2c5e3e8506bd5215ee0084a | [
"Apache-2.0"
] | null | null | null | especifico/json_schema.py | athenianco/especifico | af8b97868390ba23a2c5e3e8506bd5215ee0084a | [
"Apache-2.0"
] | null | null | null | """
Module containing all code related to json schema validation.
"""
from collections.abc import Mapping
import contextlib
from copy import deepcopy
import io
import os
import typing as t
import urllib.parse
import urllib.request
from jsonschema import Draft4Validator, RefResolver
from jsonschema.exceptions import RefResolutionError, ValidationError # noqa
from jsonschema.validators import extend
import requests
import yaml
from .utils import deep_get
default_handlers = {
"http": URLHandler(),
"https": URLHandler(),
"file": FileHandler(),
}
def resolve_refs(spec, store=None, handlers=None):
"""
Resolve JSON references like {"$ref": <some URI>} in a spec.
Optionally takes a store, which is a mapping from reference URLs to a
dereferenced objects. Prepopulating the store can avoid network calls.
"""
spec = deepcopy(spec)
store = store or {}
handlers = handlers or default_handlers
resolver = RefResolver("", spec, store, handlers=handlers)
res = _do_resolve(spec)
return res
NullableTypeValidator = allow_nullable(Draft4Validator.VALIDATORS["type"])
NullableEnumValidator = allow_nullable(Draft4Validator.VALIDATORS["enum"])
Draft4RequestValidator = extend(
Draft4Validator,
{
"type": NullableTypeValidator,
"enum": NullableEnumValidator,
"required": validate_required,
"readOnly": validate_readOnly,
},
)
Draft4ResponseValidator = extend(
Draft4Validator,
{
"type": NullableTypeValidator,
"enum": NullableEnumValidator,
"required": validate_required,
"writeOnly": validate_writeOnly,
"x-writeOnly": validate_writeOnly,
},
)
| 31.216374 | 95 | 0.641813 |
cec4f4492d6d64177d0b32e7ad2dbf7ec31fcdf8 | 779 | py | Python | mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from erpnext.accounts.doctype.fiscal_year.fiscal_year import FiscalYearIncorrectDate
test_records = frappe.get_test_records('Fiscal Year')
test_ignore = ["Company"]
| 28.851852 | 84 | 0.752246 |
cec53997d0c2400355b2407286a2ab638e1feab8 | 1,008 | py | Python | types/integers.py | anthony-walker/me499 | 1ec5761a822956b4e18f83b3e0cda93715b74b3e | [
"BSD-3-Clause"
] | 11 | 2020-03-31T21:27:19.000Z | 2022-01-11T09:50:13.000Z | types/integers.py | anthony-walker/me499 | 1ec5761a822956b4e18f83b3e0cda93715b74b3e | [
"BSD-3-Clause"
] | null | null | null | types/integers.py | anthony-walker/me499 | 1ec5761a822956b4e18f83b3e0cda93715b74b3e | [
"BSD-3-Clause"
] | 5 | 2020-05-13T05:47:23.000Z | 2021-09-27T18:43:25.000Z | #!/usr/bin/env python3
if __name__ == '__main__':
# Python can represent integers. Here are a couple of ways to create an integer variable. Notice the truncation,
# rather than rounding, in the assignment of d.
a = 5
b = int()
c = int(4)
d = int(3.84)
print(a, b, c, d)
# Integers have the usual math operations. Note that division will return a float, but others will preserve the
# integer type. The type() function can tell you the type of a variable. You should try to avoid using this
# function in your code.
print('\ndivision')
a = 10
b = 10 / 5
print(b, type(b))
# We can force integer division with //. Note that this will truncate results.
print('\nInteger division')
a = 10
b = 10 // 5
print(b, type(b))
a = 10
b = 10 // 3
print(b, type(b))
# We can also calculate the remainder
n = 10
m = 3
div = n // m
rem = n % m
print('\n{0} = {1} * {2} + {3}'.format(n, div, m, rem))
| 27.243243 | 118 | 0.590278 |
cec69b53aae0a98c800aee68729ab0b6f22dfd50 | 1,976 | py | Python | cli/asciiart.py | Christophe1997/pyramid | d135c86329b6527d54535d95c0db8b5d2da6cc8c | [
"Apache-2.0"
] | null | null | null | cli/asciiart.py | Christophe1997/pyramid | d135c86329b6527d54535d95c0db8b5d2da6cc8c | [
"Apache-2.0"
] | null | null | null | cli/asciiart.py | Christophe1997/pyramid | d135c86329b6527d54535d95c0db8b5d2da6cc8c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
"""Convert picture to asciiArt, requrie python3.6 or higher.
Dependence:
- fire
- PIL
- numpy
Usage:
- chmod +x asciiart.py
- asciiart.py ${path_to_image} [Height] [Width]
Also, you can remove the filetype ".py" and put it to $HOME/bin/ then enjoy it:)
One example:
*&&&&&&&&&&&&&&&&&&&&&+
&&&$ &&&&&&&&&&&&&&&&&%
&&&&&%&&&&&&&&&&&&&&&&&&&$
%%%%%%%%%%%%%&&&&&&&&$$$$$
+&&&&&&&&&&&&&&&&&&&&&&&$&&&$$$$$$$$ ****** ***
&&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$$*************
&&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$&$%*************
*&&&&&&&&&&&&&&&&&&&&&&&&&&$$$$$$$$$# ****************
+&&&&&&&&&&&&&$%**************************************
&&&&&&&&&&&&** **************************************
*&&&&&&&&&&$ ***************************************
+&&&&&&&&$ *************************************
**************************
**************************
****************** ****
+********************+
"""
import fire
from PIL import Image
import numpy as np
import sys
if __name__ == "__main__":
fire.Fire(AsciiArt)
| 29.058824 | 87 | 0.34666 |
cec6fd770eab40205480d0c6c46d89a072e6b4a6 | 2,057 | py | Python | tests/conftest.py | MohamedRaslan/screenpy | 94be7caa444ae7ac8a4ac403cd93ad92108237fe | [
"MIT"
] | null | null | null | tests/conftest.py | MohamedRaslan/screenpy | 94be7caa444ae7ac8a4ac403cd93ad92108237fe | [
"MIT"
] | null | null | null | tests/conftest.py | MohamedRaslan/screenpy | 94be7caa444ae7ac8a4ac403cd93ad92108237fe | [
"MIT"
] | null | null | null | from typing import Callable, Generator, Any
from unittest import mock
import pytest
from screenpy import AnActor, pacing, settings
from screenpy.abilities import AuthenticateWith2FA, BrowseTheWeb, MakeAPIRequests
from screenpy.narration.narrator import Narrator
def mock_settings(**new_settings) -> Callable:
"""Mock one or more settings for the duration of a test."""
return decorator
| 31.646154 | 81 | 0.684978 |
cec7fff35f1ea56bf0187cca1f5248b7d68f0fa3 | 14,729 | py | Python | main.py | ceciliazhang12/resource-reservation-system | d680582a41d39b1558b85d1e42f9006eb07caef8 | [
"Apache-2.0"
] | null | null | null | main.py | ceciliazhang12/resource-reservation-system | d680582a41d39b1558b85d1e42f9006eb07caef8 | [
"Apache-2.0"
] | null | null | null | main.py | ceciliazhang12/resource-reservation-system | d680582a41d39b1558b85d1e42f9006eb07caef8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START imports]
import os
from datetime import datetime, time, timedelta
import uuid
import time as t
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import mail
import jinja2
import webapp2
from models import Resource, Reservation
from __builtin__ import True
PATH_TEMPLATE = os.path.join(os.path.dirname(__file__), 'templates')
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(PATH_TEMPLATE),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# [END imports]
# Helper Function
'''
Landing Page, which displays the following 4 sections:
user login / logout link
reservations made for resources by that user (sorted by the reservation time)
all resources in the system (shown in reverse time order based on last made reservation)
resources owned by that user
a link to create a new resource
'''
'''
CreateResource Handler enables the function of creating a new resource
'''
'''
ViewResource Handler handels displaying the main page for an existing resource
'''
'''
ViewResource Handler handels the function of editing an existing resource
'''
'''
ViewUser Handler handels displaying the main page for an user
'''
'''
CreateResservation Handler enables the function of creating a new reservation
'''
'''
CreateResservation Handler enables the function of
generating a RSS link for an existing reservation
'''
'''
DeleteResservation Handler enables deleting an existing reservation in Landing Page
'''
'''
ResourceBy Handler enables the function of filtering existing resources by tag
'''
'''
SearchResource Handler enables the function of searching existing resources by name
'''
# [START app]
app = webapp2.WSGIApplication([
('/', LandingPage),
('/newResource.html', CreateResource),
('/resource.html', ViewResource),
('/editResource.html', EditResource),
('/newReservation.html', CreateReservation),
('/user.html', ViewUser),
('/index.html', DeleteReservation),
('/tag.html', ResourcesByTag),
('/rss.html', GenerateRSS),
('/searchResource.html', SearchResource),
], debug=True)
# [END app]
| 39.808108 | 136 | 0.63263 |
cec871b2942200cdc40d51b006698874939d3556 | 144 | py | Python | sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py | suttacentral/publications | 878befcfeb7af7f2f511697f2769cd00441aec57 | [
"CC0-1.0"
] | 1 | 2022-02-16T09:02:58.000Z | 2022-02-16T09:02:58.000Z | sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py | suttacentral/publications | 878befcfeb7af7f2f511697f2769cd00441aec57 | [
"CC0-1.0"
] | 43 | 2022-02-07T11:37:29.000Z | 2022-03-30T08:54:55.000Z | sutta_publisher/src/sutta_publisher/edition_parsers/__init__.py | suttacentral/publications | 878befcfeb7af7f2f511697f2769cd00441aec57 | [
"CC0-1.0"
] | null | null | null | from .epub import EpubEdition
from .html import HtmlEdition
from .pdf import PdfEdition
__all__ = ["EpubEdition", "HtmlEdition", "PdfEdition"]
| 24 | 54 | 0.777778 |
cec99a1754c7933823e18fdb2d3c3f20789ec5bf | 5,052 | py | Python | tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 8 | 2021-05-18T02:22:03.000Z | 2021-09-11T02:49:04.000Z | tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2021-04-26T04:38:35.000Z | 2021-04-26T04:38:35.000Z | tools/c7n_azure/tests_azure/tests_resources/test_event_hub.py | chris-angeli-rft/cloud-custodian | 5ff331b114a591dbaf6d672e30ceefb7ae64a5dd | [
"Apache-2.0"
] | 1 | 2020-12-28T23:21:30.000Z | 2020-12-28T23:21:30.000Z | # Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..azure_common import BaseTest, arm_template, cassette_name
| 35.328671 | 74 | 0.500792 |
cecb5247af165b57afea53ae66b0856d10c40b07 | 2,662 | py | Python | pythonfiles/testing.py | amrut-prabhu/loan-default-prediction | 2e0a91529a71c69e93d7b30decefc59f2627406f | [
"MIT"
] | 2 | 2020-05-06T15:11:56.000Z | 2020-05-24T13:51:55.000Z | pythonfiles/testing.py | amrut-prabhu/loan-default-prediction | 2e0a91529a71c69e93d7b30decefc59f2627406f | [
"MIT"
] | null | null | null | pythonfiles/testing.py | amrut-prabhu/loan-default-prediction | 2e0a91529a71c69e93d7b30decefc59f2627406f | [
"MIT"
] | 2 | 2018-09-23T07:09:51.000Z | 2021-12-16T17:58:14.000Z | import numpy as np
import pandas as pd
from sklearn.externals import joblib
#from sklearn.ensemble import RandomForestRegressor
#from sklearn.multioutput import MultiOutputRegressor
#from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split
df = pd.read_csv('https://drive.google.com/uc?export=download&id=1XoV8SfvHmzaxRuDRe81OWSQu10dYTbO5',sep=',')
df_X = df.iloc[:, 2:13].copy()
df_X = pd.get_dummies(df_X)
df_y1 = df.iloc[:, 13:16].copy()
df_y1 = pd.get_dummies(df_y1)
df_y2 = df.iloc[:, 16:20].copy()
df_y2 = pd.get_dummies(df_y2)
#X_train, df_X, y_train, df_y1 = train_test_split(df_X, df_y1, test_size=0.2, random_state=0)
#model = RandomForestRegressor(n_estimators = 1900, max_depth = 70, random_state = 50, learning_rate = 0.1, min_samples_split = 0.1, max_features = 'sqrt', loss = 'lad', warm_start = True, min_samples_leaf = 0.0005)
#model1 = MultipleOutputRegressor(model)
#model2 = MultipleOutputRegressor(model, n_jobs = -1)
#model1.fit(X_train, y_train)
model = joblib.load("RegressorChainGradientBoostingRegressorEarningsNew.pkl")
test(model, df_X, df_y1, 3)
model = joblib.load("RegressorChainGradientBoostingRegressorRepaymentNew.pkl")
test(model, df_X, df_y2, 4)
#model2.fit(X_train, y_train)
#joblib.dump(model2, "MultiplepleOutputRandomForestRegressorEarnings.pkl")
#print("Model 2: ");
#accuracy(model2, df_X, df_y1)
#X_train, df_X, y_train, df_y1 = train_test_split(df_X, df_y2, test_size=0.2, random_state=0)
#model1.fit(X_train, y_train)
#joblib.dump(model1, "MultipleOutputRegressorRandomForestRegressorRepayment.pkl")
#print("Model 3: ");
#accuracy(model1, df_X, df_y1)
#model2.fit(X_train, y_train)
#joblib.dump(model2, "MultiplepleOutputRandomForestRegressorRepayment.pkl")
#print("Model 4: ");
#accuracy(model2, df_X, df_y1)
| 39.731343 | 215 | 0.743426 |
cecbe395ca91f3242d3c1d6e5ad79275b3277dc8 | 6,806 | py | Python | kerasjr/Model.py | OliverMathias/kerasjr | 9fc039cf2fee4d29529707d4644c775121e5d1d7 | [
"MIT"
] | null | null | null | kerasjr/Model.py | OliverMathias/kerasjr | 9fc039cf2fee4d29529707d4644c775121e5d1d7 | [
"MIT"
] | null | null | null | kerasjr/Model.py | OliverMathias/kerasjr | 9fc039cf2fee4d29529707d4644c775121e5d1d7 | [
"MIT"
] | null | null | null | # Python 3
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
| 39.34104 | 168 | 0.620923 |
cece03575811241c5528967937341963df9938c9 | 5,287 | py | Python | generate_revision_sheet.py | geritwagner/revision-sheet-generator | 2cfa2f0e10d980ec979fa8d4bd63106d3a089a98 | [
"MIT"
] | 1 | 2020-05-07T00:18:06.000Z | 2020-05-07T00:18:06.000Z | generate_revision_sheet.py | geritwagner/revision-sheet-generator | 2cfa2f0e10d980ec979fa8d4bd63106d3a089a98 | [
"MIT"
] | null | null | null | generate_revision_sheet.py | geritwagner/revision-sheet-generator | 2cfa2f0e10d980ec979fa8d4bd63106d3a089a98 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import docx
from docx.shared import Cm
import pylatex
from pytablewriter import MarkdownTableWriter
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Revision-sheet generator")
parser.add_argument("--input", default=None, help="path to the review text file")
parser.add_argument("--format", default='w', help="format of the output document , w for word (default) or t for tex or m for markdown")
parser.add_argument("--output", default=None, help="path to the file where to put the results (optional)")
parser.add_argument("--i", default=1, help="start of comment numbering (optional)")
args = parser.parse_args()
filepath = args.input
output_format = args.format
result_path = args.output
if not result_path:
result_path = 'revision_sheet'
starting_item = int(args.i)
assert output_format in ['t', 'w', 'm']
if 'w' == output_format:
if result_path:
if '.doc' != result_path[:-4] or '.docx' != result_path[:-5]:
result_path += '.doc'
if 't' == output_format:
if result_path:
if '.tex' != result_path[:-4]:
result_path += '.tex'
if 'm' == output_format:
if result_path:
if '.md' != result_path[:-4]:
result_path += '.md'
assert filepath[-4:] == '.txt'
assert isinstance(starting_item, int)
if result_path:
assert not os.path.exists(result_path)
lines = load_file(filepath)
if 'w' == output_format:
generate_word_revision_sheet(filepath, result_path, lines, starting_item)
if 't' == output_format:
generate_tex_revision_sheet(filepath, result_path, lines, starting_item)
if 'm' == output_format:
generate_md_revision_sheet(filepath, result_path, lines, starting_item)
| 32.042424 | 141 | 0.614715 |
cecfd2f0eca2d1cff1112ddaf3c1ef7b67c5d1dc | 7,550 | py | Python | trainer_backup.py | dbseorms16/drnxgaze | c7b84189c263456c648829bc399a5edb2ec17bb8 | [
"MIT"
] | 1 | 2021-07-06T02:01:54.000Z | 2021-07-06T02:01:54.000Z | trainer_backup.py | dbseorms16/drnxgaze | c7b84189c263456c648829bc399a5edb2ec17bb8 | [
"MIT"
] | null | null | null | trainer_backup.py | dbseorms16/drnxgaze | c7b84189c263456c648829bc399a5edb2ec17bb8 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import utility
from decimal import Decimal
from tqdm import tqdm
from option import args
from torchvision import transforms
from PIL import Image
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import copy
| 34.474886 | 84 | 0.488477 |
ced2a7bfdf7758ecb38fddf1d9823beee1d21d80 | 1,356 | py | Python | pymongo_test_query.py | rbola/python-mongodb | e0a466a5ddc3e105ff2161521313000a3b828d76 | [
"CNRI-Python"
] | 1 | 2021-07-05T04:10:21.000Z | 2021-07-05T04:10:21.000Z | pymongo_test_query.py | rbola/python-mongodb | e0a466a5ddc3e105ff2161521313000a3b828d76 | [
"CNRI-Python"
] | 2 | 2021-06-14T08:39:16.000Z | 2021-06-14T08:45:12.000Z | pymongo_test_query.py | rbola/python-mongodb | e0a466a5ddc3e105ff2161521313000a3b828d76 | [
"CNRI-Python"
] | 3 | 2021-07-02T20:32:21.000Z | 2021-07-14T17:41:19.000Z | # Get the database using the method we defined in pymongo_test_insert file
from pymongo_test_insert import get_database
dbname = get_database()
# Create a new collection
collection_name = dbname["user_1_items"]
item_details = collection_name.find()
for item in item_details:
# This will give readable output, but KeyError
print(item['item_name'], item['category'])
###---------------------------------------------------###
### Comment the above 'for loop' & 'print statements' ###
### for the next lines of code to work ###
###---------------------------------------------------###
from pandas import DataFrame
# Convert the dictionary objects to dataframe
items_df = DataFrame(item_details)
# View all items
print(items_df)
###--------------------------------------------------------###
### Get items of particular category without and with index###
###--------------------------------------------------------###
item_details = collection_name.find({"category" : "food"})
for item in item_details:
print(item)
# Add more data to understand the need for indexing
import pymongo_test_insert_more_items
# Create index on category, as an example
category_index = collection_name.create_index("category")
# Execute the previous query again to see the documents scanned (refer to the article) | 35.684211 | 86 | 0.605457 |
ced423f7265dba138645d51d245eba93b188c792 | 3,358 | py | Python | django_backend/backend/ajax.py | team23/django_backend | 02a2ef70584f80b9abd17b4e1a94576df5461b37 | [
"BSD-3-Clause"
] | 3 | 2015-09-10T07:10:49.000Z | 2021-03-16T07:17:58.000Z | django_backend/backend/ajax.py | team23/django_backend | 02a2ef70584f80b9abd17b4e1a94576df5461b37 | [
"BSD-3-Clause"
] | 10 | 2015-09-09T13:40:24.000Z | 2021-02-27T09:12:23.000Z | django_backend/backend/ajax.py | team23/django_backend | 02a2ef70584f80b9abd17b4e1a94576df5461b37 | [
"BSD-3-Clause"
] | 5 | 2016-06-12T08:20:38.000Z | 2021-02-27T09:02:30.000Z | import json
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.template import Context
from django.template import RequestContext
from django.template.loader import render_to_string, select_template
from django.utils.encoding import force_unicode
from ..compat import context_flatten
from ..compat import get_template_name
| 34.979167 | 79 | 0.6757 |
ced494334d10930a6ca41715a278fb9e1655660e | 1,801 | py | Python | flight/oled_ssd1306.py | MxToolbox/BalloonLaunch | a31e02ddc11b54d2ab3691e8206dc24af726f585 | [
"MIT"
] | 1 | 2020-04-13T20:18:45.000Z | 2020-04-13T20:18:45.000Z | flight/oled_ssd1306.py | MxToolbox/BalloonLaunch | a31e02ddc11b54d2ab3691e8206dc24af726f585 | [
"MIT"
] | null | null | null | flight/oled_ssd1306.py | MxToolbox/BalloonLaunch | a31e02ddc11b54d2ab3691e8206dc24af726f585 | [
"MIT"
] | null | null | null | """
This demo will fill the screen with white, draw a black box on top
and then print Hello World! in the center of the display
This example is for use on (Linux) computers that are using CPython with
Adafruit Blinka to support CircuitPython libraries. CircuitPython does
not support PIL/pillow (python imaging library)!
"""
import board
import digitalio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
import time
# Define the Reset Pin
oled_reset = digitalio.DigitalInOut(board.D4)
# Change these
# to the right size for your display!
WIDTH = 128
HEIGHT = 64 # Change to 64 if needed
BORDER = 5
# Use for I2C.
i2c = board.I2C()
oled = adafruit_ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c, addr=0x3c, reset=oled_reset)
# Draw Some Text
while True:
# Clear display.
oled.fill(0)
oled.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
image = Image.new('1', (oled.width, oled.height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a white background
#draw.rectangle((0, 0, oled.width, oled.height), outline=255, fill=255)
# Draw a smaller inner rectangle
#draw.rectangle((BORDER, BORDER, oled.width - BORDER - 1, oled.height - BORDER - 1),
# outline=0, fill=0)
# Load default font.
font = ImageFont.load_default()
#font = ImageFont.truetype("arial.ttf", 15)
#font = ImageFont.truetype(font=None, size=10, index=0, encoding='')
text = time.strftime("%H:%M:%S")
(font_width, font_height) = font.getsize(text)
draw.text((oled.width//2 - font_width//2, oled.height//2 - font_height//2),
text, font=font, fill=255)
# Display image
oled.image(image)
oled.show()
time.sleep(1)
| 27.707692 | 88 | 0.68462 |
ced5d7b593a38747bfdd7d0801c855ff58b4b2ad | 1,150 | py | Python | setup.py | billbrod/spatial-frequency-model | c962a50ba1041ab352e3426df026a74e21540c2e | [
"MIT"
] | null | null | null | setup.py | billbrod/spatial-frequency-model | c962a50ba1041ab352e3426df026a74e21540c2e | [
"MIT"
] | null | null | null | setup.py | billbrod/spatial-frequency-model | c962a50ba1041ab352e3426df026a74e21540c2e | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from setuptools import setup, Extension
import importlib
import os
# copied from kymatio's setup.py: https://github.com/kymatio/kymatio/blob/master/setup.py
sfm_version_spec = importlib.util.spec_from_file_location('sfm_version', 'sfm/version.py')
sfm_version_module = importlib.util.module_from_spec(sfm_version_spec)
sfm_version_spec.loader.exec_module(sfm_version_module)
VERSION = sfm_version_module.version
setup(
name='sfm',
version='0.1',
description='Spatial frequency preferences model',
license='MIT',
url='https://github.com/billbrod/spatial-frequency-model',
author='William F. Broderick',
author_email='billbrod@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.7'],
keywords='Visual Information Processing',
packages=['sfm'],
install_requires=['numpy>=1.1',
'torch>=1.1',
'pandas>=0.25'
'scipy>=1.0',
'matplotlib>=3.1',
'pytest',
'seaborn>=0.9.0'],
tests='tests',
)
| 32.857143 | 90 | 0.630435 |
ced62c2a72c4a4e557f2a0fa0eaf322653f67299 | 1,490 | py | Python | tests/core/test_operation/test_inventory.py | pypipet/pypipet | 8c489e4a7992281fbb68b12e2627decf24f2facb | [
"MIT"
] | null | null | null | tests/core/test_operation/test_inventory.py | pypipet/pypipet | 8c489e4a7992281fbb68b12e2627decf24f2facb | [
"MIT"
] | null | null | null | tests/core/test_operation/test_inventory.py | pypipet/pypipet | 8c489e4a7992281fbb68b12e2627decf24f2facb | [
"MIT"
] | 1 | 2021-12-10T22:36:34.000Z | 2021-12-10T22:36:34.000Z | # from pipet.core.sql.query_interface import *
from pypipet.core.operations.inventory import *
import pytest
from pprint import pprint
_supplie_id = 1
# def test_match_upc(session, obj_classes):
# invs = [ {'upc':'48743213', 'supplier_id':1, 'qty':10},
# {'upc':'9348886', 'supplier_id':1, 'qty':10}]
# res = match_variation_sku_by_upc(obj_classes.get('variation'), session, invs)
# assert res is not None
| 40.27027 | 93 | 0.693289 |
ced6d0de53f60c3136d5f4268abc101197257419 | 2,982 | py | Python | reliabpy/models/cost.py | FelipeGiro/ReliabiliPy | 42624a65504a959f66a64ae2ad2ccfb5af5ae9b0 | [
"MIT"
] | null | null | null | reliabpy/models/cost.py | FelipeGiro/ReliabiliPy | 42624a65504a959f66a64ae2ad2ccfb5af5ae9b0 | [
"MIT"
] | 2 | 2021-08-13T15:31:33.000Z | 2021-08-13T15:31:34.000Z | reliabpy/models/cost.py | FelipeGiro/reliabpy | 42624a65504a959f66a64ae2ad2ccfb5af5ae9b0 | [
"MIT"
] | null | null | null | # Costs
# 2019 - Luque, Straub - Risk-based optimal inspection strategies for
# structural systems using dynamic Bayesian networks
# Table 4, case 1
import numpy as np | 36.365854 | 167 | 0.563045 |
ced99530196a9edbf9316f733aa9a4cd0cc8c3bd | 1,515 | py | Python | Code/Examples/Example_22.py | R6500/SLab | d8e4eac7d59dcdb2941ad4b267b59533bd038cab | [
"MIT"
] | 2 | 2018-02-23T18:23:35.000Z | 2018-04-10T11:30:31.000Z | Code/Examples/Example_22.py | R6500/SLab | d8e4eac7d59dcdb2941ad4b267b59533bd038cab | [
"MIT"
] | null | null | null | Code/Examples/Example_22.py | R6500/SLab | d8e4eac7d59dcdb2941ad4b267b59533bd038cab | [
"MIT"
] | null | null | null | '''
SLab Example
Example_22.py
Create several waveforms
Connect DAC 1 to ADC 1
'''
# Locate slab in the parent folder
import sys
sys.path.append('..')
sys.path.append('.')
import slab
# Set prefix to locate calibrations
slab.setFilePrefix("../")
# Open serial communication
slab.connect()
# Set sample time to 100us
slab.setSampleTime(0.0001)
# Set storage requirements
slab.setTransientStorage(200,1)
# (A) Creates and measures a square wave
slab.waveSquare(1.0,2.0,100)
slab.wavePlot()
# (B) Creates and measures a triangle wave
slab.waveTriangle(1.0,2.0,100)
slab.wavePlot()
# (C) Creates and measures a sawtooth wave
slab.waveSawtooth(1.0,2.0,100)
slab.wavePlot()
# (D) Creates and measures a sine wave
slab.waveSine(1.0,2.0,100)
slab.wavePlot()
# (E) Creates and measures a 10% duty pulse wave
slab.wavePulse(1.0,2.0,100,90)
slab.wavePlot()
# (F) Creates and measures a staircase waveform
list = []
for i in range(0,10):
for j in range(0,10):
list.append(1.0+0.1*i)
slab.loadWavetable(list)
slab.wavePlot()
# (G) Creates and measures a cosine wave
slab.waveCosine(1.0,2.0,100)
slab.wavePlot()
# (H) Creates and measures a noise wave
slab.waveNoise(1.5,0.1,100)
t,a1 = slab.wavePlot(1,returnData=True)
print "Std Dev is " + str(slab.std(a1)) + " V"
# (I) Creates and measures a random wave between 1V and 2V
slab.waveRandom(1,2,100)
slab.wavePlot()
# Close serial communication
slab.disconnect()
| 19.675325 | 59 | 0.678548 |
cedab3619291fe6be599d6dbcc26bf805226e6ae | 1,693 | py | Python | src/python_module_setup.py | Deyht/CIANNA | 84c2cd94e91af1114aaed1251e36e1a2669e4c82 | [
"Apache-2.0"
] | 5 | 2020-12-03T14:52:41.000Z | 2022-01-09T14:12:12.000Z | src/python_module_setup.py | Deyht/CIANNA | 84c2cd94e91af1114aaed1251e36e1a2669e4c82 | [
"Apache-2.0"
] | null | null | null | src/python_module_setup.py | Deyht/CIANNA | 84c2cd94e91af1114aaed1251e36e1a2669e4c82 | [
"Apache-2.0"
] | null | null | null | from distutils.core import setup, Extension
import os
#os.environ['USE_CUDA'] = '1'
#os.environ['USE_BLAS'] = '1'
#os.environ['USE_OPENMP'] = '1'
cuda_obj = []
cuda_extra = []
cuda_include = []
cuda_macro = [(None, None)]
blas_obj = []
blas_extra = []
blas_include = []
blas_macro = [(None, None)]
open_mp_extra = []
if(os.environ.get('USE_CUDA') != None):
print("USE_CUDA")
cuda_obj = ['cuda/cuda_main.o', 'cuda/cuda_conv_layer.o', 'cuda/cuda_dense_layer.o', 'cuda/cuda_pool_layer.o', 'cuda/cuda_activ_functions.o']
cuda_include = ['/usr/local/cuda-11.3/include']
cuda_extra = ['-L/usr/local/cuda-11.3/lib64', '-lcudart', '-lcublas']
cuda_macro = [('CUDA','1'), ('CUDA_THREADS_PER_BLOCKS', '256')]
if(os.environ.get('USE_BLAS') != None):
print("USE_BLAS")
blas_obj = ['blas/blas_dense_layer.o', 'blas/blas_conv_layer.o']
blas_include = ['/opt/OpenBLAS/include']
blas_extra = ['-lopenblas', '-L/opt/OpenBLAS/lib']
blas_macro = [('BLAS', '1')]
if(os.environ.get('USE_OPENMP') != None):
print("USE_OPENMP")
open_mp_extra = ['-fopenmp']
#Re-add naiv: 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'
setup(name = 'CIANNA',
version = '0.9',
ext_modules = [Extension('CIANNA', ['python_module.c'],
extra_objects=['conv_layer.o', 'dense_layer.o', 'pool_layer.o', 'activ_functions.o', 'initializers.o', 'vars.o', 'auxil.o', 'naiv/naiv_dense_layer.o', 'naiv/naiv_conv_layer.o', 'naiv/naiv_pool_layer.o'] + cuda_obj + blas_obj,
include_dirs= cuda_include + blas_include,
extra_link_args=['-O3 -std=c99'] + cuda_extra + blas_extra + open_mp_extra,
define_macros=[('MAX_LAYERS_NB', '100'), ('MAX_NETWORKS_NB','10')] + cuda_macro + blas_macro)])
| 36.021277 | 226 | 0.686946 |
cedb99a67763a6bd3a431eeb9d02931f0c220063 | 4,324 | py | Python | potsim/filters.py | nicktimko/pots-sim | 7620c41fab4e7cecf1c0bb99df3fddfdfe208d61 | [
"MIT"
] | null | null | null | potsim/filters.py | nicktimko/pots-sim | 7620c41fab4e7cecf1c0bb99df3fddfdfe208d61 | [
"MIT"
] | null | null | null | potsim/filters.py | nicktimko/pots-sim | 7620c41fab4e7cecf1c0bb99df3fddfdfe208d61 | [
"MIT"
] | 1 | 2020-02-02T14:16:17.000Z | 2020-02-02T14:16:17.000Z | from __future__ import absolute_import, division, print_function
import json
import os.path as op
import six
import numpy as np
import scipy.signal as sig
import scipy.io.wavfile as sciwav
MAXINT16 = 2**15 - 1
FS = 44100
COEFF_DIR = op.join(op.dirname(op.abspath(__file__)), 'coeffs')
POTS_COEFFS = load_coeffs('pots.json')
| 28.077922 | 79 | 0.594357 |
cedc48d01729e8a20c5b2b1eaaa514c17e38fd56 | 5,952 | py | Python | scripts/bootstrap_optimize.py | adrn/thrift-shop | 8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c | [
"MIT"
] | null | null | null | scripts/bootstrap_optimize.py | adrn/thrift-shop | 8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c | [
"MIT"
] | 11 | 2020-09-29T19:18:19.000Z | 2020-11-21T21:26:09.000Z | scripts/bootstrap_optimize.py | adrn/thrift-shop | 8dcd8d7e242ded1263edb4b1fb7c05f04c05b47c | [
"MIT"
] | 1 | 2021-09-07T22:36:06.000Z | 2021-09-07T22:36:06.000Z | # Standard library
import atexit
import os
os.environ["OMP_NUM_THREADS"] = "1"
import sys
import traceback
# Third-party
from astropy.utils import iers
iers.conf.auto_download = False
import astropy.table as at
import numpy as np
# This project
from totoro.config import cache_path
from totoro.data import datasets, elem_names
from totoro.objective import TorusImagingObjective
# Command-line entry point: parse arguments, pick a schwimmbad pool
# implementation, and hand it to main() (defined elsewhere in this file).
# NOTE(review): indentation of the three lines below appears to have been
# mangled by extraction — they likely belong inside the if-block.
if __name__ == '__main__':
from argparse import ArgumentParser
# Define parser object
parser = ArgumentParser()
    parser.add_argument("-o", "--overwrite", dest="overwrite",
                        action="store_true")
    # vq_group = parser.add_mutually_exclusive_group()
    # vq_group.add_argument('-v', '--verbose', action='count', default=0,
    #                       dest='verbosity')
    # vq_group.add_argument('-q', '--quiet', action='count', default=0,
    #                       dest='quietness')
    # --procs and --mpi are mutually exclusive ways to request parallelism.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--procs", dest="n_procs", default=1,
                       type=int, help="Number of processes.")
    group.add_argument("--mpi", dest="mpi", default=False,
                       action="store_true", help="Run with MPI.")
    parsed = parser.parse_args()
    # deal with multiproc:
    # imports are deferred so only the selected backend needs to be installed
    if parsed.mpi:
        from schwimmbad.mpi import MPIPool
        Pool = MPIPool
        kw = dict()
    elif parsed.n_procs > 1:
        from schwimmbad import MultiPool
        Pool = MultiPool
        kw = dict(processes=parsed.n_procs)
    else:
        from schwimmbad import SerialPool
        Pool = SerialPool
        kw = dict()
    Pool = Pool  # NOTE(review): redundant self-assignment, kept as-is
    Pool_kwargs = kw
    # The pool context manager handles worker startup/shutdown around main().
    with Pool(**Pool_kwargs) as pool:
        main(pool=pool, overwrite=parsed.overwrite)
    sys.exit(0)
| 31.162304 | 79 | 0.586022 |
cedc87d4e440dc6b8050ce800298170c9981e927 | 3,209 | py | Python | Solutions/346.py | ruppysuppy/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 70 | 2021-03-18T05:22:40.000Z | 2022-03-30T05:36:50.000Z | Solutions/346.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | null | null | null | Solutions/346.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 30 | 2021-03-18T05:22:43.000Z | 2022-03-17T10:25:18.000Z | """
Problem:
You are given a huge list of airline ticket prices between different cities around the
world on a given day. These are all direct flights. Each element in the list has the
format (source_city, destination, price).
Consider a user who is willing to take up to k connections from their origin city A to
their destination B. Find the cheapest fare possible for this journey and print the
itinerary for that journey.
For example, our traveler wants to go from JFK to LAX with up to 3 connections, and our
input flights are as follows:
[
('JFK', 'ATL', 150),
('ATL', 'SFO', 400),
('ORD', 'LAX', 200),
('LAX', 'DFW', 80),
('JFK', 'HKG', 800),
('ATL', 'ORD', 90),
('JFK', 'LAX', 500),
]
Due to some improbably low flight prices, the cheapest itinerary would be
JFK -> ATL -> ORD -> LAX, costing $440.
"""
from sys import maxsize
from typing import Dict, List, Optional, Tuple
from DataStructures.Graph import GraphDirectedWeighted
from DataStructures.PriorityQueue import MinPriorityQueue
if __name__ == "__main__":
flights = [
("JFK", "ATL", 150),
("ATL", "SFO", 400),
("ORD", "LAX", 200),
("LAX", "DFW", 80),
("JFK", "HKG", 800),
("ATL", "ORD", 90),
("JFK", "LAX", 500),
]
print(generate_path(flights, "JFK", "LAX", 3))
"""
SPECS:
TIME COMPLEXITY: O(e x v x log(v))
SPACE COMPLEXITY: O(v ^ 2)
[even though dijkstra's algorithm runs in O(e x log(v)) to lock maximum k moves, the
compleity increases to O(e x v x log(v))]
"""
| 31.15534 | 87 | 0.631973 |
cedc891e437dc6a7d998e688a8eceada192b23b2 | 2,518 | py | Python | training/pytorch_ddp_nvidia.py | gclouduniverse/dlenv-templates | 27e662c6a5bcea1d828252aa2632bc545d38d082 | [
"MIT"
] | null | null | null | training/pytorch_ddp_nvidia.py | gclouduniverse/dlenv-templates | 27e662c6a5bcea1d828252aa2632bc545d38d082 | [
"MIT"
] | null | null | null | training/pytorch_ddp_nvidia.py | gclouduniverse/dlenv-templates | 27e662c6a5bcea1d828252aa2632bc545d38d082 | [
"MIT"
] | null | null | null | """PyTorch Distributed Data Parallel example from NVIDIA."""
# https://github.com/NVIDIA/DeepLearningExamples
import argparse
import utils
import virtual_machine
if __name__ == "__main__":
main()
| 26.505263 | 102 | 0.685068 |
cedd3f2eb0e40b696aec07eb8b2518152978c2ab | 19,880 | py | Python | Back/ecoreleve_server/modules/stations/station_resource.py | NaturalSolutions/ecoReleve-Data | 535a6165984544902563eca7cb10d07f1686c963 | [
"MIT"
] | 15 | 2015-02-15T18:02:54.000Z | 2021-10-31T00:08:41.000Z | Back/ecoreleve_server/modules/stations/station_resource.py | NaturalSolutions/ecoReleve-Data | 535a6165984544902563eca7cb10d07f1686c963 | [
"MIT"
] | 505 | 2015-03-24T15:16:55.000Z | 2022-03-21T22:17:11.000Z | Back/ecoreleve_server/modules/stations/station_resource.py | NaturalSolutions/ecoReleve-Data | 535a6165984544902563eca7cb10d07f1686c963 | [
"MIT"
] | 31 | 2015-04-09T10:48:31.000Z | 2020-12-08T16:32:30.000Z | import json
import itertools
from datetime import datetime, timedelta
import pandas as pd
from sqlalchemy import select, and_, join
from sqlalchemy.exc import IntegrityError
import copy
from ecoreleve_server.core import RootCore
from ecoreleve_server.core.base_resource import DynamicObjectResource, DynamicObjectCollectionResource
from .station_model import Station, Station_FieldWorker
from ..monitored_sites.monitored_site_model import MonitoredSite, MonitoredSitePosition
from ..users.user_model import User
from ..field_activities import fieldActivity
from ..observations.observation_resource import ObservationsResource
from .station_collection import StationCollection
from ..permissions import context_permissions
from ..sensors.sensor_data import CamTrap
from ...utils.datetime import parse
RootCore.children.append(('stations', StationsResource))
| 36.883117 | 105 | 0.523793 |
cedd6b663a220db3d928b8923e83432fb4962366 | 6,002 | py | Python | t_core/Mutex/HIgnoreRuleNAC0.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | t_core/Mutex/HIgnoreRuleNAC0.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | t_core/Mutex/HIgnoreRuleNAC0.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z |
from core.himesis import Himesis, HimesisPreConditionPatternNAC
import cPickle as pickle
from uuid import UUID
| 45.12782 | 118 | 0.50933 |
0c66ce32672f9f98c60320cbf4c3eb540dfab0e0 | 918 | py | Python | codes/dataloader.py | UltronMHTM/pytorch_learning | 840d71cc499a2e87ba2774880f46c1befd5a1658 | [
"Apache-2.0"
] | null | null | null | codes/dataloader.py | UltronMHTM/pytorch_learning | 840d71cc499a2e87ba2774880f46c1befd5a1658 | [
"Apache-2.0"
] | null | null | null | codes/dataloader.py | UltronMHTM/pytorch_learning | 840d71cc499a2e87ba2774880f46c1befd5a1658 | [
"Apache-2.0"
] | null | null | null | from torch.utils.data import Dataset
from skimage import io
import os
import torch
| 32.785714 | 72 | 0.602397 |
0c68a10dfccbb91dce925152bacac5157188cf26 | 20,478 | py | Python | qunetsim/objects/storage/quantum_storage.py | pritamsinha2304/QuNetSim | 65a7486d532816724b5c98cfdcc0910404bfe0e2 | [
"MIT"
] | 61 | 2020-02-15T00:59:20.000Z | 2022-03-08T10:29:23.000Z | qunetsim/objects/storage/quantum_storage.py | pritamsinha2304/QuNetSim | 65a7486d532816724b5c98cfdcc0910404bfe0e2 | [
"MIT"
] | 50 | 2020-01-28T12:18:50.000Z | 2021-12-16T21:38:19.000Z | qunetsim/objects/storage/quantum_storage.py | pritamsinha2304/QuNetSim | 65a7486d532816724b5c98cfdcc0910404bfe0e2 | [
"MIT"
] | 27 | 2020-01-21T12:59:28.000Z | 2022-02-21T14:23:00.000Z | from qunetsim.backends.rw_lock import RWLock
from qunetsim.objects.logger import Logger
import queue
    def set_storage_limit_with_host(self, new_limit, host_id):
        """
        Set a new storage limit for one specific host.

        Only supported while the storage runs in
        STORAGE_LIMIT_INDIVIDUALLY_PER_HOST mode; any other mode raises.

        Args:
            new_limit (int): The new max amount of qubits for this host.
            host_id (str): The host the limit applies to. Must not be None.

        Raises:
            ValueError: If host_id is None, or if the storage is not in
                STORAGE_LIMIT_INDIVIDUALLY_PER_HOST mode.
        """
        if self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
            if host_id is None:
                raise ValueError(
                    "Host ID must be given in this storage mode")
            else:
                self._storage_limits_per_host[host_id] = new_limit
        else:
            # NOTE(review): this branch is reached for any other storage mode,
            # including modes that do exist (e.g. STORAGE_LIMIT_ALL), so the
            # "does not exist" message is misleading -- confirm intent.
            raise ValueError(
                "Internal Value Error, this storage mode does not exist.")
def reset_storage(self):
"""
Reset the quantum storage.
"""
for host in self._host_dict:
self.reset_qubits_from_host(host)
def release_storage(self):
"""
Releases all qubits in this storage. The storage is not
usable anymore after this function has been called.
"""
self.lock.acquire_write()
for q in self._qubit_dict.values():
for ele in q.values():
ele.release()
# do not release write, storage not usable anymore
def check_qubit_from_host_exists(self, from_host_id, purpose=None):
"""
Check if a qubit from a host exists in this quantum storage.
Args:
from_host_id (str): The host id of the host from which the qubit is from.
purpose (int): Optional, purpose of the qubit which should exist.
Returns:
(bool): True, if such a qubit is in the storage, false if not.
"""
self.lock.acquire_write()
if from_host_id not in self._host_dict:
self.lock.release_write()
return False
for q in self._host_dict[from_host_id]:
if self._check_qubit_in_system(q, from_host_id, purpose):
self.lock.release_write()
return True
self.lock.release_write()
return False
def get_qubit_by_id(self, q_id):
"""
Return the qubit that has the id *q_id*
Args:
q_id (str): The ID of the qubit
Returns:
(Qubit): The qubit with the id *q_id* or None if it does not exist
"""
if q_id in self._qubit_dict:
return list(self._qubit_dict[q_id].values())[0]
return None
    def change_qubit_id(self, from_host_id, new_id, old_id=None):
        """
        Change the ID of a stored qubit. If *old_id* is not given, the first
        stored qubit from *from_host_id* (if any) gets the new id.

        Args:
            from_host_id (str): The ID of the owner.
            new_id (str): The ID to change to.
            old_id (str): The old ID, or None to pick the host's first qubit.

        Returns:
            (str): The old ID of the renamed qubit, or the unchanged *old_id*
                argument (possibly None) if no qubit was renamed.
        """
        new_id = str(new_id)
        self.lock.acquire_write()
        if old_id is not None:
            old_id = str(old_id)
            # Remove the bookkeeping entry, rename, and re-register so the
            # lookup dictionaries stay keyed by the current id.
            qubit, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
                old_id, from_host_id)
            if qubit is not None:
                qubit.id = new_id
                self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
        else:
            # No id given: rename the first qubit stored for this host, if any.
            if from_host_id in self._host_dict and self._host_dict[from_host_id]:
                qubit = self._host_dict[from_host_id][0]
                old_id = qubit.id
                _, purpose = self._pop_qubit_with_id_and_host_from_qubit_dict(
                    old_id, from_host_id)
                qubit.id = new_id
                self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
        self.lock.release_write()
        return old_id
def add_qubit_from_host(self, qubit, purpose, from_host_id):
"""
Adds a qubit which has been received from a host.
Args:
qubit (Qubit): qubit which should be stored.
from_host_id (str): Id of the Host from whom the qubit has
been received.
purpose (str): Purpose of the Qubit, for example EPR or data.
"""
self.lock.acquire_write()
if self._check_qubit_in_system(qubit, from_host_id, purpose=purpose):
self.logger.log("Qubit with id %s, purpose %s and from host %s"
" already in storage" % (qubit.id, purpose, from_host_id))
raise ValueError("Qubit with these parameters already in storage!")
if from_host_id not in self._host_dict:
self._add_new_host(from_host_id)
if not self._increase_qubit_counter(from_host_id):
qubit.release()
self.lock.release_write()
return
self._host_dict[from_host_id].append(qubit)
self._add_qubit_to_qubit_dict(qubit, purpose, from_host_id)
# Check if a Qubit of one of the callbacks has arrived
self._check_all_requests()
self.lock.release_write()
    def get_all_qubits_from_host(self, from_host_id, purpose=None, remove=False):
        """
        Get all qubits received from a specific host id.

        With remove=False the qubits stay in storage; with remove=True they
        are popped via _get_qubit_from_host.

        Args:
            from_host_id (str): The host who the qubits are from.
            purpose (int): The purpose of the qubits.
            remove (bool): Also remove from storage.

        Returns:
            (list): The list of qubits (empty if the host is unknown).
        """
        if from_host_id in self._host_dict:
            out = []
            self.lock.acquire_write()
            flag = False
            for q in self._host_dict[from_host_id]:
                if self._check_qubit_in_system(q, from_host_id, purpose):
                    if not remove:
                        out.append(q)
                    else:
                        flag = True
                # NOTE(review): when remove=True this breaks after the FIRST
                # stored qubit, so `flag` only reflects whether that first
                # qubit matched the purpose -- confirm this is intended.
                if remove:
                    break
            # NOTE(review): the guard is `not flag`, i.e. qubits are popped
            # only when the first qubit did NOT match, and nothing is removed
            # (an empty list is returned) when it DID match. This looks
            # inverted relative to the docstring -- verify against
            # _get_qubit_from_host before relying on remove=True.
            if not flag and remove:
                num_qubits = len(self._host_dict[from_host_id])
                for _ in range(num_qubits):
                    out.append(self._get_qubit_from_host(from_host_id, purpose=purpose))
            self.lock.release_write()
            return out
        return []
def reset_qubits_from_host(self, from_host_id, purpose=None):
"""
Remove all stored qubits from the host *from_host_id*.
Args:
from_host_id (str): The host who the qubits are from
purpose (int):
"""
self.lock.acquire_write()
if from_host_id in self._host_dict:
for q in self._host_dict[from_host_id]:
if self._check_qubit_in_system(q, from_host_id, purpose):
self._get_qubit_from_host(from_host_id, purpose=purpose)
self.lock.release_write()
    def _check_all_requests(self):
        """
        Check whether any pending request can now be fulfilled.

        At most ONE request is fulfilled per call: the qubit is delivered to
        the requester's queue, the request is removed, and the qubit is
        returned. Returns None (implicitly) if no request could be fulfilled.
        """
        for req_id, args in self._pending_request_dict.items():
            # args layout (see _add_request): [Queue, from_host_id, q_id, purpose]
            ret = self._get_qubit_from_host(args[1], args[2], args[3])
            if ret is not None:
                args[0].put(ret)
                # Removing while iterating is safe here only because we
                # return immediately and never advance the iterator again.
                self._remove_request(req_id)
                return ret
def _add_request(self, args):
"""
Adds a new request to the quantum storage. If a new qubit arrives, it
is checked if the request for the qubit is satisfied.
Args:
args (list): [Queue, from_host_id, q_id, purpose]
"""
self._pending_request_dict[self._request_id] = args
self._request_id += 1
self._amount_pending_requests += 1
return self._request_id
def _remove_request(self, req_id):
"""
Removes a pending request from the request dict.
Args:
req_id (int): The id of the request to remove.
"""
if req_id in self._pending_request_dict:
del self._pending_request_dict[req_id]
self._amount_pending_requests -= 1
    def get_qubit_from_host(self, from_host_id, q_id=None, purpose=None, wait=0):
        """
        Returns next qubit which has been received from a host. If the qubit has
        not been received yet, the thread is blocked for a maximum of the wait time,
        till the qubit arrives (the default is 0). If the id is given, the exact qubit
        with the id is returned, or None if it does not exist.
        The qubit is removed from the quantum storage.

        Args:
            from_host_id (str): Host id from who the qubit has been received.
            q_id (str): Optional id, to return the exact qubit with the id.
            purpose (str): Optional, purpose of the qubit.
            wait (int): Default is 0. The maximum blocking time. -1 if blocking forever.

        Returns:
            (Qubit): If such a qubit exists, it returns the qubit. Otherwise, None
                is returned.
        """
        # Block forever if wait is -1
        if wait == -1:
            wait = None
        self.lock.acquire_write()
        ret = self._get_qubit_from_host(from_host_id, q_id, purpose)
        if ret is not None or wait == 0:
            # Found it, or the caller does not want to block.
            self.lock.release_write()
            return ret
        # Nothing stored yet: register a pending request backed by a queue
        # which _check_all_requests() fills once a matching qubit arrives.
        q = queue.Queue()
        args = [q, from_host_id, q_id, purpose]
        req_id = self._add_request(args)
        self.lock.release_write()
        ret = None
        try:
            ret = q.get(timeout=wait)
        except queue.Empty:
            pass
        if ret is None:
            # Timed out: withdraw the pending request again.
            self.lock.acquire_write()
            self._remove_request(req_id)
            self.lock.release_write()
        return ret
def _check_qubit_in_system(self, qubit, from_host_id, purpose=None):
"""
True if qubit with same parameters already in the systems
Args:
qubit (Qubit): The qubit in question
from_host_id (str): The ID of the sending host
purpose (int): Qubit's purpose
Returns:
(bool): If the qubit is in the system.
"""
if qubit.id in self._qubit_dict and \
from_host_id in self._qubit_dict[qubit.id]:
if purpose is None or (purpose == self._purpose_dict[qubit.id][from_host_id]):
return True
return False
def _check_memory_limits(self, host_id):
"""
Checks if another qubit can be added to the storage.
Args:
host_id (str): The host_id the qubit should be added to.
Returns:
True if no storage limit has been reached, False if a memory
limit has occurred.
"""
if self._storage_mode == QuantumStorage.STORAGE_LIMIT_ALL:
if self._storage_limit == -1:
return True
if self._storage_limit <= self._amount_qubit_stored:
return False
else:
return True
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_PER_HOST:
if self._storage_limit == -1:
return True
if self._storage_limit <= self._amount_qubits_stored_per_host[host_id]:
return False
else:
return True
elif self._storage_mode == QuantumStorage.STORAGE_LIMIT_INDIVIDUALLY_PER_HOST:
if self._storage_limits_per_host[host_id] == -1:
return True
if self._storage_limits_per_host[host_id] <= self._amount_qubits_stored_per_host[host_id]:
return False
else:
return True
else:
raise ValueError(
"Internal Value Error, this storage mode does not exist.")
def _increase_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be increased, because of memory limits,
and increases the counter.
Args:
host_id (str): From who the qubit comes from.
Returns:
True, if the counter could be increased, False if not.
"""
if not self._check_memory_limits(host_id):
return False
self._amount_qubits_stored_per_host[host_id] += 1
self._amount_qubit_stored += 1
return True
def _reset_qubit_counter(self, host_id):
"""
Args:
host_id (str):
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
num_qubits = self._amount_qubits_stored_per_host[host_id]
self._amount_qubits_stored_per_host[host_id] = 0
self._amount_qubit_stored -= num_qubits
def _decrease_qubit_counter(self, host_id):
"""
Checks if the qubit counter can be decreased
and decreases the counter.
Args:
host_id (str): From who the qubit comes from.
Returns:
(bool): True, if the counter could be decreased, False if not.
"""
if self._amount_qubits_stored_per_host[host_id] <= 0 or \
self._amount_qubit_stored <= 0:
return False
self._amount_qubits_stored_per_host[host_id] -= 1
self._amount_qubit_stored -= 1
| 37.505495 | 102 | 0.592587 |
0c6a40a4bea2c4e73231cb976a84217ada08384c | 2,098 | py | Python | tests/test_utils.py | Guillerbr/python-pagseguro | 279eacf251e99a2f15d665f8193fcad0be6ea0bf | [
"MIT"
] | 115 | 2015-02-19T22:17:44.000Z | 2019-07-24T17:31:30.000Z | tests/test_utils.py | rubens8848/python-pagseguro | 08a8aa7f934b16d00948ead17a0e470a88f2479f | [
"MIT"
] | 49 | 2015-03-04T00:53:31.000Z | 2019-07-13T16:41:22.000Z | tests/test_utils.py | rubens8848/python-pagseguro | 08a8aa7f934b16d00948ead17a0e470a88f2479f | [
"MIT"
] | 53 | 2015-01-12T22:13:33.000Z | 2019-07-20T01:52:48.000Z | # -*- coding: utf-8 -*-
import datetime
from pagseguro.utils import (is_valid_cpf, is_valid_cnpj, is_valid_email,
parse_date)
from pagseguro.exceptions import PagSeguroValidationError
import pytest
from dateutil.tz import tzutc
| 26.897436 | 78 | 0.674452 |
0c6b6942a6015b98ac3474b28a14d6d14c8b2df9 | 121 | py | Python | G53IDS/run.py | jayBana/InventoryMan | 0826f9c98062fb6600f77a721311cbf27719e528 | [
"Apache-2.0"
] | null | null | null | G53IDS/run.py | jayBana/InventoryMan | 0826f9c98062fb6600f77a721311cbf27719e528 | [
"Apache-2.0"
] | null | null | null | G53IDS/run.py | jayBana/InventoryMan | 0826f9c98062fb6600f77a721311cbf27719e528 | [
"Apache-2.0"
] | null | null | null | from server.controller.app import app as webapp
# import Flask app so that it can be launched with gunicorn
app = webapp
| 30.25 | 59 | 0.793388 |
0c6c60baa3e34ba265cfea8fd4ef73ba5f9cccb2 | 383 | py | Python | tests/perf/test-prop-write.py | wenq1/duktape | 5ed3eee19b291f3b3de0b212cc62c0aba0ab4ecb | [
"MIT"
] | 4,268 | 2015-01-01T17:33:40.000Z | 2022-03-31T17:53:31.000Z | tests/perf/test-prop-write.py | KiraanRK/esp32-duktape | 1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa | [
"MIT"
] | 1,667 | 2015-01-01T22:43:03.000Z | 2022-02-23T22:27:19.000Z | tests/perf/test-prop-write.py | KiraanRK/esp32-duktape | 1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa | [
"MIT"
] | 565 | 2015-01-08T14:15:28.000Z | 2022-03-31T16:29:31.000Z |
test()
| 21.277778 | 68 | 0.373368 |
0c6c952788acdca410a606d2447a82cf6396e05a | 3,563 | py | Python | clickhouse_driver/numpy/result.py | fasttrack-solutions/clickhouse-driver | 676dfb09f74b8b55bfecaedbe70ddc971e1badd7 | [
"MIT"
] | 823 | 2017-05-16T15:30:15.000Z | 2022-03-31T08:39:04.000Z | clickhouse_driver/numpy/result.py | fasttrack-solutions/clickhouse-driver | 676dfb09f74b8b55bfecaedbe70ddc971e1badd7 | [
"MIT"
] | 277 | 2017-07-11T11:35:34.000Z | 2022-03-08T06:52:09.000Z | clickhouse_driver/numpy/result.py | fasttrack-solutions/clickhouse-driver | 676dfb09f74b8b55bfecaedbe70ddc971e1badd7 | [
"MIT"
] | 175 | 2017-10-11T08:41:12.000Z | 2022-03-22T03:59:35.000Z | from itertools import chain
import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals
from ..progress import Progress
from ..result import QueryResult
| 28.733871 | 78 | 0.596969 |
0c6dab3b29d248c78a200aec1e3449a5aeb04604 | 33,383 | py | Python | sandbox/riskModelsResultsEval.py | danbirks/PredictCode | b4d7010d13706c771ba57437e9c7589e5c94329b | [
"Artistic-2.0"
] | null | null | null | sandbox/riskModelsResultsEval.py | danbirks/PredictCode | b4d7010d13706c771ba57437e9c7589e5c94329b | [
"Artistic-2.0"
] | null | null | null | sandbox/riskModelsResultsEval.py | danbirks/PredictCode | b4d7010d13706c771ba57437e9c7589e5c94329b | [
"Artistic-2.0"
] | 2 | 2020-01-28T23:02:54.000Z | 2020-02-03T16:04:38.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed May 15 10:22:47 2019
@author: lawdfo
Purpose:
Read in the csv results file generated by (e.g.) riskModelsParamSweep.py
and report back some useful statistics.
"""
# Some fairly standard modules
import os, csv, lzma
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import descartes
from itertools import product
from collections import Counter, defaultdict
import datetime
import csv
import random
import time
from copy import deepcopy
import statistics
# The geopandas module does not come standard with anaconda,
# so you'll need to run the anaconda prompt as an administrator
# and install it via "conda install -c conda-forge geopandas".
# That installation will include pyproj and shapely automatically.
# These are useful modules for plotting geospatial data.
import geopandas as gpd
import pyproj
import shapely.geometry
# These modules are useful for tracking where modules are
# imported from, e.g., to check we're using our local edited
# versions of open_cp scripts.
import sys
import inspect
import importlib
# In order to use our local edited versions of open_cp
# scripts, we insert the parent directory of the current
# file ("..") at the start of our sys.path here.
sys.path.insert(0, os.path.abspath(".."))
# Elements from PredictCode's custom "open_cp" package
import open_cp
"""
import open_cp.geometry
import open_cp.plot
import open_cp.sources.chicago as chicago
import open_cp.retrohotspot as retro
import open_cp.prohotspot as phs
import open_cp.knox
"""
# Load custom functions that make dealing with datetime and timedelta easier
from crimeRiskTimeTools import generateDateRange, \
generateLaterDate, \
generateEarlierDate, \
getTimedPointsInTimeRange, \
getSixDigitDate, \
_day
"""
Expected data format of input CSV file, by column:
Header name Type Typical contents
dataset str Chicago
event_types str BURGLARY
cell_width int 100
eval_date np.datetime64 2016-03-01
train_len str 8W
test_len str 1D
coverage_rate float 0.01/0.02/0.05/0.1
test_events int 3/2/5/etc
hit_count int 1/2/0/etc
hit_pct float 0.33333 etc
model str naivecount/phs/etc
rand_seed int
rhs_bandwidth int
phs_time_unit str 1 weeks
phs_time_band str 4 weeks
phs_dist_unit int 100
phs_dist_band int 400
phs_weight str linear
"""
# Per-column Python types used to cast each field of a results-CSV row.
# The order mirrors the column layout documented in the docstring above
# (dataset, event_types, cell_width, eval_date, train_len, test_len,
#  coverage_rate, test_events, hit_count, hit_pct, model, rand_seed,
#  rhs_bandwidth, phs_time_unit, phs_time_band, phs_dist_unit,
#  phs_dist_band, phs_weight).
csv_data_types = [str, \
                  str, \
                  int, \
                  np.datetime64, \
                  str, \
                  str, \
                  float, \
                  int, \
                  int, \
                  float, \
                  str, \
                  int, \
                  int, \
                  str, \
                  str, \
                  int, \
                  int, \
                  str]
"""
Each element of output should have this info:
earliest test date of range
time band
dist band
avg hit rate
"""
"""
Each element of output should have this info:
coverage
earliest test date of range
time band
dist band
avg hit rate
"""
# datalist = list of results for PHS
# timespan = how frequently to check scores. Do we look at the top n models
# from each day, or averaged over each month, etc
# topnum = how many of the top models we consider successful. Top 10? Top 1?
"""
getPhsHitRates
Input: "datalist" = list where each entry is a dictionary containing the
information from a line of the csv results file (casted
as the appropriate data type) as well as "param_pair"
which is a tuple of the time and dist bandwidths.
Note: Ideally this datalist is a subset of the full csv data, so that
hit rates ar calculated over smaller timespans, e.g. monthly
Output: "info_by_band_pair" = dict that maps bandwidth pairs ("bp") to:
"bands": same as key; can be useful if just grabbing values
"num_tests": Number of experiments/tests/evaluations performed.
All bp's within a datalist fed into this function should end
up with the same number of tests -- I can't think of a reason
why this wouldn't happen. However, note that this number MAY
change across multiple runs of this function with different
data subsets. For example, maybe you calculate over every
month, but months have different numbers of days.
"total_events": Total number of events (i.e. crimes) in the data.
This is calculated by adding the number for the first time
each date is witnessed. So again, it's important that all bp's
are tested on all the same days.
"total_hits": Total number of hits achieved by the bp's model.
"total_rates": Sum of all daily(?) hit rates. This number is
essentially useless on its own, but used for calculating avg.
"avg_hit_rate": Average of all daily hit rates, calculated as
total_rates/num_tests
("overall_hit_rate"): A different average hit rate, being the total
number of hits divided by the total number of events. This
was removed from use (commented out) once we decided this
metric was less useful than avg_hit_rate, since this could be
swayed by a generally poor model that rarely performs extremely
well.
"""
# Note: 0 hits for 0 events gets counted as a hit rate of 0.
# Perhaps it should be discarded instead?
# But then what if the entire span has 0 events?
"""
getDataByCovRate
Given a path to csv results from running risk models,
return a dictionary where keys are coverage rates and
values are the rows of info with that coverage from the csv.
"""
"""
Copied snippets from riskModelsCompare
Still working out this section...
"""
def graphCoverageVsHitRate(hit_rates_dict, model_runs_list, model_names):
    """
    Plot one hit-rate curve per model run, with coverage on the x axis.

    Assumes (TODO confirm against callers):
      - hit_rates_dict maps model name -> list of hit-rate sequences, one
        sequence per run, all of the same length;
      - model_runs_list maps model name -> list of run labels, aligned with
        hit_rates_dict[model_name];
      - the x axis is the coverage fraction, spread evenly over [0, 1].

    Debug notes left from development:
    print(len(hit_rates_dict))
    for m in hit_rates_dict:
        print(m)
        print(len(hit_rates_dict[m]))
        print(len(hit_rates_dict[m][0]))
    print(len(model_runs_list))
    print(model_runs_list)
    """
    # Pair each run label with its hit-rate sequence (currently only used
    # for the size printout below).
    model_hit_rate_pairs = []
    for mn in model_names:
        model_hit_rate_pairs += list(zip(model_runs_list[mn], hit_rates_dict[mn]))
        #hit_rates_flat += hit_rates_dict[mn]
        #model_runs_flat += model_runs_list[mn]
    #print(len(hit_rates_flat))
    #print(len(model_runs_flat))
    print(len(model_hit_rate_pairs))
    ### DECLARE FIGURE FOR HITRATE/COVERAGE
    # !!! I should add an option for the x-axis of the figure!!!
    #results_count_offset = .025
    #results_rate_offset = .005
    #results_count_offset = 0
    #results_rate_offset = 0
    # new version
    # Declare figure
    print("Declaring figure for graphCoverageVsHitRate...")
    fig, ax = plt.subplots(figsize=(12,6))
    names_for_legend = []
    # Number of coverage steps, taken from the first run of the first model.
    x_axis_size = len(hit_rates_dict[model_names[0]][0])
    x_axis_values = np.linspace(0,1,x_axis_size)
    print(x_axis_size)
    # One curve per run; legend entries collected in the same order.
    for mn in model_names:
        for hr in hit_rates_dict[mn]:
            ax.plot(x_axis_values, hr)
        for mr in model_runs_list[mn]:
            names_for_legend.append(mr)
    ax.legend(names_for_legend)
    return
"""
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# one of the orig sections from riskModelsCompare
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = model_result[0][coverage_cell_index]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_count_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
# Declare figure
print("Declaring figure...")
fig, ax = plt.subplots(figsize=(12,6))
names_for_legend = []
#xcoords = test_data_dates
coverage_rate = 0.10
coverage_cell_index = int(num_cells_region * coverage_rate)-1
print("reg {}".format(num_cells_region))
print("cov {}".format(coverage_rate))
print("cci {}".format(coverage_cell_index))
result_matrix = np.zeros((len(all_exp_results[0]), len(all_exp_results)))
for exp_num, exp in enumerate(all_exp_results):
if test_data_counts[exp_num] == 0:
continue
for model_num, model_result in enumerate(exp):
result_matrix[model_num, exp_num] = \
model_result[0][coverage_cell_index]/test_data_counts[exp_num]
for row_num, row in enumerate(result_matrix):
ax.plot(test_data_dates, row + (results_rate_offset * row_num) )
names_for_legend.append(all_exp_results[0][row_num][1])
#ax.legend(names_for_legend)
ax.tick_params(axis='x', rotation=90)
"""
# Script entry point: run the full results-evaluation pipeline.
if __name__ == "__main__":
    main()
| 33.550754 | 130 | 0.623371 |
0c6e32f7e7283b6370a0de49f39a51f43f1b82bb | 1,280 | py | Python | HIV model/others.py | omisolaidowu/HIV-story-telling | 290fbb9549ff0177fb2224553575aa24813fdc6a | [
"Apache-2.0"
] | null | null | null | HIV model/others.py | omisolaidowu/HIV-story-telling | 290fbb9549ff0177fb2224553575aa24813fdc6a | [
"Apache-2.0"
] | null | null | null | HIV model/others.py | omisolaidowu/HIV-story-telling | 290fbb9549ff0177fb2224553575aa24813fdc6a | [
"Apache-2.0"
] | null | null | null | '''
-*- coding: utf-8 -*-
Created on Fri Jan 17 12:34:15 2020
@author: Paul
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
# Load the HIV dataset from a hard-coded local Excel file and inspect it.
df=pd.read_excel(r'C:\Users\Paul\Desktop\Python projects\HIV_3.xlsx')
print(df)
print(df.isnull().sum())
# Set up the figure and the x-axis labels (one label per year of data).
ax=plt.figure(figsize=(8, 8))
years=['2000','2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018']
x=years
print(x)
line=plt.plot
# NOTE(review): df[''] selects a column with an EMPTY name -- the intended
# column label (HIV prevalence among youths, per the legend below) appears
# to have been lost. This raises KeyError unless the sheet really has an
# unnamed column; verify against the spreadsheet.
y=df['']
z=df['Number_of_neonatal_deaths']
plt.xlabel('Changes over the years')
plt.ylabel('Occurence by population')
plt.xticks(rotation=90)
# Plot both series on one axis: y in red, z in cyan.
line(x,y, 'r', x, z, 'cyan' )
import matplotlib.patches as mpatches
# Manual legend patches matching the line colors above.
red_patch = mpatches.Patch(color='red', label='New HIV prevalence among youths')
cyan_patch = mpatches.Patch(color='cyan', label='Neonatal deaths')
#blue_patch = mpatches.Patch(color='yellow', label='sIgM+IgG positive')
#orange_patch = mpatches.Patch(color='orange', label='site4')
#brown_patch = mpatches.Patch(color='brown', label='site5')
#black_patch = mpatches.Patch(color='black', label='site6')
plt.legend(handles=[red_patch, cyan_patch], loc=(0, 1))
plt.show()