hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9aa0a86fc034faf07525b543313701f15dfaa4e4 | 4,526 | py | Python | datasets/datasets.py | rioyokotalab/ecl-isvr | ae274b1b81b1d1c10db008140c477f5893a0c1c3 | [
"BSD-4-Clause-UC"
] | null | null | null | datasets/datasets.py | rioyokotalab/ecl-isvr | ae274b1b81b1d1c10db008140c477f5893a0c1c3 | [
"BSD-4-Clause-UC"
] | null | null | null | datasets/datasets.py | rioyokotalab/ecl-isvr | ae274b1b81b1d1c10db008140c477f5893a0c1c3 | [
"BSD-4-Clause-UC"
] | 2 | 2021-09-30T02:13:40.000Z | 2021-12-14T07:33:28.000Z | #! -*- coding:utf-8
from typing import Callable, List, Optional
import numpy as np
import torch
import torchvision
__all__ = ["CIFAR10", "FashionMNIST"]
| 36.208 | 87 | 0.527176 |
9aa249f279f7113e5bf54c4bf46eea1716af9bd2 | 1,819 | py | Python | API/Segmentation_API/detectron_seg.py | rogo96/Background-removal | e301d288b73074940356fa4fe9c11f11885dc506 | [
"MIT"
] | 40 | 2020-09-16T02:22:30.000Z | 2021-12-22T11:30:49.000Z | API/Segmentation_API/detectron_seg.py | ganjbakhshali/Background-removal | 39691c0044b824e8beab13e44f2c269e309aec72 | [
"MIT"
] | 6 | 2020-09-18T02:59:11.000Z | 2021-09-06T15:44:33.000Z | API/Segmentation_API/detectron_seg.py | ganjbakhshali/Background-removal | 39691c0044b824e8beab13e44f2c269e309aec72 | [
"MIT"
] | 14 | 2020-11-06T09:26:25.000Z | 2021-10-20T08:00:48.000Z | from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
| 31.912281 | 124 | 0.6663 |
9aa39e5e7763187b713ab547d0e364010f1b3d6f | 106 | py | Python | examples/plugin_example/setup.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 423 | 2021-03-22T08:45:12.000Z | 2022-03-31T21:05:53.000Z | examples/plugin_example/setup.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 1 | 2022-02-23T08:53:24.000Z | 2022-03-23T14:11:54.000Z | examples/plugin_example/setup.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 9 | 2021-03-26T14:20:07.000Z | 2022-03-24T13:17:06.000Z | from setuptools import setup
# Packaging stub for the plugin example.  The package ships no modules
# (packages=[]); presumably all real configuration lives in setup.cfg or the
# example only needs an installable name — TODO confirm against the project.
setup(
    name="example-advanced-package", version="0.0.0", packages=[],
)
| 17.666667 | 66 | 0.698113 |
9aa3bdf68ace18fc9d168671cbe55ba44bdbac29 | 416 | py | Python | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | 10 | 2017-02-05T12:15:19.000Z | 2020-05-20T14:33:04.000Z | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | null | null | null | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | 3 | 2017-04-02T13:00:28.000Z | 2020-06-13T23:34:37.000Z | from distutils.core import setup
# distutils-based packaging metadata for pyASA, a wrapper around the
# Cisco ASA REST API.  classifiers is intentionally left empty.
setup(
    name='pyASA',
    packages=['pyASA'],
    version='0.1.0',
    description='Wrapper for the Cisco ASA REST API',
    author='xpac',
    author_email='bjoern@areafunky.net',
    url='https://github.com/xpac1985/pyASA',
    download_url='https://github.com/xpac1985/pyASA/tarball/0.1.0',
    keywords=['cisco', 'asa', 'rest-api', 'wrapper', 'alpha'],
    classifiers=[],
)
| 27.733333 | 67 | 0.646635 |
9aa3ca73beed1f30ce5fdf99995b03ee7f17a719 | 2,441 | py | Python | Client.py | fimmartins/qpid_protobuf_python | b1411088e74b48347aeeaecdf84bbf9c7c9f7662 | [
"Apache-2.0"
] | 1 | 2015-12-15T19:21:26.000Z | 2015-12-15T19:21:26.000Z | Client.py | fimmartins/qpid_protobuf_python | b1411088e74b48347aeeaecdf84bbf9c7c9f7662 | [
"Apache-2.0"
] | null | null | null | Client.py | fimmartins/qpid_protobuf_python | b1411088e74b48347aeeaecdf84bbf9c7c9f7662 | [
"Apache-2.0"
] | null | null | null | from Qpid import QpidConnection
from mxt1xx_pb2 import *
from commands_pb2 import *
from QpidTypes import *
from qpid.messaging import *
#doc http://qpid.apache.org/releases/qpid-0.14/apis/python/html/
#examples https://developers.google.com/protocol-buffers/docs/pythontutorial
qpidCon = QpidConnection('192.168.0.78', '5672', 'fila_dados_ext', 'mxt_command_qpid')
while not(qpidCon.start()):
print('Trying to reconnect')
response_received = True;
while(1):
message = qpidCon.receiver.fetch()
subject = message.subject
print (message.subject + ' received')
if subject == QpidSubjectType.qpid_st_pb_mxt1xx_pos:
pos = mxt1xx_u_position()
pos.ParseFromString(message.content)
print (str(pos.firmware.protocol) + ':' + str(pos.firmware.serial) + ':' + str(pos.firmware.memory_index))
qpidCon.session.acknowledge()
if response_received:
response_received = mxt1xx_output_control(pos.hardware_monitor.outputs.output_1, pos, qpidCon);
if subject == QpidSubjectType.qpid_st_pb_command_response:
res = u_command_response()
res.ParseFromString(message.content)
if res.status == 5:
print('Command response: Success')
response_received = True
else:
print('Command response: ' + str(res.status))
else:
qpidCon.session.acknowledge() | 31.294872 | 114 | 0.679639 |
9aa4eade5a06a5cb47e49505af09bdb59f7f1c8a | 1,574 | py | Python | run_all.py | EinariTuukkanen/line-search-comparison | 7daa38779017f26828caa31a53675c8223e6ab8e | [
"MIT"
] | null | null | null | run_all.py | EinariTuukkanen/line-search-comparison | 7daa38779017f26828caa31a53675c8223e6ab8e | [
"MIT"
] | null | null | null | run_all.py | EinariTuukkanen/line-search-comparison | 7daa38779017f26828caa31a53675c8223e6ab8e | [
"MIT"
] | null | null | null | import numpy as np
from example_functions import target_function_dict
from line_search_methods import line_search_dict
from main_methods import main_method_dict
from config import best_params
from helpers import generate_x0
# Silence numeric RuntimeWarnings (overflow/invalid) raised while optimizing.
np.warnings.filterwarnings('ignore', category=RuntimeWarning)

# Run every (target function, main method, line search) combination that has
# tuned parameters recorded in `best_params`, printing each run's status.
for theta in best_params:
    for main_method in best_params[theta]:
        for line_search in best_params[theta][main_method]:
            # NOTE(review): `run_one` is not defined in this excerpt —
            # presumably defined elsewhere in the project; confirm before use.
            result = run_one(
                target_function_dict[theta],
                main_method_dict[main_method],
                line_search_dict[line_search],
                best_params[theta][main_method][line_search]['params'],
                best_params[theta][main_method][line_search]['ls_params'],
            )
            # `result` is assumed to be a mapping with a 'status' entry — verify.
            status = result['status']
            print(f"{status}: {theta},{main_method},{line_search}")
| 34.217391 | 74 | 0.670902 |
9aa4fd6241fe5ed3a825608b2a7990cea4c0d1af | 5,299 | py | Python | bin/runner.py | ColorOfLight/ML-term-project | 047b22fcdd8df7a18abd224ccbf23ae5d981fc97 | [
"MIT"
] | null | null | null | bin/runner.py | ColorOfLight/ML-term-project | 047b22fcdd8df7a18abd224ccbf23ae5d981fc97 | [
"MIT"
] | null | null | null | bin/runner.py | ColorOfLight/ML-term-project | 047b22fcdd8df7a18abd224ccbf23ae5d981fc97 | [
"MIT"
] | null | null | null | # Load Packages
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.metrics import classification_report
from plots import draw_corr_heatmap
import seaborn as sns
import xgboost as xgb
import pickle
from logger import Logger
import os
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import AdaBoostRegressor
from ensemble import Ensemble
from sklearn.impute import SimpleImputer
from ilbeom_lg_v2 import Ilbeom_Linear
from sklearn.model_selection import StratifiedKFold
os.environ["JOBLIB_TEMP_FOLDER"] = "/tmp"
# Varaibles
train_rate = .8
# The model will saved in ../models/{model_name}.dat
model_name = 'ensemble-test1'
np.random.seed(0)
names = ['contract date', 'latitude', 'longtitude', 'altitude', '1st region id', '2nd region id', 'road id',
'apartment_id', 'floor', 'angle', 'area', 'parking lot limit', 'parking lot area', 'parking lot external',
'management fee', 'households', 'age of residents', 'builder id', 'completion date', 'built year',
'schools', 'bus stations', 'subway stations', 'price']
non_numeric_names = ['contract date', 'completion date']
tuned_parameters = {
'n_estimators': [100, 200, 400],
'learning_rate': [0.02, 0.04, 0.08, 0.1, 0.4],
'gamma': [0, 1, 2],
'subsample': [0.5, 0.66, 0.75],
'colsample_bytree': [0.6, 0.8, 1],
'max_depth': [6, 7, 8]
# 'learning_rate': [0.02],
# 'gamma': [0],
# 'subsample': [0.5],
# 'colsample_bytree': [0.6],
# 'max_depth': [6]
}
# Main
logger = Logger('final')
data = pd.read_csv('../data/data_train.csv',
names=names)
# Fill NaN
data = fill_missing_values(data)
y = data['price']
X = data.drop(columns=['price'])
# X_names = list(X)
# model_n = xgb.XGBRegressor(n_estimators=200, learning_rate=0.02, gamma=0, subsample=0.75,
# colsample_bytree=1, max_depth=6)
model_n = ElasticNet(l1_ratio=0.95, alpha=0.15, max_iter=50000)
model_u = get_unique_model()
# Test each model
# test_cv(model_n, preprocess(X), y)
# test_cv(model_u, X, y)
# Write Answer Sheet
# write answers
model_n.fit(preprocess(X), y)
model_u.fit(X, y)
write_answers(model_n, model_u)
| 32.115152 | 115 | 0.684846 |
9aa693424bf8bc328cb722f9e8651b7867acfe8a | 1,346 | py | Python | api/app.py | t-kigi/nuxt-chalice-aws-app-template | d413752004976911938d2fc26aa864ddae91a34f | [
"MIT"
] | null | null | null | api/app.py | t-kigi/nuxt-chalice-aws-app-template | d413752004976911938d2fc26aa864ddae91a34f | [
"MIT"
] | null | null | null | api/app.py | t-kigi/nuxt-chalice-aws-app-template | d413752004976911938d2fc26aa864ddae91a34f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
nuxt-chalice-api
"""
import os
from chalice import (
Chalice, CognitoUserPoolAuthorizer,
CORSConfig
)
from chalicelib import aws
from chalicelib.env import store
# Register the deployment stage in the config store (defaults to 'local').
# NOTE(review): the key 'chalilce.stage' looks like a typo for 'chalice.stage',
# but it is a runtime key — changing it would break lookups elsewhere; confirm.
stage = store.mutation(
    'chalilce.stage', os.environ.get('STAGE', 'local'))
appname = os.environ.get('APPNAME', 'nuxt-chalice-api')
# The Chalice application object, also published through the store.
app = store.mutation(
    'chalice.app', Chalice(app_name=appname))

# Load the per-stage YAML config, e.g. chalicelib/env/local.yaml.
# (Assumes store.mutation returns the stored value — it is used as `stage` here.)
project_dir = os.path.dirname(__file__)
conffile = os.path.join(
    project_dir, 'chalicelib', 'env', f'{stage}.yaml')
store.load_config(conffile)

# Cognito user-pool authorizer used by protected routes.
authorizer = store.mutation(
    'chalice.authorizer',
    CognitoUserPoolAuthorizer(
        'MyUserPool', provider_arns=[store.conf('UserPoolARN')])
)

# local Origin CORS: only the local dev frontend needs explicit CORS headers.
if store.is_local():
    cors = CORSConfig(
        allow_origin=store.conf('FrontUrl'),
        allow_headers=['CognitoAccessToken'],
        allow_credentials=True
    )
else:
    cors = None
store.mutation('chalice.cors', cors)

# AWS boto3 client: a shared session plus a cognito-idp client in the store.
store.mutation(
    'aws.session',
    aws.create_session(store.conf('Profile'), store.conf('Region')))
store.mutation(
    'aws.cognito-idp', store.get('aws.session').client('cognito-idp'))
#
| 22.433333 | 70 | 0.704309 |
9aa815cea217ed0284d392142fbc2dadb16b41d8 | 2,186 | py | Python | examples/plotting/plot_with_matplotlib.py | crzdg/acconeer-python-exploration | 26c16a3164199c58fe2940fe7050664d0d0e1161 | [
"BSD-3-Clause-Clear"
] | null | null | null | examples/plotting/plot_with_matplotlib.py | crzdg/acconeer-python-exploration | 26c16a3164199c58fe2940fe7050664d0d0e1161 | [
"BSD-3-Clause-Clear"
] | null | null | null | examples/plotting/plot_with_matplotlib.py | crzdg/acconeer-python-exploration | 26c16a3164199c58fe2940fe7050664d0d0e1161 | [
"BSD-3-Clause-Clear"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from acconeer.exptool import configs, utils
from acconeer.exptool.clients import SocketClient, SPIClient, UARTClient
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt — presumably stripped
    # from this copy; confirm it exists in the full source before running.
    main()
| 26.987654 | 77 | 0.682068 |
9aa888a27862f3097e55339b5958acdbaec12723 | 437 | py | Python | kryptobot/bots/multi_bot.py | eristoddle/Kryptobot | d0c3050a1c924125810946530670c19b2de72d3f | [
"Apache-2.0"
] | 24 | 2018-05-29T13:44:36.000Z | 2022-03-12T20:41:45.000Z | kryptobot/bots/multi_bot.py | eristoddle/Kryptobot | d0c3050a1c924125810946530670c19b2de72d3f | [
"Apache-2.0"
] | 23 | 2018-07-08T02:31:18.000Z | 2020-06-02T04:07:49.000Z | kryptobot/bots/multi_bot.py | eristoddle/Kryptobot | d0c3050a1c924125810946530670c19b2de72d3f | [
"Apache-2.0"
] | 14 | 2018-08-10T15:44:27.000Z | 2021-06-14T07:14:52.000Z | from .bot import Bot
| 24.277778 | 54 | 0.606407 |
9aa8e28e915cdb48539530ca48ffdc1fa280bc82 | 140 | py | Python | setup.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 27 | 2018-06-04T19:11:42.000Z | 2022-02-23T22:46:39.000Z | setup.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 7 | 2018-06-09T15:27:51.000Z | 2021-03-11T20:00:35.000Z | setup.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 3 | 2018-07-29T10:20:02.000Z | 2021-11-18T19:55:07.000Z | #!/usr/bin/env python
"""Setup file for the ``mixt`` module. Configuration is in ``setup.cfg``."""
from setuptools import setup
# All packaging metadata lives in setup.cfg (see the docstring above), so
# setup() is called with no arguments here.
setup()
| 15.555556 | 76 | 0.678571 |
9aa95eb6fe52df130917d5af87f7b5c65c75b243 | 691 | py | Python | app/accounts/views/user_type.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-04T21:18:31.000Z | 2020-02-04T21:18:31.000Z | app/accounts/views/user_type.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 12 | 2020-01-01T11:46:33.000Z | 2022-03-12T00:10:01.000Z | app/accounts/views/user_type.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-18T11:12:48.000Z | 2020-02-18T11:12:48.000Z | from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
| 26.576923 | 54 | 0.570188 |
9aa976fa66600077fd0293cccc1c6dcd3ade5f91 | 9,390 | py | Python | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shreejitverma/Data-Scientist | 03c06936e957f93182bb18362b01383e5775ffb1 | [
"MIT"
] | 2 | 2022-03-12T04:53:03.000Z | 2022-03-27T12:39:21.000Z | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 2 | 2022-03-12T04:52:21.000Z | 2022-03-27T12:45:32.000Z | # Thinking probabilistically-- Discrete variables!!
# Statistical inference rests upon probability. Because we can very rarely say anything meaningful with absolute certainty from data, we use probabilistic language to make quantitative statements about data. In this chapter, you will learn how to think probabilistically about discrete quantities: those that can only take certain values, like integers.
# Generating random numbers using the np.random module
# We will be hammering the np.random module for the rest of this course and its sequel. Actually, you will probably call functions from this module more than any other while wearing your hacker statistician hat. Let's start by taking its simplest function, np.random.random() for a test spin. The function returns a random number between zero and one. Call np.random.random() a few times in the IPython shell. You should see numbers jumping around between zero and one.
# In this exercise, we'll generate lots of random numbers between zero and one, and then plot a histogram of the results. If the numbers are truly random, all bars in the histogram should be of (close to) equal height.
# You may have noticed that, in the video, Justin generated 4 random numbers by passing the keyword argument size=4 to np.random.random(). Such an approach is more efficient than a for loop: in this exercise, however, you will write a for loop to experience hacker statistics as the practice of repeating an experiment over and over again.
# Seed the random number generator (fixed seed for reproducible output)
np.random.seed(42)

# Initialize random numbers: random_numbers
random_numbers = np.empty(100000)

# Generate random numbers by looping over range(100000)
# (done with an explicit loop deliberately, to practice repeated trials)
for i in range(100000):
    random_numbers[i] = np.random.random()

# Plot a histogram — for uniform draws all bars should be roughly equal height
_ = plt.hist(random_numbers)

# Show the plot
plt.show()
# The np.random module and Bernoulli trials
# You can think of a Bernoulli trial as a flip of a possibly biased coin. Specifically, each coin flip has a probability p of landing heads (success) and probability 1p of landing tails (failure). In this exercise, you will write a function to perform n Bernoulli trials, perform_bernoulli_trials(n, p), which returns the number of successes out of n Bernoulli trials, each of which has probability p of success. To perform each Bernoulli trial, use the np.random.random() function, which returns a random number between zero and one.
def perform_bernoulli_trials(n, p):
    """Run n Bernoulli trials, each succeeding with probability p.

    Returns the integer number of successes.  One uniform draw on [0, 1)
    is consumed per trial; a draw strictly below p counts as a success.
    """
    return sum(1 for _ in range(n) if np.random.random() < p)
# How many defaults might we expect?
# Let's say a bank made 100 mortgage loans. It is possible that anywhere between 0 and 100 of the loans will be defaulted upon. You would like to know the probability of getting a given number of defaults, given that the probability of a default is p = 0.05. To investigate this, you will do a simulation. You will perform 100 Bernoulli trials using the perform_bernoulli_trials() function you wrote in the previous exercise and record how many defaults we get. Here, a success is a default. (Remember that the word "success" just means that the Bernoulli trial evaluates to True, i.e., did the loan recipient default?) You will do this for another 100 Bernoulli trials. And again and again until we have tried it 1000 times. Then, you will plot a histogram describing the probability of the number of defaults.
# Seed random number generator
np.random.seed(42)

# Initialize the number of defaults: n_defaults
n_defaults = np.empty(1000)

# Compute the number of defaults: 1000 simulations of 100 loans each,
# with a 5% per-loan default probability.
for i in range(1000):
    n_defaults[i] = perform_bernoulli_trials(100,0.05)

# Plot the histogram with default number of bins; label your axes
# NOTE(review): `normed` was removed in Matplotlib 3.1 — on modern Matplotlib
# this call needs `density=True` instead.
_ = plt.hist(n_defaults, normed= True)
_ = plt.xlabel('number of defaults out of 100 loans')
_ = plt.ylabel('probability')

# Show the plot
plt.show()
# Will the bank fail?
# Plot the number of defaults you got from the previous exercise, in your namespace as n_defaults, as a CDF. The ecdf() function you wrote in the first chapter is available.
# If interest rates are such that the bank will lose money if 10 or more of its loans are defaulted upon, what is the probability that the bank will lose money?
# Compute ECDF: x, y
# NOTE(review): `ecdf` is not defined in this excerpt — the exercise text says
# it was written in an earlier chapter; confirm it is in scope before running.
x, y= ecdf(n_defaults)

# Plot the ECDF with labeled axes
plt.plot(x, y, marker = '.', linestyle ='none')
plt.xlabel('loans')
plt.ylabel('interest')

# Show the plot
plt.show()

# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money
n_lose_money=sum(n_defaults >=10)

# Compute and print probability of losing money (fraction of simulations)
print('Probability of losing money =', n_lose_money / len(n_defaults))
# Sampling out of the Binomial distribution
# Compute the probability mass function for the number of defaults we would expect for 100 loans as in the last section, but instead of simulating all of the Bernoulli trials, perform the sampling using np.random.binomial(). This is identical to the calculation you did in the last set of exercises using your custom-written perform_bernoulli_trials() function, but far more computationally efficient. Given this extra efficiency, we will take 10,000 samples instead of 1000. After taking the samples, plot the CDF as last time. This CDF that you are plotting is that of the Binomial distribution.
# Note: For this exercise and all going forward, the random number generator is pre-seeded for you (with np.random.seed(42)) to save you typing that each time.
# Take 10,000 samples out of the binomial distribution: n_defaults
# (equivalent to the manual Bernoulli simulation above, but done in C by NumPy)
n_defaults = np.random.binomial(100,0.05,size = 10000)

# Compute CDF: x, y
# NOTE(review): `ecdf` is not defined in this excerpt — confirm it is in scope.
x, y = ecdf(n_defaults)

# Plot the CDF with axis labels
plt.plot(x,y, marker ='.', linestyle = 'none')
plt.xlabel("Number of Defaults")
plt.ylabel("CDF")

# Show the plot
plt.show()

# Plotting the Binomial PMF
# Compute bin edges: bins — centered on the integers, i.e. -0.5, 0.5, 1.5, ...
bins = np.arange(0, max(n_defaults) + 1.5) - 0.5

# Generate histogram
# NOTE(review): `normed` was removed in Matplotlib 3.1 — use `density=True`
# on modern Matplotlib.
plt.hist(n_defaults, normed = True, bins = bins)

# Label axes
plt.xlabel('Defaults')
plt.ylabel('PMF')

# Show the plot
plt.show()
# Relationship between Binomial and Poisson distributions
# You just heard that the Poisson distribution is a limit of the Binomial distribution for rare events. This makes sense if you think about the stories. Say we do a Bernoulli trial every minute for an hour, each with a success probability of 0.1. We would do 60 trials, and the number of successes is Binomially distributed, and we would expect to get about 6 successes. This is just like the Poisson story we discussed in the video, where we get on average 6 hits on a website per hour. So, the Poisson distribution with arrival rate equal to np approximates a Binomial distribution for n Bernoulli trials with probability p of success (with n large and p small). Importantly, the Poisson distribution is often simpler to work with because it has only one parameter instead of two for the Binomial distribution.
# Let's explore these two distributions computationally. You will compute the mean and standard deviation of samples from a Poisson distribution with an arrival rate of 10. Then, you will compute the mean and standard deviation of samples from a Binomial distribution with parameters n and p such that np=10.
# Draw 10,000 samples out of Poisson distribution: samples_poisson
# (arrival rate 10, matching the exercise text; the original copy left these
# template blanks `____` unfilled, which raised NameError at runtime)
samples_poisson = np.random.poisson(10, size=10000)

# Print the mean and standard deviation
print('Poisson: ', np.mean(samples_poisson),
      np.std(samples_poisson))

# Specify values of n and p to consider for Binomial: n, p
# Each pair satisfies n*p = 10 so the Binomial approaches the Poisson above.
n = [20, 100, 1000]
p = [0.5, 0.1, 0.01]

# Draw 10,000 samples for each n,p pair: samples_binomial
for i in range(3):
    samples_binomial = np.random.binomial(n[i], p[i], size=10000)

    # Print results: mean stays ~10; std approaches the Poisson's as p shrinks.
    print('n =', n[i], 'Binom:', np.mean(samples_binomial),
          np.std(samples_binomial))

# Was 2015 anomalous?
# 1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that there are on average 251/115 no-hitters per season, what is the probability of having seven or more in a season?
# Draw 10,000 samples out of Poisson distribution: n_nohitters
n_nohitters = np.random.poisson(251/115, size=10000)

# Compute number of samples that are seven or greater: n_large
n_large = np.sum(n_nohitters >= 7)

# Compute probability of getting seven or more: p_large
p_large = n_large / 10000

# Print the result
print('Probability of seven or more no-hitters:', p_large)
| 47.908163 | 812 | 0.760809 |
9aacaa2c9c98de085aff50585e25fcd2964d6c96 | 1,008 | py | Python | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | '''
This is an abstract example of Extracting in an ETL pipeline.
Inspired from the "Introduction to Data Engineering" course on Datacamp.com
Author: Alex Nakagawa
'''
import requests
# Fetch the Hackernews post
resp = requests.get("https://hacker-news.firebaseio.com/v0/item/16222426.json")

# Print the response parsed as JSON
print(resp.json())

# Assign the score of the test to post_score
post_score = resp.json()['score']
print(post_score)

# Function to extract table to a pandas DataFrame
# NOTE(review): `extract_table_to_pandas` is not defined in this excerpt —
# presumably stripped from this copy; confirm before running.

# Connect to the database using the connection URI
connection_uri = "postgresql://repl:password@localhost:5432/pagila"
# NOTE(review): `sqlalchemy` is never imported in this file as shown — the
# next line raises NameError without `import sqlalchemy` at the top.
db_engine = sqlalchemy.create_engine(connection_uri)

# Extract the film table into a pandas DataFrame
extract_table_to_pandas("film", db_engine)

# Extract the customer table into a pandas DataFrame
extract_table_to_pandas("customer", db_engine)
| 30.545455 | 79 | 0.779762 |
9aacd4bc00b3363cbb5a9d413afa93f29eedb771 | 531 | py | Python | python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | 1 | 2022-03-06T03:49:31.000Z | 2022-03-06T03:49:31.000Z | python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | """
*
J , S . S ? .
- Example 1
Input : J = "aA", S = "aAAbbbb"
Output : 3
- Example 2
Input : J = "z", S = "ZZ"
Output : 0
"""
import collections
if __name__ == '__main__':
solution = Solution()
print(solution.numJewelsInStones("aA", "aAAbbbb")) | 19.666667 | 55 | 0.585687 |
9aad0121a197a064fa70a4456dc468491585ad3b | 774 | py | Python | migrations/versions/e1c435b9e9dc_.py | vipshae/todo-lister | ca639a3efcc243bebe132ca43c1917a28d4e83a6 | [
"MIT"
] | null | null | null | migrations/versions/e1c435b9e9dc_.py | vipshae/todo-lister | ca639a3efcc243bebe132ca43c1917a28d4e83a6 | [
"MIT"
] | null | null | null | migrations/versions/e1c435b9e9dc_.py | vipshae/todo-lister | ca639a3efcc243bebe132ca43c1917a28d4e83a6 | [
"MIT"
] | null | null | null | """empty message
Revision ID: e1c435b9e9dc
Revises: 2527092d6a89
Create Date: 2020-06-11 14:22:00.453626
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# `down_revision` points at the migration this one builds on; branch_labels
# and depends_on are unused for this linear revision.
revision = 'e1c435b9e9dc'
down_revision = '2527092d6a89'
branch_labels = None
depends_on = None
| 23.454545 | 65 | 0.652455 |
9aad26c087264dde6976cf7bacd6c4bf3d397a51 | 1,345 | py | Python | test/test_quilted_contacts_list.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | 1 | 2019-02-21T20:10:37.000Z | 2019-02-21T20:10:37.000Z | test/test_quilted_contacts_list.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | null | null | null | test/test_quilted_contacts_list.py | cocoroutine/pyquilted | dd8644043deec17608e00f46e3ac4562b8879603 | [
"MIT"
] | null | null | null | import unittest
from pyquilted.quilted.contact import *
from pyquilted.quilted.contacts_list import ContactsList
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 31.27907 | 79 | 0.475093 |
9aae954a3239c945002696eff2a9d8adff07720d | 3,110 | py | Python | examples/python/macOS/hack_or_die.py | kitazaki/NORA_Badge | 9b04a57235f0763641ffa8e90e499f141dc57570 | [
"Apache-2.0"
] | null | null | null | examples/python/macOS/hack_or_die.py | kitazaki/NORA_Badge | 9b04a57235f0763641ffa8e90e499f141dc57570 | [
"Apache-2.0"
] | null | null | null | examples/python/macOS/hack_or_die.py | kitazaki/NORA_Badge | 9b04a57235f0763641ffa8e90e499f141dc57570 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import time
import uuid
import Adafruit_BluefruitLE
CHARACTERISTIC_SERVICE_UUID = uuid.UUID('0000fee0-0000-1000-8000-00805f9b34fb')
CHARACTERISTIC_DATA_UUID = uuid.UUID('0000fee1-0000-1000-8000-00805f9b34fb')
provider = Adafruit_BluefruitLE.get_provider()
provider.initialize()
provider.run_mainloop_with(main)
| 37.02381 | 82 | 0.632797 |
9aaec48386d244bd541a612785f13979caec8fe3 | 4,902 | py | Python | turkish_morphology/validate_test.py | nogeeky/turkish-morphology | 64881f23dad87c6f470d874030f6b5f33fe1a9eb | [
"Apache-2.0"
] | 157 | 2019-05-20T13:05:43.000Z | 2022-03-23T16:36:31.000Z | turkish_morphology/validate_test.py | OrenBochman/turkish-morphology | 8f33046722ce204ccc51739687921ab041bed254 | [
"Apache-2.0"
] | 9 | 2019-09-11T08:17:12.000Z | 2022-03-15T18:29:01.000Z | turkish_morphology/validate_test.py | OrenBochman/turkish-morphology | 8f33046722ce204ccc51739687921ab041bed254 | [
"Apache-2.0"
] | 30 | 2019-09-29T06:50:01.000Z | 2022-03-13T15:31:10.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for turkish_morphology.validate."""
import os
from turkish_morphology import analysis_pb2
from turkish_morphology import validate
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
_TESTDATA_DIR = "turkish_morphology/testdata"
if __name__ == "__main__":
absltest.main()
| 33.346939 | 76 | 0.659935 |
9aaf20b86321deb4ac2d2c3951af5c3c52764470 | 115 | py | Python | rplint/__main__.py | lpozo/rplint | 907cb5342827b2c38e79721bc2dc99b3b6f7912b | [
"MIT"
] | 7 | 2020-09-10T15:39:07.000Z | 2021-02-15T17:45:04.000Z | rplint/__main__.py | lpozo/rplint | 907cb5342827b2c38e79721bc2dc99b3b6f7912b | [
"MIT"
] | 6 | 2020-11-11T02:42:37.000Z | 2021-03-17T01:00:27.000Z | rplint/__main__.py | lpozo/rplint | 907cb5342827b2c38e79721bc2dc99b3b6f7912b | [
"MIT"
] | 3 | 2020-11-11T02:10:22.000Z | 2020-12-12T01:02:29.000Z | #!/usr/bin/env python3
from .main import rplint
# Entry point for `python -m rplint`; pass the package name so click shows
# the correct program name in help output.
if __name__ == "__main__":
    rplint.main(prog_name=__package__)
| 19.166667 | 38 | 0.730435 |
9ab1353597b9195d65b8c371888b502f56866647 | 3,368 | py | Python | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from numpy import sqrt, cos, sin, arctan, exp, abs, pi, conj
from scipy import array, dot, sum
def mphase(n,k,th):
""" Calculate phase shift and reflectance of a metal in the s and p directions"""
u = sqrt(0.5 *((n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
v = sqrt(0.5*(-(n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
ds = arctan(2*v*cos(th)/(u**2+v**2-cos(th)**2));
dp = arctan(2*v*cos(th)*(n**2-k**2-2*u**2)/(u**2+v**2-(n**2+k**2)**2*cos(th)**2));
if(dp < 0):
dp = dp+pi;
rs = abs((cos(th) - (u+v*1j))/(cos(th) + (u+v*1j)))
rp = abs(((n**2 + k**2)*cos(th) - (u+v*1j))/((n**2 + k**2)*cos(th) + (u+v*1j)));
return array([ds, dp, rs, rp])
| 34.367347 | 101 | 0.518705 |
9ab5d8227882ea8202fdc93b49f22e935bbc0e93 | 2,560 | py | Python | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-10-01T17:11:58.000Z | 2020-10-01T17:11:58.000Z | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | 17 | 2020-03-11T17:04:05.000Z | 2020-05-01T09:34:45.000Z | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=cyclic-import
"""
.. py:module::config
:synopsis: Convenience class for configuration file option
"""
import click_config_file
import yaml
from .overridable import OverridableOption
def yaml_config_file_provider(handle, cmd_name): # pylint: disable=unused-argument
    """Read a YAML configuration file from the open file ``handle``.

    Provider callback for ``click_config_file``; ``cmd_name`` is part of the
    required callback signature but is unused here. Returns whatever
    ``yaml.safe_load`` parses (typically a dict, or None for an empty file).
    """
    return yaml.safe_load(handle)
| 36.056338 | 116 | 0.605078 |
9ab6d13a500341cc43c1e83dfab97d3f76d1b8d3 | 460 | py | Python | vaccine_feed_ingest/runners/ct/state/parse.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | 27 | 2021-04-24T02:11:18.000Z | 2021-05-17T00:54:45.000Z | vaccine_feed_ingest/runners/ct/state/parse.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | 574 | 2021-04-06T18:09:11.000Z | 2021-08-30T07:55:06.000Z | vaccine_feed_ingest/runners/ct/state/parse.py | jeremyschlatter/vaccine-feed-ingest | 215f6c144fe5220deaccdb5db3e96f28b7077b3f | [
"MIT"
] | 47 | 2021-04-23T05:31:14.000Z | 2021-07-01T20:22:46.000Z | #!/usr/bin/env python
import json
import pathlib
import sys
# CLI contract: argv[1] is the OUTPUT directory and argv[2] is the INPUT
# directory (note the reversed order).
input_dir = pathlib.Path(sys.argv[2])
output_dir = pathlib.Path(sys.argv[1])
output_file = output_dir / "data.parsed.ndjson"
results = []
# Each raw chunk file is a JSON document with a top-level "results" list;
# concatenate the lists from every chunk.
for input_file in input_dir.glob("data.raw.*.json"):
    with input_file.open() as fin:
        results.extend(json.load(fin)["results"])
# Emit newline-delimited JSON: one record per line.
with output_file.open("w") as fout:
    for result in results:
        json.dump(result, fout)
        fout.write("\n")
| 23 | 52 | 0.680435 |
9ab9d917b353cf0f8ea3e285cac62732af59e404 | 563 | py | Python | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | null | null | null | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | null | null | null | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | 1 | 2020-12-23T19:32:51.000Z | 2020-12-23T19:32:51.000Z | # example of redefinition __repr__ and __str__ of exception
# Demonstration script: MyBad and MyBad2 are exception classes that customise
# __repr__/__str__. They are not defined in this excerpt — presumably earlier
# in the file. The inline comments record the expected printed output.
try:
    raise MyBad('spam')
except MyBad as X:
    print(X) # My mistake!
    print(X.args) # ('spam',)
try:
    raise MyBad2('spam')
except MyBad2 as X:
    print(X) # spam
    print(X.args) # ('spam',)
# NOTE: this final raise is intentionally left uncaught so the script ends by
# showing the customised traceback line recorded in the comment.
raise MyBad('spam') # __main__.MyBad2: My mistake!
# raise MyBad2('spam') # __main__.MyBad2: spam | 20.107143 | 65 | 0.648313 |
9abaab450ac2ca5229b853ff9168c5720ce319bf | 7,998 | py | Python | difPy/dif.py | ppizarror/Duplicate-Image-Finder | 371d70454531d1407b06d98f3e3bdc5e3fc03f49 | [
"MIT"
] | null | null | null | difPy/dif.py | ppizarror/Duplicate-Image-Finder | 371d70454531d1407b06d98f3e3bdc5e3fc03f49 | [
"MIT"
] | null | null | null | difPy/dif.py | ppizarror/Duplicate-Image-Finder | 371d70454531d1407b06d98f3e3bdc5e3fc03f49 | [
"MIT"
] | null | null | null | import skimage.color
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import imghdr
import time
"""
Duplicate Image Finder (DIF): function that searches a given directory for images and finds duplicate/similar images among them.
Outputs the number of found duplicate/similar image pairs with a list of the filenames having lower resolution.
"""
| 41.65625 | 128 | 0.572893 |
9abc03c9cf82f6250f6e274347a435222a3060a0 | 1,572 | py | Python | minmax.py | jeffmorais/estrutura-de-dados | e7088df4fe753af106b4642c5e147d578a466c3b | [
"MIT"
] | 1 | 2016-02-16T13:52:00.000Z | 2016-02-16T13:52:00.000Z | minmax.py | jeffmorais/estrutura-de-dados | e7088df4fe753af106b4642c5e147d578a466c3b | [
"MIT"
] | null | null | null | minmax.py | jeffmorais/estrutura-de-dados | e7088df4fe753af106b4642c5e147d578a466c3b | [
"MIT"
] | null | null | null | # A funo min_max dever rodar em O(n) e o cdigo no pode usar nenhuma
# lib do Python (sort, min, max e etc)
# Não pode usar qualquer laço (while, for), a função deve ser recursiva
# Ou delegar a solução para uma função puramente recursiva
import unittest
def min_max(seq):
    '''
    :param seq: a sequence of mutually comparable items
    :return: (min, max)

    Returns a tuple whose first element is the minimum value of the
    sequence seq and whose second element is its maximum value.
    An empty sequence yields (None, None).

    Purely recursive — no loops and no use of min/max/sort — and O(n).
    Note: recursion depth grows with len(seq), so very long inputs may hit
    Python's recursion limit.

    Fix: the previous version delegated to a helper ``bora`` that is not
    defined anywhere in the visible module, so any input with more than one
    element raised NameError; the scan is now implemented locally.
    '''
    if len(seq) == 0:
        return (None, None)
    if len(seq) == 1:
        return seq[0], seq[0]
    return _min_max_scan(seq, 1, seq[0], seq[0])


def _min_max_scan(seq, i, lo, hi):
    '''Recursively scan seq from index i, carrying the (lo, hi) extremes seen so far.'''
    if i == len(seq):
        return lo, hi
    x = seq[i]
    if x < lo:
        lo = x
    elif x > hi:
        hi = x
    return _min_max_scan(seq, i + 1, lo, hi)
if __name__ == '__main__':
unittest.main()
| 29.111111 | 72 | 0.588422 |
9abd21b74954fe3eba3090f8582e570668b4381d | 3,927 | py | Python | news-category-classifcation/build_vocab.py | lyeoni/pytorch-nlp-tutorial | 8cc490adc6cc92d458548e0e73fbbf1db575f049 | [
"MIT"
] | 1,433 | 2018-12-14T06:20:28.000Z | 2022-03-31T14:12:50.000Z | news-category-classifcation/build_vocab.py | itsshaikaslam/nlp-tutorial-1 | 6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a | [
"MIT"
] | 14 | 2019-04-03T08:30:23.000Z | 2021-07-11T11:41:05.000Z | news-category-classifcation/build_vocab.py | itsshaikaslam/nlp-tutorial-1 | 6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a | [
"MIT"
] | 306 | 2018-12-20T09:41:24.000Z | 2022-03-31T05:07:14.000Z | import argparse
import pickle
from tokenization import Vocab, Tokenizer
TOKENIZER = ('treebank', 'mecab')
def load_pretrained(fname):
    """
    Load pre-trained FastText word vectors
    :param fname: text file containing the word vectors, one per line.
        The first line is a header "<n_vectors> <dim>"; each following line
        is "<word> <v1> <v2> ...".
    :return: dict mapping each word to its vector as a list of floats.
    """
    # Fix: use a context manager so the file handle is always closed
    # (the previous version opened the file and never closed it).
    with open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        n, d = map(int, fin.readline().split())
        print('Loading {} word vectors(dim={})...'.format(n, d))
        word2vec_dict = {}
        for line in fin:
            tokens = line.rstrip().split(' ')
            word2vec_dict[tokens[0]] = list(map(float, tokens[1:]))
    print('#pretrained_word_vectors:', len(word2vec_dict))
    return word2vec_dict
if __name__=='__main__':
    # Parse command-line options (argparser is defined elsewhere in this module).
    config = argparser()
    print(config)

    # Select tokenizer: 'treebank' (NLTK word_tokenize) or 'mecab' (KoNLPy Mecab).
    config.tokenizer = config.tokenizer.lower()
    if config.tokenizer==TOKENIZER[0]:
        from nltk.tokenize import word_tokenize
        tokenization_fn = word_tokenize
    elif config.tokenizer ==TOKENIZER[1]:
        from konlpy.tag import Mecab
        tokenization_fn = Mecab().morphs

    tokenizer = Tokenizer(tokenization_fn=tokenization_fn,
                          is_sentence=config.is_sentence,
                          max_seq_length=config.max_seq_length)

    # Tokenization & read tokens.
    # Corpus lines are tab-separated; the first field is dropped and the rest
    # joined into the text that gets tokenized.
    # Fix: encoding was '-utf-8', a typo that only happened to work through
    # Python's codec-alias normalisation; use the canonical 'utf-8'.
    list_of_tokens = []
    with open(config.corpus, 'r', encoding='utf-8', errors='ignore') as reader:
        for li, line in enumerate(reader):
            text = ' '.join(line.split('\t')[1:]).strip()
            list_of_tokens += tokenizer.tokenize(text)

    # Build vocabulary from the token stream.
    vocab = Vocab(list_of_tokens=list_of_tokens,
                  unk_token=config.unk_token,
                  pad_token=config.pad_token,
                  bos_token=config.bos_token,
                  eos_token=config.eos_token,
                  min_freq=config.min_freq,
                  lower=config.lower)
    vocab.build()
    # Optionally attach pre-trained FastText vectors to the vocabulary.
    if config.pretrained_vectors:
        pretrained_vectors = load_pretrained(fname=config.pretrained_vectors)
        vocab.from_pretrained(pretrained_vectors=pretrained_vectors)
    print('Vocabulary size: ', len(vocab))

    # Save vocabulary (pickled) to the path given by config.vocab.
    with open(config.vocab, 'wb') as writer:
        pickle.dump(vocab, writer)
    print('Vocabulary saved to', config.vocab)
9abd5d0a8f6f8a824f776810d4a5b66aeca261fa | 650 | py | Python | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | 1 | 2022-01-12T17:22:02.000Z | 2022-01-12T17:22:02.000Z | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | null | null | null | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import os
from aws_lambda_powertools import Logger
# Created at module import time so they are built once, not per call.
logger = Logger()
client = boto3.client('stepfunctions')
# Target Step Functions state machine ARN; os.environ[...] raises KeyError
# at import time if SFN_ARN is not set.
sfnArn = os.environ['SFN_ARN']
| 23.214286 | 68 | 0.676923 |
9abd6d106252aee5d79f8c8f78a07cba499bc3da | 3,068 | py | Python | tests/encryption/aes_decrypter.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 176 | 2015-01-02T13:55:39.000Z | 2022-03-12T11:44:37.000Z | tests/encryption/aes_decrypter.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 495 | 2015-01-13T06:47:06.000Z | 2022-03-12T11:07:03.000Z | tests/encryption/aes_decrypter.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 62 | 2015-02-23T08:19:38.000Z | 2022-03-18T06:01:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the AES decrypter object."""
import unittest
from dfvfs.encryption import aes_decrypter
from dfvfs.lib import definitions
from tests.encryption import test_lib
if __name__ == '__main__':
unittest.main()
| 33.714286 | 77 | 0.730769 |
9abfb5ca61ed6e49fce34592c1824290b02d1d23 | 4,460 | py | Python | Crash Course on Python/WEEK 5/solutions.py | atharvpuranik/Google-IT-Automation-with-Python-Professional-Certificate | 4d8fd587fa85ea4db62db6142fbb58cd9c29bb69 | [
"MIT"
] | 42 | 2020-04-28T09:06:21.000Z | 2022-01-09T01:01:55.000Z | Crash Course on Python/WEEK 5/solutions.py | vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate | d87dffe924de218f73d61d27689798646824ed6c | [
"MIT"
] | null | null | null | Crash Course on Python/WEEK 5/solutions.py | vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate | d87dffe924de218f73d61d27689798646824ed6c | [
"MIT"
] | 52 | 2020-05-12T05:29:46.000Z | 2022-01-26T21:24:08.000Z | #Q2
# If you have an apple and I have an apple and we exchange these apples then
# you and I will still each have one apple. But if you have an idea and I have
# an idea and we exchange these ideas, then each of us will have two ideas.
# George Bernard Shaw
# NOTE(review): Person, exchange_apples and exchange_ideas are expected to be
# defined earlier in this file; they are not visible in this excerpt.
johanna = Person()
johanna.apples = 1
johanna.ideas = 1
martin = Person()
martin.apples = 2
martin.ideas = 1
exchange_apples(johanna, martin)
print("Johanna has {} apples and Martin has {} apples".format(johanna.apples, martin.apples))
exchange_ideas(johanna, martin)
print("Johanna has {} ideas and Martin has {} ideas".format(johanna.ideas, martin.ideas))
#Q3
# define a basic city class
# create a new instance of the City class and
# define each attribute
city1 = City()
city1.name = "Cusco"
city1.country = "Peru"
city1.elevation = 3399
city1.population = 358052
# create a new instance of the City class and
# define each attribute
city2 = City()
city2.name = "Sofia"
city2.country = "Bulgaria"
city2.elevation = 2290
city2.population = 1241675
# create a new instance of the City class and
# define each attribute
city3 = City()
city3.name = "Seoul"
city3.country = "South Korea"
city3.elevation = 38
city3.population = 9733509
# max_elevation_city is defined elsewhere in the file; per the expected
# outputs below, it appears to pick the highest-elevation city meeting the
# population threshold — confirm against its definition.
print(max_elevation_city(100000)) # Should print "Cusco, Peru"
print(max_elevation_city(1000000)) # Should print "Sofia, Bulgaria"
print(max_elevation_city(10000000)) # Should print ""
#Q5
# Furniture and describe_furniture are likewise defined elsewhere in the file.
table = Furniture()
table.color="brown"
table.material="wood"
couch = Furniture()
couch.color="red"
couch.material="leather"
print(describe_furniture(table))
# Should be "This piece of furniture is made of brown wood"
print(describe_furniture(couch))
# Should be "This piece of furniture is made of red leather"
| 31.188811 | 140 | 0.722646 |
9ac1c767370071e77aa1a0a522794a49b7886db3 | 205 | py | Python | python/test/is_prime.test.py | hotate29/kyopro_lib | 20085381372d2555439980c79887ca6b0809bb77 | [
"MIT"
] | null | null | null | python/test/is_prime.test.py | hotate29/kyopro_lib | 20085381372d2555439980c79887ca6b0809bb77 | [
"MIT"
] | 2 | 2020-10-13T17:02:12.000Z | 2020-10-17T16:04:48.000Z | python/test/is_prime.test.py | hotate29/kyopro_lib | 20085381372d2555439980c79887ca6b0809bb77 | [
"MIT"
] | null | null | null | # verification-helper: PROBLEM http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ALDS1_1_C
from python.lib.is_prime import isprime
# Reads the number of test cases from the first input line, then one integer
# per following line, and prints how many of them are prime (sum of booleans
# counts True values) — verification script for AOJ problem ALDS1_1_C.
print(sum(isprime(int(input())) for _ in range(int(input()))))
| 25.625 | 97 | 0.756098 |
9ac242f669af4d52c4d497c2811debd7113e2d03 | 691 | py | Python | utils/pad.py | Zenodia/nativePytorch_NMT | bfced09eb6e5476d34619dfc0dd41d4ed610248f | [
"MIT"
] | 60 | 2018-09-28T07:53:11.000Z | 2020-11-06T11:59:07.000Z | utils/pad.py | Pravin74/transformer-pytorch | c31e163ed57321e405771ef7fb556d4d92fd5efb | [
"MIT"
] | 2 | 2021-02-15T14:08:08.000Z | 2021-09-12T12:52:37.000Z | utils/pad.py | Pravin74/transformer-pytorch | c31e163ed57321e405771ef7fb556d4d92fd5efb | [
"MIT"
] | 18 | 2018-09-28T07:56:35.000Z | 2020-11-24T00:11:33.000Z | import torch
import numpy as np
PAD_TOKEN_INDEX = 0
| 32.904762 | 87 | 0.723589 |
9ac324779be3fdadd696253340d551fc8f9b954c | 576 | py | Python | jesse/modes/utils.py | julesGoullee/jesse | 49a1ac46715682e8a30df133ce055bf2dfdedb7d | [
"MIT"
] | 4 | 2021-02-23T18:23:58.000Z | 2021-10-10T07:32:41.000Z | jesse/modes/utils.py | ArdeshirV/jesse | 2ff415f6768f9ef7cca3e86d8f2f87988d3e7129 | [
"MIT"
] | null | null | null | jesse/modes/utils.py | ArdeshirV/jesse | 2ff415f6768f9ef7cca3e86d8f2f87988d3e7129 | [
"MIT"
] | 2 | 2021-04-30T06:49:26.000Z | 2022-01-24T09:24:35.000Z | from jesse.store import store
from jesse import helpers
from jesse.services import logger
| 27.428571 | 76 | 0.694444 |
9ac5612f4d7fef57c2d92d9c354db5aaef44d59e | 1,020 | py | Python | Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py | heimlich1024/OD_CopyPasteExternal | 943b993198e16d19f1fb4ba44049e498abf1e993 | [
"Apache-2.0"
] | 278 | 2017-04-27T18:44:06.000Z | 2022-03-31T02:49:42.000Z | Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py | heimlich1024/OD_CopyPasteExternal | 943b993198e16d19f1fb4ba44049e498abf1e993 | [
"Apache-2.0"
] | 57 | 2017-05-01T11:58:41.000Z | 2022-02-06T18:43:13.000Z | Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py | heimlich1024/OD_CopyPasteExternal | 943b993198e16d19f1fb4ba44049e498abf1e993 | [
"Apache-2.0"
] | 49 | 2017-04-28T19:24:14.000Z | 2022-03-12T15:17:13.000Z | ################################################################################
#
# cmd_copyToExternal.py
#
# Author: Oliver Hotz | Chris Sprance
#
# Description: Copies Geo/Weights/Morphs/UV's to External File
#
# Last Update:
#
################################################################################
import lx
import lxifc
import lxu.command
from od_copy_paste_external import copy_to_external
lx.bless(ODCopyToExternal, "OD_CopyToExternal")
| 23.72093 | 81 | 0.560784 |
9ac6f272c7449b8674bd2e0ae76f212c2c1488d6 | 17,828 | py | Python | iotest/case.py | gwk/iotest | bb5386c8d2e96cf99ca840fc512008ef786c4805 | [
"CC0-1.0"
] | 1 | 2018-03-24T16:03:15.000Z | 2018-03-24T16:03:15.000Z | iotest/case.py | gwk/iotest | bb5386c8d2e96cf99ca840fc512008ef786c4805 | [
"CC0-1.0"
] | 1 | 2016-08-12T19:09:43.000Z | 2016-08-12T19:09:43.000Z | iotest/case.py | gwk/iotest | bb5386c8d2e96cf99ca840fc512008ef786c4805 | [
"CC0-1.0"
] | null | null | null | # Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import ast
import os
import re
import shlex
from itertools import zip_longest
from string import Template
from typing import *
from .pithy.fs import *
from .pithy.io import *
from .pithy.types import * # type: ignore
from .ctx import Ctx
# File name used for coverage output (the 'coven' tool, per the name) —
# confirm against the coverage handling elsewhere in this module.
coverage_name = '_.coven'
# Mapping from keys as they appear in .iot case files to the attribute names
# used internally; 'in'/'.in' map to 'in_' to avoid the Python keyword, and
# '.err'/'.out' map to the expected-value fields.
iot_key_subs = {
  '.in' : 'in_',
  '.err' : 'err_val',
  '.out' : 'out_val',
  '.dflt_src_paths' : 'dflt_src_paths',
  '.test_info_paths' : 'test_info_paths',
  'in' : 'in_',
}
# Per-key validation table: key -> (human-readable expected-type message,
# type predicate, optional extra validator). The is_* predicates and
# validate_* callbacks are defined elsewhere in this module or its imports.
case_key_validators: Dict[str, Tuple[str, Callable[[Any], bool], Optional[Callable[[str, Any], None]]]] = {
  # key => msg, validator_predicate, validator_fn.
  'args': ('string or list of strings', is_str_or_list, None),
  'cmd': ('string or list of strings', is_str_or_list, None),
  'code': ('int or `...`', is_int_or_ellipsis, None),
  'compile': ('list of (str | list of str)', is_compile_cmd, None),
  'compile_timeout': ('positive int', is_pos_int, None),
  'coverage': ('string or list of strings', is_str_or_list, None),
  'desc': ('str', is_str, None),
  'dflt_src_paths': ('list of str', is_list_of_str, None),
  'env': ('dict of strings', is_dict_of_str, None),
  'err_mode': ('str', is_str, validate_exp_mode),
  'err_path': ('str', is_str, None),
  'err_val': ('str', is_str, None),
  'files': ('dict', is_dict, validate_files_dict),
  'in_': ('str', is_str, None),
  'interpreter': ('string or list of strings', is_str_or_list, None),
  'interpreter_args': ('string or list of strings', is_str_or_list, None),
  'links': ('string or (dict | set) of strings', is_valid_links, validate_links_dict),
  'out_mode': ('str', is_str, validate_exp_mode),
  'out_path': ('str', is_str, None),
  'out_val': ('str', is_str, None),
  'skip': ('bool', is_bool, None),
  'test_info_paths': ('set of str', is_set_of_str, None),
  'timeout': ('positive int', is_pos_int, None),
}
# file expectation functions.
# Dispatch table mapping an expectation mode name to its comparison function
# (the compare_* functions are defined elsewhere in this module).
file_expectation_fns = {
  'equal' : compare_equal,
  'contain' : compare_contain,
  'match' : compare_match,
  'ignore' : compare_ignore,
}
| 40.796339 | 146 | 0.648138 |
9ac8a3896499bd8c6da3c5ab7c320fbd74dda4ff | 111 | py | Python | aiophotoprism/__init__.py | zhulik/aiophotoprism | 91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85 | [
"MIT"
] | 4 | 2021-08-09T05:02:23.000Z | 2022-01-30T03:04:29.000Z | aiophotoprism/__init__.py | zhulik/aiophotoprism | 91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85 | [
"MIT"
] | null | null | null | aiophotoprism/__init__.py | zhulik/aiophotoprism | 91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85 | [
"MIT"
] | null | null | null | """Asynchronous Python client for the Photoprism REST API."""
from .photoprism import API, Photoprism # noqa
| 27.75 | 61 | 0.756757 |
9ac8a6eee2b79ed601b853802a3795b71f290223 | 5,558 | py | Python | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2018-02-02T00:15:26.000Z | 2018-02-02T00:15:26.000Z | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | null | null | null | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2019-05-27T09:47:18.000Z | 2019-05-27T09:47:18.000Z | #!/usr/bin/python
# XenAPI VM construction record: a PV (paravirtualised) guest with an
# explicit kernel, and standard lifecycle actions on shutdown/reboot/crash.
vm_cfg = {
    'name_label': 'APIVM',
    'user_version': 1,
    'is_a_template': False,
    'auto_power_on': False, # TODO
    'memory_static_min': 64,
    'memory_static_max': 128,
    #'memory_dynamic_min': 64,
    #'memory_dynamic_max': 128,
    'VCPUs_policy': 'credit',
    'VCPUs_params': '',
    'VCPUs_number': 2,
    'actions_after_shutdown': 'destroy',
    'actions_after_reboot': 'restart',
    'actions_after_crash': 'destroy',
    'PV_bootloader': '',
    'PV_bootloader_args': '',
    'PV_kernel': '/boot/vmlinuz-2.6.18-xenU',
    'PV_ramdisk': '',
    'PV_args': 'root=/dev/sda1 ro',
    #'HVM_boot': '',
    'platform_std_VGA': False,
    'platform_serial': '',
    'platform_localtime': False,
    'platform_clock_offset': False,
    'platform_enable_audio': False,
    'PCI_bus': ''
}
# New virtual disk of 100 * 2**30 (presumably bytes, i.e. 100 GiB) in the
# 'QCoW' storage repository.
vdi_cfg = {
    'name_label': 'API_VDI',
    'name_description': '',
    'virtual_size': 100 * 1024 * 1024 * 1024,
    'type': 'system',
    'parent': '',
    'SR_name': 'QCoW',
    'sharable': False,
    'read_only': False,
}
# Virtual block device template; the VDI/VM references are filled in at runtime.
vbd_cfg = {
    'VDI': '',
    'VM': '',
    'device': 'sda2',
    'mode': 'RW',
    'type': 'disk',
    'driver': 'paravirtualised',
}
# Pre-existing local disk image exposed through the 'Local' SR.
local_vdi_cfg = {
    'name_label': 'gentoo.amd64.img',
    'name_description': '',
    'virtual_size': 0,
    'parent': '',
    'type': 'system',
    'SR_name': 'Local',
    'sharable': False,
    'read_only': False,
    'other_config': {'location': 'file:/root/gentoo.amd64.img'},
}
local_vbd_cfg = {
    'VDI': '',
    'VM': '',
    'device': 'sda1',
    'mode': 'RW',
    'type': 'disk',
    'driver': 'paravirtualised',
}
# Paravirtualised network interface; device/network/MAC are assigned at runtime.
vif_cfg = {
    'name': 'API_VIF',
    'type': 'paravirtualised',
    'device': '',
    'network': '',
    'MAC': '',
    'MTU': 1500,
}
# VNC console over RFB. NOTE(review): 'vncpasswd' is a hard-coded credential.
console_cfg = {
    'protocol': 'rfb',
    'other_config': {'vncunused': 1, 'vncpasswd': 'testing'},
}
import sys
import time
from xapi import connect, execute
# test_vm_create() is expected to be defined later in this file; it is not
# visible in this excerpt.
if __name__ == "__main__":
    test_vm_create()
| 26.216981 | 75 | 0.542821 |
9ac8dc710710ba41c77dd17ed479decc6f7a00ea | 6,171 | py | Python | portfolyo/core/pfline/tests/test_single_helper.py | rwijtvliet/portfolyo | b22948fbc55264ec5d69824e791ca7ef45c6e49c | [
"BSD-3-Clause"
] | null | null | null | portfolyo/core/pfline/tests/test_single_helper.py | rwijtvliet/portfolyo | b22948fbc55264ec5d69824e791ca7ef45c6e49c | [
"BSD-3-Clause"
] | null | null | null | portfolyo/core/pfline/tests/test_single_helper.py | rwijtvliet/portfolyo | b22948fbc55264ec5d69824e791ca7ef45c6e49c | [
"BSD-3-Clause"
] | null | null | null | from portfolyo import testing, dev
from portfolyo.core.pfline import single_helper
from portfolyo.tools.nits import Q_
from portfolyo.tools.stamps import FREQUENCIES
import pandas as pd
import pytest
| 33 | 97 | 0.580943 |
9ac99cea9babd92f880b3baa9bf72af575865d84 | 31,044 | py | Python | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | """Competitions for parameter tuning using Monte-carlo tree search."""
from __future__ import division
import operator
import random
from heapq import nlargest
from math import exp, log, sqrt
from gomill import compact_tracebacks
from gomill import game_jobs
from gomill import competitions
from gomill import competition_schedulers
from gomill.competitions import (
Competition, NoGameAvailable, CompetitionError, ControlFileError,
Player_config)
from gomill.settings import *
# Per-parameter settings accepted in the tuner's control file. Each Setting
# pairs a key name with its interpreter (the interpret_* functions come from
# gomill.settings via the star import above); 'format' defaults to None when
# omitted.
parameter_settings = [
    Setting('code', interpret_identifier),
    Setting('scale', interpret_callable),
    Setting('split', interpret_positive_int),
    Setting('format', interpret_8bit_string, default=None),
]
def interpret_candidate_colour(v):
if v in ('r', 'random'):
return 'random'
else:
return interpret_colour(v)
| 35.077966 | 95 | 0.616544 |
9acbd6e09016763ff8a75cf2e88c6a01d873ad9c | 9,705 | py | Python | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
# NOTE(review): data_split, reg_top10_lightGBM, after_imp_save_sort and
# concat_importance are called below but are not defined in this excerpt;
# they are presumably defined elsewhere in the project — confirm.
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
    dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: made to check the details (: name of the code, name)
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# Dummy Encoding with name
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
# NOTE(review): reset_index() returns a new frame; this bare call does not
# modify total_patient_features.
total_patient_features.reset_index()
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
    ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
#Combine the new file containing ID and treatment status with the file after dummy encoding by the name
ID_treatment_medical_statement=pd.merge(ID_and_polyp_pn_data,total_patient_features,on=["ID"],how='outer')
ID_treatment_medical_statement_o= ID_treatment_medical_statement.fillna(0)
ID_treatment_medical_statement_p=ID_treatment_medical_statement_o.drop("ID", axis=1)
ID_treatment_medical_statement_rename= ID_treatment_medical_statement_p.rename(columns={'code':"Receipt type code"})
merge_data= ID_treatment_medical_statement_rename
# Split the training/validation set into 80% and the test set into 20%, with a constant proportion of cases with lesions
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=1)
# Create a function to divide data
# Separate into training, validation, and test set
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
# Make test set into pandas
X_test_df = pd.DataFrame(X_test)
y_test_df = pd.DataFrame(y_test)
# Make test set into test_df to keep away for the final process
test_dfp = pd.concat([y_test_df,X_test_df], axis=1)
test_df=test_dfp.rename(columns={0:"target"})
# Make training/validation sets into pandas
y_trainp = pd.DataFrame(y_train)
X_trainp = pd.DataFrame(X_train)
train=pd.concat([y_trainp, X_trainp], axis=1)
y_valp = pd.DataFrame(y_val)
X_valp = pd.DataFrame(X_val)
val=pd.concat([y_valp, X_valp], axis=1)
test_vol=pd.concat([train, val])
training_validation_sets=test_vol.rename(columns={0:"target"})
# Create a function to save the results and feature importance after analysis with lightGBM
# Find out Top 50 features procedure / Run the model once
importance = reg_top10_lightGBM(training_validation_sets,"check_data","_1",1)
# Create a function that sorts and stores the values of feature importance.
# Run a function to sort and save the values of feature importance.
top50_importance_all = after_imp_save_sort(importance,"check_data","_1")
# 10 runs of this procedure
# NOTE(review): 'dict' shadows the built-in dict type for the rest of the
# script; renaming it would be safer.
dict = {}
for num in range(10):
    print(num+1)
    importance = reg_top10_lightGBM(training_validation_sets,"check_data","_"+str(num+1),num+1)
    top50_importance_all = after_imp_save_sort(importance,"check_data","_"+str(num+1))
    dict[str(num)] = top50_importance_all
# Recall and merge the saved CSV files
importance_1_2 = concat_importance("0","1")
importance_3_4 = concat_importance("2","3")
importance_5_6 = concat_importance("4","5")
importance_7_8 = concat_importance("6","7")
importance_9_10 = concat_importance("8","9")
importance_1_4=pd.concat([importance_1_2, importance_3_4])
importance_1_6=pd.concat([importance_1_4, importance_5_6])
importance_1_8=pd.concat([importance_1_6, importance_7_8])
importance_1_10=pd.concat([importance_1_8, importance_9_10])
# Calculate the total value of the feature importance for each code
group_sum=importance_1_10.groupby(["columns"]).sum()
group_sum_s = group_sum.sort_values('importance', ascending=False)
importance_group_sum=group_sum_s.reset_index()
# Create train/validation test data with all features
merge_data_test=pd.concat([training_validation_sets, test_df])
# Make features in the order of highest total feature impotance value
importance_top50_previous_data=importance_group_sum["columns"]
# NOTE(review): the bare expression below has no effect outside a notebook;
# it was presumably used for display in Jupyter.
importance_top50_previous_data
# refine the data to top 50 features
# NOTE(review): X is reassigned below, discarding the feature matrix built above.
dict_top50 = {}
pycaret_dict_top50 = {}
X = range(1, 51)
for i,v in enumerate(X):
    dict_top50[str(i)] = importance_top50_previous_data.iloc[v]
    pycaret_dict_top50[importance_top50_previous_data[i]] = merge_data_test[dict_top50[str(i)]]
pycaret_df_dict_top50=pd.DataFrame(pycaret_dict_top50)
# Add the value of target (: objective variable)
target_data=merge_data_test["target"]
target_top50_dataframe=pd.concat([target_data, pycaret_df_dict_top50], axis=1)
# adjust pandas (pycaret needs to set str to int)
target_top50_dataframe_int=target_top50_dataframe.astype('int')
target_top50_dataframe_columns=target_top50_dataframe_int.columns.astype(str)
numpy_target_top50=target_top50_dataframe_int.to_numpy()
target_top50_dataframe_pycaret=pd.DataFrame(numpy_target_top50,columns=target_top50_dataframe_columns)
# compare the models
from pycaret.classification import *
clf1 = setup(target_top50_dataframe_pycaret, target ='target',train_size = 0.8,data_split_shuffle=False,fold=10,session_id=0)
best_model = compare_models()
| 48.525 | 165 | 0.757651 |
9acbf669f84ad525253b32c114c4e395b93adc19 | 3,488 | py | Python | open-hackathon-tempUI/src/hackathon/config-sample.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | open-hackathon-tempUI/src/hackathon/config-sample.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | open-hackathon-tempUI/src/hackathon/config-sample.py | SpAiNiOr/LABOSS | 32ad341821e9f30fecfa338b5669f574d32dd0fa | [
"Apache-2.0"
] | null | null | null | # "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# oauth constants
HOSTNAME = "http://hackathon.chinacloudapp.cn" # host name of the UI site
QQ_OAUTH_STATE = "openhackathon" # todo state should be constant. Actually it should be unguessable to prevent CSFA
HACkATHON_API_ENDPOINT = "http://hackathon.chinacloudapp.cn:15000"
Config = {
"environment": "local",
"login": {
"github": {
"access_token_url": 'https://github.com/login/oauth/access_token?client_id=a10e2290ed907918d5ab&client_secret=5b240a2a1bed6a6cf806fc2f34eb38a33ce03d75&redirect_uri=%s/github&code=' % HOSTNAME,
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=101192358&client_secret=d94f8e7baee4f03371f52d21c4400cab&redirect_uri=%s/qq&code=' % HOSTNAME,
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"gitcafe": {
"access_token_url": 'https://api.gitcafe.com/oauth/token?client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8&client_secret=e3d821e82d15096054abbc7fbf41727d3650cab6404a242373f5c446c0918634&redirect_uri=%s/gitcafe&grant_type=authorization_code&code=' % HOSTNAME
},
"provider_enabled": ["github", "qq", "gitcafe"],
"session_minutes": 60,
"token_expiration_minutes": 60 * 24
},
"hackathon-api": {
"endpoint": HACkATHON_API_ENDPOINT
},
"javascript": {
"renren": {
"clientID": "client_id=7e0932f4c5b34176b0ca1881f5e88562",
"redirect_url": "redirect_uri=%s/renren" % HOSTNAME,
"scope": "scope=read_user_message+read_user_feed+read_user_photo",
"response_type": "response_type=token",
},
"github": {
"clientID": "client_id=a10e2290ed907918d5ab",
"redirect_uri": "redirect_uri=%s/github" % HOSTNAME,
"scope": "scope=user",
},
"google": {
"clientID": "client_id=304944766846-7jt8jbm39f1sj4kf4gtsqspsvtogdmem.apps.googleusercontent.com",
"redirect_url": "redirect_uri=%s/google" % HOSTNAME,
"scope": "scope=https://www.googleapis.com/auth/userinfo.profile+https://www.googleapis.com/auth/userinfo.email",
"response_type": "response_type=token",
},
"qq": {
"clientID": "client_id=101192358",
"redirect_uri": "redirect_uri=%s/qq" % HOSTNAME,
"scope": "scope=get_user_info",
"state": "state=%s" % QQ_OAUTH_STATE,
"response_type": "response_type=code",
},
"gitcafe": {
"clientID": "client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8",
"clientSecret": "client_secret=e3d821e82d15096054abbc7fbf41727d3650cab6404a242373f5c446c0918634",
"redirect_uri": "redirect_uri=http://hackathon.chinacloudapp.cn/gitcafe",
"response_type": "response_type=code",
"scope": "scope=public"
},
"hackathon": {
"name": "open-xml-sdk",
"endpoint": HACkATHON_API_ENDPOINT
}
}
}
| 48.444444 | 294 | 0.648222 |
9acc78e7c1d68d1a67b2d32bd290cc493caa9d62 | 1,036 | py | Python | marocco/first.py | panos1998/Thesis_Code | 3f95730b1b2139011b060f002d5ce449a886079b | [
"Apache-2.0"
] | null | null | null | marocco/first.py | panos1998/Thesis_Code | 3f95730b1b2139011b060f002d5ce449a886079b | [
"Apache-2.0"
] | null | null | null | marocco/first.py | panos1998/Thesis_Code | 3f95730b1b2139011b060f002d5ce449a886079b | [
"Apache-2.0"
] | null | null | null | #%%
import sys
import numpy as np
from typing import Any, List
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
sys.path.append('C:/Users/panos/Documents//code/fz')
from arfftocsv import function_labelize
import csv
colnames =['age', 'sex', 'cp', 'trestbps', 'chol',
'fbs', 'restecg', 'thalach','exang', 'oldpeak', 'slope',
'ca', 'thal', 'cvd']
# %%
df1 = function_labelize(dest = 'labeled_data1.txt',
labels=colnames, source = 'processed.hungarian.csv')
df2 = function_labelize(dest = 'labeled_data2.txt',
labels=colnames, source = 'processed.cleveland.data')
df3 = function_labelize(dest = 'labeled_data3.txt',
labels=colnames, source = 'processed.va.csv')
df4 =function_labelize(dest = 'labeled_data4.txt',
labels=colnames, source = 'processed.switzerland.csv')
df = pd.concat([df1,df2,df3,df4], axis=0)
print(df.isna().sum())
df['cvd'] = df['cvd'].replace([2,3,4], 1)
scaler = MinMaxScaler()
X = df[colnames[:-1]]
y = df[colnames[-1]]
X_norm = scaler.fit_transform(X)
print(X_norm)
print(y)
# %%
| 32.375 | 63 | 0.712355 |
9accd3c42fa9f549ce35aac4c4567cb2591c14a9 | 10,323 | py | Python | matlab2cpp/datatype.py | emc2norway/m2cpp | 81943057c184c539b409282cbbd47bbf933db04f | [
"BSD-3-Clause"
] | 28 | 2017-04-25T10:06:38.000Z | 2022-02-09T07:25:34.000Z | matlab2cpp/datatype.py | emc2norway/m2cpp | 81943057c184c539b409282cbbd47bbf933db04f | [
"BSD-3-Clause"
] | null | null | null | matlab2cpp/datatype.py | emc2norway/m2cpp | 81943057c184c539b409282cbbd47bbf933db04f | [
"BSD-3-Clause"
] | 5 | 2017-04-25T17:54:53.000Z | 2022-03-21T20:15:15.000Z | """
The follwing constructor classes exists here:
+------------------------------------------+---------------------------------------+
| Class | Description |
+==========================================+=======================================+
| :py:class:`~matlab2cpp.datatype.Type` | Frontend for the datatype string |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Dim` | Reference to the number of dimensions |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Mem` | Reference to the memory type |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Num` | Numerical value indicator |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Suggest` | Frontend for suggested datatype |
+------------------------------------------+---------------------------------------+
"""
import supplement
import matlab2cpp as mc
dim0 = {"int", "float", "uword", "double", "cx_double", "size_t"}
dim1 = {"ivec", "fvec", "uvec", "vec", "cx_vec"}
dim2 = {"irowvec", "frowvec", "urowvec", "rowvec", "cx_rowvec"}
dim3 = {"imat", "fmat", "umat", "mat", "cx_mat"}
dim4 = {"icube", "fcube", "ucube", "cube", "cx_cube"}
dims = [dim0, dim1, dim2, dim3, dim4]
mem0 = {"uword", "uvec", "urowvec", "umat", "ucube"}
mem1 = {"int", "ivec", "irowvec", "imat", "icube"}
mem2 = {"float", "fvec", "frowvec", "fmat", "fcube"}
mem3 = {"double", "vec", "rowvec", "mat", "cube"}
mem4 = {"cx_double", "cx_vec", "cx_rowvec", "cx_mat", "cx_cube"}
mems = [mem0, mem1, mem2, mem3, mem4]
others = {"char", "string", "TYPE", "func_lambda", "struct", "structs", "cell",
"wall_clock", "SPlot"}
def common_loose(vals):
"""Common denominator among several names.
Loose enforcment"""
if not isinstance(vals, (tuple, list)) or \
isinstance(vals[0], int):
vals = [vals]
vals = list(vals)
for i in xrange(len(vals)):
if isinstance(vals[i], str):
continue
if isinstance(vals[i][0], int):
vals[i] = get_name(*vals[i])
vals = set(vals)
if len(vals) == 1:
return vals.pop()
vals.discard("TYPE")
if len(vals) == 1:
return vals.pop()
for other in others:
vals.discard(other)
if len(vals) == 0:
return "TYPE"
elif len(vals) == 1:
return vals.pop()
dims_ = map(get_dim, vals)
if dims_:
dim = max(*dims_)
else:
return "TYPE"
if dim == 2 and 1 in dims_:
dim = 3
types = map(get_mem, vals)
type = max(*types)
val = get_name(dim, type)
return val
def common_strict(vals):
"""Common denominator among several names.
Strict enforcment"""
if not isinstance(vals, (tuple, list)) \
or isinstance(vals[0], int):
vals = [vals]
vals = list(vals)
for i in xrange(len(vals)):
if isinstance(vals[i], str):
continue
if isinstance(vals[i][0], int):
vals[i] = get_name(*vals[i])
vals = set(vals)
if len(vals) == 1:
return vals.pop()
for other in others:
if other in vals:
return "TYPE"
dims_ = map(get_dim, vals)
dim = max(*dims_)
if dim == 2 and 1 in dims_:
return "TYPE"
types = map(get_mem, vals)
type = max(*types)
val = get_name(dim, type)
return val
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28.675 | 84 | 0.465272 |
9acd3d20a14d9e96bec466426e861a98197f22b0 | 330 | py | Python | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | 2 | 2020-04-15T03:57:42.000Z | 2020-06-06T01:43:34.000Z | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-05-14 03:18
from django.db import migrations
| 18.333333 | 47 | 0.593939 |
9acd4db9f55911f16eb79b057e6fc8abf0b3c6d4 | 210 | py | Python | resident/views.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | resident/views.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | resident/views.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
from main.permissions import ResidentLoginRequiredMixin
| 30 | 62 | 0.852381 |
9acff9f4ad0162148d8ed69428c049eb258f8169 | 9,179 | py | Python | src/awspfx/awspfx.py | exfi/awspfx | 118d2f83a365e1cd37da0b0689e6d5ff527e0f64 | [
"MIT"
] | 1 | 2021-08-10T23:17:07.000Z | 2021-08-10T23:17:07.000Z | src/awspfx/awspfx.py | exfi/awspfx | 118d2f83a365e1cd37da0b0689e6d5ff527e0f64 | [
"MIT"
] | 2 | 2021-09-22T03:59:52.000Z | 2021-12-22T22:48:18.000Z | src/awspfx/awspfx.py | exfi/awspfx | 118d2f83a365e1cd37da0b0689e6d5ff527e0f64 | [
"MIT"
] | 1 | 2022-03-29T15:14:22.000Z | 2022-03-29T15:14:22.000Z | #!/usr/bin/env python3
"""awspfx
Usage:
awspfx.py <profile>
awspfx.py [(-c | --current) | (-l | --list) | (-s | --swap)]
awspfx.py token [(-p | --profile) <profile>]
awspfx.py sso [(login | token)] [(-p | --profile) <profile>]
awspfx.py -h | --help
awspfx.py --version
Examples:
awspfx.py default # Change profile to 'default'
awspfx.py token # Token from current profile, default from SSO
awspfx.py token -p default # Token from profile 'default'
awspfx.py (-c | -l | -s)
SubCommands:
token Generate credentials
-p --profile Select profile
Options:
-c --current Change the profile
-l --list List profiles
-s --swap Swap previous the profile
-h --help Show this screen.
--version Show version.
WIP:
sso Option to login
sts Option to assume-role
"""
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from configparser import ConfigParser as cfgParser
import boto3
from colorlog import ColoredFormatter
from docopt import docopt
from iterfzf import iterfzf
if __name__ == "__main__":
log = setup_logging()
home_path = os.getenv('HOME') or exit_err("Home directory does not exist?")
# aws_profile_env = os.getenv("AWS_PROFILE")
aws = setup_aws()
awspfx_cache = has_file(f"{home_path}/.aws/awspfx", create=True)
direnv = has_which("direnv")
envrc_file = has_file(f"{home_path}/.envrc")
creds_file = has_file(f"{home_path}/.aws/credentials")
arguments = docopt(__doc__, version=f'awspfx 0.1.6 - python {sys.version}')
main(arguments)
| 26.002833 | 93 | 0.610742 |
9ad11bb35b11a89ca5873c299ffa8f65fee28a06 | 3,694 | py | Python | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | import re
from model.contact import Contact
# def test_contacts(app, ormdb):
# random_index = randrange(app.contact.count())
# #
# contact_from_home_page = app.contact.get_contact_list()
# #
# contact_from_db = ormdb.get_contact_list()
# # ,
# assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)
# def test_contact_info_on_main_page(app):
# if app.contact.amount() == 0:
# app.contact.create(
# Contact(firstname="TestTest", middlename="Test", lastname="Testing", nickname="testing",
# title="test", company="Test test", address="Spb", home="000222111",
# mobile="444555222", work="99966655", fax="11122255", email="test@tesr.ru",
# email2="test2@test.ru", email3="test3@test.ru", homepage="www.test.ru", bday="15",
# bmonth="May", byear="1985", aday="14", amonth="June", ayear="1985",
# address2="Spb", phone2="111111", notes="Friend"))
# random_index = randrange(app.contact.amount())
# contact_from_home_page = app.contact.get_contact_list()[random_index]
# contact_from_edit_page = app.contact.get_contact_info_from_edit_page(random_index)
# assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
# assert contact_from_home_page.firstname == contact_from_edit_page.firstname
# assert contact_from_home_page.lastname == contact_from_edit_page.lastname
# assert contact_from_home_page.address == contact_from_edit_page.address
# assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
| 52.028169 | 119 | 0.67542 |
9ad1371d592dd9a07aabbaf79a51d2d1c5de33e5 | 628 | py | Python | Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 36.941176 | 96 | 0.644904 |
9ad242baf7204452ac38c08eb06958775483a1b5 | 1,790 | py | Python | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | 1 | 2016-10-23T19:45:12.000Z | 2016-10-23T19:45:12.000Z | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | null | null | null | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Writing Our First Classifier - Machine Learning Recipes #5
#https://www.youtube.com/watch?v=AoeEHqVSNOw&list=PLOU2XLYxmsIIuiBfYad6rFYQU_jL2ryal&index=1
from scipy.spatial import distance
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import numpy as np
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .5)
# from sklearn.neighbors import KNeighborsClassifier
my_classifier = ScrappyKNN()
my_classifier_sklearn = KNeighborsClassifier()
accuracies = []
for i in range (0,1000):
my_classifier.fit(X_train, y_train)
predictions = my_classifier.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
accuracies.append(accuracy)
print 'ScrappyKNN accuracy mean:', np.mean(accuracies)
accuracies = []
for i in range (0,1000):
my_classifier_sklearn.fit(X_train, y_train)
predictions = my_classifier_sklearn.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
accuracies.append(accuracy)
print 'sklearn accuracy mean:', np.mean(accuracies) | 24.189189 | 92 | 0.754749 |
9ad3c6eb1d3fc248c366e0859044b8671327d992 | 2,323 | py | Python | process_frames.py | w-garcia/video-caption.pytorch | ef3766b093815b7cfd48d29b2af880c05b45ddbe | [
"MIT"
] | 4 | 2019-03-27T11:37:44.000Z | 2021-01-07T02:10:46.000Z | process_frames.py | w-garcia/video-caption.pytorch | ef3766b093815b7cfd48d29b2af880c05b45ddbe | [
"MIT"
] | 2 | 2019-07-11T20:34:19.000Z | 2019-08-19T13:21:52.000Z | process_frames.py | w-garcia/video-caption.pytorch | ef3766b093815b7cfd48d29b2af880c05b45ddbe | [
"MIT"
] | 3 | 2020-02-12T02:31:58.000Z | 2021-02-07T06:17:48.000Z | """
Re-tooled version of the script found on VideoToTextDNN:
https://github.com/OSUPCVLab/VideoToTextDNN/blob/master/data/process_frames.py
"""
import sys
import os
import argparse
import time
from multiprocessing import Pool
if __name__=='__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'src_dir',
help='directory where videos are'
)
arg_parser.add_argument(
'dst_dir',
help='directory where to store frames'
)
arg_parser.add_argument(
'start',
help='start index (inclusive)'
)
arg_parser.add_argument(
'end',
help='end index (noninclusive)'
)
arg_parser.add_argument(
'--prepend',
default='',
help='optional prepend to start of ffmpeg command (in case you want to use a non-system wide version of ffmpeg)'
'For example: --prepend ~/anaconda2/bin/ will use ffmpeg installed in anaconda2'
)
if not len(sys.argv) > 1:
print(arg_parser.print_help())
sys.exit(0)
args = arg_parser.parse_args()
start_time = time.time()
main(args)
print("Job took %s mins" % ((time.time() - start_time)/60))
| 27.329412 | 145 | 0.635385 |
9ad3d0b300ea5b2d36712d2ed1f19a77b925f25f | 383 | py | Python | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | null | null | null | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | null | null | null | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | 2 | 2021-04-23T08:24:08.000Z | 2022-03-01T06:56:33.000Z | from django.contrib.auth.hashers import get_hashers_by_algorithm
from django.core import checks
| 34.818182 | 83 | 0.744125 |
9ad5dd0d9bd8fbcbf6eef199aef2d2ca49925d18 | 9,340 | py | Python | code/preprocess/data_generation.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | null | null | null | code/preprocess/data_generation.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | null | null | null | code/preprocess/data_generation.py | hms-dbmi/VarPPUD | 316a45f33c12dfecadb17fa41b699ef95096a623 | [
"Apache-2.0"
] | 1 | 2022-01-18T17:14:31.000Z | 2022-01-18T17:14:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 17:19:39 2021
@author: rayin
"""
import os, sys
import numpy as np
import pandas as pd
import torch
import warnings
import random
import torchvision.models as models
from sdv.tabular import CTGAN
from sdv.evaluation import evaluate
from sdv.metrics.tabular import CSTest, KSTest
from sdv.metrics.tabular import MulticlassDecisionTreeClassifier
from sdv.metrics.tabular import LogisticDetection, SVCDetection
from ctgan import CTGANSynthesizer
from feature_data_imputation import data_imputation
from sdv.constraints import GreaterThan
warnings.filterwarnings("ignore")
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work/")
feature = pd.read_csv('data/feature/feature.csv', index_col=0)
feature_imputation = data_imputation(feature, 'MICE')
case_gene_update = pd.read_csv('data/processed/variant_clean.csv', index_col=0)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('pathogenic', 1, inplace=True)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('less_pathogenic', 0, inplace=True)
label = case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].reset_index()
label = label['\\12_Candidate variants\\03 Interpretation\\']
#Generating synthetic data based on raw data with/without imputation respectively
real_data_raw = pd.concat([feature, label], axis=1)
real_data_impu = pd.concat([feature_imputation, label], axis=1)
real_data_raw = real_data_raw.rename(columns={"\\12_Candidate variants\\03 Interpretation\\": "label"})
real_data_impu = real_data_impu.rename(columns={"\\12_Candidate variants\\03 Interpretation\\": "label"})
#splitting for imputation real data
feature_real_impu = real_data_impu[real_data_impu.columns[0:-1]]
label_real_impu = real_data_impu[real_data_impu.columns[-1]]
real_data_impu_zero = real_data_impu.loc[real_data_impu[real_data_impu.columns[-1]] == 0]
real_data_impu_one = real_data_impu.loc[real_data_impu[real_data_impu.columns[-1]] == 1]
#splitting for raw real data
feature_real_raw = real_data_raw[real_data_raw.columns[0:-1]]
label_real_raw = real_data_raw[real_data_raw.columns[-1]]
real_data_raw_zero = real_data_raw.loc[real_data_raw[real_data_raw.columns[-1]] == 0]
real_data_raw_one = real_data_raw.loc[real_data_raw[real_data_raw.columns[-1]] == 1]
#############################################################################################################################
#ctgan based on sdv
range_min = pd.DataFrame(index=range(0,500), columns=['range_min'])
range_min = range_min.fillna(0)
range_max = pd.DataFrame(index=range(0,500), columns=['range_max'])
range_max = range_max.fillna(1)
real_data_raw = pd.concat([real_data_raw, range_min.iloc[0:474], range_max.iloc[0:474]], axis=1)
real_data_raw_zero = pd.concat([real_data_raw_zero.reset_index(), range_min.iloc[0:252], range_max.iloc[0:252]], axis=1)
real_data_raw_zero.drop(['index'], axis=1, inplace=True)
real_data_raw_one = pd.concat([real_data_raw_one.reset_index(), range_min.iloc[0:222], range_max.iloc[0:222]], axis=1)
real_data_raw_one.drop(['index'], axis=1, inplace=True)
field_transformers = {'evolutionary age': 'float',
'dN/dS': 'float',
'gene essentiality': 'one_hot_encoding',
'number of chem interaction action': 'one_hot_encoding',
'number of chem interaction': 'one_hot_encoding',
'number of chem': 'one_hot_encoding',
'number of pathway': 'one_hot_encoding',
'number of phenotype': 'one_hot_encoding',
'number of rare diseases': 'one_hot_encoding',
'number of total diseases': 'one_hot_encoding',
'phylogenetic number': 'one_hot_encoding',
'net charge value diff': 'one_hot_encoding',
'secondary structure value diff': 'one_hot_encoding',
'number of hydrogen bond value diff': 'one_hot_encoding',
'number of vertices value diff': 'one_hot_encoding',
'number of edges value diff': 'one_hot_encoding',
'diameter value diff': 'one_hot_encoding'}
#constraints settings for GAN
rare_total_disease_constraint = GreaterThan(
low='number of rare diseases',
high='number of total diseases',
handling_strategy='reject_sampling')
evolutionary_age_constraint = GreaterThan(
low = 'range_max',
high = 'evolutionary age',
handling_strategy='reject_sampling')
dnds_constraint = GreaterThan(
low = 'range_min',
high = 'dN/dS',
handling_strategy='reject_sampling')
gene_haplo_min_constraint = GreaterThan(
low = 'range_min',
high = 'haploinsufficiency',
handling_strategy='reject_sampling')
gene_haplo_max_constraint = GreaterThan(
low = 'haploinsufficiency',
high = 'range_max',
handling_strategy='reject_sampling')
fathmm_min_constraint = GreaterThan(
low = 'range_min',
high = 'FATHMM',
handling_strategy='reject_sampling')
fathmm_max_constraint = GreaterThan(
low = 'FATHMM',
high = 'range_max',
handling_strategy='reject_sampling')
vest_min_constraint = GreaterThan(
low = 'range_min',
high = 'VEST',
handling_strategy='reject_sampling')
vest_max_constraint = GreaterThan(
low = 'VEST',
high = 'range_max',
handling_strategy='reject_sampling')
proven_constraint = GreaterThan(
low = 'PROVEN',
high = 'range_min',
handling_strategy='reject_sampling')
sift_min_constraint = GreaterThan(
low = 'range_min',
high = 'SIFT',
handling_strategy='reject_sampling')
sift_max_constraint = GreaterThan(
low = 'SIFT',
high = 'range_max',
handling_strategy='reject_sampling')
constraints = [rare_total_disease_constraint, evolutionary_age_constraint, dnds_constraint, gene_haplo_min_constraint,
gene_haplo_max_constraint, fathmm_min_constraint, fathmm_max_constraint, vest_min_constraint,
vest_max_constraint, proven_constraint, sift_min_constraint, sift_max_constraint]
#build the model
model = CTGAN(epochs=300, batch_size=100, field_transformers=field_transformers, constraints=constraints) #field_distributions=field_distributions
# #Mode 1: generate all samples together (not work well)
# #generate all labels data
# model.fit(real_data_raw)
# sample = model.sample(500)
# sample.drop(['range_min', 'range_max'], axis=1, inplace=True)
# feature_syn_raw = sample[sample.columns[0:-1]]
# label_syn_raw = sample[sample.columns[-1]]
# feature_syn_raw = data_imputation(feature_syn_raw, 'MICE')
# ss = ShuffleSplit(n_splits=3, test_size=0.33, random_state=0)
# for train_index, test_index in ss.split(real_data_raw):
# train_x = feature_real_impu.iloc[train_index]
# train_y = label_real_impu.iloc[train_index]
# test_x = feature_real_impu.iloc[test_index]
# test_y = label_real_impu.iloc[test_index]
# feature_combine, label_combine = merge_data(train_x, train_y, feature_syn_raw, label_syn_raw)
# rf_baseline(feature_combine, label_combine, test_x, test_y)
# #xgb_baseline(feature_syn_raw, label_syn_raw, test_x, test_y)
#Mode 2: negative and positive resampling, respectievly
#generate label '0' data of 50000 cases
real_data_raw_zero.drop(['label'], axis=1, inplace=True)
model.fit(real_data_raw_zero) #model fitting
sample_zero = model.sample(50000) #generate samples with label '0'
sample_zero.drop(['range_min', 'range_max'], axis=1, inplace=True) #drop 'range_min' and 'range_max' columns
sample_zero['label'] = 0 #add the labels
#generate label '1' data of 50000 cases
real_data_raw_one.drop(['label'], axis=1, inplace=True)
model.fit(real_data_raw_one)
sample_one = model.sample(50000)
sample_one.drop(['range_min', 'range_max'], axis=1, inplace=True)
sample_one['label'] = 1
#concatenate positive and negative synthetic samples
sample_all = pd.concat([sample_zero, sample_one], axis=0)
#sample_all.to_csv('data/synthetic/syn_data_raw.csv')
#remove samples with 'NA' in any of the columns
sample_syn = sample_all.dropna(axis=0,how='any')
#sample_syn.to_csv('data/synthetic/syn_test_raw.csv')
#select 500 synthetic test samples that keeps the similar size of raw data
syn_test_raw = pd.read_csv('data/synthetic/syn_test_raw.csv', index_col=0)
syn_test_raw = syn_test_raw.sample(frac=1)
flag0 = 0
flag1= 0
count_zero = 0
count_one = 0
syn_test_data = []
for i in range(0, len(syn_test_raw)):
if syn_test_raw['label'].iloc[i] == int(0):
if count_zero == 250:
flag0 = 1
else:
count_zero = count_zero + 1
syn_test_data.append(syn_test_raw.iloc[i])
elif syn_test_raw['label'].iloc[i] == int(1):
if count_one == 250:
flag1 = 1
else:
count_one = count_one + 1
syn_test_data.append(syn_test_raw.iloc[i])
if flag0 == 1 and flag1 == 1:
break;
syn_test_data = pd.DataFrame(syn_test_data)
syn_test_data['label'] = syn_test_data['label'].astype(int)
syn_test_data.reset_index(inplace=True)
syn_test_data = syn_test_data[syn_test_data.columns[1:40]]
#export synthetic data for external evaluation
syn_test_data.to_csv('data/synthetic/syn_test.csv')
| 37.51004 | 147 | 0.713169 |
9ad633a8b545c9fd60433dd7e1485b51abf58bfc | 1,265 | py | Python | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | 3 | 2019-08-06T19:04:32.000Z | 2022-01-19T14:00:12.000Z | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | 6 | 2018-10-14T21:32:58.000Z | 2021-03-20T00:07:56.000Z | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | null | null | null | from app.extensions import db
from flask import current_app
| 38.333333 | 147 | 0.714625 |
9ad63695127b031d5978acb9042f9c3b9cb8c5de | 1,240 | py | Python | output/models/boeing_data/ipo4/ipo_xsd/address.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/boeing_data/ipo4/ipo_xsd/address.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/boeing_data/ipo4/ipo_xsd/address.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from output.models.boeing_data.ipo4.ipo_xsd.ipo import AddressType
__NAMESPACE__ = "http://www.example.com/IPO"
| 20 | 66 | 0.504032 |
9ad672b90b5e5960648f597358159ab9f9c375ec | 5,060 | py | Python | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | # Jared Dyreson
# CPSC 386-01
# 2021-11-29
# jareddyreson@csu.fullerton.edu
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains the Intro display class
"""
import pygame
import functools
import sys
import pathlib
import typing
import os
import dataclasses
import random
from pprint import pprint as pp
import time
from Invaders.Dataclasses.point import Point
from Invaders.Displays.display import Display
from Invaders.UI.button import Button
# from Invaders.Entities.cacodemon import Cacodemon
# from Invaders.Entities.Entity import Entity
from Invaders.Entities.enemy_matrix import EnemyMatrix
# from Invaders.Entities.Player import Player
from Invaders.Entities.Entity import Entity
from Invaders.Dataclasses.direction import Direction
# TODO : move this to its own respective module or something like that
def absolute_file_paths(directory: pathlib.Path) -> typing.List[pathlib.Path]:
"""
List the contents of a directory with their absolute path
@param directory: path where to look
@return: typing.List[pathlib.Path]
"""
return [
pathlib.Path(os.path.abspath(os.path.join(dirpath, f)))
for dirpath, _, filenames in os.walk(directory)
for f in filenames
]
| 32.025316 | 86 | 0.594862 |
9ad73e40610067893659f1466d9493e1d1fdb576 | 49 | py | Python | ledger/checkout/models.py | jawaidm/ledger | 7094f3320d6a409a2a0080e70fa7c2b9dba4a715 | [
"Apache-2.0"
] | 59 | 2015-08-29T10:51:34.000Z | 2021-11-03T10:00:25.000Z | ledger/checkout/models.py | jawaidm/ledger | 7094f3320d6a409a2a0080e70fa7c2b9dba4a715 | [
"Apache-2.0"
] | 162 | 2018-02-16T05:13:03.000Z | 2021-05-14T02:47:37.000Z | ledger/checkout/models.py | jawaidm/ledger | 7094f3320d6a409a2a0080e70fa7c2b9dba4a715 | [
"Apache-2.0"
] | 22 | 2015-08-10T10:46:18.000Z | 2020-04-04T07:11:55.000Z | from oscar.apps.checkout.models import * # noqa
| 24.5 | 48 | 0.755102 |
9ad97cd25d6ffe7ca83c1fced680d4dc39e56290 | 1,642 | py | Python | api/serializers.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | api/serializers.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | api/serializers.py | mariomtzjr/podemos_test | 5efaf02a19aa8c4849e3ad0108546e95af524126 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from apps.calendarioPago.models import CalendarioPago
from apps.cliente.models import Cliente
from apps.cuenta.models import Cuenta
from apps.grupo.models import Grupo
from apps.miembro.models import Miembro
from apps.transaccion.models import Transaccion
# Serializers define the API representation.
| 26.483871 | 84 | 0.694275 |
9ada5e1bb0d72f096389f3d35f059bd13ec5be47 | 8,194 | py | Python | emmet/markup/format/html.py | emmetio/py-emmet | 9cbb42f482526d7df18ba632b3b3f2ed3b7653a5 | [
"MIT"
] | 29 | 2019-11-12T16:15:15.000Z | 2022-02-06T10:51:25.000Z | emmet/markup/format/html.py | emmetio/py-emmet | 9cbb42f482526d7df18ba632b3b3f2ed3b7653a5 | [
"MIT"
] | 3 | 2020-04-25T11:02:53.000Z | 2021-11-25T10:39:09.000Z | emmet/markup/format/html.py | emmetio/py-emmet | 9cbb42f482526d7df18ba632b3b3f2ed3b7653a5 | [
"MIT"
] | 7 | 2020-04-25T09:42:54.000Z | 2021-02-16T20:29:41.000Z | import re
from .walk import walk, WalkState
from .utils import caret, is_inline_element, is_snippet, push_tokens, should_output_attribute
from .comment import comment_node_before, comment_node_after, CommentWalkState
from ...abbreviation import Abbreviation, AbbreviationNode, AbbreviationAttribute
from ...abbreviation.tokenizer.tokens import Field
from ...config import Config
from ...output_stream import tag_name, self_close, attr_name, is_boolean_attribute, attr_quote, is_inline
from ...list_utils import some, find_index, get_item
re_html_tag = re.compile(r'<([\w\-:]+)[\s>]')
def push_attribute(attr: AbbreviationAttribute, state: WalkState):
    """Output the given attribute's content into the output stream.

    Emits ` name`, then (depending on the attribute's value and config)
    `=<quote>value<quote>`, an empty quoted value, or nothing after the name.
    Attributes without a name produce no output at all.
    """
    out = state.out
    config = state.config
    if attr.name:
        name = attr_name(attr.name, config)
        l_quote = attr_quote(attr, config, True)
        r_quote = attr_quote(attr, config, False)
        value = attr.value
        if is_boolean_attribute(attr, config) and not value:
            # If attribute value is omitted and its a boolean value, check for
            # `compactBoolean` option: if its disabled, set value to attribute name
            # (XML style)
            if not config.options.get('output.compactBoolean'):
                value = [name]
        elif not value:
            # Non-boolean attribute with no value: fall back to the `caret`
            # placeholder value imported from .utils
            value = caret
        out.push_string(' %s' % name)
        if value:
            out.push_string('=%s' % l_quote)
            push_tokens(value, state)
            out.push_string(r_quote)
        elif config.options.get('output.selfClosingStyle') != 'html':
            # Non-HTML (XML-ish) self-closing style: emit an explicit empty
            # quoted value; plain HTML style leaves the bare attribute name
            out.push_string('=%s%s' % (l_quote, r_quote))
def should_format(node: AbbreviationNode, index: int, items: list, state: WalkState):
    """Check if given node should be formatted (placed on its own line)
    in its parent context.

    :param node: node being rendered
    :param index: position of *node* inside *items*
    :param items: all siblings of *node*, including *node* itself
    :param state: current walk state (parent node + resolved config)
    """
    parent = state.parent
    config = state.config
    # Formatting globally disabled via options
    if not config.options.get('output.format'):
        return False
    if index == 0 and not parent:
        # Do not format very first node
        return False
    # Do not format single child of snippet
    if parent and is_snippet(parent) and len(items) == 1:
        return False
    if is_snippet(node):
        # Adjacent text-only/snippet nodes
        fmt = is_snippet(get_item(items, index - 1)) or is_snippet(get_item(items, index + 1)) or \
            some(has_newline, node.value) or \
            (some(is_field, node.value) and node.children)
        if fmt:
            return True
    if is_inline(node, config):
        # Check if inline node is the next sibling of block-level node
        if index == 0:
            # First node in parent: format if its followed by a block-level element
            for item in items:
                if not is_inline(item, config):
                    return True
        elif not is_inline(items[index - 1], config):
            # Node is right after block-level element
            return True
        if config.options.get('output.inlineBreak'):
            # check for adjacent inline elements before and after current element
            adjacent_inline = 1
            before = index - 1
            after = index + 1
            # Count the contiguous run of inline siblings around this node
            while before >= 0 and is_inline_element(items[before], config):
                adjacent_inline += 1
                before -= 1
            while after < len(items) and is_inline_element(items[after], config):
                adjacent_inline += 1
                after += 1
            # Break the run onto separate lines once it reaches the threshold
            if adjacent_inline >= config.options.get('output.inlineBreak'):
                return True
        # Edge case: inline node contains node that should receive formatting
        for i, child in enumerate(node.children):
            if should_format(child, i, node.children, state):
                return True
        return False
    # Non-inline (block-level) nodes are formatted by default
    return True
def get_indent(state: WalkState):
    """Return the indentation offset (0 or 1) contributed by the current parent."""
    parent = state.parent
    if not parent:
        return 0
    if is_snippet(parent):
        return 0
    skip_names = state.config.options.get('output.formatSkip')
    if parent.name and parent.name in skip_names:
        return 0
    return 1
def has_newline(value):
    """Return True when *value* is a string containing a CR or LF character."""
    if not isinstance(value, str):
        return False
    return '\r' in value or '\n' in value
def starts_with_block_tag(value: list, config: Config) -> bool:
    """Check whether the node value opens with a block-level HTML tag."""
    if not value or not isinstance(value[0], str):
        return False
    match = re_html_tag.match(value[0])
    # A tag counts as block-level when it is not in the inline-elements set.
    return bool(match) and match.group(1).lower() not in config.options.get('inlineElements')
| 34.868085 | 122 | 0.611667 |
9adc3fed9b6a076b0f178e8d91edfcd0fe2b0e5f | 2,584 | py | Python | secant_method.py | FixingMind5/proyecto_metodos_I | 4eaed1991ad18574984bcc0010394ecb9c4a620e | [
"MIT"
] | null | null | null | secant_method.py | FixingMind5/proyecto_metodos_I | 4eaed1991ad18574984bcc0010394ecb9c4a620e | [
"MIT"
] | null | null | null | secant_method.py | FixingMind5/proyecto_metodos_I | 4eaed1991ad18574984bcc0010394ecb9c4a620e | [
"MIT"
] | null | null | null | """Secant Method module"""
from numeric_method import NumericMethod
| 32.708861 | 101 | 0.540635 |
9add394027ddb25c4a3c822d581f2bbeacc67447 | 245 | py | Python | variables.py | bestend/korquad | 3b92fffcc950ff584e0f9755ea9b04f8bece7a31 | [
"MIT"
] | 1 | 2019-09-06T04:47:14.000Z | 2019-09-06T04:47:14.000Z | variables.py | bestend/korquad | 3b92fffcc950ff584e0f9755ea9b04f8bece7a31 | [
"MIT"
] | 6 | 2020-01-28T22:12:50.000Z | 2022-02-09T23:30:45.000Z | variables.py | bestend/korquad | 3b92fffcc950ff584e0f9755ea9b04f8bece7a31 | [
"MIT"
] | null | null | null | import os
import re
# Checkpoint filename template — epoch number and validation loss are
# interpolated (presumably by a Keras-style ModelCheckpoint callback; TODO confirm).
MODEL_FILE_FORMAT = 'weights.{epoch:02d}-{val_loss:.2f}.h5'
# Matches filenames produced by MODEL_FILE_FORMAT and captures the epoch
# number in group 1.
MODEL_REGEX_PATTERN = re.compile(r'^.*weights\.(\d+)\-\d+\.\d+\.h5$')
# Fixed filename under which the most recent model is saved.
LAST_MODEL_FILE_FORMAT = 'last.h5'
TEAMS_WEBHOOK_URL = os.environ.get('TEAMS_WEBHOOK_URL', '') | 35 | 69 | 0.714286 |
9ade61531561b4025a09449d1265b8472b175b17 | 977 | py | Python | svm.py | sciencementors2019/Image-Processer | a1b036f38166722d2bb0ee44de1f3558880312c5 | [
"MIT"
] | null | null | null | svm.py | sciencementors2019/Image-Processer | a1b036f38166722d2bb0ee44de1f3558880312c5 | [
"MIT"
] | null | null | null | svm.py | sciencementors2019/Image-Processer | a1b036f38166722d2bb0ee44de1f3558880312c5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn import svm
from mlxtend.plotting import plot_decision_regions
import matplotlib.pyplot as plt
# Create arbitrary dataset for example: two uniform features and a
# three-class integer label.
df = pd.DataFrame({'Planned_End': np.random.uniform(low=-5, high=5, size=50),
                   'Actual_End': np.random.uniform(low=-1, high=1, size=50),
                   # Bug fix: np.random.random_integers was deprecated in
                   # NumPy 1.11 and removed in modern releases. randint with
                   # an exclusive high bound of 3 draws the same values {0, 1, 2}.
                   'Late': np.random.randint(low=0, high=3, size=50)}
                  )
# Fit Support Vector Machine Classifier (one-vs-one multiclass strategy)
X = df[['Planned_End', 'Actual_End']]
y = df['Late']
clf = svm.SVC(decision_function_shape='ovo')
clf.fit(X.values, y.values)
# Plot Decision Region using mlxtend's awesome plotting function
plot_decision_regions(X=X.values,
                      y=y.values,
                      clf=clf,
                      legend=2)
# Update plot object with X/Y axis labels and Figure Title
plt.xlabel(X.columns[0], size=14)
plt.ylabel(X.columns[1], size=14)
plt.title('SVM Decision Region Boundary', size=16) | 32.566667 | 85 | 0.663255 |
9ae1bc0d9c8249afc93cd2e786ee58fa70373ce4 | 2,544 | py | Python | tests/importing/test_read_genes.py | EKingma/Transposonmapper | 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b | [
"Apache-2.0"
] | 2 | 2021-11-23T09:39:35.000Z | 2022-01-25T15:49:45.000Z | tests/importing/test_read_genes.py | EKingma/Transposonmapper | 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b | [
"Apache-2.0"
] | 76 | 2021-07-07T18:31:44.000Z | 2022-03-22T10:04:40.000Z | tests/importing/test_read_genes.py | EKingma/Transposonmapper | 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b | [
"Apache-2.0"
] | 2 | 2021-09-16T10:56:20.000Z | 2022-01-25T12:33:25.000Z |
from transposonmapper.importing import (
load_default_files,read_genes
)
| 39.138462 | 107 | 0.717374 |
9ae33df6172e3d387be468447aa95067143972f3 | 4,477 | py | Python | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | 1 | 2018-04-24T09:55:40.000Z | 2018-04-24T09:55:40.000Z | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | null | null | null | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | 1 | 2020-11-25T08:53:49.000Z | 2020-11-25T08:53:49.000Z | """
Using
http://thejit.org/static/v20/Docs/files/Options/Options-Canvas-js.html#Options.Canvas
"""
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
import json
import os
import json
from libs.myutils.myutils import printDebug
from tractatusapp.models import *
def spacetree(request):
    """
    Visualizes a space tree - ORIGINAL VIEW (USED TO GENERATE HTML VERSION)

    Builds the Tractatus sentence tree as a nested dict (rooted at a
    synthetic "TLP" node), serializes it to JSON and renders it with the
    JavaScript InfoVis Toolkit spacetree template.
    """
    # Cleanup: a large hard-coded sample payload (`to_json`) and an unused
    # `TESTING` flag were removed here — both were dead code left over from
    # early testing of this view.
    # Reconstruct the tree as a nested dictionary
    treeroot = {'id': "root", 'name': "TLP", 'children': [],
                'data': {'preview_ogden': "root node", 'full_ogden': generate_text("root")}}
    # level0 = TextUnit.tree.root_nodes()
    # TODO - make this a mptt tree function
    level0 = TextUnit.tree_top()
    for x in level0:
        treeroot['children'] += [nav_tree(x)]
    context = {
        'json': json.dumps(treeroot),
        'experiment_description': """
The Space Tree Tractatus is an experimental visualization of the <br />
<a target='_blank' href="http://en.wikipedia.org/wiki/Tractatus_Logico-Philosophicus">Tractatus Logico-Philosophicus</a>, a philosophical text by Ludwig Wittgenstein.
<br /><br />
<b>Click</b> on a node to move the tree and center that node. The text contents of the node are displayed at the bottom of the page. <b>Use the mouse wheel</b> to zoom and <b>drag and drop the canvas</b> to pan.
<br /><br />
<small>Made with <a target='_blank' href="http://www.python.org/">Python</a> and the <a target='_blank' href="http://thejit.org/">JavaScript InfoVis Toolkit</a>. More info on this <a href="http://www.michelepasin.org/blog/2012/07/08/wittgenstein-and-the-javascript-infovis-toolkit/">blog post</a></small>
        """
    }
    return render(request,
                  'tractatusapp/spacetree/spacetree.html',
                  context)
def generate_text(instance, expression="ogden"):
    """ creates the html needed for the full text representation of the tractatus

    Returns a sentence's number heading flanked by small prev/next
    navigation links, followed by the sentence text (Ogden translation).
    The special string "root" yields a static title/credits block instead.

    # TODO: add cases for different expressions (`expression` is currently unused)
    """
    if instance == "root":
        return """<div class='tnum'>Tractatus Logico-Philosophicus<span class='smalllinks'></small></div>
<div>Ludwig Wittgenstein, 1921.<br />
Translated from the German by C.K. Ogden in 1922<br />
Original title: Logisch-Philosophische Abhandlung, Wilhelm Ostwald (ed.), Annalen der Naturphilosophie, 14 (1921)</div>
"""
    else:
        # NOTE: `next` and `prev` shadow Python builtins; kept unchanged here.
        next, prev = "", ""
        next_satz = instance.tractatus_next()
        prev_satz = instance.tractatus_prev()
        if next_satz:
            next = "<a title='Next Sentence' href='javascript:focus_node(%s);'>→ %s</a>" % (next_satz.name, next_satz.name)
        if prev_satz:
            prev = "<a title='Previous Sentence' href='javascript:focus_node(%s);'>%s ←</a>" % (prev_satz.name, prev_satz.name)
        # HACK src images rendered via JS in the template cause WGET errors
        # hence they are hidden away in this visualization
        # TODO find a more elegant solution
        text_js_ready = instance.textOgden().replace('src="', '-src=\"src image omitted ')
        t = "<div class='tnum'><span class='smalllinks'>%s</span>%s<span class='smalllinks'>%s</span></div>%s" % (prev, instance.name, next, text_js_ready)
        return t
| 33.916667 | 309 | 0.663837 |
9ae3c34cb81d8405b95cc94d6b0a73cbfa7be42a | 14,772 | py | Python | vumi/blinkenlights/metrics_workers.py | apopheniac/vumi | e04bf32a0cf09292f03dfe8628798adff512b709 | [
"BSD-3-Clause"
] | null | null | null | vumi/blinkenlights/metrics_workers.py | apopheniac/vumi | e04bf32a0cf09292f03dfe8628798adff512b709 | [
"BSD-3-Clause"
] | null | null | null | vumi/blinkenlights/metrics_workers.py | apopheniac/vumi | e04bf32a0cf09292f03dfe8628798adff512b709 | [
"BSD-3-Clause"
] | 2 | 2018-03-05T18:01:45.000Z | 2019-11-02T19:34:18.000Z | # -*- test-case-name: vumi.blinkenlights.tests.test_metrics_workers -*-
import time
import random
import hashlib
from datetime import datetime
from twisted.python import log
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.protocol import DatagramProtocol
from vumi.service import Consumer, Publisher, Worker
from vumi.blinkenlights.metrics import (MetricsConsumer, MetricManager, Count,
Metric, Timer, Aggregator)
from vumi.blinkenlights.message20110818 import MetricMessage
| 36.384236 | 79 | 0.641822 |
9ae436efa8485153023aeda553abb0051a92e57f | 1,401 | py | Python | src/sentry/web/forms/base_organization_member.py | JannKleen/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 1 | 2019-02-27T15:13:06.000Z | 2019-02-27T15:13:06.000Z | src/sentry/web/forms/base_organization_member.py | rmax/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 5 | 2020-07-17T11:20:41.000Z | 2021-05-09T12:16:53.000Z | src/sentry/web/forms/base_organization_member.py | zaasmi/codeerrorhelp | 1ab8d3e314386b9b2d58dad9df45355bf6014ac9 | [
"BSD-3-Clause"
] | 2 | 2021-01-26T09:53:39.000Z | 2022-03-22T09:01:47.000Z | from __future__ import absolute_import
from django import forms
from django.db import transaction
from sentry.models import (
OrganizationMember,
OrganizationMemberTeam,
Team,
)
| 29.1875 | 94 | 0.68237 |
9ae66ae64bed27a4c419e21d360710c58e9c3114 | 1,589 | py | Python | turbinia/workers/fsstat.py | dfjxs/turbinia | 23a97d9d826cbcc51e6b5dfd50d85251506bf242 | [
"Apache-2.0"
] | 1 | 2021-05-31T19:44:50.000Z | 2021-05-31T19:44:50.000Z | turbinia/workers/fsstat.py | dfjxs/turbinia | 23a97d9d826cbcc51e6b5dfd50d85251506bf242 | [
"Apache-2.0"
] | null | null | null | turbinia/workers/fsstat.py | dfjxs/turbinia | 23a97d9d826cbcc51e6b5dfd50d85251506bf242 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task to run fsstat on disk partitions."""
from __future__ import unicode_literals
import os
from turbinia import TurbiniaException
from turbinia.workers import TurbiniaTask
from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
| 33.104167 | 79 | 0.733166 |
9ae7351fe81fa3901619faf1757d1f1b2dffbe49 | 401 | py | Python | app/django-doubtfire-api/endpoint/urls.py | JiatengTao/speaker-verification-api | 89c0b82c49498426c4d35104e0e4935c193a3cb1 | [
"MIT"
] | null | null | null | app/django-doubtfire-api/endpoint/urls.py | JiatengTao/speaker-verification-api | 89c0b82c49498426c4d35104e0e4935c193a3cb1 | [
"MIT"
] | null | null | null | app/django-doubtfire-api/endpoint/urls.py | JiatengTao/speaker-verification-api | 89c0b82c49498426c4d35104e0e4935c193a3cb1 | [
"MIT"
] | null | null | null | from django.urls import include, path
from django.conf.urls import url
from endpoint.views import (
enroll_user,
validate_recording,
check_redis_health,
redirect_flower_dashboard,
)
# URL routes for the endpoint app; handlers are imported from endpoint.views.
urlpatterns = [
    path("enroll", enroll_user),
    path("validate", validate_recording),
    # Reversible by name ("up").
    path("redis-healthcheck", check_redis_health, name="up"),
    path("flower", redirect_flower_dashboard),
]
| 25.0625 | 61 | 0.733167 |
9ae9da1c04d49fc47628f3418837d002feeee3c7 | 3,096 | py | Python | back/src/crud.py | Celeo/wiki_elm | 620caf74b4cc17d3ffe3231493df15e84bfcf67f | [
"MIT"
] | null | null | null | back/src/crud.py | Celeo/wiki_elm | 620caf74b4cc17d3ffe3231493df15e84bfcf67f | [
"MIT"
] | null | null | null | back/src/crud.py | Celeo/wiki_elm | 620caf74b4cc17d3ffe3231493df15e84bfcf67f | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import List, Optional
import bcrypt
from sqlalchemy.orm import Session
from . import models, schemas
def get_user(db: Session, id: int) -> models.User:
    """Fetch a single user record by its primary key.

    Args:
        db (Session): database connection
        id (int): id of the user

    Returns:
        models.User: the matching user, or None when no row exists
    """
    query = db.query(models.User)
    return query.filter(models.User.id == id).first()
def get_user_by_name(db: Session, name: str) -> models.User:
    """Fetch a single user record by its name.

    Args:
        db (Session): database connection
        name (str): name of the user

    Returns:
        models.User: the matching user, or None when no row exists
    """
    return (
        db.query(models.User)
        .filter(models.User.name == name)
        .first()
    )
def get_all_articles(db: Session) -> List[models.Article]:
    """Fetch every article in the database.

    Args:
        db (Session): database connection

    Returns:
        List[models.Article]: all stored articles
    """
    articles_query = db.query(models.Article)
    return articles_query.all()
def get_article(db: Session, id: int) -> models.Article:
    """Fetch a single article by its primary key.

    Args:
        db (Session): database connection
        id (int): id of the article

    Returns:
        models.Article: the matching article, or None when no row exists
    """
    query = db.query(models.Article)
    return query.filter(models.Article.id == id).first()
def create_user(db: Session, user: schemas.UserCreate) -> None:
    """Create a new user with a bcrypt-hashed password.

    Args:
        db (Session): database connection
        user (schemas.UserCreate): creation data
    """
    new_user = models.User(name=user.name)
    # Bug fix: bcrypt.hashpw() requires bytes, not str, so the plaintext
    # password must be encoded first. The resulting hash is decoded back to
    # str because check_user() re-encodes the stored value with
    # `.encode('UTF-8')` when verifying credentials.
    hashed = bcrypt.hashpw(user.password.encode('UTF-8'), bcrypt.gensalt())
    new_user.password = hashed.decode('UTF-8')
    db.add(new_user)
    db.commit()
def check_user(db: Session, name: str, password: str) -> Optional[models.User]:
    """Verify a user's credentials.

    Args:
        db (Session): database connection
        name (str): name of the user to check
        password (str): plaintext password to check against the stored hash

    Returns:
        Optional[models.User]: the user when the password matches, else None
    """
    candidate = get_user_by_name(db, name)
    if candidate is None:
        return None
    password_ok = bcrypt.checkpw(
        password.encode('UTF-8'), candidate.password.encode('UTF-8')
    )
    return candidate if password_ok else None
def create_article(db: Session, article: schemas.ArticleCreate, creator_id: int) -> None:
    """Persist a new article stamped with its creator and creation time.

    Args:
        db (Session): database connection
        article (schemas.ArticleCreate): creation data
        creator_id (int): user id of the creator
    """
    fields = dict(article.dict())
    fields.update(created_by=creator_id, time_created=datetime.utcnow())
    db.add(models.Article(**fields))
    db.commit()
def update_article(db: Session, article: schemas.ArticleUpdate) -> None:
    """Update an article's title and/or content.

    Only fields carrying a truthy value are applied, so passing an empty
    string leaves the stored value untouched (presumably intentional
    partial-update semantics — TODO confirm).

    Args:
        db (Session): database connection
        article (schemas.ArticleUpdate): data update data (must carry `id`)
    """
    from_db = get_article(db, article.id)
    if article.title:
        from_db.title = article.title
    if article.content:
        from_db.content = article.content
    db.commit()
| 26.016807 | 105 | 0.648256 |
9aea27159d7833c105fb4af0a9c01c188110c93d | 2,693 | py | Python | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 1 | 2021-03-12T17:42:37.000Z | 2021-03-12T17:42:37.000Z | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 10 | 2020-02-12T01:46:41.000Z | 2022-02-10T09:00:03.000Z | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 1 | 2020-04-18T15:14:47.000Z | 2020-04-18T15:14:47.000Z | from django.test import TransactionTestCase
from polymorphic.models import PolymorphicModel, PolymorphicTypeUndefined
from polymorphic.tests.models import (
Enhance_Base,
Enhance_Inherit,
Model2A,
Model2B,
Model2C,
Model2D,
)
from polymorphic.utils import (
get_base_polymorphic_model,
reset_polymorphic_ctype,
sort_by_subclass,
)
| 32.445783 | 82 | 0.671742 |
9aeae4d01c050a9274a24e3e6c5783d7fc583318 | 2,098 | py | Python | blockchain/utils.py | TheEdgeOfRage/blockchain | f75764b5a5a87337200b14d1909077c31e2dbdc1 | [
"MIT"
] | null | null | null | blockchain/utils.py | TheEdgeOfRage/blockchain | f75764b5a5a87337200b14d1909077c31e2dbdc1 | [
"MIT"
] | null | null | null | blockchain/utils.py | TheEdgeOfRage/blockchain | f75764b5a5a87337200b14d1909077c31e2dbdc1 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright 2020 <pavle.portic@tilda.center>
#
# Distributed under terms of the BSD 3-Clause license.
import hashlib
import itertools
import json
from decimal import Decimal
from multiprocessing import (
cpu_count,
Pool,
Process,
Queue
)
def valid_proof(last_proof, proof, last_hash, difficulty):
    """
    Validate a proof-of-work candidate.

    :param last_proof: <int> Previous Proof
    :param proof: <int> Current Proof
    :param last_hash: <str> The hash of the Previous Block
    :param difficulty: <int> Required number of leading zero bits
    :return: <bool> True if the SHA-256 of the concatenated inputs starts
        with `difficulty` zero bits, False otherwise.
    """
    candidate = f'{last_proof}{proof}{last_hash}'.encode()
    digest = hashlib.sha256(candidate).digest()
    bit_string = ''.join(format(byte, '08b') for byte in digest)
    return bit_string.startswith('0' * difficulty)
| 18.900901 | 69 | 0.702574 |
9aebd92051cfcf6d0045079f9f922a518fd301b8 | 5,317 | py | Python | myfunds/web/views/joint_limits/limit/views/participants.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
] | null | null | null | myfunds/web/views/joint_limits/limit/views/participants.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
] | null | null | null | myfunds/web/views/joint_limits/limit/views/participants.py | anzodev/myfunds | 9f6cda99f443cec064d15d7ff7780f297cbdfe10 | [
"MIT"
] | null | null | null | import peewee as pw
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from myfunds.core.models import Account
from myfunds.core.models import Category
from myfunds.core.models import JointLimitParticipant
from myfunds.web import auth
from myfunds.web import notify
from myfunds.web import utils
from myfunds.web.constants import FundsDirection
from myfunds.web.forms import AddJointLimitParticipantStep1Form
from myfunds.web.forms import AddJointLimitParticipantStep2Form
from myfunds.web.forms import DeleteJointLimitParticipantForm
from myfunds.web.forms import JointLimitParticipantGetStepForm
from myfunds.web.views.joint_limits.limit.views import bp
from myfunds.web.views.joint_limits.limit.views import verify_limit
| 30.912791 | 88 | 0.658454 |
9aec3cbbdf80ed6024cc8bfdc62a6afaf2fdc1c4 | 6,854 | py | Python | elyra/pipeline/component_parser_kfp.py | rachaelhouse/elyra | e2f474f26f65fd7c5ec5602f6e40a229dda0a081 | [
"Apache-2.0"
] | null | null | null | elyra/pipeline/component_parser_kfp.py | rachaelhouse/elyra | e2f474f26f65fd7c5ec5602f6e40a229dda0a081 | [
"Apache-2.0"
] | null | null | null | elyra/pipeline/component_parser_kfp.py | rachaelhouse/elyra | e2f474f26f65fd7c5ec5602f6e40a229dda0a081 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import SimpleNamespace
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import yaml
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParameter
from elyra.pipeline.component import ComponentParser
| 45.390728 | 111 | 0.579224 |
9aedf1a23d553278d5b929adc837502da68eda10 | 356 | py | Python | mayan/apps/mimetype/apps.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 2,743 | 2017-12-18T07:12:30.000Z | 2022-03-27T17:21:25.000Z | mayan/apps/mimetype/apps.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 15 | 2017-12-18T14:58:07.000Z | 2021-03-01T20:05:05.000Z | mayan/apps/mimetype/apps.py | eshbeata/open-paperless | 6b9ed1f21908116ad2795b3785b2dbd66713d66e | [
"Apache-2.0"
] | 257 | 2017-12-18T03:12:58.000Z | 2022-03-25T08:59:10.000Z | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig
from .licenses import * # NOQA
| 22.25 | 56 | 0.727528 |
9aefb8bc9120b71f8727047442cac13c02b21950 | 388 | py | Python | test/level.py | Matt-London/command-line-tutorial | 5b6afeedb4075de114e8c91756ecf3a03645fde7 | [
"MIT"
] | 1 | 2020-07-11T06:29:25.000Z | 2020-07-11T06:29:25.000Z | test/level.py | Matt-London/Command-Line-Tutorial | 5b6afeedb4075de114e8c91756ecf3a03645fde7 | [
"MIT"
] | 15 | 2020-07-10T20:01:51.000Z | 2020-08-10T05:23:47.000Z | test/level.py | Matt-London/command-line-tutorial | 5b6afeedb4075de114e8c91756ecf3a03645fde7 | [
"MIT"
] | null | null | null | from packages.levels.Level import Level
import packages.levels.levels as Levels
import packages.resources.functions as function
import packages.resources.variables as var
from packages.filesystem.Directory import Directory
from packages.filesystem.File import File
# Pre-set the recorded shell history. Note ("Check") is a plain string, not
# a tuple, despite the parentheses — presumably read back by Level.check().
var.bash_history = ("Check")
# Exercise the Level API; constructor args appear to be (instructions, help,
# expected check value) — TODO confirm against packages.levels.Level.
test = Level("Instruct", "Help", ("Check"))
test.instruct()
test.help()
print(test.check()) | 27.714286 | 51 | 0.796392 |
9af07d32c8be1202f3730dbd2847cb3a451513ad | 1,235 | py | Python | tests/test_buffers.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | [
"MIT"
] | 23 | 2020-07-29T14:44:29.000Z | 2022-01-07T05:29:16.000Z | tests/test_buffers.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | [
"MIT"
] | 14 | 2020-09-09T02:38:24.000Z | 2022-03-04T05:19:25.000Z | tests/test_buffers.py | TheCharmingCthulhu/cython-vst-loader | 2d2d358515f24f4846ca664e5a9b366a207207a6 | [
"MIT"
] | 2 | 2021-06-05T23:30:08.000Z | 2021-06-06T19:58:59.000Z | # noinspection PyUnresolvedReferences
import unittest
from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, \
free_buffer, \
allocate_double_buffer, get_double_buffer_as_list
| 36.323529 | 99 | 0.688259 |
9af148fc623927e65f3f0abe332698d9eddb80f8 | 1,520 | py | Python | samples/17.multilingual-bot/translation/microsoft_translator.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | samples/17.multilingual-bot/translation/microsoft_translator.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | samples/17.multilingual-bot/translation/microsoft_translator.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import uuid
import requests
| 40 | 82 | 0.678947 |
9af29a94a64ce15c2f18ac01d5658596e67aa248 | 48 | py | Python | dachar/utils/__init__.py | roocs/dachar | 687b6acb535f634791d13a435cded5f97cae8e76 | [
"BSD-3-Clause"
] | 2 | 2020-05-01T11:17:06.000Z | 2020-11-23T10:37:24.000Z | dachar/utils/__init__.py | roocs/dachar | 687b6acb535f634791d13a435cded5f97cae8e76 | [
"BSD-3-Clause"
] | 69 | 2020-03-26T15:39:26.000Z | 2022-01-14T14:34:39.000Z | dachar/utils/__init__.py | roocs/dachar | 687b6acb535f634791d13a435cded5f97cae8e76 | [
"BSD-3-Clause"
] | null | null | null | from .common import *
from .json_store import *
| 16 | 25 | 0.75 |
9af36b234d70f262e1618ab3933e4d7b9aedd9f4 | 2,760 | py | Python | scraper/models.py | mrcnc/assessor-scraper | b502ebb157048d20294ca44ab0d30e3a44d86c08 | [
"MIT"
] | null | null | null | scraper/models.py | mrcnc/assessor-scraper | b502ebb157048d20294ca44ab0d30e3a44d86c08 | [
"MIT"
] | null | null | null | scraper/models.py | mrcnc/assessor-scraper | b502ebb157048d20294ca44ab0d30e3a44d86c08 | [
"MIT"
] | 1 | 2019-02-14T04:01:40.000Z | 2019-02-14T04:01:40.000Z | # -*- coding: utf-8 -*-
import os
import logging
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.engine.url import URL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from scraper import settings
Base = declarative_base()
def db_connect():
    """
    Create and return a SQLAlchemy engine instance.

    Prefers a complete connection string from the DATABASE_URL environment
    variable; otherwise builds a URL object from the settings.DATABASE dict.
    """
    if 'DATABASE_URL' in os.environ:
        DATABASE_URL = os.environ['DATABASE_URL']
        # Bug fix: previously this logged the imported sqlalchemy `URL`
        # class instead of the actual connection string.
        # NOTE: connection strings may embed credentials; keep at DEBUG level.
        logging.debug("Connecting to %s", DATABASE_URL)
    else:
        DATABASE_URL = URL(**settings.DATABASE)
        logging.debug("Connecting with settings %s", DATABASE_URL)
    return create_engine(DATABASE_URL)
| 29.677419 | 73 | 0.721014 |
9af3a835ffd32ad662ca751cd48d5f535bf94f5d | 487 | py | Python | WeIrD-StRiNg-CaSe.py | lovefov/Python | ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8 | [
"MIT"
] | null | null | null | WeIrD-StRiNg-CaSe.py | lovefov/Python | ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8 | [
"MIT"
] | null | null | null | WeIrD-StRiNg-CaSe.py | lovefov/Python | ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8 | [
"MIT"
] | 1 | 2021-02-08T08:48:44.000Z | 2021-02-08T08:48:44.000Z |
'''
def to_weird_case(string):
recase = lambda s: "".join([c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s)])
return " ".join([recase(word) for word in string.split(" ")])
''' | 23.190476 | 97 | 0.521561 |
9af63c97cc5b9b0bb2ddfde6ccac394409cbd012 | 1,573 | py | Python | FTP_client/LHYlearning/Entry.py | welles2000/CCNProject | 0f20718aa171571a952343d7a07c2f1c0f953a6e | [
"MulanPSL-1.0"
] | 2 | 2022-03-29T05:43:09.000Z | 2022-03-29T14:29:46.000Z | FTP_client/LHYlearning/Entry.py | welles2000/CCNProject | 0f20718aa171571a952343d7a07c2f1c0f953a6e | [
"MulanPSL-1.0"
] | null | null | null | FTP_client/LHYlearning/Entry.py | welles2000/CCNProject | 0f20718aa171571a952343d7a07c2f1c0f953a6e | [
"MulanPSL-1.0"
] | null | null | null | # GUI
from tkinter import *
from tkinter import messagebox
if __name__ == '__main__':
    # Build the main Tk window and hand it to the Application frame.
    root = Tk()
    root.geometry("1280x720+200+300")  # 1280x720 window at offset (200, 300)
    root.title("")
    # NOTE(review): `Application` is not visible in this chunk — confirm it
    # is defined/imported earlier in the module.
    app = Application(master=root)
    root.mainloop()
| 24.2 | 75 | 0.577241 |
9af728f0342a41c7e42c05bfe4ce250d82a4e42b | 839 | py | Python | curso-em-video/ex054.py | joseluizbrits/sobre-python | 316143c341e5a44070a3b13877419082774bd730 | [
"MIT"
] | null | null | null | curso-em-video/ex054.py | joseluizbrits/sobre-python | 316143c341e5a44070a3b13877419082774bd730 | [
"MIT"
] | null | null | null | curso-em-video/ex054.py | joseluizbrits/sobre-python | 316143c341e5a44070a3b13877419082774bd730 | [
"MIT"
] | null | null | null | # Grupo da Maioridade
'''Write a program that reads the BIRTH YEAR of SEVEN
PEOPLE. At the end, show how many people have not yet
reached adulthood (under 21) and how many already have.'''
from datetime import date
anoatual = date.today().year  # current year as configured on the machine
totalmaior = 0  # count of people aged 21 or over
totalmenor = 0  # count of people under 21
for pessoas in range(1, 8):  # seven people
    anonasc = int(input('Digite o ano de nascimento da {} pessoa: '.format(pessoas)))
    # Sanity-check the year before classifying the person
    if 1900 < anonasc < anoatual:
        idade = anoatual - anonasc
        if idade >= 21:
            totalmaior += 1
        else:
            totalmenor += 1
    else:
        # Invalid year: print an error in red (ANSI escape) and move on
        print('\033[31m''Ocorreu um erro no ano em que voc digitou! Tente novamente.')
print('H {} pessoas neste grupo que esto na maioridade'.format(totalmaior))
print('E h {} pessoas que ainda so menor de idade'.format(totalmenor))
| 38.136364 | 87 | 0.682956 |
9af8cf4aed2f78a490c8a32e60b1aabe24f15e72 | 2,160 | py | Python | stellar/simulation/data.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | stellar/simulation/data.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | stellar/simulation/data.py | strfx/stellar | 41b190eed016d2d6ad8548490a0c9620a02d711e | [
"MIT"
] | null | null | null | from typing import Tuple
import numpy as np
import png
from skimage.transform import resize
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
    """Load a preconstructed track image and turn it into a grid-map world.
    Args:
        filename: Full path to the track file (png).
        size: Width and height of the map (in meters, per the resolution
            description below).
        resolution: Resolution of the grid map, i.e. into how many cells
            one meter is divided.
    Returns:
        An initialized gridmap based on the preconstructed track as
        a 2-D numpy array sized ``size`` scaled by ``resolution``.
    """
    # Target grid dimensions in cells (meters * cells-per-meter).
    width_in_cells, height_in_cells = np.multiply(size, resolution)
    # png_to_ogm (defined below) returns nested lists; normalized=True puts
    # cell values in [0, 1] and origin='lower' flips the image vertically.
    world = np.array(png_to_ogm(
        filename, normalized=True, origin='lower'))
    # If the image is already in our desired shape, no need to rescale it
    if world.shape == (height_in_cells, width_in_cells):
        return world
    # Otherwise, scale the image to our desired size.
    # NOTE(review): the shape test above uses (height, width) while the
    # resize below uses (width, height); for non-square maps these
    # disagree — confirm the intended axis order.
    resized_world = resize(world, (width_in_cells, height_in_cells))
    return resized_world
def png_to_ogm(filename, normalized=False, origin='lower'):
    """Convert a png image to an occupancy grid map (list of lists).

    Inspired by https://github.com/richardos/occupancy-grid-a-star

    Args:
        filename: Path to the png file.
        normalized: Whether to scale the cell values into the range [0, 1].
        origin: Where (0, 0) sits; 'lower' reverses the row order so the
            first row of the result is the bottom of the image.

    Returns:
        2D array (list of row lists) with one value per pixel.
    """
    image = png.Reader(filename).read()
    pixel_rows = list(image[2])
    meta = image[3]
    planes = meta['planes']
    max_value = 2 ** meta['bitdepth']

    grid = []
    for pixel_row in pixel_rows:
        # Keep only the first channel of each pixel (column indices that are
        # multiples of the plane count), optionally scaled to [0, 1].
        row_cells = []
        for column in range(len(pixel_rows[0])):
            if column % planes != 0:
                continue
            value = pixel_row[column]
            row_cells.append(value * 1.0 / max_value if normalized else value)
        grid.append(row_cells)

    if origin == 'lower':
        grid.reverse()
    return grid
| 29.189189 | 83 | 0.611574 |
9af8e51dd66ea49555fb4a24794f6c9c1dc7752a | 885 | py | Python | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | from rest_framework import serializers
from user.models import User
from main.exceptions.user_exceptions import UserException
user_exception = UserException
| 32.777778 | 116 | 0.754802 |
9af8e62cf5607d29f1d31c790e20bc86925e4fe4 | 7,332 | py | Python | bf_compiler.py | PurpleMyst/bf_compiler | 51832ac9bb493b478c88f68798e99727cf43e180 | [
"MIT"
] | 31 | 2018-03-09T15:40:46.000Z | 2021-01-15T10:03:40.000Z | bf_compiler.py | PurpleMyst/bf_compiler | 51832ac9bb493b478c88f68798e99727cf43e180 | [
"MIT"
] | null | null | null | bf_compiler.py | PurpleMyst/bf_compiler | 51832ac9bb493b478c88f68798e99727cf43e180 | [
"MIT"
] | 2 | 2018-03-09T23:59:28.000Z | 2021-01-15T10:05:00.000Z | #!/usr/bin/env python3
import argparse
import ctypes
import os
import sys
from llvmlite import ir, binding as llvm
INDEX_BIT_SIZE = 16
# courtesy of the llvmlite docs
def create_execution_engine():
    """
    Create an ExecutionEngine suitable for JIT code generation on
    the host CPU.  The engine is reusable for an arbitrary number of
    modules.
    """
    # Target machine describing the CPU we are running on.
    host_target = llvm.Target.from_default_triple()
    machine = host_target.create_target_machine()
    # MCJIT requires a (possibly empty) module to back the engine.
    empty_module = llvm.parse_assembly("")
    return llvm.create_mcjit_compiler(empty_module, machine)
if __name__ == "__main__":
main()
| 31.2 | 79 | 0.610475 |
9afad36409d9c59fa007a59c5630a3d8610a0ebd | 4,715 | py | Python | dapbench/record_dap.py | cedadev/dapbench | e722c52f1d38d0ea008e177a1d68adff0a5daecc | [
"BSD-3-Clause-Clear"
] | null | null | null | dapbench/record_dap.py | cedadev/dapbench | e722c52f1d38d0ea008e177a1d68adff0a5daecc | [
"BSD-3-Clause-Clear"
] | null | null | null | dapbench/record_dap.py | cedadev/dapbench | e722c52f1d38d0ea008e177a1d68adff0a5daecc | [
"BSD-3-Clause-Clear"
] | 1 | 2019-08-05T20:01:23.000Z | 2019-08-05T20:01:23.000Z | #!/usr/bin/env python
# BSD Licence
# Copyright (c) 2011, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
Execute a programme that makes NetCDF-API OPeNDAP calls, capturing
request events and timings.
This script uses 2 methods of capturing OPeNDAP requests:
1. It assumes CURL.VERBOSE=1 in ~/.dodsrc
2. It runns the command through "strace" to capture request timings
The result is a dapbench.dap_stats.DapStats object containing all OPeNDAP
requests made.
WARNING: It is possible to fool record_dap if the wrapped script
writes to stderr lines begining "* Connected to" or "> GET"
"""
import tempfile
import os, sys
from subprocess import Popen, PIPE
import re
import urllib
from dapbench.dap_request import DapRequest
from dapbench.dap_stats import DapStats, SingleTimestampRecorder, echofilter_to_stats
import logging
log = logging.getLogger(__name__)
TMP_PREFIX='record_dap-'
DODSRC = '.dodsrc'
if __name__ == '__main__':
main()
| 30.419355 | 103 | 0.599152 |
9afbc58c35485195590c0111ab875fa7190d1ec1 | 621 | py | Python | kesko_webapp/models.py | kounelisagis/kesko-food-waste-hackathon | 6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d | [
"MIT"
] | 1 | 2019-12-29T16:16:54.000Z | 2019-12-29T16:16:54.000Z | kesko_webapp/models.py | kounelisagis/kesko-food-waste-hackathon | 6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d | [
"MIT"
] | 14 | 2019-11-16T18:27:51.000Z | 2022-02-26T20:17:01.000Z | kesko_webapp/models.py | kounelisagis/kesko-food-waste-hackathon | 6b66806aeaf4fc72ea96e47f152cd4bbd8b5a43d | [
"MIT"
] | 8 | 2019-11-15T20:27:32.000Z | 2020-08-26T16:21:48.000Z | from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
| 23.884615 | 80 | 0.727858 |
9afd4d7170021441a6b8eb952c84d874debdddcf | 5,925 | py | Python | source/prosumer.py | gus0k/LEMsim | a008a2d25d1de9d5d07706ebeaaaa402bee97bef | [
"Apache-2.0"
] | null | null | null | source/prosumer.py | gus0k/LEMsim | a008a2d25d1de9d5d07706ebeaaaa402bee97bef | [
"Apache-2.0"
] | null | null | null | source/prosumer.py | gus0k/LEMsim | a008a2d25d1de9d5d07706ebeaaaa402bee97bef | [
"Apache-2.0"
] | null | null | null | """
Prosumer class, extendes the battery controler
"""
import numpy as np
from source.batterycontroller import BatteryController
| 37.738854 | 167 | 0.575021 |
9afd605d71b6ed6dddc10236ff2ea972b58f32f8 | 1,630 | py | Python | tests/calculations/test_inner_goals_regression.py | frc1678/server-2021-public | d61e35f8385bf1debc9daaaed40208f6c783ed77 | [
"MIT"
] | null | null | null | tests/calculations/test_inner_goals_regression.py | frc1678/server-2021-public | d61e35f8385bf1debc9daaaed40208f6c783ed77 | [
"MIT"
] | null | null | null | tests/calculations/test_inner_goals_regression.py | frc1678/server-2021-public | d61e35f8385bf1debc9daaaed40208f6c783ed77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
import pytest
import numpy as np
import os, sys
current_directory = os.path.dirname(os.path.realpath(__file__))
parent_directory = os.path.dirname(current_directory)
grandparent_directory = os.path.dirname(parent_directory)
sys.path.append(grandparent_directory)
from calculations import inner_goals_regression
| 33.265306 | 80 | 0.648466 |
9afec172d7c5d85ad984f002f65f8f198cc1e65d | 13,758 | py | Python | trove/tests/unittests/taskmanager/test_galera_clusters.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 244 | 2015-01-01T12:04:44.000Z | 2022-03-25T23:38:39.000Z | trove/tests/unittests/taskmanager/test_galera_clusters.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 6 | 2015-08-18T08:19:10.000Z | 2022-03-05T02:32:36.000Z | trove/tests/unittests/taskmanager/test_galera_clusters.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 178 | 2015-01-02T15:16:58.000Z | 2022-03-23T03:30:20.000Z | # Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest.mock import Mock
from unittest.mock import patch
from trove.cluster.models import ClusterTasks as ClusterTaskStatus
from trove.cluster.models import DBCluster
from trove.common.exception import GuestError
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonClusterTasks
from trove.common.strategies.cluster.experimental.galera_common.taskmanager \
import GaleraCommonTaskManagerStrategy
from trove.datastore import models as datastore_models
from trove.instance.models import BaseInstance
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceTasks
from trove.instance.service_status import ServiceStatuses
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
| 50.029091 | 79 | 0.611353 |
9afeccca8e9baead9183ce3029a46c08b65bc934 | 3,814 | py | Python | AStyleTest/file-py/locale_enum_i18n.py | a-w/astyle | 8225c7fc9b65162bdd958cabb87eedd9749f1ecd | [
"MIT"
] | null | null | null | AStyleTest/file-py/locale_enum_i18n.py | a-w/astyle | 8225c7fc9b65162bdd958cabb87eedd9749f1ecd | [
"MIT"
] | null | null | null | AStyleTest/file-py/locale_enum_i18n.py | a-w/astyle | 8225c7fc9b65162bdd958cabb87eedd9749f1ecd | [
"MIT"
] | null | null | null | #! /usr/bin/python
""" Enumerate selected locales and sort by codepage to determine
which languages the locales support.
"""
# to disable the print statement and use the print() function (version 3 format)
from __future__ import print_function
import libastyle # local directory
import locale
import os
import platform
import sys
# -----------------------------------------------------------------------------
def main():
    """Enumerate the selected locales and print them sorted and grouped by
    codepage, with a running count of locales using codepage 1252.

    Windows-only (relies on the MSVC runtime's language-name locales);
    exits early on other platforms and under IronPython.
    """
    if os.name != "nt":
        libastyle.system_exit("This script is for Windows only!")
    if platform.python_implementation() == "IronPython":
        libastyle.system_exit("IronPython is not currently supported")
    libastyle.set_text_color("yellow")
    print(libastyle.get_python_version())
    # Language names accepted by setlocale() on Windows.
    languages = (
        # "chinese",  # returns chinese-simplified
        "chinese-simplified",
        "chinese-traditional",
        "czech",
        "danish",
        "dutch",
        "belgian",
        "english",
        "finnish",
        "french",
        "german",
        "greek",
        "hungarian",
        "icelandic",
        "italian",
        "japanese",
        "korean",
        "norwegian",
        "polish",
        "portuguese",
        "russian",
        "slovak",
        "spanish",
        "swedish",
        "turkish",
    )
    # build the list of full locale names ("Language_Country.codepage")
    locale_names = []
    for language in languages:
        try:
            locale.setlocale(locale.LC_ALL, language)
        except locale.Error:
            print("unsupported locale: " + language)
        # Query the locale actually in effect.  NOTE(review): if setlocale
        # failed above, the previously active locale's name is appended
        # again here, so duplicates are possible.
        locale_name = locale.setlocale(locale.LC_ALL, None)
        locale_names.append(locale_name)
    # sort the list of locale names by codepage
    # (Python 2 sort takes a comparison function, Python 3 a key function)
    if sys.version_info[0] < 3:
        locale_names.sort(sort_compare)
    else:
        locale_names.sort(key=get_codepage)
    # print the locale names with a blank line between codepage groups
    prevoius_codepage = 0
    total1252 = 0
    for locale_name in locale_names:
        codepage = get_codepage(locale_name)
        if codepage == "1252":
            total1252 += 1
        if codepage != prevoius_codepage:
            # NOTE(review): the 1252 total prints only when a later codepage
            # group follows 1252; if 1252 sorts last, the total is never
            # shown.
            if prevoius_codepage == "1252":
                print("1252 TOTAL " + str(total1252))
            print()
            prevoius_codepage = codepage
        print(codepage + ' ' + locale_name)
# -----------------------------------------------------------------------------
def sort_compare(locale_name1, locale_name2):
    """cmp-style comparator: order by codepage first, then by locale name.

    Only used under Python 2, where list.sort() accepts a comparison
    function instead of a key function.
    """
    key1 = (get_codepage(locale_name1), locale_name1)
    key2 = (get_codepage(locale_name2), locale_name2)
    # Mirrors the original behaviour: equal keys also compare as 1.
    return -1 if key1 < key2 else 1
# -----------------------------------------------------------------------------
def get_codepage(locale_name):
    """Return the codepage suffix of a locale name such as
    "English_United States.1252".

    Names without a '.' separator yield "0".  Three-character codepages
    are left-padded with a single zero so string sorting matches numeric
    ordering.
    """
    _, separator, suffix = locale_name.rpartition('.')
    codepage = suffix if separator else "0"
    if len(codepage) == 3:
        codepage = '0' + codepage
    return codepage
# -----------------------------------------------------------------------------
# make the module executable: run main() when invoked as a script, then
# terminate through libastyle.system_exit() rather than falling off the end
if __name__ == "__main__":
    main()
    libastyle.system_exit()
# -----------------------------------------------------------------------------
| 28.893939 | 80 | 0.522811 |
9aff6921a655770822f92c25247b7dfa80a21333 | 2,521 | py | Python | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | """ 2015-07-23
Perform coordinate conversions from the command line.
Uses
"""
import argparse
import pyperclip
# p1 = argparse.ArgumentParser()
# p1.add_argument('x')
# print p1.parse_args(['123'])
#
# p2 = argparse.ArgumentParser()
# p2.add_argument('-d', action='store_const',const='dak')
# print p2.parse_args(['-d'])
#
# p3 = argparse.ArgumentParser()
# p3.add_argument('-d', action='store_const',const='dak')
# p3.add_argument('x')
# p3.add_argument('y')
# print p3.parse_args(['-d','1','2'])
#p1.add_argument(
from Coordinate_Transform import DCcoordinate_projector
# #
# # parser = argparse.ArgumentParser()
# # parser.add_argument("coord_1")
# # parser.add_argument("coord_2")
# # args = parser.parse_args()
# # x,y = args.coord_1, args.coord_2
#
if __name__ == '__main__':
#test_parse_args()
coord_convert()
'''
ERROR coordinates not recognized or not within Dakota County
"570931,1441"
496475.91,4937695.85
Dakota Co: 570931, 144108
Dakota Co: 570931.0, 144108.0
UTM : 496475.91, 4937695.85
D.d : -93.044399765, 44.592598646
D M.m : -93 2.663986, 44 35.555919
D M S.s : -93 2 39.839", 44 35 33.355"''' | 28.647727 | 127 | 0.623165 |
9aff8c7e14210fed3124a5e6c2fdfe6fc51837d4 | 58 | py | Python | contest/abc106/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc106/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc106/A.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | A, B = map(int, input().split())
print((A - 1) * (B - 1))
| 19.333333 | 32 | 0.465517 |
b1011b3a30f3ce240dd73397c6dc7062b1511e60 | 774 | py | Python | pythonmisc/string_manipulation.py | davikawasaki/python-misc-module-library | c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5 | [
"MIT"
] | null | null | null | pythonmisc/string_manipulation.py | davikawasaki/python-misc-module-library | c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5 | [
"MIT"
] | null | null | null | pythonmisc/string_manipulation.py | davikawasaki/python-misc-module-library | c66b3e8be09db741c3b62d3a4e4a92ce70e1edb5 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Version: 0.1.1
import re
| 24.967742 | 106 | 0.524548 |
b101387aab58adbece7fb5e7de6f69fdf986d8dd | 6,979 | py | Python | ALLCools/clustering/incremental_pca.py | mukamel-lab/ALLCools | 756ef790665c6ce40633873211929ea92bcccc21 | [
"MIT"
] | 5 | 2019-07-16T17:27:15.000Z | 2022-01-14T19:12:27.000Z | ALLCools/clustering/incremental_pca.py | mukamel-lab/ALLCools | 756ef790665c6ce40633873211929ea92bcccc21 | [
"MIT"
] | 12 | 2019-10-17T19:34:43.000Z | 2022-03-23T16:04:18.000Z | ALLCools/clustering/incremental_pca.py | mukamel-lab/ALLCools | 756ef790665c6ce40633873211929ea92bcccc21 | [
"MIT"
] | 4 | 2019-10-18T23:43:48.000Z | 2022-02-12T04:12:26.000Z | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA as _IncrementalPCA
from ..count_matrix.zarr import dataset_to_array
| 36.160622 | 114 | 0.55796 |
b103007297614b73c2ae8e2e4d5c35bd947a709c | 1,051 | py | Python | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
import operator
| 30.028571 | 115 | 0.617507 |
b103796b9eb62b2e02e96ca3c1828f5ebc3886b8 | 3,137 | py | Python | example/06-modules/modules.py | iten-engineering/python | 97a79973c7727cd881974462db99a99d612b55f9 | [
"MIT"
] | null | null | null | example/06-modules/modules.py | iten-engineering/python | 97a79973c7727cd881974462db99a99d612b55f9 | [
"MIT"
] | null | null | null | example/06-modules/modules.py | iten-engineering/python | 97a79973c7727cd881974462db99a99d612b55f9 | [
"MIT"
] | null | null | null | # =============================================================================
# Python examples - modules
# =============================================================================
# -----------------------------------------------------------------------------
# Module
# -----------------------------------------------------------------------------
# Module
# - Mit Python können Definitionen (Funktionen, Klassen) in eine eigene Datei (Modul) ausgelagert werden.
# - Die Definitionen eines Moduls können in andere Module oder das Hauptprogramm importiert und dort genutzt werden.
# - Der Dateiname entspricht dabei dem Modulnamen mit dem Suffix ".py".
# - Innerhalb vom Modul ist der Modulname über die interne Variable "__name__" verfügbar.
import fibo
# Demonstrate using the local fibo module imported above.
print ("Fibo sample:")
# NOTE(review): fibo is a local module not visible in this chunk; from the
# calls below, print_fib(100) prints the series and fib(100) returns a
# printable result — confirm against fibo.py.
fibo.print_fib(100)
result = fibo.fib(100)
print(result)
# dir() lists every name the module defines (functions, dunder attributes).
print(("Show module details:"))
print(dir(fibo))
# -----------------------------------------------------------------------------
# Import
# -----------------------------------------------------------------------------
# Sample: `import module `
# - imports everything and keeps it in the module's namespace
# - module.func()
# - module.className.func()
# Sample: `from module import *`
# - imports everything under the current namespace
# - func()
# - className.func()
# > not recommended
# Sample: `from module import className`
# - selectively imports under the current namespace
# - className.func()
# - like standard modules: math, os, sys
# -----------------------------------------------------------------------------
# Import with custom name
# -----------------------------------------------------------------------------
# game.py
# import the draw module
# if visual_mode:
# # in visual mode, we draw using graphics
# import draw_visual as draw
# else:
# # in textual mode, we print out text
# import draw_textual as draw
#
# def main():
# result = play_game()
# # this can either be visual or textual depending on visual_mode
# draw.draw_game(result)
# -----------------------------------------------------------------------------
# Executing modules as scripts
# -----------------------------------------------------------------------------
# When you run a Python module with: python fibo.py <arguments>
# - the code in the module will be executed, just as if you imported it,
# - but with the __name__ set to "__main__".
# That means that by adding this code at the end of your module:
# if __name__ == "__main__":
# import sys
# fib(int(sys.argv[1]))
# you can make the file usable as a script as well as an importable module,
# because the code that parses the command line only runs if the module is executed as the main file!
# -----------------------------------------------------------------------------
# Further details
# -----------------------------------------------------------------------------
# Links:
# - https://docs.python.org/3/tutorial/modules.html
# - https://realpython.com/python-modules-packages/
# =============================================================================
# The end.
| 34.855556 | 115 | 0.476889 |
b1040fd46ded01c83aec3ec914b8371b0061edd6 | 5,010 | py | Python | .github/docker/checker_image/scripts/check_copyright_headers.py | TomasRejhons/siren | 9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825 | [
"MIT"
] | null | null | null | .github/docker/checker_image/scripts/check_copyright_headers.py | TomasRejhons/siren | 9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825 | [
"MIT"
] | null | null | null | .github/docker/checker_image/scripts/check_copyright_headers.py | TomasRejhons/siren | 9ef3ace7174cbdb48b9e45a2db104f3f5c4b9825 | [
"MIT"
] | 1 | 2021-05-26T12:06:12.000Z | 2021-05-26T12:06:12.000Z | #!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2021 silicon-village
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import os
import re
import sys
import datetime
import io
from distutils.spawn import find_executable
from subprocess import check_output
# Matches a header line such as "Copyright (c) 2021 ...": the word
# "Copyright", a literal "(c)", any non-alphabetic filler, then a 4-digit
# year.  Case-insensitive.
copyright_pattern = re.compile(
    r"\bCopyright \(c\)[^a-zA-Z]*\b\d{4}\b",
    re.IGNORECASE)
# Standalone 4-digit year token (presumably compared against the current
# year elsewhere — that code is not visible in this chunk).
year_pattern = re.compile(r"\b\d{4}\b")
# File basenames / extensions exempt from the header check; extended at
# runtime with the entries of an optional .copyrightignore file.
file_exceptions = [
    ".copyrightignore"
]
if __name__ == "__main__":
    # One required positional argument: the branch to diff against.
    argument_parser = argparse.ArgumentParser(
        description="Check that modified files include copyright headers with current year.")
    argument_parser.add_argument(
        "branch", type=str, help="Branch from which to compute the diff")
    args = argument_parser.parse_args()
    files = None
    # git is required to compute the changed-file list.
    if not find_executable("git"):
        print(terminal_colors.ERROR + "Missing git" + terminal_colors.END)
        sys.exit(1)
    # Optional per-repo ignore list: one basename or extension per line.
    try:
        ignored = open(".copyrightignore").readlines()
        for file in ignored:
            file_exceptions.append(file.strip())
    except FileNotFoundError:
        pass
    # Names of the files changed relative to the given branch.
    out = check_output(["git", "diff", args.branch,
                        "--name-only"])
    files = out.decode('utf-8').split("\n")
    if files:
        # Keep only existing files not exempted by basename or extension;
        # "len(x) > 0" drops the empty last entry produced by splitting on
        # "\n".  NOTE(review): terminal_colors, get_ext and check_files are
        # defined elsewhere in the file (not visible in this chunk).
        file_to_check = list(filter(lambda x: os.path.isfile(x) and os.path.basename(
            x) not in file_exceptions and get_ext(x) not in file_exceptions and len(x) > 0, files))
        check_files(file_to_check)
| 28.465909 | 99 | 0.642515 |
b104d1fb0a99c316174f26991ded219303201426 | 1,584 | py | Python | setup.py | finsberg/scholar_bot | b8a9fc22cfa1888d58a1881235e57a98769153fb | [
"MIT"
] | null | null | null | setup.py | finsberg/scholar_bot | b8a9fc22cfa1888d58a1881235e57a98769153fb | [
"MIT"
] | null | null | null | setup.py | finsberg/scholar_bot | b8a9fc22cfa1888d58a1881235e57a98769153fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import platform
import glob
from setuptools import setup, find_packages, Command
# Refuse to run on interpreters older than the minimum supported version.
if sys.version_info < (3, 6):
    print("Python 3.6 or higher required, please upgrade.")
    sys.exit(1)

# Package metadata.
version = "0.1"
name = "scholar_bot"
# Typo fixed: "Phyisoligy" -> "Physiology" (Simula's Computational
# Physiology department).
description = ("Post updates on Slack about citations "
               "for the Computational Physiology department at Simula")
# Every file under bin/ ships as an executable script.
scripts = glob.glob("bin/*")
# Runtime dependencies (not currently passed to setup(); install_requires
# is commented out in run_install below).
requirements = ['slackclient', 'scholarly', 'pyyaml']
def make_batch_wrapper(script):
    """Create "<script>.bat" next to *script* and return the .bat path.

    The wrapper forwards all arguments to the real script: %~dp0 expands
    to the batch file's own directory and %* to all arguments.
    """
    batch_file = script + ".bat"
    # Context manager guarantees the handle is closed even on write errors.
    with open(batch_file, "w") as handle:
        # Deliberately NOT a raw string: "\\" emits the path backslash and
        # "\n" a real newline.  The previous raw string wrote a literal
        # backslash-n into the batch file instead of ending the line.
        handle.write('python "%%~dp0\\%s" %%*\n' % os.path.split(script)[1])
    return batch_file


if platform.system() == "Windows" or "bdist_wininst" in sys.argv:
    # The Windows command prompt cannot execute Python scripts without a
    # .py extension, so install a .bat wrapper alongside each script.
    batch_files = [make_batch_wrapper(script) for script in scripts]
    scripts.extend(batch_files)
def run_install():
    """Install the package by handing its metadata to setuptools' setup()."""
    metadata = dict(
        name=name,
        description=description,
        version=version,
        author='Henrik Finsberg',
        license="MIT",
        author_email="henrikn@simula.no",
        platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
        packages=["scholar_bot"],
        package_dir={"scholar_bot": "scholar_bot"},
        # install_requires=requirements,  # kept disabled, as before
        scripts=scripts,
        zip_safe=False,
    )
    setup(**metadata)


if __name__ == "__main__":
    run_install()
| 26.847458 | 71 | 0.636364 |
b105030052fdd1f7dc3bd7505e5951494ee00846 | 3,226 | py | Python | time_series_rnn_without_wrapper.py | KT12/hands_on_machine_learning | 6de2292b43d7c34b6509ad61dab2da4f7ec04894 | [
"MIT"
] | null | null | null | time_series_rnn_without_wrapper.py | KT12/hands_on_machine_learning | 6de2292b43d7c34b6509ad61dab2da4f7ec04894 | [
"MIT"
] | null | null | null | time_series_rnn_without_wrapper.py | KT12/hands_on_machine_learning | 6de2292b43d7c34b6509ad61dab2da4f7ec04894 | [
"MIT"
] | null | null | null | # Predict time series w/o using OutputProjectWrapper
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
t = np.linspace(t_min, t_max, (t_max - t_min) // resolution)
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(basic_cell, X,
dtype=tf.float32)
learning_rate = 0.001
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
# Generat a creative new seq
n_iterations = 2000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
sequence1 = [0. for j in range(n_steps)]
for k in range(len(t) - n_steps):
X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence1.append(y_pred[0, -1, 0])
sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
for j in range(len(t) - n_steps):
X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence2.append(y_pred[0, -1, 0])
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, 'b-')
plt.plot(t[:n_steps],sequence1[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.ylabel('Value')
plt.subplot(122)
plt.plot(t, sequence2, 'b-')
plt.plot(t[:n_steps], sequence2[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.show() | 32.26 | 95 | 0.66522 |
b10569084242d8420b097b98d57fbf57c409ad50 | 5,536 | py | Python | pydocteur/actions.py | AFPy/PyDocTeur | 70e6e025468ad232797c4da0b9a834613d2a2ec4 | [
"MIT"
] | 4 | 2020-11-30T10:14:32.000Z | 2021-02-18T00:44:30.000Z | pydocteur/actions.py | AFPy/PyDocTeur | 70e6e025468ad232797c4da0b9a834613d2a2ec4 | [
"MIT"
] | 46 | 2020-11-27T09:21:02.000Z | 2021-06-08T07:43:33.000Z | pydocteur/actions.py | AFPy/PyDocTeur | 70e6e025468ad232797c4da0b9a834613d2a2ec4 | [
"MIT"
] | 4 | 2020-11-27T06:52:11.000Z | 2022-02-22T20:06:35.000Z | import json
import logging
import os
import random
import time
from functools import lru_cache
from github import Github
from github import PullRequest
from pydocteur.github_api import get_commit_message_for_merge
from pydocteur.github_api import get_trad_team_members
from pydocteur.pr_status import is_already_greeted
from pydocteur.pr_status import is_first_time_contributor
from pydocteur.settings import GH_TOKEN
from pydocteur.settings import REPOSITORY_NAME
from pydocteur.settings import VERSION
logger = logging.getLogger("pydocteur")
COMMENT_BODIES_FILEPATH = os.path.join(os.path.dirname(__file__), "../comment_bodies.json")
END_OF_BODY = """
---
<details>
<summary>Disclaimer</summary>
Je suis un robot fait par l'quipe de [l'AFPy et de Traduction](https://github.com/AFPy/PyDocTeur/graphs/contributors)
sur leur temps libre. Je risque de dire des btises. Ne me blmez pas, blamez les dveloppeurs.
[Code source](https://github.com/afpy/pydocteur)
I'm a bot made by the [Translation and AFPy teams](https://github.com/AFPy/PyDocTeur/graphs/contributors) on their free
time. I might say or do dumb things sometimes. Don't blame me, blame the developer !
[Source code](https://github.com/afpy/pydocteur)
(state: {state})
`PyDocTeur {version}`
</details>
"""
# TODO: Check if changing state for incorrect title may not create a bug where PyDocteur might repeat itself
| 38.713287 | 119 | 0.740426 |