hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c6f5eebc67f2c098afe70ef549d9f14b27bc659
| 1,572
|
py
|
Python
|
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | 1
|
2022-01-18T19:06:20.000Z
|
2022-01-18T19:06:20.000Z
|
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | null | null | null |
app/strategies/ema_bb_alligator_strategy.py
|
namuan/crypto-rider
|
f5b47ada60a7cef07e66609e2e92993619c6bfbe
|
[
"MIT"
] | null | null | null |
import pandas as pd
import ta
from app.common import reshape_data
from app.strategies.base_strategy import BaseStrategy
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
| 29.111111
| 83
| 0.564885
|
9c6f86bf35dea92442e86d8e34f3dfcb1923875e
| 1,336
|
py
|
Python
|
BasicScripts/basics.py
|
TomasBelskis/PythonAutomation
|
dd2e30abb214e37d84a8952deb834074abdc84a2
|
[
"MIT"
] | null | null | null |
BasicScripts/basics.py
|
TomasBelskis/PythonAutomation
|
dd2e30abb214e37d84a8952deb834074abdc84a2
|
[
"MIT"
] | null | null | null |
BasicScripts/basics.py
|
TomasBelskis/PythonAutomation
|
dd2e30abb214e37d84a8952deb834074abdc84a2
|
[
"MIT"
] | null | null | null |
# Python Basics
# String concatenation
added_strings = str(32) + "_342"
# Getting input
input_from_user = input()
# Basic print function
print(input_from_user)
# Mixing boolean and comparison operations
if (4 < 5) and (5 < 6):
    print("True")
# Basic if & if else flow
# NOTE(review): `name` and `age` were used below without ever being defined in
# the original snippet (NameError); give them example values so the demo runs.
name = 'Alice'
age = 30
if name == 'Alice':
    print('Hi, Alice.')
elif age < 12:
    print("You are not Alice, kiddo.")
elif age > 2000:
    print('Unlike you, Alice is not an undead, immortal vampire.')
elif age > 100:
    print('You are not Alice, grannie.')
# Loops in Python 3
spam = 0
while spam < 5:
    print('Spam, spam!')
    spam = spam + 1
# Access loop
while True:
    print('Who are you?')
    name = input()
    if name != 'Joe':
        continue
    print('Hello, Joe. What is the password? (It is a fish.)')
    password = input()
    # BUG FIX: original read `if password = 'swordfish':` -- assignment in a
    # condition is a SyntaxError; the comparison operator `==` was intended.
    if password == 'swordfish':
        break
print('Access granted.')
# For loops using range function
print("My name is")
for i in range(5):
    print('Jimmy Five Times (' + str(i) + ')')
# Using starting range
for i in range(12, 16):
    print(i)
# Importing modules
import random
for i in range(5):
    print(random.randint(1, 10))
# Exiting a python program
import sys
while True:
    print('Type exit to exit.')
    response = input()
    if response == 'exit':
        sys.exit()
    print('You typed ' + response + '.')
| 19.940299
| 66
| 0.624251
|
9c6fcb64c497c5bc80d5ed65052770cfc9db0316
| 156
|
py
|
Python
|
env.example.py
|
wilcoln/klazor
|
8f3c40a03a7e61c07eceb6cdbe4d1bb05693727e
|
[
"MIT"
] | 8
|
2020-01-18T09:33:51.000Z
|
2020-01-19T10:47:51.000Z
|
env.example.py
|
wilcoln/klazor
|
8f3c40a03a7e61c07eceb6cdbe4d1bb05693727e
|
[
"MIT"
] | 8
|
2019-08-09T03:54:44.000Z
|
2022-02-12T16:55:51.000Z
|
env.example.py
|
wilcoln/klazor
|
8f3c40a03a7e61c07eceb6cdbe4d1bb05693727e
|
[
"MIT"
] | null | null | null |
# Example environment configuration -- copy and fill in real values locally.
DATABASE_OPTIONS = {
    'database': 'klazor',
    'user': 'root',
    'password': '',  # placeholder; set a real password in the local copy
    'charset': 'utf8mb4',  # full-Unicode charset for MySQL
}
# Addresses the application is served from (loopback plus a deployment host).
HOSTS = ['127.0.0.1', '67.209.115.211']
| 17.333333
| 39
| 0.525641
|
9c70026f65fce93a4cf97730e00fb2afc1c7f5b5
| 818
|
py
|
Python
|
misc/_local_settings.py
|
lzantal/djskell
|
cef71bab8a4dd163b632128666c315e228cc8f0f
|
[
"MIT"
] | 4
|
2018-08-06T19:18:03.000Z
|
2019-09-26T14:52:01.000Z
|
misc/_local_settings.py
|
lzantal/djskell
|
cef71bab8a4dd163b632128666c315e228cc8f0f
|
[
"MIT"
] | 2
|
2018-08-06T19:17:57.000Z
|
2020-02-12T22:59:40.000Z
|
misc/_local_settings.py
|
lzantal/djskell
|
cef71bab8a4dd163b632128666c315e228cc8f0f
|
[
"MIT"
] | null | null | null |
"""
Django settings.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
#DEBUG = False
DEBUG = True  # NOTE(review): local-settings template -- must be False in production
SERVE_STATIC = DEBUG  # serve static files only while debugging
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        # Alternative engines kept commented out for quick switching:
        #'ENGINE': 'django.db.backends.oracle'
        #'ENGINE': 'django.db.backends.mysql',
        #'ENGINE': 'django.db.backends.sqlite3',
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'mydatabase',
        'USER': 'mydatabaseuser',
        'PASSWORD': 'mypassword',  # placeholder credentials -- replace locally
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
| 24.058824
| 64
| 0.640587
|
9c70e9c29dfd2bb44316e8313ac9366ffff8c24e
| 204
|
py
|
Python
|
contacts/forms.py
|
pedrohd21/Agenda-Django
|
c48a90d76094523fd2060ff735faefbf3c2f808d
|
[
"MIT"
] | 1
|
2021-04-21T00:07:03.000Z
|
2021-04-21T00:07:03.000Z
|
contacts/forms.py
|
pedrohd21/Agenda
|
c48a90d76094523fd2060ff735faefbf3c2f808d
|
[
"MIT"
] | null | null | null |
contacts/forms.py
|
pedrohd21/Agenda
|
c48a90d76094523fd2060ff735faefbf3c2f808d
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Contact
| 20.4
| 71
| 0.661765
|
9c723e762bff7b4ab80b6f5113e4e550464fb8ae
| 1,276
|
py
|
Python
|
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | null | null | null |
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | 2
|
2022-02-10T11:57:21.000Z
|
2022-02-27T22:43:44.000Z
|
awx/api/urls/ad_hoc_command.py
|
ziegenberg/awx
|
a3e29317c5d4220fffe28370ec73c73802255246
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.
from django.urls import re_path
from awx.api.views import (
AdHocCommandList,
AdHocCommandDetail,
AdHocCommandCancel,
AdHocCommandRelaunch,
AdHocCommandAdHocCommandEventsList,
AdHocCommandActivityStreamList,
AdHocCommandNotificationsList,
AdHocCommandStdout,
)
# URL patterns for the ad hoc command API; every detail-style pattern captures
# the command primary key as `pk`.
urls = [
    re_path(r'^$', AdHocCommandList.as_view(), name='ad_hoc_command_list'),
    re_path(r'^(?P<pk>[0-9]+)/$', AdHocCommandDetail.as_view(), name='ad_hoc_command_detail'),
    re_path(r'^(?P<pk>[0-9]+)/cancel/$', AdHocCommandCancel.as_view(), name='ad_hoc_command_cancel'),
    re_path(r'^(?P<pk>[0-9]+)/relaunch/$', AdHocCommandRelaunch.as_view(), name='ad_hoc_command_relaunch'),
    re_path(r'^(?P<pk>[0-9]+)/events/$', AdHocCommandAdHocCommandEventsList.as_view(), name='ad_hoc_command_ad_hoc_command_events_list'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', AdHocCommandActivityStreamList.as_view(), name='ad_hoc_command_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/notifications/$', AdHocCommandNotificationsList.as_view(), name='ad_hoc_command_notifications_list'),
    re_path(r'^(?P<pk>[0-9]+)/stdout/$', AdHocCommandStdout.as_view(), name='ad_hoc_command_stdout'),
]
# The url patterns are this module's only public API.
__all__ = ['urls']
| 42.533333
| 137
| 0.724922
|
9c73c8f40881c066eecdb84a89d42263b576a7ce
| 110
|
py
|
Python
|
note5/package_test5.py
|
icexmoon/python-learning-notes
|
838c91d896404290b89992b6517be1b6a79df41f
|
[
"MIT"
] | null | null | null |
note5/package_test5.py
|
icexmoon/python-learning-notes
|
838c91d896404290b89992b6517be1b6a79df41f
|
[
"MIT"
] | null | null | null |
note5/package_test5.py
|
icexmoon/python-learning-notes
|
838c91d896404290b89992b6517be1b6a79df41f
|
[
"MIT"
] | null | null | null |
#test.py
from time_tools import *
# print(compareTimestamp(111,222))
time.showNowTime()
# now time is XX:XX:XX
| 22
| 34
| 0.754545
|
9c7497307c0cb4f07fda11674de8080bc75940ac
| 3,265
|
py
|
Python
|
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | 2
|
2019-04-20T00:07:16.000Z
|
2019-04-24T01:25:38.000Z
|
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | null | null | null |
fgarcade/sprites.py
|
fabiommendes/fgarcade
|
2bfdb3ca18cb8260048ccfc9e84524987c322221
|
[
"MIT"
] | 7
|
2019-06-18T17:59:41.000Z
|
2019-07-02T21:37:21.000Z
|
import arcade
from arcade import FACE_RIGHT, FACE_DOWN, FACE_UP, FACE_LEFT
| 40.8125
| 79
| 0.618989
|
9c76b7443d1cefb8613a32ec558f3e2d259300ab
| 2,089
|
py
|
Python
|
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | 1
|
2021-12-08T19:21:07.000Z
|
2021-12-08T19:21:07.000Z
|
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | null | null | null |
src/mafUtility.py
|
gh-schen/SiriusEpiClassifier
|
617e0243a95fe1014acfeca25ff6f6ba617d366f
|
[
"Apache-2.0"
] | null | null | null |
from numpy.core.fromnumeric import transpose
from sklearn import linear_model
from scipy.special import logit
from scipy import stats
from copy import deepcopy
from numpy import random, concatenate, quantile, matmul, transpose
import logging
| 31.179104
| 87
| 0.650551
|
9c77b39243b7ae9ea7813df0033b58ce3c06fb82
| 4,553
|
py
|
Python
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 12
|
2019-09-21T13:52:09.000Z
|
2022-02-14T06:48:46.000Z
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 1
|
2020-01-22T12:34:52.000Z
|
2020-01-26T21:14:11.000Z
|
examples/linreg.py
|
hanyas/sds
|
3c195fb9cbd88a9284287d62c0eacb6afc4598a7
|
[
"MIT"
] | 5
|
2019-09-18T15:11:26.000Z
|
2021-12-10T14:04:53.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)
ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)
from copy import deepcopy
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma
likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
betas=1e-6 * np.ones((1, )))
parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
betas=1e-6 * np.ones((n_features, )))
likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)
parameter_posterior = None
for i in range(100):
# parameter posterior
alphas = parameter_precision_posterior.mean()
parameter_prior = GaussianWithPrecision(dim=n_features,
mu=np.zeros((n_features, )),
lmbda=np.diag(alphas))
parameter_posterior = deepcopy(parameter_prior)
beta = likelihood_precision_posterior.mean()
likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
lmbda=beta,
affine=False)
stats = likelihood_known_precision.statistics(X, y)
parameter_posterior.nat_param = parameter_prior.nat_param + stats
# likelihood precision posterior
param = parameter_posterior.mean()
likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
W=param, affine=False)
stats = likelihood_known_mean.statistics(X, y)
likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats
# parameter precision posterior
parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
param = parameter_posterior.mean()
stats = parameter_likelihood.statistics(param)
parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats
our_ard = parameter_posterior.mode()
from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision
M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))
prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)
likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
row_dim=1,
affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats
our_ols = posterior.mode()[0]
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
| 39.591304
| 100
| 0.691852
|
9c77f77e66dc427bbe7624fc776b41c3d875169f
| 7,516
|
py
|
Python
|
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
|
KarizCache/serverless
|
c5735afee29e104f3909f3b0140e993d461a5420
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
import json
import re
import ast
import json
from graphviz import Digraph
import pandas as pd
# color the graph
import graph_tool.all as gt
import copy
import matplotlib.colors as mcolors
import sys
import utils
from tompkins.ilp import schedule, jobs_when_where
from collections import defaultdict
from pulp import value
import re
import ast
import json
from graphviz import Digraph
import pandas as pd
# color the graph
import graph_tool.all as gt
import copy
import matplotlib.colors as mcolors
import sys
import seaborn as sns
import pulp as pl
import time
results_dir = './benchmarks'
stats_dir='./benchmarks'
benchmarks = get_benchmarks()
#benchmarks = ['dom4x61GB1B', 'dom2x41GB1B', 'tree4x61GB1B']
for bnch in benchmarks:
for bw in [1*1024, 16*1024, 512, 32*1024, 8*1024, 4*1024, 2*1024, 256, 128, 64, 32]:
print(f'process {bnch}')
g = build_graph(bnch)
sched2, stats = find_optimal(g, bw)
with open(f'{results_dir}/optimal_compuation_stats.csv', 'a') as fd:
fd.write(f'{bnch},{stats["makespan"]},{stats["constraints"]},{stats["variables"]},{stats["time"]},no,{bw}\n')
with open(f'{results_dir}/{bnch}.nonetworkcontention.{bw}mbps.optimal', 'w') as fd:
for s in sched2:
fd.write(f'v,{s[0]},{s[1]},{s[2]}\n')
#fd.write(f'{s[4]},{s[3]},{s[0]},{s[1]},{s[2]}\n')
#v = int(s[0].replace('t', ''))
#g.vp.worker[v] = s[2]
break
#break
| 32.678261
| 125
| 0.548696
|
9c78abbbec293aaefd5d026abd3db6d7960279d4
| 8,563
|
py
|
Python
|
tests/apitests/python/test_robot_account.py
|
gerhardgossen/harbor
|
1d03b8727acb9a3935bf45cd76b61f87c68e2a08
|
[
"Apache-2.0"
] | 1
|
2020-07-31T15:00:54.000Z
|
2020-07-31T15:00:54.000Z
|
tests/apitests/python/test_robot_account.py
|
gerhardgossen/harbor
|
1d03b8727acb9a3935bf45cd76b61f87c68e2a08
|
[
"Apache-2.0"
] | 10
|
2021-05-31T00:06:59.000Z
|
2022-02-11T12:34:16.000Z
|
tests/apitests/python/test_robot_account.py
|
gerhardgossen/harbor
|
1d03b8727acb9a3935bf45cd76b61f87c68e2a08
|
[
"Apache-2.0"
] | 1
|
2020-07-12T16:51:07.000Z
|
2020-07-12T16:51:07.000Z
|
from __future__ import absolute_import
import unittest
from testutils import ADMIN_CLIENT
from testutils import TEARDOWN
from library.user import User
from library.project import Project
from library.repository import Repository
from library.repository import pull_harbor_image
from library.repository import push_image_to_project
from testutils import harbor_server
from library.base import _assert_status_code
if __name__ == '__main__':
unittest.main()
| 64.871212
| 215
| 0.73911
|
9c79f89ccfffa309abd3d78c50d5bebd47df7780
| 3,675
|
py
|
Python
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 52
|
2019-08-05T21:58:53.000Z
|
2022-03-21T22:36:22.000Z
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 10
|
2020-04-11T21:30:53.000Z
|
2022-03-12T07:14:06.000Z
|
slackchannel2pdf/locales.py
|
ErikKalkoken/slackchannel2pdf
|
2848dfaaffbf9a5255c6dbe87dcc1e90d062b820
|
[
"MIT"
] | 10
|
2020-01-30T07:52:09.000Z
|
2022-02-03T03:44:41.000Z
|
import datetime as dt
import logging
from babel import Locale, UnknownLocaleError
from babel.dates import format_datetime, format_time, format_date
import pytz
from tzlocal import get_localzone
from . import settings
logger = logging.getLogger(__name__)
def format_datetime_str(self, my_datetime: dt.datetime) -> str:
    """Return *my_datetime* as a short, locale-formatted datetime string.

    NOTE(review): defined at module level but takes ``self`` -- appears to be
    extracted from a class providing a ``locale`` attribute; confirm.
    """
    return format_datetime(my_datetime, format="short", locale=self.locale)
def get_datetime_formatted_str(self, ts: int) -> str:
    """Return the unix timestamp *ts* as a short, locale-formatted datetime string."""
    # Convert through the instance's timezone before formatting.
    my_datetime = self.get_datetime_from_ts(ts)
    return format_datetime(my_datetime, format="short", locale=self.locale)
def get_time_formatted_str(self, ts: int) -> str:
    """Return the unix timestamp *ts* as a short, locale-formatted time string."""
    my_datetime = self.get_datetime_from_ts(ts)
    return format_time(my_datetime, format="short", locale=self.locale)
def get_datetime_from_ts(self, ts: int) -> dt.datetime:
    """Convert unix timestamp *ts* to a timezone-aware datetime in ``self.timezone``."""
    # Interpret the timestamp as UTC first, then shift to the local timezone.
    my_datetime = dt.datetime.fromtimestamp(float(ts), pytz.UTC)
    return my_datetime.astimezone(self.timezone)
| 36.386139
| 88
| 0.633741
|
9c7a5adb483cec8b62999237b4fa84739552c994
| 2,414
|
py
|
Python
|
databoard/databoard/default_config.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
databoard/databoard/default_config.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
databoard/databoard/default_config.py
|
glemaitre/ramp-board-1
|
a5e9b423a55d196d38232fd94b2f7d53fb35d9d8
|
[
"BSD-3-Clause"
] | null | null | null |
import os
######################################################################
| 30.948718
| 70
| 0.681856
|
9c7c266f5c66aa6fb93fbd1ac553f14737d31adf
| 1,193
|
py
|
Python
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 32
|
2021-06-21T04:49:48.000Z
|
2022-03-29T05:46:59.000Z
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 1
|
2021-11-12T03:45:55.000Z
|
2021-11-12T03:45:55.000Z
|
python_developer_tools/cv/bases/pool/AvgPool2d.py
|
carlsummer/python_developer_tools
|
a8c4365b7cc601cda55648cdfd8c0cb1faae132f
|
[
"Apache-2.0"
] | 10
|
2021-06-03T08:05:05.000Z
|
2021-12-13T03:10:42.000Z
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/31/2021 1:37 PM
# @File:GlobalAvgPool2d
import torch.nn as nn
from python_developer_tools.cv.bases.activates.swish import h_swish
| 31.394737
| 93
| 0.619447
|
9c7c4de6be5e48f9c89afdf0a57351e2ebf01e66
| 28,531
|
py
|
Python
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 2
|
2015-12-31T07:56:16.000Z
|
2016-08-22T17:23:02.000Z
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 6
|
2015-02-18T04:25:46.000Z
|
2017-01-25T01:00:35.000Z
|
expyfun/_utils.py
|
nordme/expyfun
|
e644bba8cbfb6edd2a076099536417d4854d64af
|
[
"BSD-3-Clause"
] | 1
|
2015-12-31T07:56:20.000Z
|
2015-12-31T07:56:20.000Z
|
"""Some utility functions"""
# Authors: Eric Larson <larsoner@uw.edu>
#
# License: BSD (3-clause)
import warnings
import operator
from copy import deepcopy
import subprocess
import importlib
import os
import os.path as op
import inspect
import sys
import tempfile
import ssl
from shutil import rmtree
import atexit
import json
from functools import partial
from distutils.version import LooseVersion
from numpy import sqrt, convolve, ones
import logging
import datetime
from timeit import default_timer as clock
from threading import Timer
import numpy as np
import scipy as sp
from ._externals import decorator
# set this first thing to make sure it "takes"
try:
import pyglet
pyglet.options['debug_gl'] = False
del pyglet
except Exception:
pass
# for py3k (eventually)
if sys.version.startswith('2'):
string_types = basestring # noqa
input = raw_input # noqa, input is raw_input in py3k
text_type = unicode # noqa
from __builtin__ import reload
from urllib2 import urlopen # noqa
from cStringIO import StringIO # noqa
else:
string_types = str
text_type = str
from urllib.request import urlopen
input = input
from io import StringIO # noqa, analysis:ignore
from importlib import reload # noqa, analysis:ignore
###############################################################################
# LOGGING
EXP = 25
logging.addLevelName(EXP, 'EXP')
def exp(self, message, *args, **kwargs):
    """Log *message* at the custom EXP (experiment) level.

    Bound onto ``logging.Logger`` just below, so every logger gains ``.exp()``.
    """
    self.log(EXP, message, *args, **kwargs)
logging.Logger.exp = exp
logger = logging.getLogger('expyfun')
def flush_logger():
    """Flush every handler attached to the expyfun logger."""
    for h in list(logger.handlers):
        h.flush()
def set_log_level(verbose=None, return_old_level=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable EXPYFUN_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
"""
if verbose is None:
verbose = get_config('EXPYFUN_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
verbose = 'INFO' if verbose is True else 'WARNING'
if isinstance(verbose, string_types):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
old_verbose = logger.level
logger.setLevel(verbose)
return (old_verbose if return_old_level else None)
def set_log_file(fname=None,
output_format='%(asctime)s - %(levelname)-7s - %(message)s',
overwrite=None):
"""Convenience function for setting the log to print to a file
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
http://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool, or None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
handlers = logger.handlers
for h in handlers:
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
if fname is not None:
if op.isfile(fname) and overwrite is None:
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.')
mode = 'w' if overwrite is True else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
but because doctests uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
###############################################################################
# RANDOM UTILITIES
building_doc = any('sphinx-build' in ((''.join(i[4]).lower() + i[1])
if i[4] is not None else '')
for i in inspect.stack())
def run_subprocess(command, **kwargs):
"""Run command using subprocess.Popen
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
By default, this will also add stdout= and stderr=subproces.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
**kwargs : objects
Keywoard arguments to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
# code adapted with permission from mne-python
kw = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
kw.update(kwargs)
p = subprocess.Popen(command, **kw)
stdout_, stderr = p.communicate()
output = (stdout_.decode(), stderr.decode())
if p.returncode:
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
def date_str():
    """Return the current date and time as a filesystem-safe string.

    Returns
    -------
    datestr : str
        ``str(datetime.datetime.today())`` with every ``:`` replaced
        by ``_`` so the result can be used in file names.
    """
    now = datetime.datetime.today()
    return str(now).replace(':', '_')
def check_units(units):
    """Ensure the user passed a valid units type.

    Parameters
    ----------
    units : str
        Must be ``'norm'``, ``'deg'``, or ``'pix'``.

    Raises
    ------
    ValueError
        If *units* is not one of the accepted values.
    """
    good_units = ['norm', 'pix', 'deg']
    if units in good_units:
        return
    raise ValueError('"units" must be one of {}, not {}'
                     ''.format(good_units, units))
###############################################################################
# DECORATORS
# Following deprecated class copied from scikit-learn
if hasattr(inspect, 'signature'): # py35
else:
def requires_video():
"""Requires FFmpeg/AVbin decorator."""
import pytest
return pytest.mark.skipif(not _has_video(), reason='Requires FFmpeg/AVbin')
def requires_opengl21(func):
"""Requires OpenGL decorator."""
import pytest
import pyglet.gl
vendor = pyglet.gl.gl_info.get_vendor()
version = pyglet.gl.gl_info.get_version()
sufficient = pyglet.gl.gl_info.have_version(2, 0)
return pytest.mark.skipif(not sufficient,
reason='OpenGL too old: %s %s'
% (vendor, version,))(func)
def requires_lib(lib):
    """Requires lib decorator.

    Returns a pytest ``skipif`` mark that skips the decorated test when
    *lib* cannot be imported; the import error message is included in
    the skip reason.
    """
    import pytest
    try:
        importlib.import_module(lib)
    except Exception as exp:
        val = True
        reason = 'Needs %s (%s)' % (lib, exp)
    else:
        val = False
        reason = ''
    return pytest.mark.skipif(val, reason=reason)
def _get_user_home_path():
    """Return the standard per-user preferences directory.

    Uses ``%APPDATA%`` on Windows and ``$HOME`` everywhere else; raises
    ``ValueError`` when the relevant environment variable is unset.
    """
    # This has been checked on OSX64, Linux64, and Win32.
    env_key = 'APPDATA' if os.name.lower() == 'nt' else 'HOME'
    home = os.getenv(env_key, None)
    if home is None:
        raise ValueError('expyfun config file path could '
                         'not be determined, please report this '
                         'error to expyfun developers')
    return home
def fetch_data_file(fname):
"""Fetch example remote file
Parameters
----------
fname : str
The remote filename to get. If the filename already exists
on the local system, the file will not be fetched again.
Returns
-------
fname : str
The filename on the local system where the file was downloaded.
"""
path = get_config('EXPYFUN_DATA_PATH', op.join(_get_user_home_path(),
'.expyfun', 'data'))
fname_out = op.join(path, fname)
if not op.isdir(op.dirname(fname_out)):
os.makedirs(op.dirname(fname_out))
fname_url = ('https://github.com/LABSN/expyfun-data/raw/master/{0}'
''.format(fname))
try:
# until we get proper certificates
context = ssl._create_unverified_context()
this_urlopen = partial(urlopen, context=context)
except AttributeError:
context = None
this_urlopen = urlopen
if not op.isfile(fname_out):
try:
with open(fname_out, 'wb') as fid:
www = this_urlopen(fname_url, timeout=30.0)
try:
fid.write(www.read())
finally:
www.close()
except Exception:
os.remove(fname_out)
raise
return fname_out
def get_config_path():
r"""Get path to standard expyfun config file.
Returns
-------
config_path : str
The path to the expyfun configuration file. On windows, this
will be '%APPDATA%\.expyfun\expyfun.json'. On every other
system, this will be $HOME/.expyfun/expyfun.json.
"""
val = op.join(_get_user_home_path(), '.expyfun', 'expyfun.json')
return val
# List the known configuration values
known_config_types = ('RESPONSE_DEVICE',
'AUDIO_CONTROLLER',
'DB_OF_SINE_AT_1KHZ_1RMS',
'EXPYFUN_EYELINK',
'SOUND_CARD_API',
'SOUND_CARD_BACKEND',
'SOUND_CARD_FS',
'SOUND_CARD_NAME',
'SOUND_CARD_FIXED_DELAY',
'TDT_CIRCUIT_PATH',
'TDT_DELAY',
'TDT_INTERFACE',
'TDT_MODEL',
'TDT_TRIG_DELAY',
'TRIGGER_CONTROLLER',
'TRIGGER_ADDRESS',
'WINDOW_SIZE',
'SCREEN_NUM',
'SCREEN_WIDTH',
'SCREEN_DISTANCE',
'SCREEN_SIZE_PIX',
'EXPYFUN_LOGGING_LEVEL',
)
# These allow for partial matches: 'NAME_1' is okay key if 'NAME' is listed
known_config_wildcards = ()
def get_config(key=None, default=None, raise_error=False):
"""Read expyfun preference from env, then expyfun config
Parameters
----------
key : str
The preference key to look for. The os environment is searched first,
then the expyfun config file is parsed.
default : str | None
Value to return if the key is not found.
raise_error : bool
If True, raise an error if the key is not found (instead of returning
default).
Returns
-------
value : str | None
The preference key value.
"""
if key is not None and not isinstance(key, string_types):
raise ValueError('key must be a string')
# first, check to see if key is in env
if key is not None and key in os.environ:
return os.environ[key]
# second, look for it in expyfun config file
config_path = get_config_path()
if not op.isfile(config_path):
key_found = False
val = default
else:
with open(config_path, 'r') as fid:
config = json.load(fid)
if key is None:
return config
key_found = True if key in config else False
val = config.get(key, default)
if not key_found and raise_error is True:
meth_1 = 'os.environ["%s"] = VALUE' % key
meth_2 = 'expyfun.utils.set_config("%s", VALUE)' % key
raise KeyError('Key "%s" not found in environment or in the '
'expyfun config file:\n%s\nTry either:\n'
' %s\nfor a temporary solution, or:\n'
' %s\nfor a permanent one. You can also '
'set the environment variable before '
'running python.'
% (key, config_path, meth_1, meth_2))
return val
def set_config(key, value):
    """Set an expyfun preference in the config file.

    Parameters
    ----------
    key : str | None
        The preference key to set. If None, a sorted list of the valid
        keys is returned, and ``value`` is ignored.
    value : str | None
        The value to assign to the preference key. If None, the key is
        deleted.
    """
    if key is None:
        return sorted(known_config_types)
    if not isinstance(key, string_types):
        raise ValueError('key must be a string')
    # While JSON allow non-string types, we allow users to override config
    # settings using env, which are strings, so we enforce that here
    if not isinstance(value, string_types) and value is not None:
        raise ValueError('value must be a string or None')
    wildcard_hit = any(k in key for k in known_config_wildcards)
    if key not in known_config_types and not wildcard_hit:
        warnings.warn('Setting non-standard config type: "%s"' % key)
    # Load whatever configuration already exists before modifying it.
    config_path = get_config_path()
    if op.isfile(config_path):
        with open(config_path, 'r') as fobj:
            cfg = json.load(fobj)
    else:
        cfg = dict()
        logger.info('Attempting to create new expyfun configuration '
                    'file:\n%s' % config_path)
    # A None value deletes the key; anything else overwrites it.
    if value is None:
        cfg.pop(key, None)
    else:
        cfg[key] = value
    # Persist the result, creating the config directory on first use.
    directory = op.split(config_path)[0]
    if not op.isdir(directory):
        os.mkdir(directory)
    with open(config_path, 'w') as fobj:
        json.dump(cfg, fobj, sort_keys=True, indent=0)
###############################################################################
# MISC
def fake_button_press(ec, button='1', delay=0.):
    """Fake a button press after a delay

    Parameters: ``ec`` is the ExperimentController, ``button`` the key
    identifier to inject, ``delay`` the seconds to wait before injecting.

    Notes
    -----
    This function only works with the keyboard controller (not TDT)!
    It uses threads to ensure that control is passed back, so other commands
    can be called (like wait_for_presses).
    """
    # NOTE(review): ``send`` is not defined in this excerpt -- presumably a
    # nested helper that posts the key event to ``ec`` was lost during
    # extraction; confirm against the original module.
    Timer(delay, send).start() if delay > 0. else send()
def fake_mouse_click(ec, pos, button='left', delay=0.):
    """Fake a mouse click at ``pos`` after a delay."""
    # Map the readable button name onto pyglet's numeric button codes.
    button = dict(left=1, middle=2, right=4)[button]  # trans to pyglet
    # NOTE(review): ``send`` is not defined in this excerpt -- the nested
    # helper that posts the click was presumably lost during extraction;
    # confirm against the original module.
    Timer(delay, send).start() if delay > 0. else send()
def _check_pyglet_version(raise_error=False):
    """Return True when the installed pyglet is at least version 1.2.

    When ``raise_error`` is True, an unusable pyglet raises ImportError
    instead of returning False.
    """
    import pyglet
    usable = LooseVersion(pyglet.version) >= LooseVersion('1.2')
    if raise_error is True and usable is False:
        raise ImportError('On Linux, you must run at least Pyglet '
                          'version 1.2, and you are running '
                          '{0}'.format(pyglet.version))
    return usable
def _wait_secs(secs, ec=None):
    """Wait a specified number of seconds.

    Parameters
    ----------
    secs : float
        Number of seconds to wait.
    ec : None | expyfun.ExperimentController instance
        The ExperimentController.

    Notes
    -----
    This function uses a while loop. Although this slams the CPU, it will
    guarantee that events (keypresses, etc.) are processed.
    """
    # hog the cpu, checking time
    t0 = clock()
    if ec is not None:
        # Busy-wait, pumping the controller's event queue and honoring the
        # force-quit keys on every pass.
        while (clock() - t0) < secs:
            ec._dispatch_events()
            ec.check_force_quit()
    else:
        # NOTE(review): this branch dispatches pending window events once but
        # contains no visible wait loop in this excerpt -- confirm against
        # the original module whether timing code was lost here.
        wins = _get_display().get_windows()
        for win in wins:
            win.dispatch_events()
def running_rms(signal, win_length):
    """RMS of ``signal`` with rectangular window ``win_length`` samples long.

    Parameters
    ----------
    signal : array_like
        The (1-dimensional) signal of interest.
    win_length : int
        Length (in samples) of the rectangular window

    Returns
    -------
    array
        The running RMS, ``len(signal) - win_length + 1`` samples long
        ('valid' convolution).
    """
    # Averaging window: normalized so the convolution yields the mean square.
    window = ones(win_length) / win_length
    return sqrt(convolve(signal ** 2, window, 'valid'))
def _fix_audio_dims(signal, n_channels):
"""Make it so a valid audio buffer is in the standard dimensions
Parameters
----------
signal : array_like
The signal whose dimensions should be checked and fixed.
n_channels : int
The number of channels that the output should have.
If the input is mono and n_channels=2, it will be tiled to be
shape (2, n_samples). Otherwise, the number of channels in signal
must match n_channels.
Returns
-------
signal_fixed : array
The signal with standard dimensions (n_channels, N).
"""
# Check requested channel output
n_channels = int(operator.index(n_channels))
signal = np.asarray(np.atleast_2d(signal), dtype=np.float32)
# Check dimensionality
if signal.ndim != 2:
raise ValueError('Sound data must have one or two dimensions, got %s.'
% (signal.ndim,))
# Return data with correct dimensions
if n_channels == 2 and signal.shape[0] == 1:
signal = np.tile(signal, (n_channels, 1))
if signal.shape[0] != n_channels:
raise ValueError('signal channel count %d did not match required '
'channel count %d' % (signal.shape[0], n_channels))
return signal
def _sanitize(text_like):
    """Cast as string, encode as UTF-8 and sanitize any escape characters.

    NOTE(review): ``text_type`` is not defined in this excerpt -- presumably
    the six/compat alias for ``str``; confirm against the module imports.
    """
    return text_type(text_like).encode('unicode_escape').decode('utf-8')
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_diff(a, b, pre=''):
    """Compute all differences between two python variables

    Parameters
    ----------
    a : object
        Currently supported: dict, list, tuple, ndarray, int, str, bytes,
        float, StringIO, BytesIO.
    b : object
        Must be same type as ``a``.
    pre : str
        String to prepend to each line.

    Returns
    -------
    diffs : str
        A string representation of the differences; empty when the two
        objects compare equal.

    Notes
    -----
    Taken from mne-python with permission.
    """
    out = ''
    if type(a) != type(b):
        # Different types: no point comparing contents.
        out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
    elif isinstance(a, dict):
        k1s = _sort_keys(a)
        k2s = _sort_keys(b)
        # Keys present in b but absent from a.
        m1 = set(k2s) - set(k1s)
        if len(m1):
            out += pre + ' x1 missing keys %s\n' % (m1)
        for key in k1s:
            if key not in k2s:
                out += pre + ' x2 missing key %s\n' % key
            else:
                # Recurse into the shared key's values.
                out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
    elif isinstance(a, (list, tuple)):
        if len(a) != len(b):
            out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
        else:
            # Element-wise comparison (note: prefix is reset here).
            for xx1, xx2 in zip(a, b):
                out += object_diff(xx1, xx2, pre='')
    elif isinstance(a, (string_types, int, float, bytes)):
        # Scalar-like values: direct equality.
        if a != b:
            out += pre + ' value mismatch (%s, %s)\n' % (a, b)
    elif a is None:
        # Both None is fine; a None / b non-None is a mismatch.
        if b is not None:
            out += pre + ' a is None, b is not (%s)\n' % (b)
    elif isinstance(a, np.ndarray):
        if not np.array_equal(a, b):
            out += pre + ' array mismatch\n'
    else:
        raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
    return out
| 31.45645
| 79
| 0.598156
|
9c7c6f724eb34b62e9ef21f46c99bd86675f4bf5
| 295
|
py
|
Python
|
mixin.py
|
delimatorres/foodbasket
|
2f043d713337581be2165259cdbba4e4a24b656b
|
[
"Apache-2.0"
] | null | null | null |
mixin.py
|
delimatorres/foodbasket
|
2f043d713337581be2165259cdbba4e4a24b656b
|
[
"Apache-2.0"
] | null | null | null |
mixin.py
|
delimatorres/foodbasket
|
2f043d713337581be2165259cdbba4e4a24b656b
|
[
"Apache-2.0"
] | null | null | null |
import signal
| 26.818182
| 59
| 0.698305
|
9c7c76de33634fbc8775661a36b44e7120b4b1f1
| 68
|
py
|
Python
|
test5.py
|
liubaishuo-github/peening-post-processor
|
61f4c2d2385469bc1e9d1b7a692b72eb6afd7f75
|
[
"MIT"
] | null | null | null |
test5.py
|
liubaishuo-github/peening-post-processor
|
61f4c2d2385469bc1e9d1b7a692b72eb6afd7f75
|
[
"MIT"
] | null | null | null |
test5.py
|
liubaishuo-github/peening-post-processor
|
61f4c2d2385469bc1e9d1b7a692b72eb6afd7f75
|
[
"MIT"
] | null | null | null |
a = HAHA()
print(a)
print(a[0])
| 9.714286
| 17
| 0.5
|
9c7d50b1d9dc52a93f5eb0bc5220367d727e498d
| 9,079
|
py
|
Python
|
torch/_fx/graph_module.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 206
|
2020-11-28T22:56:38.000Z
|
2022-03-27T02:33:04.000Z
|
torch/_fx/graph_module.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 19
|
2020-12-09T23:13:14.000Z
|
2022-01-24T23:24:08.000Z
|
torch/_fx/graph_module.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 28
|
2020-11-29T15:25:12.000Z
|
2022-01-20T02:16:27.000Z
|
import torch
import torch.overrides
import linecache
from typing import Type, Dict, List, Any, Union
from .graph import Graph
import copy
# normal exec loses the source code, however we can patch
# the linecache module to still recover it.
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id = 0
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}
_orig_getlines = linecache.getlines
linecache.getlines = patched_getline
def deserialize_graphmodule(body : dict) -> torch.nn.Module:
    """
    Deserialize a GraphModule given the dictionary of the original module,
    using the code to reconstruct the graph. We delete the actual graph before
    saving the dictionary so that changes to the in-memory graph format do not
    get serialized.
    """
    # We create a dummy class here because symbolic_trace pulls the forward()
    # function off of the class, rather than the instance
    # NOTE(review): ``CodeOnlyModule``, ``_forward_from_src`` and
    # ``KeepModules`` are not visible in this excerpt (their definitions were
    # elided) -- presumably CodeOnlyModule wraps the serialized state and
    # KeepModules is a Tracer subclass that avoids recursing into submodules;
    # confirm against the full module.
    CodeOnlyModule.forward = _forward_from_src(body['code'])
    from .symbolic_trace import Tracer
    # we shouldn't trace into any of the submodules, they were not
    # because they were not traced in the original GraphModule
    return KeepModules().trace(CodeOnlyModule(body))
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module
# This installs empty Modules where none exist yet if they are subpaths of target
# because __reduce__ is defined for serialization,
# we need to define deepcopy otherwise it will call __reduce__
# and cause symbolic tracing to occur every time we try to copy the object
def __deepcopy__(self, memo):
    # __reduce__ is defined for serialization, so a default deepcopy would
    # call __reduce__ and re-trigger symbolic tracing every time; instead,
    # deep-copy the state onto a throwaway Module and rebuild from the graph.
    fake_mod = torch.nn.Module()
    fake_mod.__dict__ = copy.deepcopy(self.__dict__)
    return GraphModule(fake_mod, self.graph)
def __copy__(self):
    # Shallow copy: share attributes/submodules, rebuild from the same graph.
    return GraphModule(self, self.graph)
def __str__(self) -> str:
    # Append the generated Python source (self.code) to the standard
    # nn.Module string representation.
    orig_str = super().__str__()
    return '\n'.join([orig_str, self.code])
# workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
| 41.268182
| 107
| 0.651614
|
9c7db6d021abe53926601b1834856be78ee60324
| 8,949
|
py
|
Python
|
RequestHandler.py
|
robot0nfire/behem0th
|
3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7
|
[
"MIT"
] | 2
|
2016-09-08T18:38:35.000Z
|
2016-09-14T11:05:34.000Z
|
RequestHandler.py
|
robot0nfire/behem0th
|
3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7
|
[
"MIT"
] | 1
|
2016-09-29T17:36:49.000Z
|
2016-09-29T17:36:49.000Z
|
RequestHandler.py
|
robot0nfire/behem0th
|
3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2016 Christoph Heiss <me@christoph-heiss.me>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import json
import struct
import threading
import socket
import queue
import tempfile
import base64
import select
from behem0th import utils, log
BLOCK_SIZE = 4096
"""
{
"action": "<action>",
"path": "<relpath-to-file>"
}
<action> can be either 'receive' or 'send'
Payload are base64 encoded chunks (BLOCK_SIZE bytes)
"""
"""
{
"type": "<type>",
"path": "<relpath-to-file>"
}
<type> can be one of 'file-created', 'file-deleted', 'file-moved'
"""
ROUTES = {
'filelist': FilelistRoute(),
'file': FileRoute(),
'event': EventRoute()
}
"""
behem0th's protocol is completely text-based, using utf-8 encoding and
encoded in JSON for easy parsing.
A request usually looks like this:
{ "route": "<route-name>", "data": "<data>" }
'data' holds additional data which is then passed to the route.
There is no special format designed for 'data' and is specific to each route.
After each request there is a newline to separate them. (think of HTTP)
If a route needs to transfer additional data (a 'payload'), it has to send them
in a text-based format, e.g. base-64 encoding for binary data.
After the payload, if any, there has to be another newline to separate it from
the next request.
"""
| 24.927577
| 91
| 0.684769
|
9c7dd63e969ee9cd5df33ca5c30412f39a0774ab
| 366
|
py
|
Python
|
tests/utils/test_metrics.py
|
haochuanwei/hover
|
53eb38c718e44445b18a97e391b7f90270802b04
|
[
"MIT"
] | 251
|
2020-11-22T15:02:30.000Z
|
2022-03-23T23:29:28.000Z
|
tests/utils/test_metrics.py
|
MaxCodeXTC/hover
|
feeb0e0c59295a3c883823ccef918dfe388b603c
|
[
"MIT"
] | 22
|
2020-12-03T07:50:27.000Z
|
2022-02-26T01:43:41.000Z
|
tests/utils/test_metrics.py
|
MaxCodeXTC/hover
|
feeb0e0c59295a3c883823ccef918dfe388b603c
|
[
"MIT"
] | 14
|
2020-11-18T06:46:02.000Z
|
2022-03-03T08:14:18.000Z
|
from hover.utils.metrics import classification_accuracy
import numpy as np
| 33.272727
| 55
| 0.677596
|
9c7e8f9016c9cbf4f8f05d18b1e14e707c0c6a3e
| 27,504
|
py
|
Python
|
scripts/blenderseed.package.py
|
rgirish28/blenderseed
|
fee897620d0348f4ea1f5722e1a82c3682ca0178
|
[
"MIT"
] | null | null | null |
scripts/blenderseed.package.py
|
rgirish28/blenderseed
|
fee897620d0348f4ea1f5722e1a82c3682ca0178
|
[
"MIT"
] | null | null | null |
scripts/blenderseed.package.py
|
rgirish28/blenderseed
|
fee897620d0348f4ea1f5722e1a82c3682ca0178
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from xml.etree.ElementTree import ElementTree
import argparse
import colorama
import datetime
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import urllib
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "1.1.0"
SETTINGS_FILENAME = "blenderseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
GREEN_CHECKMARK = u"{0}\u2713{1}".format(colorama.Style.BRIGHT + colorama.Fore.GREEN, colorama.Style.RESET_ALL)
RED_CROSSMARK = u"{0}\u2717{1}".format(colorama.Style.BRIGHT + colorama.Fore.RED, colorama.Style.RESET_ALL)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
    """Build a blenderseed release package for the current platform."""
    colorama.init()
    parser = argparse.ArgumentParser(description="build a blenderseed package from sources")
    parser.add_argument("--nozip", action="store_true", help="copies appleseed binaries to blenderseed folder but does not build a release package")
    args = parser.parse_args()
    no_release = args.nozip
    # Version string comes from git; build date stamps the package metadata.
    package_version = subprocess.Popen("git describe --long", stdout=subprocess.PIPE, shell=True).stdout.read().strip()
    build_date = datetime.date.today().isoformat()
    print("blenderseed.package version " + VERSION)
    print("")
    # NOTE(review): Settings and the *PackageBuilder classes are defined
    # elsewhere in this file (their bodies were elided in this excerpt).
    settings = Settings()
    settings.load()
    settings.print_summary()
    # Select the platform-specific builder: Windows, macOS, or Linux.
    if os.name == "nt":
        package_builder = WindowsPackageBuilder(settings, package_version, build_date, no_release)
    elif os.name == "posix" and platform.mac_ver()[0] != "":
        package_builder = MacPackageBuilder(settings, package_version, build_date, no_release)
    elif os.name == "posix" and platform.mac_ver()[0] == "":
        package_builder = LinuxPackageBuilder(settings, package_version, build_date, no_release)
    else:
        fatal("Unsupported platform: " + os.name)
    package_builder.build_package()


if __name__ == "__main__":
    main()
| 37.98895
| 148
| 0.597186
|
9c7f2a0eaaf692b24bd48b78aa80933467348c66
| 80
|
py
|
Python
|
uts/uts_17_aut_py/2/A.py
|
viad00/code_olymp
|
90f20f9fd075e8967d02baf7554fcf24f4ae089c
|
[
"MIT"
] | null | null | null |
uts/uts_17_aut_py/2/A.py
|
viad00/code_olymp
|
90f20f9fd075e8967d02baf7554fcf24f4ae089c
|
[
"MIT"
] | null | null | null |
uts/uts_17_aut_py/2/A.py
|
viad00/code_olymp
|
90f20f9fd075e8967d02baf7554fcf24f4ae089c
|
[
"MIT"
] | null | null | null |
# First input line: the element count (parsed but otherwise unused);
# second line: the space-separated integers to sort.
_count = int(input())
values = sorted(map(int, input().split()))
print(*values)
| 16
| 37
| 0.6125
|
9c7f69a036f4358b44b78abe3f34ed429e5fbfef
| 1,420
|
py
|
Python
|
wagtailkatex/wagtail_hooks.py
|
ongchi/wagtail-katex
|
c64b491e765e6b87a90d7cd8602153826ee9fe07
|
[
"Apache-2.0"
] | null | null | null |
wagtailkatex/wagtail_hooks.py
|
ongchi/wagtail-katex
|
c64b491e765e6b87a90d7cd8602153826ee9fe07
|
[
"Apache-2.0"
] | null | null | null |
wagtailkatex/wagtail_hooks.py
|
ongchi/wagtail-katex
|
c64b491e765e6b87a90d7cd8602153826ee9fe07
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.translation import gettext
from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks
from .richtext import KaTeXEntityElementHandler, katex_entity_decorator
| 32.272727
| 90
| 0.607042
|
9c7f78a252c83665660e04bc5a8e1cea157db269
| 1,415
|
py
|
Python
|
esque_wire/protocol/serializers/api/elect_preferred_leaders_request.py
|
real-digital/esque-wire
|
eb02c49f38b89ad5e5d25aad15fb4ad795e52807
|
[
"MIT"
] | null | null | null |
esque_wire/protocol/serializers/api/elect_preferred_leaders_request.py
|
real-digital/esque-wire
|
eb02c49f38b89ad5e5d25aad15fb4ad795e52807
|
[
"MIT"
] | 7
|
2019-11-26T08:19:49.000Z
|
2021-03-15T14:27:47.000Z
|
esque_wire/protocol/serializers/api/elect_preferred_leaders_request.py
|
real-digital/esque-wire
|
eb02c49f38b89ad5e5d25aad15fb4ad795e52807
|
[
"MIT"
] | null | null | null |
###############################################################
# Autogenerated module. Please don't modify. #
# Edit according file in protocol_generator/templates instead #
###############################################################
from typing import Dict
from ...structs.api.elect_preferred_leaders_request import ElectPreferredLeadersRequestData, TopicPartition
from ._main_serializers import ArraySerializer, ClassSerializer, Schema, int32Serializer, stringSerializer
# Wire schema for the TopicPartition struct, keyed by API version
# (only version 0 exists for this request).
topicPartitionSchemas: Dict[int, Schema] = {
    0: [("topic", stringSerializer), ("partition_id", ArraySerializer(int32Serializer))]
}

# One ClassSerializer per schema version for TopicPartition.
topicPartitionSerializers: Dict[int, ClassSerializer[TopicPartition]] = {
    version: ClassSerializer(TopicPartition, schema) for version, schema in topicPartitionSchemas.items()
}
# Version -1 is the "latest available" alias, pointing at version 0.
topicPartitionSerializers[-1] = topicPartitionSerializers[0]

# Wire schema for the top-level ElectPreferredLeadersRequest payload.
electPreferredLeadersRequestDataSchemas: Dict[int, Schema] = {
    0: [("topic_partitions", ArraySerializer(topicPartitionSerializers[0])), ("timeout_ms", int32Serializer)]
}

# One ClassSerializer per schema version for the request payload.
electPreferredLeadersRequestDataSerializers: Dict[int, ClassSerializer[ElectPreferredLeadersRequestData]] = {
    version: ClassSerializer(ElectPreferredLeadersRequestData, schema)
    for version, schema in electPreferredLeadersRequestDataSchemas.items()
}
# Version -1 aliases the latest (and only) version, 0.
electPreferredLeadersRequestDataSerializers[-1] = electPreferredLeadersRequestDataSerializers[0]
| 41.617647
| 109
| 0.733569
|
9c7f9627f318b3e1570c92823a8ee10c19ec9aa5
| 8,991
|
py
|
Python
|
test/tests/bootstrap/test_api20_windows_bootstrap.py
|
arunrordell/RackHD
|
079c21f45cb38f538c502363aa1ff86dbcac3169
|
[
"Apache-2.0"
] | 451
|
2015-11-09T13:19:25.000Z
|
2022-03-16T08:00:16.000Z
|
test/tests/bootstrap/test_api20_windows_bootstrap.py
|
arunrordell/RackHD
|
079c21f45cb38f538c502363aa1ff86dbcac3169
|
[
"Apache-2.0"
] | 824
|
2015-11-10T15:25:50.000Z
|
2018-04-09T09:59:49.000Z
|
test/tests/bootstrap/test_api20_windows_bootstrap.py
|
arunrordell/RackHD
|
079c21f45cb38f538c502363aa1ff86dbcac3169
|
[
"Apache-2.0"
] | 221
|
2015-11-10T23:00:46.000Z
|
2022-03-16T08:00:22.000Z
|
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests arbitrary payload of the RackHD API 2.0 OS bootstrap workflows.
The default case is running a minimum payload Windows OS install.
Other Windows-type OS install cases can be specified by creating a payload file and specifiying it using the '-extra' argument.
This test takes 30-45 minutes to run.
Example payload file (installed in configuration dir):
{"bootstrap-payload":
{"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX",
"username": "rackhduser",
"password": "RackHDRocks",
"smbUser": "vagrant",
"smbPassword": "vagrant"}}}
}
Example command line using external payload file:
python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json
RackHD Windows installation workflow requires special configuration of the RackHD server:
- A customized WinPE environment installed on RackHD server as documented here:
https://github.com/RackHD/on-tools/tree/master/winpe
- Samba installed on the RackHD server and configured as documented here:
http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install
- Windows 2012 installation distro installed on RackHD server or equivalent NFS mount.
- Windows 2012 activation key in the installation payload file.
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import random
import json
import time
from nosedep import depends
from datetime import datetime
log = flogging.get_loggers()
# sample default base payload
PAYLOAD = {"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX",
"username": "rackhduser",
"password": "RackHDRocks",
"smbUser": "vagrant",
"smbPassword": "vagrant"}}}
# if an external payload file is specified, use that
config = fit_common.fitcfg().get('bootstrap-payload', None)
if config:
PAYLOAD = config
# function to return the value of a field from the workflow response
# this routine polls a workflow task ID for completion
# ------------------------ Tests -------------------------------------
if __name__ == '__main__':
fit_common.unittest.main()
| 45.872449
| 127
| 0.587031
|
9c806e8f0ae3b3c96a9df2eadcd9d67e2ad3e5fe
| 602
|
py
|
Python
|
random_number.py
|
till-h/alexa
|
47891eb97fff375500a032b23fef7a2681b50735
|
[
"MIT"
] | null | null | null |
random_number.py
|
till-h/alexa
|
47891eb97fff375500a032b23fef7a2681b50735
|
[
"MIT"
] | null | null | null |
random_number.py
|
till-h/alexa
|
47891eb97fff375500a032b23fef7a2681b50735
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
from flask_ask import Ask, statement
import random
# Flask application with a Flask-Ask (Alexa) endpoint mounted at the root.
app = Flask(__name__)
ask = Ask(app, '/')
if __name__ == '__main__':
    # Development server only; debug mode enables auto-reload and tracebacks.
    app.run(debug=True)
| 31.684211
| 101
| 0.749169
|
9c81af124f83929d36674b85f7157b8a2ef4f4b9
| 9,686
|
py
|
Python
|
model/losses.py
|
askerlee/rift
|
d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2
|
[
"MIT"
] | 11
|
2022-02-14T08:31:04.000Z
|
2022-03-29T08:20:17.000Z
|
model/losses.py
|
askerlee/rift
|
d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2
|
[
"MIT"
] | 3
|
2022-02-14T11:19:15.000Z
|
2022-03-19T05:11:25.000Z
|
model/losses.py
|
askerlee/rift
|
d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from model.laplacian import LapLoss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# flow could have any channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
def flow_smooth_delta(flow, if_second_order=False):
    """Smoothness loss: mean absolute spatial derivatives of ``flow``.

    When ``if_second_order`` is True, the second-order derivative terms
    (gradients of the gradients) are added as well.

    NOTE(review): ``gradient`` is not defined in this excerpt -- presumably a
    module-level helper returning the (d/dx, d/dy) pair; confirm against the
    full module.
    """
    dx, dy = gradient(flow)
    # dx2, dxdy = gradient(dx)
    # dydx, dy2 = gradient(dy)
    if if_second_order:
        # Second-order terms: derivatives of the first derivatives.
        dx2, dxdy = gradient(dx)
        dydx, dy2 = gradient(dy)
        smooth_loss = dx.abs().mean() + dy.abs().mean() + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
    else:
        smooth_loss = dx.abs().mean() + dy.abs().mean()
    # smooth_loss = dx.abs().mean() + dy.abs().mean() # + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
    # photo loss TODO
    return smooth_loss
# flow should have 4 channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
# weight_type='exp' seems to perform better than 'gauss'.
# Dual teaching helps slightly.
if __name__ == '__main__':
    # Smoke test: evaluate the census-transform (Ternary) loss between a
    # zero image and Gaussian noise, and print the result's shape.
    # NOTE(review): ``Ternary`` is defined elsewhere in this module (its
    # definition is not visible in this excerpt).
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    ternary_loss = Ternary()
    print(ternary_loss(img0, img1).shape)
| 39.696721
| 134
| 0.601693
|
9c820bdf9b7f916cd742cf712e94425ee24e76e1
| 5,847
|
py
|
Python
|
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | 8
|
2019-09-14T11:55:49.000Z
|
2022-02-05T23:06:33.000Z
|
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | null | null | null |
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | 7
|
2019-09-16T02:42:41.000Z
|
2021-09-07T03:26:22.000Z
|
import numpy as np
import pybullet as p
import itertools
from robot import Robot
| 43.634328
| 96
| 0.540277
|
9c82ce7669d0a4f2d3645ab5502b497296602411
| 31,437
|
py
|
Python
|
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | 15
|
2015-03-25T05:24:11.000Z
|
2021-12-18T04:24:06.000Z
|
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | null | null | null |
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | 10
|
2015-04-26T17:56:37.000Z
|
2020-09-24T14:01:53.000Z
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
load balancing service from AWS.
"""
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.elb.listelement import ListElement
from boto.regioninfo import RegionInfo, get_regions, load_regions
import boto
RegionData = load_regions().get('elasticloadbalancing', {})
def regions():
    """
    Get all available regions for the ELB service.

    :rtype: list
    :return: A list of :class:`boto.RegionInfo` instances
    """
    # Delegates to the shared region registry; each region is bound to
    # ELBConnection as its connection class.
    return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
def connect_to_region(region_name, **kw_params):
    """Given a valid region name, return a
    :class:`boto.ec2.elb.ELBConnection`.

    :param str region_name: The name of the region to connect to.

    :rtype: :class:`boto.ec2.ELBConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    # Find the first region whose name matches; None when there is no match.
    match = next((r for r in regions() if r.name == region_name), None)
    return match.connect(**kw_params) if match is not None else None
| 42.946721
| 91
| 0.633426
|
9c836060b9b7e80140ebb8a9cc363bc2e1d5ff72
| 9,677
|
py
|
Python
|
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Command line interface for the basis set exchange
'''
import argparse
import argcomplete
from .. import version
from .bse_handlers import bse_cli_handle_subcmd
from .check import cli_check_normalize_args
from .complete import (cli_case_insensitive_validator,
cli_family_completer, cli_role_completer, cli_bsname_completer,
cli_write_fmt_completer, cli_read_fmt_completer, cli_reffmt_completer)
| 59.368098
| 160
| 0.673349
|
9c8366ee191973d219cc50c6458365ebe9053724
| 376
|
py
|
Python
|
Backjoon/1929.py
|
hanjungwoo1/CodingTest
|
0112488d04dd53cea1c869439341fb602e699f2a
|
[
"MIT"
] | 3
|
2022-03-29T04:56:50.000Z
|
2022-03-30T08:06:42.000Z
|
Backjoon/1929.py
|
hanjungwoo1/CodingTest
|
0112488d04dd53cea1c869439341fb602e699f2a
|
[
"MIT"
] | null | null | null |
Backjoon/1929.py
|
hanjungwoo1/CodingTest
|
0112488d04dd53cea1c869439341fb602e699f2a
|
[
"MIT"
] | null | null | null |
"""
3 16
3
5
7
11
13
"""
import math
left, right = map(int, input().split())
array = [True for i in range(right+1)]
array[1] = 0
for i in range(2, int(math.sqrt(right)) + 1):
if array[i] == True:
j = 2
while i * j <= right:
array[i * j] = False
j += 1
for i in range(left, right+1):
if array[i]:
print(i)
| 13.925926
| 45
| 0.505319
|
92bb5127dacf316c62cd64b3874b283309deffd5
| 42,452
|
py
|
Python
|
tensorflow/tools/quantization/quantize_graph_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 374
|
2018-12-02T06:59:44.000Z
|
2022-03-15T10:34:00.000Z
|
tensorflow/tools/quantization/quantize_graph_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 157
|
2018-12-02T07:37:39.000Z
|
2022-03-16T09:49:11.000Z
|
tensorflow/tools/quantization/quantize_graph_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 141
|
2018-12-12T11:57:59.000Z
|
2022-02-28T13:12:58.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
if __name__ == "__main__":
test.main()
| 43.900724
| 80
| 0.692123
|
92bbcd24bf10bc66f379878a7b6917a00a8a96a4
| 2,698
|
py
|
Python
|
layerserver/migrations/0001_initial.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 5
|
2018-06-07T12:54:35.000Z
|
2022-01-14T10:38:38.000Z
|
layerserver/migrations/0001_initial.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 140
|
2018-06-18T10:27:28.000Z
|
2022-03-23T09:53:15.000Z
|
layerserver/migrations/0001_initial.py
|
aroiginfraplan/giscube-admin
|
b7f3131b0186f847f3902df97f982cb288b16a49
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T11:20:54.000Z
|
2021-04-13T11:20:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-26 09:14
import colorfield.fields
from django.db import migrations, models
import django.db.models.deletion
import giscube.utils
| 52.901961
| 182
| 0.610823
|
92bc543d24e721550df8b06cf7b80bb7637df99c
| 910
|
py
|
Python
|
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | 5
|
2020-10-07T23:53:30.000Z
|
2021-09-18T17:50:11.000Z
|
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | null | null | null |
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | 5
|
2020-12-13T16:49:41.000Z
|
2021-09-18T17:50:14.000Z
|
backgroundurl = "https://storage.needpix.com/rsynced_images/colored-background.jpg" # <- Need to be a Image URL!!!
lang = "en" # <- language code
displayset = True # <- Display the Set of the Item
raritytext = True # <- Display the Rarity of the Item
typeconfig = {
"BannerToken": True,
"AthenaBackpack": True,
"AthenaPetCarrier": True,
"AthenaPet": True,
"AthenaPickaxe": True,
"AthenaCharacter": True,
"AthenaSkyDiveContrail": True,
"AthenaGlider": True,
"AthenaDance": True,
"AthenaEmoji": True,
"AthenaLoadingScreen": True,
"AthenaMusicPack": True,
"AthenaSpray": True,
"AthenaToy": True,
"AthenaBattleBus": True,
"AthenaItemWrap": True
}
interval = 5 # <- Time (in seconds) until the bot checks for leaks again | Recommend: 7
watermark = "" # <- Leave it empty if you dont want one
watermarksize = 25 # <- Size of the Watermark
| 28.4375
| 115
| 0.66044
|
92bc6a8a2905baaef24ea73868b39d5f28b0a445
| 592
|
py
|
Python
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 1
|
2015-12-19T09:09:15.000Z
|
2015-12-19T09:09:15.000Z
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 6
|
2015-12-19T07:53:44.000Z
|
2021-12-13T19:35:10.000Z
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 2
|
2018-02-20T08:34:50.000Z
|
2018-03-28T14:29:52.000Z
|
import unittest
import settings
from healthvaultlib.helpers.connection import Connection
| 31.157895
| 90
| 0.72973
|
92bcea551d4afd1053bbca8f841cc813051b6539
| 484
|
py
|
Python
|
apps/extensions/migrations/0012_imports_path_urlfield_to_charfield.py
|
StepicOrg/stepik-apps
|
5825bc9b2444ad4690681964d1bed172706f8796
|
[
"Apache-2.0"
] | 5
|
2017-03-17T10:01:25.000Z
|
2018-03-23T05:56:25.000Z
|
apps/extensions/migrations/0012_imports_path_urlfield_to_charfield.py
|
StepicOrg/stepik-apps
|
5825bc9b2444ad4690681964d1bed172706f8796
|
[
"Apache-2.0"
] | 4
|
2020-06-05T17:34:05.000Z
|
2021-04-19T12:58:48.000Z
|
apps/extensions/migrations/0012_imports_path_urlfield_to_charfield.py
|
StepicOrg/stepik-apps
|
5825bc9b2444ad4690681964d1bed172706f8796
|
[
"Apache-2.0"
] | 2
|
2017-03-21T13:01:28.000Z
|
2017-04-27T14:33:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-09 03:01
from __future__ import unicode_literals
from django.db import migrations, models
| 23.047619
| 71
| 0.630165
|
92bcfabb83b949d7b865d6edb058159c8c815b8b
| 628
|
py
|
Python
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 17
|
2015-12-13T23:11:31.000Z
|
2020-07-19T00:40:18.000Z
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 8
|
2016-02-22T19:42:56.000Z
|
2016-07-13T10:58:04.000Z
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 3
|
2016-04-11T20:34:31.000Z
|
2021-03-12T10:33:02.000Z
|
'''
multi-threading (python3 version)
https://docs.python.org/3/library/threading.html
'''
from time import clock
import threading
THREADS=2
lock = threading.Lock()
A = 0
B = 0
C = 0
main()
| 14.604651
| 54
| 0.630573
|
92bd6cd2780084175f5bca66b4d32f6768777683
| 2,270
|
py
|
Python
|
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
import numpy as np
| 28.024691
| 93
| 0.574009
|
92bed45f1cd8f2bc90c85f74109f48fc3d320089
| 5,261
|
py
|
Python
|
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Zoe Game Engine Core Implementation
===================================
Requirements
------------
[pygame](http://www.pygame.org/)
"""
# core packages
# third-party packages
import pygame
# local package
import layer
__version__ = '0.0.0'
#=============================================================================
| 27.118557
| 78
| 0.472914
|
92c00c62a4d73688ee5500b37708447cbeae1913
| 557
|
py
|
Python
|
Authentication/migrations/0004_auto_20201115_1105.py
|
CHESyrian/Estebyan
|
015c0a8e95d033af04ba949942da79a4f5a90488
|
[
"MIT"
] | null | null | null |
Authentication/migrations/0004_auto_20201115_1105.py
|
CHESyrian/Estebyan
|
015c0a8e95d033af04ba949942da79a4f5a90488
|
[
"MIT"
] | null | null | null |
Authentication/migrations/0004_auto_20201115_1105.py
|
CHESyrian/Estebyan
|
015c0a8e95d033af04ba949942da79a4f5a90488
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-11-15 09:05
from django.db import migrations, models
| 23.208333
| 54
| 0.587074
|
92c32d549db39666405ca82ccd8b1e761fbef653
| 455
|
py
|
Python
|
dashboard/urls.py
|
EdisonBr/MockDados
|
c625cba7b93a8f31609549241c5aa71932e26b2d
|
[
"MIT"
] | null | null | null |
dashboard/urls.py
|
EdisonBr/MockDados
|
c625cba7b93a8f31609549241c5aa71932e26b2d
|
[
"MIT"
] | 4
|
2021-03-30T13:49:39.000Z
|
2021-06-10T19:40:02.000Z
|
dashboard/urls.py
|
smart320/MockDados
|
c625cba7b93a8f31609549241c5aa71932e26b2d
|
[
"MIT"
] | 1
|
2020-07-27T02:08:29.000Z
|
2020-07-27T02:08:29.000Z
|
from django.urls import path, re_path
from django.views.generic.base import TemplateView
from .views import dashboard_cost, dashboard_energy, MotorDataListView
app_name = 'dashboard'
urlpatterns = [
path('', MotorDataListView.as_view(), name='dashboard_custom'),
#path('', dashboard_custom, name='dashboard_custom'),
path('energy', dashboard_energy, name='dashboard_energy'),
path('cost', dashboard_cost, name='dashboard_cost'),
]
| 28.4375
| 71
| 0.745055
|
92c39bcd73eb7b9f3010061b7c3106141036133d
| 111
|
py
|
Python
|
Coursera/Python for Everybody Specialization/Python for everybody basics/hourly rate.py
|
ejgarcia1991/Courses-and-other-non-professional-projects
|
94794dd1d6cf626de174330311e3fde4d10cd460
|
[
"MIT"
] | 1
|
2021-02-19T22:33:55.000Z
|
2021-02-19T22:33:55.000Z
|
Coursera/Python for Everybody Specialization/Python for everybody basics/hourly rate.py
|
ejgarcia1991/Courses-and-other-non-professional-projects
|
94794dd1d6cf626de174330311e3fde4d10cd460
|
[
"MIT"
] | null | null | null |
Coursera/Python for Everybody Specialization/Python for everybody basics/hourly rate.py
|
ejgarcia1991/Courses-and-other-non-professional-projects
|
94794dd1d6cf626de174330311e3fde4d10cd460
|
[
"MIT"
] | null | null | null |
hrs = input("Enter Hours:")
rate = input("Enter rate:")
pay = float(hrs) * float(rate)
print("Pay: " +str(pay))
| 27.75
| 30
| 0.630631
|
92c3f1ad626b115da6ffe9d3c9d13ac69cd2a64e
| 18,742
|
py
|
Python
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 177
|
2019-06-13T09:54:49.000Z
|
2022-03-29T02:25:13.000Z
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 347
|
2019-06-12T17:47:45.000Z
|
2022-03-30T21:59:01.000Z
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 202
|
2019-06-11T15:01:26.000Z
|
2022-03-31T16:25:19.000Z
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk125", 0,
Subsignal("p", Pins("G10"), IOStandard("LVDS")),
Subsignal("n", Pins("F10"), IOStandard("LVDS"))
),
("clk300", 0,
Subsignal("p", Pins("AK17"), IOStandard("DIFF_SSTL12")),
Subsignal("n", Pins("AK16"), IOStandard("DIFF_SSTL12"))
),
("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")),
# Leds
("user_led", 0, Pins("AP8"), IOStandard("LVCMOS18")),
("user_led", 1, Pins("H23"), IOStandard("LVCMOS18")),
("user_led", 2, Pins("P20"), IOStandard("LVCMOS18")),
("user_led", 3, Pins("P21"), IOStandard("LVCMOS18")),
("user_led", 4, Pins("N22"), IOStandard("LVCMOS18")),
("user_led", 5, Pins("M22"), IOStandard("LVCMOS18")),
("user_led", 6, Pins("R23"), IOStandard("LVCMOS18")),
("user_led", 7, Pins("P23"), IOStandard("LVCMOS18")),
# Buttons
("user_btn_c", 0, Pins("AE10"), IOStandard("LVCMOS18")),
("user_btn_n", 0, Pins("AD10"), IOStandard("LVCMOS18")),
("user_btn_s", 0, Pins("AF8"), IOStandard("LVCMOS18")),
("user_btn_w", 0, Pins("AF9"), IOStandard("LVCMOS18")),
("user_btn_e", 0, Pins("AE8"), IOStandard("LVCMOS18")),
# Switches
("user_dip_btn", 0, Pins("AN16"), IOStandard("LVCMOS12")),
("user_dip_btn", 1, Pins("AN19"), IOStandard("LVCMOS12")),
("user_dip_btn", 2, Pins("AP18"), IOStandard("LVCMOS12")),
("user_dip_btn", 3, Pins("AN14"), IOStandard("LVCMOS12")),
# SMA
("user_sma_clock", 0,
Subsignal("p", Pins("D23"), IOStandard("LVDS")),
Subsignal("n", Pins("C23"), IOStandard("LVDS"))
),
("user_sma_clock_p", 0, Pins("D23"), IOStandard("LVCMOS18")),
("user_sma_clock_n", 0, Pins("C23"), IOStandard("LVCMOS18")),
("user_sma_gpio", 0,
Subsignal("p", Pins("H27"), IOStandard("LVDS")),
Subsignal("n", Pins("G27"), IOStandard("LVDS"))
),
("user_sma_gpio_p", 0, Pins("H27"), IOStandard("LVCMOS18")),
("user_sma_gpio_n", 0, Pins("G27"), IOStandard("LVCMOS18")),
# I2C
("i2c", 0,
Subsignal("scl", Pins("J24")),
Subsignal("sda", Pins("J25")),
IOStandard("LVCMOS18")
),
# Serial
("serial", 0,
Subsignal("cts", Pins("L23")),
Subsignal("rts", Pins("K27")),
Subsignal("tx", Pins("K26")),
Subsignal("rx", Pins("G25")),
IOStandard("LVCMOS18")
),
# SPIFlash
("spiflash", 0, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("U7")),
Subsignal("dq", Pins("AC7 AB7 AA7 Y7")),
IOStandard("LVCMOS18")
),
("spiflash", 1, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("G26")),
Subsignal("dq", Pins("M20 L20 R21 R22")),
IOStandard("LVCMOS18")
),
# SDCard
("spisdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cs_n", Pins("AH8")),
Subsignal("mosi", Pins("AD9"), Misc("PULLUP")),
Subsignal("miso", Pins("AP9"), Misc("PULLUP")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
("sdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cmd", Pins("AD9"), Misc("PULLUP True")),
Subsignal("data", Pins("AP9 AN9 AH9 AH8"), Misc("PULLUP True")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
# Rotary Encoder
("rotary", 0,
Subsignal("a", Pins("Y21")),
Subsignal("b", Pins("AD26")),
Subsignal("push", Pins("AF28")),
IOStandard("LVCMOS18")
),
# HDMI
("hdmi", 0,
Subsignal("d", Pins(
"AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12",
"AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11",
"AG10 AK8")),
Subsignal("de", Pins("AE11")),
Subsignal("clk", Pins("AF13")),
Subsignal("vsync", Pins("AH13")),
Subsignal("hsync", Pins("AE13")),
Subsignal("spdif", Pins("AE12")),
Subsignal("spdif_out", Pins("AF12")),
IOStandard("LVCMOS18")
),
# DDR4 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17",
"AF18 AH19 AF15 AD19 AJ14 AG19"),
IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AF17 AL15"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("AG15"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AF14"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AG14"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AD16"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cs_n", Pins("AL19"), IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("AH14"), IOStandard("SSTL12_DCI")),
#Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")),
#Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")),
#Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20",
"AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25",
"AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23",
"AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22",
"AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26",
"AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27",
"AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32",
"AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32"),
IOStandard("POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("clk_p", Pins("AE16"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_n", Pins("AE15"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cke", Pins("AD15"), IOStandard("SSTL12_DCI")),
Subsignal("odt", Pins("AJ18"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AL18"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST"),
),
# PCIe
("pcie_x1", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2")),
Subsignal("rx_n", Pins("AB1")),
Subsignal("tx_p", Pins("AC4")),
Subsignal("tx_n", Pins("AC3"))
),
("pcie_x2", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2")),
Subsignal("rx_n", Pins("AB1 AD1")),
Subsignal("tx_p", Pins("AC4 AE4")),
Subsignal("tx_n", Pins("AC3 AE3"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5"))
),
("pcie_x8", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3"))
),
# SGMII Clk
("sgmii_clock", 0,
Subsignal("p", Pins("P26"), IOStandard("LVDS_25")),
Subsignal("n", Pins("N26"), IOStandard("LVDS_25"))
),
# SI570
("si570_refclk", 0,
Subsignal("p", Pins("P6")),
Subsignal("n", Pins("P5"))
),
# SMA
("user_sma_mgt_refclk", 0,
Subsignal("p", Pins("V6")),
Subsignal("n", Pins("V5"))
),
("user_sma_mgt_tx", 0,
Subsignal("p", Pins("R4")),
Subsignal("n", Pins("R3"))
),
("user_sma_mgt_rx", 0,
Subsignal("p", Pins("P2")),
Subsignal("n", Pins("P1"))
),
# SFP
("sfp", 0,
Subsignal("txp", Pins("U4")),
Subsignal("txn", Pins("U3")),
Subsignal("rxp", Pins("T2")),
Subsignal("rxn", Pins("T1"))
),
("sfp_tx", 0,
Subsignal("p", Pins("U4")),
Subsignal("n", Pins("U3")),
),
("sfp_rx", 0,
Subsignal("p", Pins("T2")),
Subsignal("n", Pins("T1")),
),
("sfp_tx_disable_n", 0, Pins("AL8"), IOStandard("LVCMOS18")),
("sfp", 1,
Subsignal("txp", Pins("W4")),
Subsignal("txn", Pins("W3")),
Subsignal("rxp", Pins("V2")),
Subsignal("rxn", Pins("V1"))
),
("sfp_tx", 1,
Subsignal("p", Pins("W4")),
Subsignal("n", Pins("W3")),
),
("sfp_rx", 1,
Subsignal("p", Pins("V2")),
Subsignal("n", Pins("V1")),
),
("sfp_tx_disable_n", 1, Pins("D28"), IOStandard("LVCMOS18")),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("HPC", {
"DP0_C2M_P" : "F6",
"DP0_C2M_N" : "F5",
"DP0_M2C_P" : "E4",
"DP0_M2C_N" : "E3",
"DP1_C2M_P" : "D6",
"DP1_C2M_N" : "D5",
"DP1_M2C_P" : "D2",
"DP1_M2C_N" : "D1",
"DP2_C2M_P" : "C4",
"DP2_C2M_N" : "C3",
"DP2_M2C_P" : "B2",
"DP2_M2C_N" : "B1",
"DP3_C2M_P" : "B6",
"DP3_C2M_N" : "B5",
"DP3_M2C_P" : "A4",
"DP3_M2C_N" : "A3",
"DP4_C2M_P" : "N4",
"DP4_C2M_N" : "N3",
"DP4_M2C_P" : "M2",
"DP4_M2C_N" : "M1",
"DP5_C2M_P" : "J4",
"DP5_C2M_N" : "J3",
"DP5_M2C_P" : "H2",
"DP5_M2C_N" : "H1",
"DP6_C2M_P" : "L4",
"DP6_C2M_N" : "L3",
"DP6_M2C_P" : "K2",
"DP6_M2C_N" : "K1",
"DP7_C2M_P" : "G4",
"DP7_C2M_N" : "G3",
"DP7_M2C_P" : "F2",
"DP7_M2C_N" : "F1",
"LA06_P" : "D13",
"LA06_N" : "C13",
"LA10_P" : "L8",
"LA10_N" : "K8",
"LA14_P" : "B10",
"LA14_N" : "A10",
"LA18_CC_P" : "E22",
"LA18_CC_N" : "E23",
"LA27_P" : "H21",
"LA27_N" : "G21",
"HA01_CC_P" : "E16",
"HA01_CC_N" : "D16",
"HA05_P" : "J15",
"HA05_N" : "J14",
"HA09_P" : "F18",
"HA09_N" : "F17",
"HA13_P" : "B14",
"HA13_N" : "A14",
"HA16_P" : "A19",
"HA16_N" : "A18",
"HA20_P" : "C19",
"HA20_N" : "B19",
"CLK1_M2C_P" : "E25",
"CLK1_M2C_N" : "D25",
"LA00_CC_P" : "H11",
"LA00_CC_N" : "G11",
"LA03_P" : "A13",
"LA03_N" : "A12",
"LA08_P" : "J8",
"LA08_N" : "H8",
"LA12_P" : "E10",
"LA12_N" : "D10",
"LA16_P" : "B9",
"LA16_N" : "A9",
"LA20_P" : "B24",
"LA20_N" : "A24",
"LA22_P" : "G24",
"LA22_N" : "F25",
"LA25_P" : "D20",
"LA25_N" : "D21",
"LA29_P" : "B20",
"LA29_N" : "A20",
"LA31_P" : "B25",
"LA31_N" : "A25",
"LA33_P" : "A27",
"LA33_N" : "A28",
"HA03_P" : "G15",
"HA03_N" : "G14",
"HA07_P" : "L19",
"HA07_N" : "L18",
"HA11_P" : "J19",
"HA11_N" : "J18",
"HA14_P" : "F15",
"HA14_N" : "F14",
"HA18_P" : "B17",
"HA18_N" : "B16",
"HA22_P" : "C18",
"HA22_N" : "C17",
"GBTCLK1_M2C_P" : "H6",
"GBTCLK1_M2C_N" : "H5",
"GBTCLK0_M2C_P" : "K6",
"GBTCLK0_M2C_N" : "K5",
"LA01_CC_P" : "G9",
"LA01_CC_N" : "F9",
"LA05_P" : "L13",
"LA05_N" : "K13",
"LA09_P" : "J9",
"LA09_N" : "H9",
"LA13_P" : "D9",
"LA13_N" : "C9",
"LA17_CC_P" : "D24",
"LA17_CC_N" : "C24",
"LA23_P" : "G22",
"LA23_N" : "F22",
"LA26_P" : "G20",
"LA26_N" : "F20",
"PG_M2C" : "L27",
"HA00_CC_P" : "G17",
"HA00_CC_N" : "G16",
"HA04_P" : "G19",
"HA04_N" : "F19",
"HA08_P" : "K18",
"HA08_N" : "K17",
"HA12_P" : "K16",
"HA12_N" : "J16",
"HA15_P" : "D14",
"HA15_N" : "C14",
"HA19_P" : "D19",
"HA19_N" : "D18",
"PRSNT_M2C_B" : "H24",
"CLK0_M2C_P" : "H12",
"CLK0_M2C_N" : "G12",
"LA02_P" : "K10",
"LA02_N" : "J10",
"LA04_P" : "L12",
"LA04_N" : "K12",
"LA07_P" : "F8",
"LA07_N" : "E8",
"LA11_P" : "K11",
"LA11_N" : "J11",
"LA15_P" : "D8",
"LA15_N" : "C8",
"LA19_P" : "C21",
"LA19_N" : "C22",
"LA21_P" : "F23",
"LA21_N" : "F24",
"LA24_P" : "E20",
"LA24_N" : "E21",
"LA28_P" : "B21",
"LA28_N" : "B22",
"LA30_P" : "C26",
"LA30_N" : "B26",
"LA32_P" : "E26",
"LA32_N" : "D26",
"HA02_P" : "H19",
"HA02_N" : "H18",
"HA06_P" : "L15",
"HA06_N" : "K15",
"HA10_P" : "H17",
"HA10_N" : "H16",
"HA17_CC_P" : "E18",
"HA17_CC_N" : "E17",
"HA21_P" : "E15",
"HA21_N" : "D15",
"HA23_P" : "B15",
"HA23_N" : "A15",
}
),
("LPC", {
"GBTCLK0_M2C_P" : "AA24",
"GBTCLK0_M2C_N" : "AA25",
"LA01_CC_P" : "W25",
"LA01_CC_N" : "Y25",
"LA05_P" : "V27",
"LA05_N" : "V28",
"LA09_P" : "V26",
"LA09_N" : "W26",
"LA13_P" : "AA20",
"LA13_N" : "AB20",
"LA17_CC_P" : "AA32",
"LA17_CC_N" : "AB32",
"LA23_P" : "AD30",
"LA23_N" : "AD31",
"LA26_P" : "AF33",
"LA26_N" : "AG34",
"CLK0_M2C_P" : "AA24",
"CLK0_M2C_N" : "AA25",
"LA02_P" : "AA22",
"LA02_N" : "AB22",
"LA04_P" : "U26",
"LA04_N" : "U27",
"LA07_P" : "V22",
"LA07_N" : "V23",
"LA11_P" : "V21",
"LA11_N" : "W21",
"LA15_P" : "AB25",
"LA15_N" : "AB26",
"LA19_P" : "AA29",
"LA19_N" : "AB29",
"LA21_P" : "AC33",
"LA21_N" : "AD33",
"LA24_P" : "AE32",
"LA24_N" : "AF32",
"LA28_P" : "V31",
"LA28_N" : "W31",
"LA30_P" : "Y31",
"LA30_N" : "Y32",
"LA32_P" : "W30",
"LA32_N" : "Y30",
"LA06_P" : "V29",
"LA06_N" : "W29",
"LA10_P" : "T22",
"LA10_N" : "T23",
"LA14_P" : "U21",
"LA14_N" : "U22",
"LA18_CC_P" : "AB30",
"LA18_CC_N" : "AB31",
"LA27_P" : "AG31",
"LA27_N" : "AG32",
"CLK1_M2C_P" : "AC31",
"CLK1_M2C_N" : "AC32",
"LA00_CC_P" : "W23",
"LA00_CC_N" : "W24",
"LA03_P" : "W28",
"LA03_N" : "Y28",
"LA08_P" : "U24",
"LA08_N" : "U25",
"LA12_P" : "AC22",
"LA12_N" : "AC23",
"LA16_P" : "AB21",
"LA16_N" : "AC21",
"LA20_P" : "AA34",
"LA20_N" : "AB34",
"LA22_P" : "AC34",
"LA22_N" : "AD34",
"LA25_P" : "AE33",
"LA25_N" : "AF34",
"LA29_P" : "U34",
"LA29_N" : "V34",
"LA31_P" : "V33",
"LA31_N" : "W34",
"LA33_P" : "W33",
"LA33_N" : "Y33",
}
),
("pmod0", "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17"),
("pmod1", "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17"),
]
# Platform -----------------------------------------------------------------------------------------
| 34.836431
| 100
| 0.442642
|
92c62cbe56cec51196d1580ada73d616cb7c64b7
| 1,543
|
py
|
Python
|
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import math
# In[2]:
fileObj = open('../data/advent_of_code_input_day_three.txt', "r") #opens the file in read mode.
items = fileObj. read(). splitlines() #puts the file into an array.
# In[3]:
#print (items)
holding = []
for i, line in enumerate(items):
result = split(line)
holding.append(result)
holding = np.array(holding)
holding[holding == '.'] = 0
holding[holding == '#'] = 1
holding = holding.astype(int)
print (holding)
# In[7]:
down1_right3 = dup_and_count(3,1,holding)
down1_right1 = dup_and_count(1,1,holding)
down1_right5 = dup_and_count(5,1,holding)
down1_right7 = dup_and_count(7,1,holding)
down2_right1 = dup_and_count(1,2,holding)
results = np.array([down1_right3, down1_right1, down1_right5, down1_right7, down2_right1], dtype=np.int64)
print(results)
product = np.prod(results)
print (product)
# In[ ]:
| 20.302632
| 106
| 0.700583
|
92c68dfb6fa34afab6acdb9718f1d8ef9da011d5
| 670
|
py
|
Python
|
input_handler.py
|
Wyverns010/Body-Keypoints-Detection
|
980445da5e87c898a00a8ef1c9e1e143d09d4643
|
[
"Apache-2.0"
] | 1
|
2021-06-06T07:08:27.000Z
|
2021-06-06T07:08:27.000Z
|
input_handler.py
|
Wyverns010/Body-Keypoints-Detection
|
980445da5e87c898a00a8ef1c9e1e143d09d4643
|
[
"Apache-2.0"
] | null | null | null |
input_handler.py
|
Wyverns010/Body-Keypoints-Detection
|
980445da5e87c898a00a8ef1c9e1e143d09d4643
|
[
"Apache-2.0"
] | null | null | null |
import os
import traceback
if __name__ == '__main__':
obj = InputHandler()
print(obj.listFiles())
| 25.769231
| 135
| 0.60597
|
92c83bb936e6892d8eb39bcbfcb76fe95e1f5577
| 1,281
|
py
|
Python
|
docker/autoconfig.py
|
misc0110/bepasty-server
|
662179671220d680fed57aa90894ffebf57dd4c7
|
[
"BSD-2-Clause"
] | null | null | null |
docker/autoconfig.py
|
misc0110/bepasty-server
|
662179671220d680fed57aa90894ffebf57dd4c7
|
[
"BSD-2-Clause"
] | null | null | null |
docker/autoconfig.py
|
misc0110/bepasty-server
|
662179671220d680fed57aa90894ffebf57dd4c7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
import os
import sys
SITENAME = os.environ.get("BEPASTY_SITENAME", None)
if SITENAME is None:
print("\n\nEnvironment variable BEPASTY_SITENAME must be set.")
sys.exit(1)
SECRET_KEY = os.environ.get("BEPASTY_SECRET_KEY", None)
if SECRET_KEY is None:
print("\n\nEnvironment variable BEPASTY_SECRET_KEY must be set.")
sys.exit(1)
APP_BASE_PATH = os.environ.get("BEPASTY_APP_BASE_PATH", None)
STORAGE_FILESYSTEM_DIRECTORY = os.environ.get(
"BEPASTY_STORAGE_FILESYSTEM_DIRECTORY", "/app/data",
)
DEFAULT_PERMISSIONS = os.environ.get("BEPASTY_DEFAULT_PERMISSIONS", "create,read")
PERMISSIONS = {}
admin_secret = os.environ.get("BEPASTY_ADMIN_SECRET", None)
if admin_secret is not None:
PERMISSIONS.update({admin_secret: "admin,list,create,modify,read,delete"})
try:
max_allowed_file_size = os.environ.get("BEPASTY_MAX_ALLOWED_FILE_SIZE", 5000000000)
MAX_ALLOWED_FILE_SIZE = int(max_allowed_file_size)
except ValueError as err:
print("\n\nInvalid BEPASTY_MAX_ALLOWED_FILE_SIZE: %s", str(err))
sys.exit(1)
try:
max_body_size = os.environ.get("BEPASTY_MAX_BODY_SIZE", 1040384)
MAX_BODY_SIZE = int(max_body_size)
except ValueError as err:
print("\n\nInvalid BEPASTY_MAX_BODY_SIZE: %s", str(err))
sys.exit(1)
| 30.5
| 87
| 0.753318
|
92c8fb39f9443d549d8e36137c05b64ee86a7a00
| 13,786
|
py
|
Python
|
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | 3
|
2018-07-09T04:39:24.000Z
|
2020-11-27T05:44:56.000Z
|
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | null | null | null |
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | 1
|
2018-08-02T21:57:11.000Z
|
2018-08-02T21:57:11.000Z
|
from io import StringIO
import re
import tokenize
import os
from collections import deque, ChainMap
from functools import lru_cache
from enum import Enum
import pysh
from pysh.path import PathWrapper, Path
from typing import List, Callable, Iterator, Tuple, NamedTuple, Deque, Union, Any
TBangTransformer = Callable[ [List[str]], Iterator[str]]
# runtime symbols
__all__ = ['BangExpr', 'BangOp', 'BangSeq', 'BangGlob', 'BangEnv', 'BangBang']
TBangLexerToken = Tuple[str, str, Tuple[int,int]]
class BangEnv:
__slots__ = ('name',)
def parse_bangexpr(code: str) -> str:
as_str = lambda s: "'{}'".format(s.replace("\\", "\\\\").replace("'", "\\'"))
lexer = BangLexer().scan(code)
seq = []
exprs = []
while True:
tkn = next(lexer, None)
if tkn and tkn.type != BangTokenType.OP:
if tkn.type in (BangTokenType.LOCAL, BangTokenType.EXPR):
seq.append(tkn.value)
elif tkn.type == BangTokenType.ENV:
seq.append('pysh.BangEnv({})'.format(as_str(tkn.value)))
elif tkn.type == BangTokenType.OPAQUE:
seq.append('{}'.format(as_str(tkn.value)))
elif tkn.type == BangTokenType.GLOB:
seq.append('pysh.BangGlob({})'.format(as_str(tkn.value)))
else:
assert False, 'Unexpected token {}'.format(tkn.type)
continue
if seq:
if len(seq) > 1:
exprs.append('pysh.BangSeq({})'.format(', '.join(seq)))
else:
exprs.append(seq[0])
seq = []
if not tkn:
break
assert tkn.type == BangTokenType.OP
if tkn.value == ' ':
continue
exprs.append('pysh.BangOp("{}")'.format(tkn.value))
# We need to provide locals/globals so we can resolve commands to variables
return 'pysh.BangExpr({}, locals=locals(), globals=globals())'.format(', '.join(exprs))
def transform(code: StringIO, transformer: TBangTransformer) -> Iterator[str]:
""" Scans python code to transform bang expressions.
Given some python code it will extract bang expressions and process
them with a callback that can report back the transformation.
Returns a generator that allows to consume the transformed code
line by line.
"""
tokens = tokenize.generate_tokens(code.readline)
bangexpr = [] # type: List[str]
bangcont = False
prebang = None
ptkn = None
indent = 0
bang_indent = -100
last_bang_line = -100
for ctkn in tokens:
if ctkn.type == tokenize.INDENT:
indent += 1
if last_bang_line + 1 == ctkn.start[0]:
bang_indent = indent
elif ctkn.type == tokenize.DEDENT:
indent -= 1
if bang_indent > indent:
bang_indent = -100
# due to continuations we can't rely on NEWLINE tokens, instead we have
# use the lexical information to detect when we're on a new line
#TODO: Support indent/dedent for multiline
if ptkn and ctkn.start[0] > ptkn.start[0]:
if bangcont or bang_indent == indent:
if ctkn.type is tokenize.ENDMARKER:
raise SyntaxError('BangExpr continuation at program end')
line = ctkn.line.rstrip('\r\n')
bangexpr.append(line)
bangcont = line.endswith('\\')
last_bang_line = ctkn.start[0]
elif bangexpr:
lines = list(transformer(bangexpr))
assert len(lines) <= len(bangexpr)
if lines and prebang:
lines[0] = prebang + lines[0]
yield from lines
bangexpr = []
last_bang_line = ptkn.start[0]
else:
yield ptkn.line
ptkn = ctkn
if bangexpr:
continue
if ctkn.string == '!':
col = ctkn.start[1]
prebang = ctkn.line[0:col]
line = ctkn.line[col+1:].lstrip(' \t').rstrip('\r\n')
bangexpr.append(line.rstrip('\\'))
bangcont = line.endswith('\\')
last_bang_line = ctkn.start[0]
assert not bangexpr, bangexpr
def transformer(lines: List[str]) -> Iterator[str]:
if lines[0].startswith('!'):
#TODO: Detect $ident to expose them on env when evaluated
lines[0] = lines[0][1:]
code = '\n'.join(lines)
code = code.strip().replace("'", "\\'").replace("\\", "\\\\")
code = "pysh.BangBang('{}')".format(code)
lines = code.split('\n')
for line in lines:
yield line
else:
yield from parse_bangexpr(' '.join(lines)).split('\n')
from io import StringIO
code = r'''
foo = ! ls foo${bar}.* \
| grep foo
> /dev/null
foo = r' ls foo${bar} ' >> expr
expr<' ls foo${bar} '
!! #!/bin/fish
ls .*
'''.strip()
#TODO: !! is probably better solved with:
# locals are solved with inspect.frame.f_locals
sh << r'''
# << means with variables interpolated
# < is plain text
ls .*
'''
for line in transform(StringIO(code), transformer):
print(line.rstrip('\n'))
from pysh.command import command
ls = command('ls')
grep = command('grep')
bar = 10
print('::BangExpr::')
be = BangExpr('ls', BangSeq('foo', bar, BangGlob('.*')), BangOp("|"), 'grep', 'foo', 'baz', BangOp(">"), '/dev/null', locals=locals(), globals=globals())
# print(be)
print('::BangBang::')
bb = BangBang('''#!/bin/bash
ls *.py''')
print(bb)
| 31.260771
| 153
| 0.533585
|
92ca0cfb3a6ca200081a09f8a2c36869b58c22cb
| 2,449
|
py
|
Python
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 54
|
2018-11-27T06:00:52.000Z
|
2022-03-24T09:41:01.000Z
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 27
|
2017-07-04T17:45:51.000Z
|
2019-09-12T06:56:27.000Z
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import numpy
import os
import ssl
| 40.147541
| 95
| 0.683953
|
92ca255eec01c1e82a3ad0136582786783c1c0bd
| 4,743
|
py
|
Python
|
start.py
|
mickeyckm/nanodegree-freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | 1
|
2016-10-13T05:25:36.000Z
|
2016-10-13T05:25:36.000Z
|
start.py
|
mickeyckm/freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | null | null | null |
start.py
|
mickeyckm/freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | null | null | null |
import os
import tmdbsimple as tmdb
import media
import fresh_tomatoes as ft
movies = []
if os.environ.get('TMDB_API', False):
# Retrieve API KEY
tmdb.API_KEY = os.environ['TMDB_API']
# TMDB Movie Ids
movie_ids = [271110, 297761, 246655, 278154, 135397, 188927]
# Get Configuration
configuration = tmdb.Configuration().info()
image_base_url = configuration['images']['secure_base_url']
image_width = "w500"
for movie_id in movie_ids:
m = tmdb.Movies(movie_id)
# Retrieve Image URL
minfo = m.info()
poster_image_url = image_base_url + image_width + minfo['poster_path']
# Retrieve Youtube Video URL
videos = m.videos()
video = videos['results'][0]
youtube_url = 'https://youtube.com/watch?v=' + video['key']
# Append Movie object
movie = media.Movie(m.title)
movie.storyline = m.overview
movie.poster_url = poster_image_url
movie.trailer_url = youtube_url
movies.append(movie)
else:
# Avatar
avatar = media.Movie("Avatar")
avatar.storyline = ("A paraplegic marine dispatched to the moon Pandora "
"on a unique mission becomes torn between following "
"his orders and protecting the world he feels is "
"his home.")
avatar.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/b/b0/Avatar-Teaser-Poster.jpg")
avatar.trailer_url = "https://www.youtube.com/watch?v=-9ceBgWV8io"
# Deadpool
deadpool = media.Movie("Deadpool")
deadpool.storyline = ("A fast-talking mercenary with a morbid sense of "
"humor is subjected to a rogue experiment that "
"leaves him with accelerated healing powers and a "
"quest for revenge.")
deadpool.poster_url = ("https://upload.wikimedia.org/wikipedia/en/4/46/"
"Deadpool_poster.jpg")
deadpool.trailer_url = "https://www.youtube.com/watch?v=gtTfd6tISfw"
# Ghostbusters
ghostbusters = media.Movie("Ghostbusters")
ghostbusters.storyline = ("Following a ghost invasion of Manhattan, "
"paranormal enthusiasts Erin Gilbert and Abby "
"Yates, nuclear engineer Jillian Holtzmann, "
"and subway worker Patty Tolan band together "
"to stop the otherworldly threat.")
ghostbusters.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/3/32/Ghostbusters_2016_film_poster.png")
ghostbusters.trailer_url = "https://www.youtube.com/watch?v=w3ugHP-yZXw"
# Olympus
olympus = media.Movie("Olympus Has Fallen")
olympus.storyline = ("Disgraced Secret Service agent (and former "
"presidential guard) Mike Banning finds himself "
"trapped inside the White House in the wake of a "
"terrorist attack; using his inside knowledge, "
"Banning works with national security to rescue "
"the President from his kidnappers.")
olympus.poster_url = ("https://upload.wikimedia.org/wikipedia/en/b/bf/"
"Olympus_Has_Fallen_poster.jpg")
olympus.trailer_url = "https://www.youtube.com/watch?v=vwx1f0kyNwI"
# Angry Birds
angry_birds = media.Movie("The Angry Birds Movie")
angry_birds.storyline = ("Find out why the birds are so angry. When an "
"island populated by happy, flightless birds "
"is visited by mysterious green piggies, it's "
"up to three unlikely outcasts - Red, Chuck "
"and Bomb - to figure out what the pigs are up "
"to.")
angry_birds.poster_url = ("https://upload.wikimedia.org/wikipedia/en/f/"
"f9/The_Angry_Birds_Movie_poster.png")
angry_birds.trailer_url = "https://www.youtube.com/watch?v=1U2DKKqxHgE"
# Ironman
ironman = media.Movie("Iron Man")
ironman.storyline = ("After being held captive in an Afghan cave, "
"billionaire engineer Tony Stark creates a unique "
"weaponized suit of armor to fight evil.")
ironman.poster_url = ("https://upload.wikimedia.org/wikipedia/en/7/70/"
"Ironmanposter.JPG")
ironman.trailer_url = "https://www.youtube.com/watch?v=8hYlB38asDY"
movies = [avatar, deadpool, ghostbusters, olympus, angry_birds, ironman]
ft.open_movies_page(movies)
| 43.916667
| 78
| 0.59688
|
92cab9ec692aa8897ecccca29c25b34c478b66a7
| 8,798
|
py
|
Python
|
qiskit_metal/_gui/elements_ui.py
|
sarafs1926/qiskit-metal
|
cf2ce8125ebe8f21b6d1b85362466fd57db2cada
|
[
"Apache-2.0"
] | 1
|
2022-01-27T07:11:49.000Z
|
2022-01-27T07:11:49.000Z
|
qiskit_metal/_gui/elements_ui.py
|
sarafs1926/qiskit-metal
|
cf2ce8125ebe8f21b6d1b85362466fd57db2cada
|
[
"Apache-2.0"
] | null | null | null |
qiskit_metal/_gui/elements_ui.py
|
sarafs1926/qiskit-metal
|
cf2ce8125ebe8f21b6d1b85362466fd57db2cada
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './elements_ui.ui',
# licensing of './elements_ui.ui' applies.
#
# Created: Wed Jun 16 14:29:03 2021
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
from . import main_window_rc_rc
| 49.988636
| 100
| 0.639236
|
92cb45afbfe997d9e2fe7ab0970f2d0910fc5659
| 148
|
py
|
Python
|
Python/function.py
|
manishaverma1012/programs
|
dd77546219eab2f2ee81dd0d599b78ebd8f95957
|
[
"MIT"
] | null | null | null |
Python/function.py
|
manishaverma1012/programs
|
dd77546219eab2f2ee81dd0d599b78ebd8f95957
|
[
"MIT"
] | null | null | null |
Python/function.py
|
manishaverma1012/programs
|
dd77546219eab2f2ee81dd0d599b78ebd8f95957
|
[
"MIT"
] | null | null | null |
digit = input(" the cube of which digit do you want >")
result = cube(int(digit))
print(result)
| 18.5
| 56
| 0.702703
|
92cd8cee441a839cf57967c393c922a1fab007b8
| 1,203
|
py
|
Python
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | 1
|
2018-06-26T21:49:31.000Z
|
2018-06-26T21:49:31.000Z
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | 8
|
2017-12-05T08:34:25.000Z
|
2018-04-30T08:58:18.000Z
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | null | null | null |
try:
from unittest.mock import patch
except ImportError: # pragma: no cover
from mock import patch
from proofreader.runner import run, _run_command
| 28.642857
| 83
| 0.729842
|
92ce1ba4b6776bf939e55fcd9a49ebf0d28494b0
| 1,266
|
py
|
Python
|
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | null | null | null |
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | 5
|
2020-04-13T15:31:37.000Z
|
2022-03-12T00:23:27.000Z
|
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | null | null | null |
from tanim.utils.config_ops import digest_config
from tanim.utils.iterables import list_update
# Currently, this is only used by both Scene and Mobject.
# Still, we abstract its functionality here, albeit purely nominally.
# All actual implementation has to be handled by derived classes for now.
| 32.461538
| 76
| 0.657188
|
92ce6473bab7c8882ecd1ab85554b02e243b4587
| 5,076
|
py
|
Python
|
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | 3
|
2020-10-01T08:46:12.000Z
|
2021-01-25T11:32:16.000Z
|
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | null | null | null |
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | 1
|
2020-09-24T11:20:23.000Z
|
2020-09-24T11:20:23.000Z
|
from config import *
| 25.766497
| 121
| 0.573483
|
92cea7421504e38a8678084f761b6c6af9dcfff2
| 1,231
|
py
|
Python
|
12-Querying-Data-II/just_filtering.py
|
dwang-ischool/w205
|
ebcdf684dc653951691faaa2787896a2d2406539
|
[
"Apache-2.0"
] | 23
|
2018-10-21T17:47:56.000Z
|
2022-03-06T04:50:27.000Z
|
12a/just_filtering.py
|
FuriousGeorge19/W205-Course-Content
|
f51046d7507fba9ba9f7521cda437d7dad803e5b
|
[
"Apache-2.0"
] | null | null | null |
12a/just_filtering.py
|
FuriousGeorge19/W205-Course-Content
|
f51046d7507fba9ba9f7521cda437d7dad803e5b
|
[
"Apache-2.0"
] | 9
|
2020-03-16T08:52:58.000Z
|
2022-02-09T09:31:51.000Z
|
#!/usr/bin/env python
"""Extract events from kafka and write them to hdfs
"""
import json
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import udf
def main():
"""main
"""
spark = SparkSession \
.builder \
.appName("ExtractEventsJob") \
.getOrCreate()
raw_events = spark \
.read \
.format("kafka") \
.option("kafka.bootstrap.servers", "kafka:29092") \
.option("subscribe", "events") \
.option("startingOffsets", "earliest") \
.option("endingOffsets", "latest") \
.load()
purchase_events = raw_events \
.select(raw_events.value.cast('string').alias('raw'),
raw_events.timestamp.cast('string')) \
.filter(is_purchase('raw'))
extracted_purchase_events = purchase_events \
.rdd \
.map(lambda r: Row(timestamp=r.timestamp, **json.loads(r.raw))) \
.toDF()
extracted_purchase_events.printSchema()
extracted_purchase_events.show()
if __name__ == "__main__":
main()
| 25.122449
| 73
| 0.613323
|
92cec8b3278d323143a4d7cc2f5e6ab7db12785e
| 434
|
py
|
Python
|
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | 3
|
2020-03-14T04:43:00.000Z
|
2022-02-02T15:22:48.000Z
|
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | null | null | null |
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | null | null | null |
import numpy as np
from pysz import compress, decompress
test_compress_decompress()
| 25.529412
| 84
| 0.71659
|
92cf711f3ee4d4acd21a60efb873e479a2b9db00
| 447
|
py
|
Python
|
sparkdq/outliers/params/KSigmaParams.py
|
PasaLab/SparkDQ
|
16d50210747ef7de03cf36d689ce26ff7445f63a
|
[
"Apache-2.0"
] | 1
|
2021-02-08T07:49:54.000Z
|
2021-02-08T07:49:54.000Z
|
sparkdq/outliers/params/KSigmaParams.py
|
PasaLab/SparkDQ
|
16d50210747ef7de03cf36d689ce26ff7445f63a
|
[
"Apache-2.0"
] | null | null | null |
sparkdq/outliers/params/KSigmaParams.py
|
PasaLab/SparkDQ
|
16d50210747ef7de03cf36d689ce26ff7445f63a
|
[
"Apache-2.0"
] | null | null | null |
import json
from sparkdq.outliers.params.OutlierSolverParams import OutlierSolverParams
from sparkdq.outliers.OutlierSolver import OutlierSolver
| 23.526316
| 75
| 0.736018
|
92d067e85ffe42672816ef3e9eaff85647966d45
| 1,312
|
py
|
Python
|
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
| 32
| 108
| 0.529726
|
92d135cd3396bc2bfc2ba5711e29b118672c8503
| 1,676
|
py
|
Python
|
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
import re
from setuptools import setup, find_packages
import sys
if sys.version_info < (3, 5):
raise 'must use Python version 3.5 or higher'
with open('./gmailapi_backend/__init__.py', 'r') as f:
MATCH_EXPR = "__version__[^'\"]+(['\"])([^'\"]+)"
VERSION = re.search(MATCH_EXPR, f.read()).group(2).strip()
setup(
name='django-gmailapi-backend',
version=VERSION,
packages=find_packages(),
author="Michele Dolfi",
author_email="michele.dolfi@gmail.com",
license="Apache License 2.0",
entry_points={
'console_scripts': [
'gmail_oauth2 = gmailapi_backend.bin.gmail_oauth2:main',
]
},
install_requires=[
'google-api-python-client~=2.0',
'google-auth>=1.16.0,<3.0.0dev',
],
url="https://github.com/dolfim/django-gmailapi-backend",
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
description='Email backend for Django which sends email via the Gmail API',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django',
'Topic :: Communications :: Email',
'Development Status :: 4 - Beta'
],
)
| 33.52
| 79
| 0.614558
|
92d23334c19f98d7d5d931da713ce60c1a673466
| 1,351
|
py
|
Python
|
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | 1
|
2019-04-24T14:10:08.000Z
|
2019-04-24T14:10:08.000Z
|
"""Script to ensure a configuration file exists."""
import argparse
import os
import openpeerpower.config as config_util
from openpeerpower.core import OpenPeerPower
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=(
"Ensure a Open Peer Power config exists, creates one if necessary."
)
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Open Peer Power configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
opp = OpenPeerPower()
opp.config.config_dir = config_dir
config_path = opp.loop.run_until_complete(async_run(opp))
print("Configuration file:", config_path)
return 0
| 28.145833
| 79
| 0.687639
|
92d2be755f1c0894c43d329732b414de4bf31ab2
| 195
|
py
|
Python
|
atcoder/abc132A_fifty_fifty.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 31
|
2020-05-13T01:07:55.000Z
|
2021-07-13T07:53:26.000Z
|
atcoder/abc132A_fifty_fifty.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 10
|
2020-05-20T07:22:09.000Z
|
2021-07-19T03:52:13.000Z
|
atcoder/abc132A_fifty_fifty.py
|
uninhm/kyopro
|
bf6ed9cbf6a5e46cde0291f7aa9d91a8ddf1f5a3
|
[
"BSD-3-Clause"
] | 14
|
2020-05-11T05:58:36.000Z
|
2021-12-07T03:20:43.000Z
|
# Vicfred
# https://atcoder.jp/contests/abc132/tasks/abc132_a
# implementation
S = list(input())
if len(set(S)) == 2:
if S.count(S[0]) == 2:
print("Yes")
quit()
print("No")
| 16.25
| 51
| 0.574359
|
92d3126cd9f9279a6936076ceba3b9c4bff9aa48
| 11,146
|
py
|
Python
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 500
|
2019-04-01T13:50:18.000Z
|
2022-03-07T01:50:45.000Z
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 111
|
2019-04-01T17:48:40.000Z
|
2020-03-27T16:39:19.000Z
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 60
|
2019-04-01T14:58:35.000Z
|
2021-08-13T02:58:20.000Z
|
import pytest
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn.datasets import (make_regression, make_blobs, load_digits,
fetch_openml, load_diabetes)
from sklearn.preprocessing import KBinsDiscretizer
from dabl.preprocessing import clean, detect_types, guess_ordinal
from dabl.plot.supervised import (
plot, plot_classification_categorical,
plot_classification_continuous, plot_regression_categorical,
plot_regression_continuous)
from dabl.utils import data_df_from_bunch
from dabl import set_config
# FIXME: check that target is not y but a column name
def test_float_classification_target():
# check we can plot even if we do classification with a float target
X, y = make_blobs()
data = pd.DataFrame(X)
data['target'] = y.astype(np.float)
types = detect_types(data)
assert types.categorical['target']
plot(data, target_col='target')
# same with "actual float" - we need to specify classification for that :-/
data['target'] = y.astype(np.float) + .2
plot(data, target_col='target', type_hints={'target': 'categorical'})
plt.close("all")
| 35.610224
| 79
| 0.63386
|
92d3e306e086847f38535479f8de8893955d728c
| 4,480
|
py
|
Python
|
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | 15
|
2018-05-06T20:54:41.000Z
|
2020-12-04T05:36:11.000Z
|
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | null | null | null |
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | 3
|
2020-12-04T05:36:13.000Z
|
2021-09-08T11:53:16.000Z
|
################################################################################
# Copyright (c) 2017 Dan Iorga, Tyler Sorenson, Alastair Donaldson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import sys
import json
from pprint import pprint
if __name__ == "__main__":
if len(sys.argv) != 2:
print("usage: " + sys.argv[0] + " <ranked_environments>.json\n")
exit(1)
rank = CalculateRank(sys.argv[1])
rank.get_rank()
| 39.646018
| 107
| 0.579241
|
92d42a0f362d457b4431027767a7c26d248663c3
| 6,206
|
py
|
Python
|
contrib/antlrqueryparser/src/python/generate_asts.py
|
marblestation/montysolr
|
50917b4d53caac633fe9d1965f175401b3edc77d
|
[
"Apache-2.0"
] | 24
|
2015-03-23T17:57:47.000Z
|
2021-11-01T17:08:58.000Z
|
contrib/antlrqueryparser/src/python/generate_asts.py
|
marblestation/montysolr
|
50917b4d53caac633fe9d1965f175401b3edc77d
|
[
"Apache-2.0"
] | 138
|
2015-01-02T16:47:51.000Z
|
2022-02-26T02:44:07.000Z
|
contrib/antlrqueryparser/src/python/generate_asts.py
|
marblestation/montysolr
|
50917b4d53caac633fe9d1965f175401b3edc77d
|
[
"Apache-2.0"
] | 10
|
2015-03-11T19:49:51.000Z
|
2020-12-09T09:22:02.000Z
|
import sys
import subprocess as sub
import os
"""
Simple utility script to generate HTML charts of how ANTLR parses
every query and what is the resulting AST.
"""
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.insert(1, "StandardLuceneGrammar")
run(*sys.argv[1:])
| 30.722772
| 144
| 0.496616
|
92d5a318d2e721b05edd8c4dc433e4875c24b448
| 6,318
|
py
|
Python
|
visual_perception/Detection/yolov4/__init__.py
|
SSusantAchary/Visual-Perception
|
b81ffe69ab85e9afb7ee6eece43ac83c8f292285
|
[
"MIT"
] | null | null | null |
visual_perception/Detection/yolov4/__init__.py
|
SSusantAchary/Visual-Perception
|
b81ffe69ab85e9afb7ee6eece43ac83c8f292285
|
[
"MIT"
] | null | null | null |
visual_perception/Detection/yolov4/__init__.py
|
SSusantAchary/Visual-Perception
|
b81ffe69ab85e9afb7ee6eece43ac83c8f292285
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020 Susant Achary <sache.meet@yahoo.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from visual_perception.Detection.yolov4.tf import YOLOv4 as yolo_main
import numpy as np
import cv2
labels = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat',
9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog',
17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella',
26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite',
34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass',
41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange',
50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant',
59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone',
68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',
77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}
| 43.875
| 141
| 0.608895
|
92d6916cbd5aa31c26daff18d295d7d026f17d82
| 517
|
py
|
Python
|
server/mqtt/handler.py
|
rishab-rb/MyIOTMap
|
e27a73b58cd3a9aba558ebacfb2bf8b6ef4761aa
|
[
"MIT"
] | 1
|
2018-10-08T06:11:20.000Z
|
2018-10-08T06:11:20.000Z
|
server/mqtt/handler.py
|
rishab-rb/MyIOTMap
|
e27a73b58cd3a9aba558ebacfb2bf8b6ef4761aa
|
[
"MIT"
] | null | null | null |
server/mqtt/handler.py
|
rishab-rb/MyIOTMap
|
e27a73b58cd3a9aba558ebacfb2bf8b6ef4761aa
|
[
"MIT"
] | 2
|
2018-07-30T08:18:22.000Z
|
2018-10-11T08:04:58.000Z
|
import paho.client as mqtt
HOST = 'localhost'
PORT = 1883
| 19.884615
| 61
| 0.609284
|
92d713c9c1666b57fdf260fc3597ec5bb433209c
| 1,886
|
py
|
Python
|
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env python
from black import main
import spacy
import json
from spacy import displacy
import unidecode
import pandas as pd
import numpy as np
import os
csv_source = "scripts/spacy_files/data/thesis_200_with_school.csv"
df = pd.read_csv(csv_source)
df = df[df['isScan']==False]
df = df.sort_values('isScan', ascending=False)
text1= "Escuela de Enfermera"
text2 = "ESCUELA DE ENFERMERIA"
file = open("scripts/spacy_files/data/escuelas.json", "r")
file = json.load(file)
temp_list = []
for facultad in file:
temp_list.append(facultad['escuela'])
#print(facultad['escuela'])
escuelas = [item for sublist in temp_list for item in sublist] # make the list flat
#print(escuelas)
text1_u = unidecode.unidecode(text1)
text1_l_u = text1_u.lower()
text2_l_u = unidecode.unidecode(text2).lower()
print(text1_l_u, "<-->", text2_l_u)
if text1_l_u == text2_l_u:
print(text1, " is correct.")
if __name__ == "__main__":
u_escuelas = set_school_to_unaccent(escuelas)
u_escuelas_dict = create_dictionary(u_escuelas)
escuelas_dict = create_dictionary(escuelas)
print(u_escuelas_dict)
print(escuelas_dict)
print(set_schools_accents("No school", u_escuelas_dict, escuelas_dict))
| 25.486486
| 83
| 0.718982
|
92d920562d22f1142cab1ea79e81051636bf317f
| 7,212
|
py
|
Python
|
test/unittest_base.py
|
dat-boris/tensorforce
|
d777121b1c971da5500572c5f83173b9229f7370
|
[
"Apache-2.0"
] | null | null | null |
test/unittest_base.py
|
dat-boris/tensorforce
|
d777121b1c971da5500572c5f83173b9229f7370
|
[
"Apache-2.0"
] | null | null | null |
test/unittest_base.py
|
dat-boris/tensorforce
|
d777121b1c971da5500572c5f83173b9229f7370
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from copy import deepcopy
from datetime import datetime
import os
import sys
import warnings
from tensorforce import TensorforceError
from tensorforce.agents import Agent
from tensorforce.core.layers import Layer
from tensorforce.environments import Environment
from tensorforce.execution import Runner
from test.unittest_environment import UnittestEnvironment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
| 35.880597
| 97
| 0.634498
|
92dadd375392924dd104471b2991098481772c78
| 132
|
py
|
Python
|
mspray/apps/reveal/__init__.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | null | null | null |
mspray/apps/reveal/__init__.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | 76
|
2018-03-15T09:37:56.000Z
|
2019-05-15T12:45:51.000Z
|
mspray/apps/reveal/__init__.py
|
onaio/mspray
|
b3e0f4b5855abbf0298de6b66f2e9f472f2bf838
|
[
"Apache-2.0"
] | 1
|
2020-10-31T07:15:22.000Z
|
2020-10-31T07:15:22.000Z
|
"""init module for reveal app"""
# pylint: disable=invalid-name
default_app_config = "mspray.apps.reveal.apps.RevealConfig" # noqa
| 33
| 67
| 0.757576
|
92dbf0257f2cb3330104352818f4db40e27c4927
| 513
|
py
|
Python
|
guifw/models/port.py
|
luizerico/PyGuiFW
|
d79347db7d4bd9e09fbc53215d79c06ccf16bad5
|
[
"MIT"
] | 1
|
2021-02-05T02:58:19.000Z
|
2021-02-05T02:58:19.000Z
|
guifw/models/port.py
|
luizerico/PyGuiFW
|
d79347db7d4bd9e09fbc53215d79c06ccf16bad5
|
[
"MIT"
] | 1
|
2018-11-09T16:10:50.000Z
|
2018-11-09T16:10:50.000Z
|
guifw/models/port.py
|
luizerico/PyGuiFW
|
d79347db7d4bd9e09fbc53215d79c06ccf16bad5
|
[
"MIT"
] | null | null | null |
from django.db import models
from django import forms
from audit_log.models.managers import AuditLog
# Create your models here.
| 20.52
| 61
| 0.707602
|
92dc4e7fbdb299e18e1175c0718307d433b0cb15
| 386
|
py
|
Python
|
app/backend/arm/migrations/0002_auto_20190924_1712.py
|
karstenv/nmp-arm
|
47e45f0391820000f461ab6e994e20eacfffb457
|
[
"Apache-2.0"
] | 2
|
2019-08-12T22:06:23.000Z
|
2019-10-22T20:50:32.000Z
|
app/backend/arm/migrations/0002_auto_20190924_1712.py
|
karstenv/nmp-arm
|
47e45f0391820000f461ab6e994e20eacfffb457
|
[
"Apache-2.0"
] | 12
|
2019-07-10T18:11:31.000Z
|
2022-02-10T08:47:57.000Z
|
app/backend/arm/migrations/0002_auto_20190924_1712.py
|
karstenv/nmp-arm
|
47e45f0391820000f461ab6e994e20eacfffb457
|
[
"Apache-2.0"
] | 6
|
2019-07-03T21:24:11.000Z
|
2021-04-29T17:31:34.000Z
|
# Generated by Django 2.2.5 on 2019-09-25 00:12
from django.db import migrations
| 19.3
| 48
| 0.546632
|
92dc54efa676f164aaadbce167924df2d1df95ab
| 7,112
|
py
|
Python
|
webcam_demo.py
|
taranek/tennis-stats-provider
|
e95093679a194d30d0727ec8e11d44fc462f6adc
|
[
"Apache-2.0"
] | null | null | null |
webcam_demo.py
|
taranek/tennis-stats-provider
|
e95093679a194d30d0727ec8e11d44fc462f6adc
|
[
"Apache-2.0"
] | null | null | null |
webcam_demo.py
|
taranek/tennis-stats-provider
|
e95093679a194d30d0727ec8e11d44fc462f6adc
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import json
import math
import cv2
import time
import argparse
import concurrent.futures
import posenet
import keyboard
import sys
import numpy as np
from threading import Thread
from slugify import slugify
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
args = parser.parse_args()
if __name__ == "__main__":
main()
| 37.829787
| 137
| 0.564398
|
92df29892405e44dded087915f2a1792a9fb1160
| 6,265
|
py
|
Python
|
otcextensions/tests/unit/osclient/dcs/v1/fakes.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/tests/unit/osclient/dcs/v1/fakes.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | null | null | null |
otcextensions/tests/unit/osclient/dcs/v1/fakes.py
|
zsoltn/python-otcextensions
|
4c0fa22f095ebd5f9636ae72acbae5048096822c
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import datetime
import random
import uuid
import mock
from openstackclient.tests.unit import utils
from otcextensions.tests.unit.osclient import test_base
from otcextensions.sdk.dcs.v1 import backup
from otcextensions.sdk.dcs.v1 import config
from otcextensions.sdk.dcs.v1 import instance
from otcextensions.sdk.dcs.v1 import restore
from otcextensions.sdk.dcs.v1 import statistic
| 36.005747
| 77
| 0.555148
|
92df7af4ff3a17d2d4ea5d2cff4ae315569f3502
| 830
|
py
|
Python
|
tests/dummy_repo/tvm/python/tvm/api.py
|
csullivan/ffi-navigator
|
ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb
|
[
"Apache-2.0"
] | 148
|
2019-12-28T19:02:17.000Z
|
2022-03-27T07:30:13.000Z
|
tests/dummy_repo/tvm/python/tvm/api.py
|
csullivan/ffi-navigator
|
ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb
|
[
"Apache-2.0"
] | 21
|
2019-12-28T17:29:24.000Z
|
2021-11-24T09:59:35.000Z
|
tests/dummy_repo/tvm/python/tvm/api.py
|
csullivan/ffi-navigator
|
ed47678f9cb8c6d3637bf3219d3cf7b2754b84bb
|
[
"Apache-2.0"
] | 17
|
2019-12-29T01:46:13.000Z
|
2022-01-10T09:56:46.000Z
|
from ._ffi.base import string_types
from ._ffi.object import register_object, Object
from ._ffi.node import register_node, NodeBase
from ._ffi.node import convert_to_node as _convert_to_node
from ._ffi.node_generic import _scalar_type_inference
from ._ffi.function import Function
from ._ffi.function import _init_api, register_func, get_global_func, extract_ext_funcs
from ._ffi.function import convert_to_tvm_func as _convert_tvm_func
from ._ffi.runtime_ctypes import TVMType
from . import _api_internal
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import container as _container
from . import tag as _tag
int8 = "int8"
int32 = "int32"
float32 = "float32"
handle = "handle"
| 31.923077
| 87
| 0.812048
|
92dfa8870f87fbcfb31691bd442140d0c802358d
| 4,121
|
py
|
Python
|
torchattacks/attacks/multiattack.py
|
Harry24k/adversarial-attacks-pytorch
|
bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb
|
[
"MIT"
] | 782
|
2020-03-28T01:56:36.000Z
|
2022-03-31T14:54:02.000Z
|
torchattacks/attacks/multiattack.py
|
Harry24k/adversarial-attacks-pytorch
|
bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb
|
[
"MIT"
] | 48
|
2020-04-18T23:06:30.000Z
|
2022-03-24T01:54:50.000Z
|
torchattacks/attacks/multiattack.py
|
Harry24k/adversarial-attacks-pytorch
|
bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb
|
[
"MIT"
] | 197
|
2020-03-31T05:21:02.000Z
|
2022-03-31T15:24:29.000Z
|
import copy
import torch
from ..attack import Attack
| 33.778689
| 108
| 0.626062
|
92e0877363cacd633cbbf12e0ee4175cb9564598
| 2,627
|
py
|
Python
|
src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1
|
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
| 39.80303
| 79
| 0.53445
|
92e16c1fa8d877e82eb805100d17b73907afb25e
| 646
|
py
|
Python
|
_scripts/_build.py
|
dfreeman06/wxyz
|
663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd
|
[
"BSD-3-Clause"
] | 1
|
2021-06-20T12:21:27.000Z
|
2021-06-20T12:21:27.000Z
|
_scripts/_build.py
|
nrbgt/wxyz
|
663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd
|
[
"BSD-3-Clause"
] | null | null | null |
_scripts/_build.py
|
nrbgt/wxyz
|
663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
from . import ROOT, PY_SRC, _run, PY, DIST
CONDA_ORDER = [
"core",
"html",
"lab",
"datagrid",
"svg",
"tpl-jjinja"
"yaml"
]
CONDA_BUILD_ARGS = [
"conda-build", "-c", "conda-forge", "--output-folder", DIST / "conda-bld",
]
if __name__ == "__main__":
for pkg in PY_SRC.glob("wxyz_*"):
_run([PY, "setup.py", "sdist", "--dist-dir", DIST / "sdist"], cwd=str(pkg))
try:
_run([*CONDA_BUILD_ARGS, "--skip-existing", "."], cwd=ROOT / "recipes")
except:
for pkg in CONDA_ORDER:
_run([*CONDA_BUILD_ARGS, f"wxyz-{pkg}"], cwd=ROOT / "recipes")
| 20.83871
| 83
| 0.560372
|
92e1c91fec4c34f39e9e2622024fad4489b61749
| 5,279
|
py
|
Python
|
scripts/C189/C189Checkin.py
|
xiaopowanyi/py_scripts
|
29f240800eefd6e0f91fd098c35ac3c451172ff8
|
[
"MIT"
] | 2
|
2020-11-14T05:42:49.000Z
|
2020-11-14T05:43:13.000Z
|
scripts/C189/C189Checkin.py
|
J220541674/py_scripts
|
2b72e23041392a2e5f0a7305d7e9802054978384
|
[
"MIT"
] | null | null | null |
scripts/C189/C189Checkin.py
|
J220541674/py_scripts
|
2b72e23041392a2e5f0a7305d7e9802054978384
|
[
"MIT"
] | null | null | null |
import requests, time, re, rsa, json, base64
from urllib import parse
s = requests.Session()
username = ""
password = ""
if(username == "" or password == ""):
username = input("")
password = input("")
BI_RM = list("0123456789abcdefghijklmnopqrstuvwxyz")
b64map = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
if __name__ == "__main__":
main()
| 37.707143
| 305
| 0.586664
|
92e2096dcbe8b31e8b6213b7078b62e4efd23dd0
| 3,318
|
py
|
Python
|
Mmint/CGratio.py
|
lijiacd985/Mplot
|
adea07aa78a5495cf3551618f6ec2c08fa7c1029
|
[
"MIT"
] | 5
|
2018-07-02T16:33:23.000Z
|
2021-03-23T00:32:31.000Z
|
Mmint/CGratio.py
|
lijiacd985/Mplot
|
adea07aa78a5495cf3551618f6ec2c08fa7c1029
|
[
"MIT"
] | 1
|
2017-09-19T19:46:11.000Z
|
2020-02-28T05:00:49.000Z
|
Mmint/CGratio.py
|
lijiacd985/Mplot
|
adea07aa78a5495cf3551618f6ec2c08fa7c1029
|
[
"MIT"
] | 4
|
2017-11-16T15:26:24.000Z
|
2020-02-13T16:25:25.000Z
|
import subprocess
from .Genome_fasta import get_fasta
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pysam
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b','--bamfile',help="bam file name", metavar="FILE")
parser.add_argument('-g','--genome',help="Genome fasta file path")
parser.add_argument('-o','--output',help="pie figure's filename")
run(parser)
| 33.857143
| 177
| 0.589813
|
92e278de46c7d8190178a6e51a0f4e234995f14e
| 1,536
|
py
|
Python
|
src/furo/__init__.py
|
sethmlarson/furo
|
1257d884dae9040248380595e06d7d2a1e6eba39
|
[
"MIT"
] | null | null | null |
src/furo/__init__.py
|
sethmlarson/furo
|
1257d884dae9040248380595e06d7d2a1e6eba39
|
[
"MIT"
] | null | null | null |
src/furo/__init__.py
|
sethmlarson/furo
|
1257d884dae9040248380595e06d7d2a1e6eba39
|
[
"MIT"
] | null | null | null |
"""A clean customisable Sphinx documentation theme."""
__version__ = "2020.9.8.beta2"
from pathlib import Path
from .body import wrap_tables
from .code import get_pygments_style_colors
from .navigation import get_navigation_tree
from .toc import should_hide_toc
def setup(app):
"""Entry point for sphinx theming."""
theme_path = (Path(__file__).parent / "theme").resolve()
app.add_html_theme("furo", str(theme_path))
app.connect("html-page-context", _html_page_context)
| 32
| 73
| 0.69401
|
92e3577604795bc43851e0afe7af80fe0fe26bbf
| 2,059
|
py
|
Python
|
experiments/mix_down.py
|
fretboardfreak/potty_oh
|
70b752c719576c0975e1d2af5aca2fc7abc8abcc
|
[
"Apache-2.0"
] | null | null | null |
experiments/mix_down.py
|
fretboardfreak/potty_oh
|
70b752c719576c0975e1d2af5aca2fc7abc8abcc
|
[
"Apache-2.0"
] | 1
|
2016-05-04T03:51:36.000Z
|
2016-05-16T19:08:02.000Z
|
experiments/mix_down.py
|
fretboardfreak/potty_oh
|
70b752c719576c0975e1d2af5aca2fc7abc8abcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test for what happens when two waveforms are averaged together."""
from potty_oh import common
from potty_oh.wav_file import wav_file_context
from potty_oh.waveform import mix_down
from potty_oh.signal_generator import Generator
from potty_oh.music.pitch import Key
from potty_oh.music.interval import Interval
if __name__ == "__main__":
common.call_main(main)
| 34.898305
| 78
| 0.736765
|
92e37ec4545956a8e8242b1871fea16288a1a867
| 8,704
|
py
|
Python
|
tests/test_hrepr.py
|
fabaff/hrepr
|
f6de915f1d34c47ceab11f5f70e433a30e6de174
|
[
"MIT"
] | null | null | null |
tests/test_hrepr.py
|
fabaff/hrepr
|
f6de915f1d34c47ceab11f5f70e433a30e6de174
|
[
"MIT"
] | null | null | null |
tests/test_hrepr.py
|
fabaff/hrepr
|
f6de915f1d34c47ceab11f5f70e433a30e6de174
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from hrepr import H
from hrepr import hrepr as real_hrepr
from hrepr.h import styledir
from .common import one_test_per_assert
css_hrepr = open(f"{styledir}/hrepr.css", encoding="utf-8").read()
hrepr = real_hrepr.variant(fill_resources=False)
def hshort(x, **kw):
return hrepr(x, max_depth=0, **kw)
def test_function():
assert hrepr(Opaque) == H.span["hreprk-class"](
H.span["hrepr-defn-key"]("class"),
" ",
H.span["hrepr-defn-name"]("Opaque"),
)
def test_structures():
for typ, o, c in (
(tuple, "(", ")"),
(list, "[", "]"),
(set, "{", "}"),
(frozenset, "{", "}"),
):
clsname = typ.__name__
assert hrepr(typ((1, 2))) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"](c),
)
| 29.110368
| 79
| 0.443934
|
92e459320d22b81d1d537a46bdb22eb8751da72d
| 6,218
|
py
|
Python
|
sympy/assumptions/assume.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2020-07-27T16:36:27.000Z
|
2020-12-29T22:28:37.000Z
|
sympy/assumptions/assume.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2020-08-18T15:21:59.000Z
|
2020-08-18T19:35:29.000Z
|
sympy/assumptions/assume.py
|
shivangdubey/sympy
|
bd3ddd4c71d439c8b623f69a02274dd8a8a82198
|
[
"BSD-3-Clause"
] | 2
|
2021-01-08T23:03:23.000Z
|
2021-01-13T18:57:02.000Z
|
import inspect
from sympy.core.cache import cacheit
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import Boolean
from sympy.utilities.source import get_class
from contextlib import contextmanager
global_assumptions = AssumptionsContext()
| 26.686695
| 81
| 0.587488
|
92e5ae34177c1ed1dca21481a52e063cdd40f997
| 5,794
|
py
|
Python
|
distancematrix/tests/consumer/test_distance_matrix.py
|
IDLabResearch/seriesdistancematrix
|
c0e666d036f24184511e766cee9fdfa55f41df97
|
[
"MIT"
] | 12
|
2019-11-22T14:34:51.000Z
|
2021-05-04T19:23:55.000Z
|
distancematrix/tests/consumer/test_distance_matrix.py
|
predict-idlab/seriesdistancematrix
|
c0e666d036f24184511e766cee9fdfa55f41df97
|
[
"MIT"
] | 1
|
2020-04-28T07:59:03.000Z
|
2020-04-28T07:59:03.000Z
|
distancematrix/tests/consumer/test_distance_matrix.py
|
IDLabResearch/seriesdistancematrix
|
c0e666d036f24184511e766cee9fdfa55f41df97
|
[
"MIT"
] | 3
|
2020-03-02T12:39:00.000Z
|
2021-03-22T13:36:25.000Z
|
import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.util import diag_indices_of
from distancematrix.consumer.distance_matrix import DistanceMatrix
| 42.602941
| 104
| 0.608733
|
92e5bc0e9b68f032b202632a0013f3e6bb85256a
| 11,460
|
py
|
Python
|
supervisor/const.py
|
peddamat/home-assistant-supervisor-test
|
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
|
[
"Apache-2.0"
] | null | null | null |
supervisor/const.py
|
peddamat/home-assistant-supervisor-test
|
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
|
[
"Apache-2.0"
] | null | null | null |
supervisor/const.py
|
peddamat/home-assistant-supervisor-test
|
5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4
|
[
"Apache-2.0"
] | null | null | null |
"""Constants file for Supervisor."""
from enum import Enum
from ipaddress import ip_network
from pathlib import Path
SUPERVISOR_VERSION = "DEV"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor.txt"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
SUPERVISOR_DATA = Path("/data")
FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, "homeassistant.json")
FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, "ingress.json")
FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, "services.json")
FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, "updater.json")
FILE_SUFFIX_CONFIGURATION = [".yaml", ".yml", ".json"]
MACHINE_ID = Path("/etc/machine-id")
SOCKET_DBUS = Path("/run/dbus/system_bus_socket")
SOCKET_DOCKER = Path("/run/docker.sock")
RUN_SUPERVISOR_STATE = Path("/run/supervisor")
SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")
DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
# This needs to match the dockerd --cpu-rt-runtime= argument.
DOCKER_CPU_RUNTIME_TOTAL = 950_000
# The rt runtimes are guarantees, hence we cannot allocate more
# time than available! Support up to 5 containers with equal time
# allocated.
# Note that the time is multiplied by CPU count. This means that
# a single container can schedule up to 950/5*4 = 760ms in RT priority
# on a quad core system.
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5)
DNS_SUFFIX = "local.hass.io"
LABEL_ARCH = "io.hass.arch"
LABEL_MACHINE = "io.hass.machine"
LABEL_TYPE = "io.hass.type"
LABEL_VERSION = "io.hass.version"
META_ADDON = "addon"
META_HOMEASSISTANT = "homeassistant"
META_SUPERVISOR = "supervisor"
JSON_DATA = "data"
JSON_MESSAGE = "message"
JSON_RESULT = "result"
RESULT_ERROR = "error"
RESULT_OK = "ok"
CONTENT_TYPE_BINARY = "application/octet-stream"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_PNG = "image/png"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
COOKIE_INGRESS = "ingress_session"
HEADER_TOKEN = "X-Supervisor-Token"
HEADER_TOKEN_OLD = "X-Hassio-Key"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_HASSIO = "HASSIO_TOKEN"
ENV_HOMEASSISTANT_REPOSITORY = "HOMEASSISTANT_REPOSITORY"
ENV_SUPERVISOR_DEV = "SUPERVISOR_DEV"
ENV_SUPERVISOR_MACHINE = "SUPERVISOR_MACHINE"
ENV_SUPERVISOR_NAME = "SUPERVISOR_NAME"
ENV_SUPERVISOR_SHARE = "SUPERVISOR_SHARE"
ENV_SUPERVISOR_CPU_RT = "SUPERVISOR_CPU_RT"
REQUEST_FROM = "HASSIO_FROM"
ATTR_ACCESS_TOKEN = "access_token"
ATTR_ACCESSPOINTS = "accesspoints"
ATTR_ACTIVE = "active"
ATTR_ADDON = "addon"
ATTR_ADDONS = "addons"
ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
ATTR_ADDONS_REPOSITORIES = "addons_repositories"
ATTR_ADDRESS = "address"
ATTR_ADDRESS_DATA = "address-data"
ATTR_ADMIN = "admin"
ATTR_ADVANCED = "advanced"
ATTR_APPARMOR = "apparmor"
ATTR_APPLICATION = "application"
ATTR_ARCH = "arch"
ATTR_ARGS = "args"
ATTR_LABELS = "labels"
ATTR_AUDIO = "audio"
ATTR_AUDIO_INPUT = "audio_input"
ATTR_AUDIO_OUTPUT = "audio_output"
ATTR_AUTH = "auth"
ATTR_AUTH_API = "auth_api"
ATTR_AUTO_UPDATE = "auto_update"
ATTR_AVAILABLE = "available"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_BOARD = "board"
ATTR_BOOT = "boot"
ATTR_BRANCH = "branch"
ATTR_BUILD = "build"
ATTR_BUILD_FROM = "build_from"
ATTR_CARD = "card"
ATTR_CHANGELOG = "changelog"
ATTR_CHANNEL = "channel"
ATTR_CHASSIS = "chassis"
ATTR_CHECKS = "checks"
ATTR_CLI = "cli"
ATTR_CONFIG = "config"
ATTR_CONFIGURATION = "configuration"
ATTR_CONNECTED = "connected"
ATTR_CONNECTIONS = "connections"
ATTR_CONTAINERS = "containers"
ATTR_CPE = "cpe"
ATTR_CPU_PERCENT = "cpu_percent"
ATTR_CRYPTO = "crypto"
ATTR_DATA = "data"
ATTR_DATE = "date"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DEFAULT = "default"
ATTR_DEPLOYMENT = "deployment"
ATTR_DESCRIPTON = "description"
ATTR_DETACHED = "detached"
ATTR_DEVICES = "devices"
ATTR_DEVICETREE = "devicetree"
ATTR_DIAGNOSTICS = "diagnostics"
ATTR_DISCOVERY = "discovery"
ATTR_DISK = "disk"
ATTR_DISK_FREE = "disk_free"
ATTR_DISK_LIFE_TIME = "disk_life_time"
ATTR_DISK_TOTAL = "disk_total"
ATTR_DISK_USED = "disk_used"
ATTR_DNS = "dns"
ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_ENABLE = "enable"
ATTR_ENABLED = "enabled"
ATTR_ENVIRONMENT = "environment"
ATTR_EVENT = "event"
ATTR_FEATURES = "features"
ATTR_FILENAME = "filename"
ATTR_FLAGS = "flags"
ATTR_FOLDERS = "folders"
ATTR_FREQUENCY = "frequency"
ATTR_FULL_ACCESS = "full_access"
ATTR_GATEWAY = "gateway"
ATTR_GPIO = "gpio"
ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HEALTHY = "healthy"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_HOMEASSISTANT_API = "homeassistant_api"
ATTR_HOST = "host"
ATTR_HOST_DBUS = "host_dbus"
ATTR_HOST_INTERNET = "host_internet"
ATTR_HOST_IPC = "host_ipc"
ATTR_HOST_NETWORK = "host_network"
ATTR_HOST_PID = "host_pid"
ATTR_HOSTNAME = "hostname"
ATTR_ICON = "icon"
ATTR_ID = "id"
ATTR_IMAGE = "image"
ATTR_IMAGES = "images"
ATTR_INDEX = "index"
ATTR_INGRESS = "ingress"
ATTR_INGRESS_ENTRY = "ingress_entry"
ATTR_INGRESS_PANEL = "ingress_panel"
ATTR_INGRESS_PORT = "ingress_port"
ATTR_INGRESS_TOKEN = "ingress_token"
ATTR_INGRESS_URL = "ingress_url"
ATTR_INIT = "init"
ATTR_INITIALIZE = "initialize"
ATTR_INPUT = "input"
ATTR_INSTALLED = "installed"
ATTR_INTERFACE = "interface"
ATTR_INTERFACES = "interfaces"
ATTR_IP_ADDRESS = "ip_address"
ATTR_IPV4 = "ipv4"
ATTR_IPV6 = "ipv6"
ATTR_ISSUES = "issues"
ATTR_KERNEL = "kernel"
ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_LAST_BOOT = "last_boot"
ATTR_LEGACY = "legacy"
ATTR_LOCALS = "locals"
ATTR_LOCATON = "location"
ATTR_LOGGING = "logging"
ATTR_LOGO = "logo"
ATTR_LONG_DESCRIPTION = "long_description"
ATTR_MAC = "mac"
ATTR_MACHINE = "machine"
ATTR_MAINTAINER = "maintainer"
ATTR_MAP = "map"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MESSAGE = "message"
ATTR_METHOD = "method"
ATTR_MODE = "mode"
ATTR_MULTICAST = "multicast"
ATTR_NAME = "name"
ATTR_NAMESERVERS = "nameservers"
ATTR_NETWORK = "network"
ATTR_NETWORK_DESCRIPTION = "network_description"
ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_OBSERVER = "observer"
ATTR_OPERATING_SYSTEM = "operating_system"
ATTR_OPTIONS = "options"
ATTR_OTA = "ota"
ATTR_OUTPUT = "output"
ATTR_PANEL_ADMIN = "panel_admin"
ATTR_PANEL_ICON = "panel_icon"
ATTR_PANEL_TITLE = "panel_title"
ATTR_PANELS = "panels"
ATTR_PARENT = "parent"
ATTR_PASSWORD = "password"
ATTR_PORT = "port"
ATTR_PORTS = "ports"
ATTR_PORTS_DESCRIPTION = "ports_description"
ATTR_PREFIX = "prefix"
ATTR_PRIMARY = "primary"
ATTR_PRIORITY = "priority"
ATTR_PRIVILEGED = "privileged"
ATTR_PROTECTED = "protected"
ATTR_PROVIDERS = "providers"
ATTR_PSK = "psk"
ATTR_RATING = "rating"
ATTR_REALTIME = "realtime"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_REGISTRIES = "registries"
ATTR_REGISTRY = "registry"
ATTR_REPOSITORIES = "repositories"
ATTR_REPOSITORY = "repository"
ATTR_SCHEMA = "schema"
ATTR_SECURITY = "security"
ATTR_SERIAL = "serial"
ATTR_SERVERS = "servers"
ATTR_SERVICE = "service"
ATTR_SERVICES = "services"
ATTR_SESSION = "session"
ATTR_SIGNAL = "signal"
ATTR_SIZE = "size"
ATTR_SLUG = "slug"
ATTR_SNAPSHOT_EXCLUDE = "snapshot_exclude"
ATTR_SNAPSHOTS = "snapshots"
ATTR_SOURCE = "source"
ATTR_SQUASH = "squash"
ATTR_SSD = "ssid"
ATTR_SSID = "ssid"
ATTR_SSL = "ssl"
ATTR_STAGE = "stage"
ATTR_STARTUP = "startup"
ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
ATTR_SUPPORTED = "supported"
ATTR_SUPPORTED_ARCH = "supported_arch"
ATTR_SYSTEM = "system"
ATTR_JOURNALD = "journald"
ATTR_TIMEOUT = "timeout"
ATTR_TIMEZONE = "timezone"
ATTR_TITLE = "title"
ATTR_TMPFS = "tmpfs"
ATTR_TOTP = "totp"
ATTR_TRANSLATIONS = "translations"
ATTR_TYPE = "type"
ATTR_UART = "uart"
ATTR_UDEV = "udev"
ATTR_UNHEALTHY = "unhealthy"
ATTR_UNSAVED = "unsaved"
ATTR_UNSUPPORTED = "unsupported"
ATTR_UPDATE_AVAILABLE = "update_available"
ATTR_UPDATE_KEY = "update_key"
ATTR_URL = "url"
ATTR_USB = "usb"
ATTR_USER = "user"
ATTR_USERNAME = "username"
ATTR_UUID = "uuid"
ATTR_VALID = "valid"
ATTR_VALUE = "value"
ATTR_VERSION = "version"
ATTR_VERSION_LATEST = "version_latest"
ATTR_VIDEO = "video"
ATTR_VLAN = "vlan"
ATTR_VOLUME = "volume"
ATTR_VPN = "vpn"
ATTR_WAIT_BOOT = "wait_boot"
ATTR_WATCHDOG = "watchdog"
ATTR_WEBUI = "webui"
ATTR_WIFI = "wifi"
ATTR_CONTENT_TRUST = "content_trust"
ATTR_FORCE_SECURITY = "force_security"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"
MAP_CONFIG = "config"
MAP_SSL = "ssl"
MAP_ADDONS = "addons"
MAP_BACKUP = "backup"
MAP_SHARE = "share"
MAP_MEDIA = "media"
ARCH_ARMHF = "armhf"
ARCH_ARMV7 = "armv7"
ARCH_AARCH64 = "aarch64"
ARCH_AMD64 = "amd64"
ARCH_I386 = "i386"
ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386]
REPOSITORY_CORE = "core"
REPOSITORY_LOCAL = "local"
FOLDER_HOMEASSISTANT = "homeassistant"
FOLDER_SHARE = "share"
FOLDER_ADDONS = "addons/local"
FOLDER_SSL = "ssl"
FOLDER_MEDIA = "media"
SNAPSHOT_FULL = "full"
SNAPSHOT_PARTIAL = "partial"
CRYPTO_AES128 = "aes128"
SECURITY_PROFILE = "profile"
SECURITY_DEFAULT = "default"
SECURITY_DISABLE = "disable"
ROLE_DEFAULT = "default"
ROLE_HOMEASSISTANT = "homeassistant"
ROLE_BACKUP = "backup"
ROLE_MANAGER = "manager"
ROLE_ADMIN = "admin"
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]
| 25.986395
| 84
| 0.755585
|
92e5e938e0e0af1229cd08971df68b5917c123c7
| 8,334
|
py
|
Python
|
quaesit/agent.py
|
jgregoriods/quaesit
|
3846f5084ea4d6c1cbd9a93176ee9dee25e12105
|
[
"MIT"
] | null | null | null |
quaesit/agent.py
|
jgregoriods/quaesit
|
3846f5084ea4d6c1cbd9a93176ee9dee25e12105
|
[
"MIT"
] | null | null | null |
quaesit/agent.py
|
jgregoriods/quaesit
|
3846f5084ea4d6c1cbd9a93176ee9dee25e12105
|
[
"MIT"
] | null | null | null |
import inspect
from math import hypot, sin, asin, cos, radians, degrees
from abc import ABCMeta, abstractmethod
from random import randint, choice
from typing import Dict, List, Tuple, Union
| 35.016807
| 76
| 0.517639
|
92e5fb97c8f7793e1b150c9be5289156548c78e6
| 15,337
|
py
|
Python
|
models/LRF_COCO_300.py
|
vaesl/LRF-Net
|
e44b120dd55288c02852f8e58cda31313525d748
|
[
"MIT"
] | 180
|
2019-10-24T01:55:54.000Z
|
2022-02-07T11:26:49.000Z
|
models/LRF_COCO_300.py
|
CV-Rookie/LRF-Net
|
e44b120dd55288c02852f8e58cda31313525d748
|
[
"MIT"
] | 11
|
2019-11-06T08:46:00.000Z
|
2020-06-20T02:30:32.000Z
|
models/LRF_COCO_300.py
|
CV-Rookie/LRF-Net
|
e44b120dd55288c02852f8e58cda31313525d748
|
[
"MIT"
] | 29
|
2019-10-28T03:26:27.000Z
|
2021-05-03T02:32:06.000Z
|
import torch
import torch.nn as nn
import os
import torch.nn.functional as F
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
extras = {
'300': [1024, 'S', 512, 'S', 256]}
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
| 36.516667
| 154
| 0.579774
|
92e751e7128a30f8b366e1182af0f9e14b4591cd
| 25,418
|
py
|
Python
|
tests/test.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
tests/test.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
tests/test.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
import wandplus.image as wpi
from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize
import os
import unittest
tmpdir = '_tmp/'
class CheckTextUtil(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
| 29.487239
| 81
| 0.464395
|
92e78a29e0f69d74c35aa00744e686a1763079d2
| 7,652
|
py
|
Python
|
src/librender/tests/test_mesh.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 7
|
2020-07-24T03:19:59.000Z
|
2022-03-30T10:56:12.000Z
|
src/librender/tests/test_mesh.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 1
|
2021-04-07T22:30:23.000Z
|
2021-04-08T00:55:36.000Z
|
src/librender/tests/test_mesh.py
|
tizian/layer-laboratory
|
008cc94b76127e9eb74227fcd3d0145da8ddec30
|
[
"CNRI-Python"
] | 2
|
2020-06-08T08:25:09.000Z
|
2021-04-05T22:13:08.000Z
|
import mitsuba
import pytest
import enoki as ek
from enoki.dynamic import Float32 as Float
from mitsuba.python.test.util import fresolver_append_path
from mitsuba.python.util import traverse
def test04_normal_weighting_scheme(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype, Vector3f
from mitsuba.render import Mesh
import numpy as np
"""Tests the weighting scheme that is used to compute surface normals."""
m = Mesh("MyMesh", 5, 2, has_vertex_normals=True)
vertices = m.vertex_positions_buffer()
normals = m.vertex_normals_buffer()
a, b = 1.0, 0.5
vertices[:] = [0, 0, 0, -a, 1, 0, a, 1, 0, -b, 0, 1, b, 0, 1]
n0 = Vector3f(0.0, 0.0, -1.0)
n1 = Vector3f(0.0, 1.0, 0.0)
angle_0 = ek.pi / 2.0
angle_1 = ek.acos(3.0 / 5.0)
n2 = n0 * angle_0 + n1 * angle_1
n2 /= ek.norm(n2)
n = np.vstack([n2, n0, n0, n1, n1]).transpose()
m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4]
m.recompute_vertex_normals()
for i in range(5):
assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4)
def test08_mesh_add_attribute(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
m.add_attribute("vertex_color", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96,
mesh attributes = [
vertex_color: 3 floats
]
]"""
| 32.561702
| 93
| 0.611997
|
92e9c5118907200831bee6234267cd344285472f
| 1,457
|
py
|
Python
|
agsadmin/sharing_admin/community/groups/Group.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 2
|
2015-12-07T05:53:29.000Z
|
2020-09-13T18:12:15.000Z
|
agsadmin/sharing_admin/community/groups/Group.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 4
|
2015-03-09T05:59:14.000Z
|
2018-01-09T00:12:56.000Z
|
agsadmin/sharing_admin/community/groups/Group.py
|
christopherblanchfield/agsadmin
|
989cb3795aacf285ccf74ee51b0de26bf2f48bc3
|
[
"BSD-3-Clause"
] | 5
|
2015-03-09T01:05:24.000Z
|
2019-09-09T23:01:21.000Z
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ...._utils import send_session_request
from ..._PortalEndpointBase import PortalEndpointBase
from .CreateUpdateGroupParams import CreateUpdateGroupParams
def get_properties(self):
"""
Gets the properties of the item.
"""
return self._get()
def update(self, update_group_params, clear_empty_fields=False):
"""
Updates the group properties.
"""
update_group_params = update_group_params._get_params() if isinstance(
update_group_params, CreateUpdateGroupParams) else update_group_params.copy()
if not "clearEmptyFields" in update_group_params:
update_group_params["clearEmptyFields"] = clear_empty_fields
r = self._create_operation_request(self, "update", method="POST", data=update_group_params)
return send_session_request(self._session, r).json()
| 33.883721
| 117
| 0.683596
|
92ea3eda1c775e0583e47210352c08da3ae6793c
| 3,995
|
py
|
Python
|
amy/workshops/migrations/0191_auto_20190809_0936.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 53
|
2015-01-10T17:39:19.000Z
|
2019-06-12T17:36:34.000Z
|
amy/workshops/migrations/0191_auto_20190809_0936.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 1,176
|
2015-01-02T06:32:47.000Z
|
2019-06-18T11:57:47.000Z
|
amy/workshops/migrations/0191_auto_20190809_0936.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 44
|
2015-01-03T15:08:56.000Z
|
2019-06-09T05:33:08.000Z
|
# Generated by Django 2.1.7 on 2019-08-09 09:36
from django.db import migrations, models
def migrate_public_event(apps, schema_editor):
"""Migrate options previously with no contents (displayed as "Other:")
to a new contents ("other").
The field containing these options is in CommonRequest abstract model,
implemented in WorkshopRequest, WorkshopInquiryRequest, and
SelfOrganizedSubmission models."""
WorkshopRequest = apps.get_model('workshops', 'WorkshopRequest')
WorkshopInquiryRequest = apps.get_model('extrequests',
'WorkshopInquiryRequest')
SelfOrganizedSubmission = apps.get_model('extrequests',
'SelfOrganizedSubmission')
WorkshopRequest.objects.filter(public_event="") \
.update(public_event="other")
WorkshopInquiryRequest.objects.filter(public_event="") \
.update(public_event="other")
SelfOrganizedSubmission.objects.filter(public_event="") \
.update(public_event="other")
| 71.339286
| 949
| 0.702378
|
92ec1a79aa56994e71f763b1fea1ca3f88478806
| 1,278
|
py
|
Python
|
pix2pix/Discriminator.py
|
yubin1219/GAN
|
8345095f9816e548c968492efbe92b427b0e06a3
|
[
"MIT"
] | null | null | null |
pix2pix/Discriminator.py
|
yubin1219/GAN
|
8345095f9816e548c968492efbe92b427b0e06a3
|
[
"MIT"
] | null | null | null |
pix2pix/Discriminator.py
|
yubin1219/GAN
|
8345095f9816e548c968492efbe92b427b0e06a3
|
[
"MIT"
] | 1
|
2021-09-17T01:28:50.000Z
|
2021-09-17T01:28:50.000Z
|
import torch
import torch.nn as nn
| 29.045455
| 86
| 0.58216
|
92ec31910f4ccb9a9e9fdaf1976491caf430c06d
| 1,067
|
py
|
Python
|
tests/slicebuilders/subpopulations/test_length.py
|
ANarayan/robustness-gym
|
eed2800985631fbbe6491b5f6f0731a067eef78e
|
[
"Apache-2.0"
] | null | null | null |
tests/slicebuilders/subpopulations/test_length.py
|
ANarayan/robustness-gym
|
eed2800985631fbbe6491b5f6f0731a067eef78e
|
[
"Apache-2.0"
] | null | null | null |
tests/slicebuilders/subpopulations/test_length.py
|
ANarayan/robustness-gym
|
eed2800985631fbbe6491b5f6f0731a067eef78e
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
import numpy as np
from robustnessgym.cachedops.spacy import Spacy
from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation
from tests.testbeds import MockTestBedv0
| 34.419355
| 81
| 0.686036
|
92eca5c1a6337291d863c933685487ea52da0c9b
| 1,146
|
py
|
Python
|
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py
|
NickSwainston/pulsar_spectra
|
b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69
|
[
"MIT"
] | null | null | null |
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py
|
NickSwainston/pulsar_spectra
|
b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69
|
[
"MIT"
] | 4
|
2021-12-17T04:24:13.000Z
|
2022-02-24T14:51:18.000Z
|
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py
|
NickSwainston/pulsar_spectra
|
b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69
|
[
"MIT"
] | null | null | null |
import json
from astroquery.vizier import Vizier
with open("Jankowski_2018_raw.txt", "r") as raw_file:
lines = raw_file.readlines()
print(lines)
pulsar_dict = {}
for row in lines[3:]:
row = row.split("|")
print(row)
pulsar = row[0].strip().replace("", "-")
freqs = []
fluxs = []
flux_errs = []
# If no error means it's an upper limit andnow sure how to handle it
if row[1].strip() != "" and row[2].strip() != "":
freqs.append(728)
fluxs.append(float(row[1].strip()))
flux_errs.append(float(row[2].strip()))
if row[3].strip() != "" and row[4].strip() != "":
freqs.append(1382)
fluxs.append(float(row[3].strip()))
flux_errs.append(float(row[4].strip()))
if row[5].strip() != "" and row[6].strip() != "":
freqs.append(3100)
fluxs.append(float(row[5].strip()))
flux_errs.append(float(row[6].strip()))
pulsar_dict[pulsar] = {"Frequency MHz":freqs, "Flux Density mJy":fluxs, "Flux Density error mJy":flux_errs}
with open("Jankowski_2018.yaml", "w") as cat_file:
cat_file.write(json.dumps(pulsar_dict))
print(pulsar_dict)
| 34.727273
| 111
| 0.604712
|
92ee36608ac8edb00b879a89f8f1eafb4cb4fb04
| 15,018
|
py
|
Python
|
integration-tests/run-intg-test.py
|
NishikaDeSilva/identity-test-integration
|
dbd1db07aa6d4f4942d772cd56c0b06c355bd43b
|
[
"Apache-2.0"
] | 4
|
2017-10-23T05:25:27.000Z
|
2018-01-10T08:00:14.000Z
|
integration-tests/run-intg-test.py
|
NishikaDeSilva/identity-test-integration
|
dbd1db07aa6d4f4942d772cd56c0b06c355bd43b
|
[
"Apache-2.0"
] | 42
|
2018-05-21T12:55:49.000Z
|
2020-01-17T06:40:25.000Z
|
integration-tests/run-intg-test.py
|
NishikaDeSilva/identity-test-integration
|
dbd1db07aa6d4f4942d772cd56c0b06c355bd43b
|
[
"Apache-2.0"
] | 46
|
2017-10-04T05:45:52.000Z
|
2018-05-05T14:32:26.000Z
|
# Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# importing required modules
import sys
from xml.etree import ElementTree as ET
import toml
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import intg_test_manager as cm
from subprocess import Popen, PIPE
import os
from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \
DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS
from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \
DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \
DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \
ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE
database_names = []
db_engine = None
sql_driver_location = None
identity_db_url = None
identity_db_username = None
identity_db_password = None
identity_db_driver = None
shared_db_url = None
shared_db_username = None
shared_db_password = None
shared_db_driver = None
identity_db = "WSO2_IDENTITY_DB"
shared_db = "WSO2_SHARED_DB"
# Since we have added a method to clone a given git branch and checkout to the latest released tag it is not required to
# modify pom files. Hence in the current implementation this method is not using.
# However, in order to execute this method you can define pom file paths in const_<prod>.py as a constant
# and import it to run-intg-test.py. Thereafter assign it to global variable called pom_file_paths in the
# configure_product method and call the modify_pom_files method.
#TODO: Improve the method in generic way to support all products
#TODO: Improve the method in generic way to support all products
#TODO: Improve the method in generic way to support all products
# def set_custom_testng():
# if cm.use_custom_testng_file == "TRUE":
# testng_source = Path(cm.workspace + "/" + "testng.xml")
# testng_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_DIST_XML_PATHS)
# testng_server_mgt_source = Path(cm.workspace + "/" + "testng-server-mgt.xml")
# testng_server_mgt_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_SERVER_MGT_DIST)
# # replace testng source
# cm.replace_file(testng_source, testng_destination)
# # replace testng server mgt source
# cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def build_source_without_tests(source_path):
"""Build the product-source.
"""
logger.info('Building the source skipping tests')
if sys.platform.startswith('win'):
subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path)
else:
subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path)
logger.info('Module build is completed. Module: ' + str(source_path))
if __name__ == "__main__":
main()
| 46.639752
| 127
| 0.671195
|
92eed01036cb07058175a69126f2f5a418891a9a
| 2,376
|
py
|
Python
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | 2
|
2020-08-27T03:14:05.000Z
|
2020-10-24T17:17:36.000Z
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | 5
|
2019-12-02T08:49:15.000Z
|
2020-06-22T08:38:34.000Z
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess
import sys
from pathlib import Path
SOUNDS_DIR = (Path(__file__).parent / Path("sounds")).absolute()
DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path("applause")
DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path("buzzer")
def _play_sound_unix(sound_file: Path, program):
"""
Play a sound file on unix with the program.
:param sound_file: Path to the sound file.
:param program: Which program to use.
:return: No returns. Plays a sound file.
"""
# Play the sound non blocking, use Popen.
subprocess.Popen([program, str(sound_file)])
| 43.2
| 79
| 0.731481
|
92ef37eb449c4f50b5c90c7a720a5f53652a647c
| 420
|
py
|
Python
|
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
from math import sqrt
| 20
| 44
| 0.435714
|
92ef91238a4d28bed6389f80b7547828e84737ba
| 6,622
|
py
|
Python
|
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used
make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a a static ip, URL or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click Allow to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select Any new email in inbox".
d) Set up your action.
1. Click that".
2. Select Maker" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
5. Click Create Action" then Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click Check now. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click Check now. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail', serve_gmail)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
sdk_conn = cozmo.connect_on_loop(app.loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
| 41.3875
| 105
| 0.67457
|
92f0c7d812707a316f1c04c4ec3e35722444b8b5
| 13,843
|
py
|
Python
|
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | 1
|
2015-11-06T06:27:59.000Z
|
2015-11-06T06:27:59.000Z
|
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | null | null | null |
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider ussing a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
if coordinate == 'fig':
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
screewidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
fig= txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
hc = np.sin(el * np.pi / 180.0)
xc = hc * np.cos(az * np.pi / 180.0)
yc = hc * np.sin(az * np.pi / 180.0)
zc = -np.cos(el * np.pi / 180.0)
# unit vec orthoganal to camera
if xc**2 + yc**2 == 0.0:
xoff = 1.0
yoff = 0.0
zoff = 0.0
else:
xoff = yc / np.sqrt(xc**2 + yc**2)
yoff = np.sqrt(1.0 - xoff**2)
zoff = 0.0
# xoff, yoff, zoff = xc, yc, zc
# scale orthogonal vec by sphere size
r_label = 1.0 * r_factor
xoff, yoff, zoff = [r_label * v for v in [xoff, yoff, zoff]]
# plot labels
size = r_factor * txt_scale * 0.75
for xx, yy, zz, label in zip(x, y, z, labels):
mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label, figure=fig,
color=(1,1,1), scale=size)
## add translucent dec=0 surface
n = 101
t = np.linspace(0.0, 2*np.pi, n)
r = np.max(dist * np.cos(dec))
x, y = r*np.cos(t), r*np.sin(t)
z = np.zeros(n+1)
x, y = [np.insert(a, 0, 0.0) for a in [x,y]]
triangles = [(0, i, i + 1) for i in range(1, n)]
mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)
## add ra=0 line
line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)
rtxt = '{:.1f} pc'.format(r)
orientation=np.array([180.0, 180.0, 0.0])
mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)
if view is not None:
mlab.view(*view, figure=fig)
return fig
| 31.461364
| 117
| 0.582388
|