hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
02e1eaa7b6938abd4a858ad69dcce256aad518c8 | 2,232 | py | Python | bot.py | Euphorichuman/StarConstellationBot-TelgramBot- | 557dad7ce1d1a96a96b4ed65b796f20a6944e3b7 | [
"Apache-2.0"
] | null | null | null | bot.py | Euphorichuman/StarConstellationBot-TelgramBot- | 557dad7ce1d1a96a96b4ed65b796f20a6944e3b7 | [
"Apache-2.0"
] | null | null | null | bot.py | Euphorichuman/StarConstellationBot-TelgramBot- | 557dad7ce1d1a96a96b4ed65b796f20a6944e3b7 | [
"Apache-2.0"
] | null | null | null | import telegram.ext
import messsages as msg
import functions as f
import matplotlib.pyplot as plt
import traceback
import os
import os.path
from os import path
#Funcin para mandar la figura con todas las estrellas
#Funcin para mandar la figura con todas las estrellas y una constelacin
#Funcin para mandar la figura con todas las estrellas y todas las constelaciones
#Funcin para mandar una lista de las constelaciones disponibles
#Funcin para mandar una lista de las constelaciones disponibles
| 37.2 | 103 | 0.744624 |
02e39765ed97295a34732ab36aceb4ca2cfebe3b | 2,706 | py | Python | tests/unit/states/test_slack.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | tests/unit/states/test_slack.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 1 | 2015-10-05T22:03:10.000Z | 2015-10-05T22:03:10.000Z | tests/unit/states/test_slack.py | amaclean199/salt | 8aaac011b4616e3c9e74a1daafb4a2146a5a430f | [
"Apache-2.0"
] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.states.slack as slack
| 35.605263 | 77 | 0.543607 |
02e3d3385e104cc569c1458b36ecf6ad0a158a11 | 613 | py | Python | lintcode/499.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:49.000Z | 2021-01-08T06:57:49.000Z | lintcode/499.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | null | null | null | lintcode/499.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:52.000Z | 2021-01-08T06:57:52.000Z | """
499. Word Count (Map Reduce)
https://www.lintcode.com/problem/word-count-map-reduce/description
"""
| 29.190476 | 66 | 0.611746 |
02e41ad97ede483bee9810e5ea1fa7bf1e5f726c | 1,205 | py | Python | dataframe/statistic.py | kuangtu/pandas_exec | 659dec5eef488bec11daec33333ff8366a0d1a91 | [
"MIT"
] | null | null | null | dataframe/statistic.py | kuangtu/pandas_exec | 659dec5eef488bec11daec33333ff8366a0d1a91 | [
"MIT"
] | null | null | null | dataframe/statistic.py | kuangtu/pandas_exec | 659dec5eef488bec11daec33333ff8366a0d1a91 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
if __name__ == '__main__':
# countnum()
statfunc()
| 25.104167 | 73 | 0.557676 |
02e6d57771f13dca80d99555989b7627a7fef655 | 6,513 | py | Python | asv/results.py | pitrou/asv | d6efa34f1308a212bc3c2f386f2f6584bbb5398f | [
"BSD-3-Clause"
] | null | null | null | asv/results.py | pitrou/asv | d6efa34f1308a212bc3c2f386f2f6584bbb5398f | [
"BSD-3-Clause"
] | null | null | null | asv/results.py | pitrou/asv | d6efa34f1308a212bc3c2f386f2f6584bbb5398f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import base64
import os
import zlib
from .environment import get_environment
from . import util
def iter_results_for_machine(results, machine_name):
"""
Iterate over all of the result files for a particular machine.
"""
return iter_results(os.path.join(results, machine_name))
def get_existing_hashes(results):
"""
Get all of the commit hashes that have already been tested.
Each element yielded is the pair (hash, date).
"""
hashes = list(set(iter_existing_hashes(results)))
return hashes
def find_latest_result_hash(machine, root):
"""
Find the latest result for the given machine.
"""
root = os.path.join(root, machine)
latest_date = 0
latest_hash = ''
for commit_hash, date in iter_existing_hashes(root):
if date > latest_date:
latest_date = date
latest_hash = commit_hash
return latest_hash
def get_filename(machine, commit_hash, env):
"""
Get the result filename for a given machine, commit_hash and
environment.
"""
return os.path.join(
machine,
"{0}-{1}.json".format(
commit_hash[:8],
env.name))
def add_profile(self, benchmark_name, profile):
"""
Add benchmark profile data.
Parameters
----------
benchmark_name : str
Name of benchmark
profile : bytes
`cProfile` data
"""
self._profiles[benchmark_name] = base64.b64encode(
zlib.compress(profile))
def get_profile(self, benchmark_name):
"""
Get the profile data for the given benchmark name.
"""
return zlib.decompress(
base64.b64decode(self._profiles[benchmark_name]))
def has_profile(self, benchmark_name):
"""
Does the given benchmark data have profiling information?
"""
return benchmark_name in self._profiles
def save(self, result_dir):
"""
Save the results to disk.
Parameters
----------
result_dir : str
Path to root of results tree.
"""
path = os.path.join(result_dir, self._filename)
util.write_json(path, {
'results': self._results,
'params': self._params,
'requirements': self._env.requirements,
'commit_hash': self._commit_hash,
'date': self._date,
'python': self._python,
'profiles': self._profiles
}, self.api_version)
| 25.641732 | 73 | 0.590511 |
02e9695d836ae2a21a14a0f80cc396334b03974f | 1,188 | py | Python | core/secretfinder/utils.py | MakoSec/pacu | f06f110e6c181f34b89b803e7c2024563acc9fbc | [
"BSD-3-Clause"
] | 26 | 2021-03-29T13:39:28.000Z | 2022-03-21T10:57:58.000Z | core/secretfinder/utils.py | MakoSec/pacu | f06f110e6c181f34b89b803e7c2024563acc9fbc | [
"BSD-3-Clause"
] | 1 | 2021-06-02T02:39:40.000Z | 2021-06-02T02:39:40.000Z | core/secretfinder/utils.py | MakoSec/pacu | f06f110e6c181f34b89b803e7c2024563acc9fbc | [
"BSD-3-Clause"
] | 8 | 2021-02-23T12:17:04.000Z | 2022-02-25T13:28:14.000Z | import math
import json
import re
import os
| 21.214286 | 58 | 0.570707 |
02e997ec752171db83c0a7598b23b28d81788b83 | 2,342 | py | Python | validation/step_03_-_predict_state/step_03_-_plot_results.py | martin0004/drone_perception_system | ac76a002179bd1a7219f3c76747bd50aba0a0aea | [
"MIT"
] | 1 | 2021-08-25T08:16:27.000Z | 2021-08-25T08:16:27.000Z | validation/step_03_-_predict_state/step_03_-_plot_results.py | martin0004/drone_perception_system | ac76a002179bd1a7219f3c76747bd50aba0a0aea | [
"MIT"
] | null | null | null | validation/step_03_-_predict_state/step_03_-_plot_results.py | martin0004/drone_perception_system | ac76a002179bd1a7219f3c76747bd50aba0a0aea | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from typing import Tuple
def clean_df_headers(df: pd.DataFrame) -> pd.DataFrame:
"""Remove leading and trailing spaces in DataFrame headers."""
headers = pd.Series(df.columns)
new_headers = [header.strip() for header in headers]
new_headers = pd.Series(new_headers)
df.columns = new_headers
return df
def configure_ax(ax: plt.axes,
df: pd.DataFrame = None,
xlabel: str = None,
ylabel: Tuple[int,int] = None,
ylim: str = None,
title: str = None,
legend: bool = False
) -> plt.axes:
"""Configure Matplotlib axe."""
if df is not None:
x = df.index
for h in df.columns:
y = df[h]
ax.plot(x, y,label=h)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if ylim is not None:
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if legend is not None:
ax.legend()
return ax
if __name__ == "__main__":
# Load sensor data
df_data = pd.read_csv("step_03_-_scenario_08_-_after_tuning.txt")
# Remove leading and trailing spaces in df headers
df_data = clean_df_headers(df_data)
# Set "time" column as DataFrame index
df_data = df_data.set_index("time")
# Plot results
fig = plt.figure()
fig.suptitle("True & Predicted States \n (Global Frame)")
# X-Position and X-Speed
ax = plt.subplot(3,1,1)
df = df_data[["quad.pos.x", "quad.est.x", "quad.vel.x", "quad.est.vx"]]
ax = configure_ax(ax, df = df, ylabel = "X-Positions [m] \n X-Velocities [m/s]", title = "After Tuning", legend = True)
# Y-Position and Y-Speed
ax = plt.subplot(3,1,2)
df = df_data[["quad.pos.y", "quad.est.y", "quad.vel.y", "quad.est.vy"]]
ax = configure_ax(ax, df = df, ylabel = "Y-Positions [m] \n Y-Velocities [m/s]", legend = True)
# Z-Position and Z-Speed
ax = plt.subplot(3,1,3)
df = df_data[["quad.pos.z", "quad.est.z", "quad.vel.z", "quad.est.vz"]]
ax = configure_ax(ax, df = df, xlabel = "Time [s]", ylabel = "Z-Positions [m] \n Z-Velocities [m/s]", legend = True)
plt.show()
| 27.232558 | 123 | 0.585824 |
02eb83c13dc0114b6ab1c905f8a724d75ccb3d34 | 8,036 | py | Python | lvsr/dependency/datasets.py | mzapotoczny/dependency-parser | e37f94e23cb61d6658774f5f9843219df331eb74 | [
"MIT"
] | 3 | 2017-06-07T06:41:18.000Z | 2019-10-26T13:08:23.000Z | lvsr/dependency/datasets.py | mzapotoczny/dependency-parser | e37f94e23cb61d6658774f5f9843219df331eb74 | [
"MIT"
] | null | null | null | lvsr/dependency/datasets.py | mzapotoczny/dependency-parser | e37f94e23cb61d6658774f5f9843219df331eb74 | [
"MIT"
] | 1 | 2020-11-26T17:40:18.000Z | 2020-11-26T17:40:18.000Z | '''
Created on Mar 20, 2016
'''
import numpy
import numbers
from fuel.datasets.hdf5 import H5PYDataset
from fuel.utils import Subset
| 40.585859 | 87 | 0.56558 |
02ebdfddbb50d875cc9962bf326ad8e9c362cfea | 1,444 | py | Python | setup.py | LandRegistry/govuk-frontend-wtf | 3ac1501dd220ad8f4cff0137f2d87e973c9e1243 | [
"MIT"
] | 10 | 2021-02-02T11:38:42.000Z | 2022-01-21T15:10:23.000Z | setup.py | LandRegistry/govuk-frontend-wtf | 3ac1501dd220ad8f4cff0137f2d87e973c9e1243 | [
"MIT"
] | 23 | 2021-04-26T09:19:22.000Z | 2022-03-31T15:13:31.000Z | setup.py | LandRegistry/govuk-frontend-wtf | 3ac1501dd220ad8f4cff0137f2d87e973c9e1243 | [
"MIT"
] | 6 | 2021-02-04T11:09:51.000Z | 2021-06-01T08:39:02.000Z | import glob
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
templates = []
directories = glob.glob("govuk_frontend_wtf/templates/*.html")
for directory in directories:
templates.append(os.path.relpath(os.path.dirname(directory), "govuk_frontend_wtf") + "/*.html")
setuptools.setup(
name="govuk-frontend-wtf",
version="1.0.0",
author="Matt Shaw",
author_email="matthew.shaw@landregistry.gov.uk",
description="GOV.UK Frontend WTForms Widgets",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/LandRegistry/govuk-frontend-wtf",
packages=setuptools.find_packages(exclude=["tests"]),
package_data={"govuk_frontend_wtf": templates},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: User Interfaces",
"Topic :: Text Processing :: Markup :: HTML",
],
python_requires=">=3.6",
install_requires=[
"deepmerge",
"flask",
"flask-wtf",
"govuk-frontend-jinja<2.0.0",
"jinja2",
"wtforms",
],
)
| 31.391304 | 99 | 0.644044 |
02ebe98586fb9a06d031ee215ed1a172f2753298 | 2,930 | py | Python | project/experiments/exp_003_best_Walker2D/src/4.plot_1.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | project/experiments/exp_003_best_Walker2D/src/4.plot_1.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | project/experiments/exp_003_best_Walker2D/src/4.plot_1.py | liusida/thesis-bodies | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | [
"MIT"
] | null | null | null | import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from common.tflogs2pandas import tflog2pandas, many_logs2pandas
from common.gym_interface import template
bodies = [300]
all_seeds = list(range(20))
all_stackframe = [0,4]
cache_filename = "output_data/tmp/plot0"
try:
df = pd.read_pickle(cache_filename)
except:
# if True:
dfs = []
for body in bodies:
for seed in all_seeds:
for stackframe in all_stackframe:
path = f"output_data/tensorboard/model-{body}"
if stackframe>0:
path += f"-stack{stackframe}"
path += f"-sd{seed}/SAC_1"
print(f"Loading {path}")
if not os.path.exists(path):
continue
df = tflog2pandas(path)
df["body"] = body
df["seed"] = seed
df["stackframe"] = stackframe
df = df[df["metric"] == f"eval/{body}_mean_reward"]
print(df.shape)
print(df.head())
dfs.append(df)
df = pd.concat(dfs)
df.to_pickle(cache_filename)
print(df.shape)
# df = df[::100]
print(df[df["seed"]==0].head())
print(df[df["seed"]==1].head())
print(df[df["seed"]==2].head())
print(df[df["seed"]==3].head())
df1 = pd.DataFrame(columns=df.columns)
print(df1)
for body in bodies:
for seed in all_seeds:
for stackframe in all_stackframe:
df2 = df[(df["body"]==body) & (df["seed"]==seed) & (df["stackframe"]==stackframe)]
print(df2.shape)
x = df2.iloc[df2["value"].argsort().iloc[-1]]
df1 = df1.append(x)
# for i in range(30):
if False:
step_number = 60000
x = df2.iloc[(df2["step"] - step_number).abs().argsort()[0]]
if abs(x["step"]-step_number)>1500:
print("no")
else:
# print(x)
x = x.copy()
# x["step"] = step_number
df1 = df1.append(x)
df1 = df1[df1["step"]>550000]
print(df1)
print("control")
df2 = df1[df1["stackframe"]==0]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print("treatment: stackframe")
df2 = df1[df1["stackframe"]==4]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print(df1.shape, df.shape)
df = df1
fig, axes = plt.subplots(nrows=1, ncols=1, sharey=True, figsize=[10,10])
sns.barplot(ax=axes, data=df1, x="stackframe", y="value")
# axes = [axes]
# axes = axes.flatten()
# for idx, body in enumerate(bodies):
# sns.lineplot(
# ax=axes[idx],
# data=df[df["body"]==body],
# x="step", y="value", hue="stackframe",
# markers=True, dashes=False
# ).set_title(template(body))
plt.legend()
plt.tight_layout()
plt.savefig("output_data/plots/0.png")
# plt.show() | 31.505376 | 94 | 0.543686 |
02ec3adf599332a9c2e8596007821b919933d4a9 | 167 | py | Python | wsgi.py | emilan21/macvert | ac219507a6b20372861667f4ade8084c9902a231 | [
"MIT"
] | null | null | null | wsgi.py | emilan21/macvert | ac219507a6b20372861667f4ade8084c9902a231 | [
"MIT"
] | null | null | null | wsgi.py | emilan21/macvert | ac219507a6b20372861667f4ade8084c9902a231 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# mac_convertor.py - Converts mac address from various formats to other formats
from macvert.web import app
if __name__ == '__main__':
app.run()
| 20.875 | 79 | 0.742515 |
02ed59dd65e3f0007ed59a3660fc0e47a1a878ad | 461 | py | Python | config/dotenv.py | CharuchithRanjit/open-pos | ac749a0f2a6c59077d2c13f13e776963e130501f | [
"MIT"
] | null | null | null | config/dotenv.py | CharuchithRanjit/open-pos | ac749a0f2a6c59077d2c13f13e776963e130501f | [
"MIT"
] | null | null | null | config/dotenv.py | CharuchithRanjit/open-pos | ac749a0f2a6c59077d2c13f13e776963e130501f | [
"MIT"
] | null | null | null | """
Loads dotenv variables
Classes:
None
Functions:
None
Misc variables:
DATABASE_KEY (str) -- The key for the database
DATABASE_PASSWORD (str) -- The password for the database
DATABASE_URL (str) -- The url for the database
"""
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
DATABASE_KEY = os.environ.get("DATABASE_KEY")
DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD")
DATABASE_URL = os.environ.get("SUPABASE_URL") | 20.954545 | 56 | 0.776573 |
02ef52ac7a4592df5ce1f94d82e027c617d780cc | 1,094 | py | Python | tests/zero_model_test.py | shatadru99/archai | 8501080f8ecc73327979c02387e02011efb4c335 | [
"MIT"
] | 1 | 2020-01-29T18:45:42.000Z | 2020-01-29T18:45:42.000Z | tests/zero_model_test.py | shatadru99/archai | 8501080f8ecc73327979c02387e02011efb4c335 | [
"MIT"
] | null | null | null | tests/zero_model_test.py | shatadru99/archai | 8501080f8ecc73327979c02387e02011efb4c335 | [
"MIT"
] | 1 | 2020-01-31T15:51:53.000Z | 2020-01-31T15:51:53.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from archai.nas.model import Model
from archai.nas.macro_builder import MacroBuilder
from archai.common.common import common_init
| 36.466667 | 75 | 0.698355 |
02f1e0ca932dc0360686a58ec3b261b9a83d5c58 | 13,461 | py | Python | reports/dataset.py | TexasDigitalLibrary/dataverse-reports | 90f849a1b6c0d772d19de336f9f48cd290256392 | [
"MIT"
] | 5 | 2018-10-07T14:37:40.000Z | 2021-09-14T08:57:19.000Z | reports/dataset.py | TexasDigitalLibrary/dataverse-reports | 90f849a1b6c0d772d19de336f9f48cd290256392 | [
"MIT"
] | 11 | 2019-08-30T15:29:37.000Z | 2021-12-20T19:44:37.000Z | reports/dataset.py | TexasDigitalLibrary/dataverse-reports | 90f849a1b6c0d772d19de336f9f48cd290256392 | [
"MIT"
] | 4 | 2018-01-30T18:20:54.000Z | 2021-09-30T09:04:44.000Z | import logging
import datetime | 51.18251 | 174 | 0.553079 |
02f1e521d0c60cd1bdde651eb786414631bc4c55 | 1,377 | py | Python | classifier.py | hemu243/focus-web-crawler | 8e882315d947f04b207ec76a64fa952f18105d73 | [
"MIT"
] | 2 | 2020-02-03T02:31:09.000Z | 2021-02-03T11:54:44.000Z | classifier.py | hemu243/focus-web-crawler | 8e882315d947f04b207ec76a64fa952f18105d73 | [
"MIT"
] | null | null | null | classifier.py | hemu243/focus-web-crawler | 8e882315d947f04b207ec76a64fa952f18105d73 | [
"MIT"
] | null | null | null | #
from abc import ABCMeta
import metapy
| 30.6 | 97 | 0.760349 |
02f2dc9948709df77cd05687fd7477b4be25fe0c | 609 | py | Python | backend/tests/access/test_access_event_publish.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | backend/tests/access/test_access_event_publish.py | fjacob21/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | 88 | 2016-11-12T14:54:38.000Z | 2018-08-02T00:25:07.000Z | backend/tests/access/test_access_event_publish.py | mididecouverte/mididecweb | b65f28eb6fdeafa265796b6190a4264a5eac54ce | [
"MIT"
] | null | null | null | from src.access import EventPublishAccess
from generate_access_data import generate_access_data
| 38.0625 | 66 | 0.760263 |
02f3815c21333fd777c5f7c2b3c081090f107885 | 4,296 | py | Python | aiorabbitmq_admin/base.py | miili/aiorabbitmq-admin | 38df67a77cd029429af9add12ead3152f58ed748 | [
"MIT"
] | null | null | null | aiorabbitmq_admin/base.py | miili/aiorabbitmq-admin | 38df67a77cd029429af9add12ead3152f58ed748 | [
"MIT"
] | null | null | null | aiorabbitmq_admin/base.py | miili/aiorabbitmq-admin | 38df67a77cd029429af9add12ead3152f58ed748 | [
"MIT"
] | null | null | null | import json
import aiohttp
from copy import deepcopy
| 30.253521 | 112 | 0.570531 |
02f390bbfb313d944ca9d6c202d4c1f28b3a192e | 115,464 | py | Python | from_3b1b/old/highD.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | from_3b1b/old/highD.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | from_3b1b/old/highD.py | tigerking/manim2 | 93e8957e433b8e59acb5a5213a4074ee0125b823 | [
"MIT"
] | null | null | null | from manim2.imports import *
##########
#force_skipping
#revert_to_original_skipping_status
##########
##########
| 31.817029 | 99 | 0.561275 |
02f4fc8fa710340e57d5ba18128bb096623e09a7 | 871 | py | Python | start_palpeo.py | RealDebian/Palpeo | 23be184831a3c529cf933277944e7aacda08cdad | [
"MIT"
] | null | null | null | start_palpeo.py | RealDebian/Palpeo | 23be184831a3c529cf933277944e7aacda08cdad | [
"MIT"
] | null | null | null | start_palpeo.py | RealDebian/Palpeo | 23be184831a3c529cf933277944e7aacda08cdad | [
"MIT"
] | null | null | null | from link_extractor import run_enumeration
from colorama import Fore
from utils.headers import HEADERS
from time import sleep
import requests
import database
import re
import json
from bs4 import BeautifulSoup
import colorama
print(Fore.GREEN + '-----------------------------------' + Fore.RESET, Fore.RED)
print(' - Website Link Extractor')
print(' by @RealDebian | V0.02')
print(Fore.GREEN + '-----------------------------------' + Fore.RESET)
print()
sleep(1)
print('Example:')
print()
target_host = str(input('Target Site: '))
print('Select the Protocol (http|https)')
sleep(.5)
protocol = str(input('http=0 | https=1: '))
while True:
if protocol == '0':
run_enumeration('http://' + target_host)
break
elif protocol == '1':
run_enumeration('https://' + target_host)
break
else:
print('Wrong option!')
| 24.194444 | 80 | 0.624569 |
02f5826c6c30c33aa057a91cc4e4070320f7be69 | 4,994 | py | Python | tests/test_scores_das_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | 5 | 2021-12-16T14:08:00.000Z | 2022-03-02T14:08:10.000Z | tests/test_scores_das_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | null | null | null | tests/test_scores_das_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | null | null | null | import xarray
import numpy
from enstools.scores import DisplacementAmplitudeScore
def test_embed_image():
"""
test of embed_image from match_pyramide_ic
"""
# create test image
test_im = xarray.DataArray(numpy.random.randn(5, 3))
# new array should have shape (8, 4)
result = DisplacementAmplitudeScore.match_pyramid_ic.embed_image(test_im, 4)
numpy.testing.assert_array_equal(numpy.array(result.shape), numpy.array((8, 4)))
# new array should have shape (24, 6)
result = DisplacementAmplitudeScore.match_pyramid_ic.embed_image(test_im, 4, 3, 3)
numpy.testing.assert_array_equal(numpy.array(result.shape), numpy.array((24, 6)))
# input image should be part of result image
numpy.testing.assert_array_equal(test_im, result[:5, :3])
def test_map_backwards():
"""
test of backward mapping from match_pyramide_ic
"""
# create test image
test_im = numpy.zeros((5, 5))
test_im[2, 2] = 1
# create displacement vectors
xdis = numpy.ones((5, 5))
ydis = xdis
# apply mapping
result = DisplacementAmplitudeScore.match_pyramid_ic.map_backward(test_im, xdis, ydis)
expected = numpy.zeros((5, 5))
expected[1, 1] = 1
numpy.testing.assert_array_equal(result, expected)
def test_gauss_kern():
"""
test of gauss_kern from match_pyramide_ic
"""
result = DisplacementAmplitudeScore.match_pyramid_ic.gauss_kern(1, 1)
numpy.testing.assert_equal(result.sum(), 1)
def test_downsize():
"""
test of downsize from match_pyramid
"""
# create test image
test_image = numpy.random.randn(4, 4)
# downsize by factor 2
result = DisplacementAmplitudeScore.match_pyramid_ic.downsize(test_image, 2)
numpy.testing.assert_equal(result[0, 0], test_image[0:2, 0:2].mean())
def test_match_pyramid():
"""
test of match_pyramid from match_pyramid
"""
# create two test images
im1 = numpy.zeros((5, 5))
im1[1:3, 1:3] = 1
im2 = numpy.zeros((5, 5))
im2[2:4, 2:4] = 1
result, xdis, ydis, lse = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(im1, im2)
numpy.testing.assert_array_almost_equal(numpy.round(result), im2)
def test_calc_das():
"""
test of pure das calculation calc_das from calc_das.py
"""
# create two test images
obs = numpy.zeros((5, 5))
obs[1:3, 1:3] = 1
fct = numpy.zeros((5, 5))
fct[2:4, 2:4] = 1
# morph fct to obs,obs-space
morph_o, xdis_o, ydis_o, lse_o = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(fct, obs)
# morph obs to fct,fct-space
morph_f, xdis_f, ydis_f, lse_f = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(obs, fct)
# reproduce expected values
das, dis, amp, rms_obs = DisplacementAmplitudeScore.calc_das.calc_das(obs, fct, xdis_o, ydis_o,
lse_o, xdis_f, ydis_f, lse_f,
dis_max=5, threshold=0.5)
expected = (0.48602544875444409, 0.35238775926722798, 0.1336376894872161, 1.0)
numpy.testing.assert_array_almost_equal((das, dis, amp, rms_obs), expected)
def test_threshold_data():
"""
test of threshold data from calc_das
"""
# create test data
obs = numpy.random.randn(10, 10)
sum_obs = numpy.sum(obs)
# set everything below 1 to zero
filtered = DisplacementAmplitudeScore.calc_das.threshold_data(obs, 1)
for x in range(10):
for y in range(10):
numpy.testing.assert_equal(filtered[x, y] == 0 or filtered[x, y] > 1, True)
# the input array should remain unchanged
numpy.testing.assert_equal(numpy.sum(obs), sum_obs)
def test_das():
"""
test of the actual DAS score
"""
# create test data
obs = numpy.zeros((100, 100))
obs[50:52, 50:52] = 2
fct = numpy.zeros((100, 100))
fct[51:53, 51:53] = 2
# perform calculation
das = DisplacementAmplitudeScore.das(obs, fct)
numpy.testing.assert_array_almost_equal(das["das"], 0.857092469745)
numpy.testing.assert_array_almost_equal(das["dis"], 0.027265825324)
numpy.testing.assert_array_almost_equal(das["amp"], 0.829826644421)
numpy.testing.assert_array_almost_equal(das["rms_obs"], 0.11111111)
# perfect score
das = DisplacementAmplitudeScore.das(obs, obs)
numpy.testing.assert_array_almost_equal(das["das"], 0.0)
numpy.testing.assert_array_almost_equal(das["dis"], 0.0)
numpy.testing.assert_array_almost_equal(das["amp"], 0.0)
# only values below threshold
obs[50:52, 50:52] = 1
fct[51:53, 51:53] = 1
das = DisplacementAmplitudeScore.das(obs, fct, threshold=1)
numpy.testing.assert_array_equal(das["das"], numpy.nan)
numpy.testing.assert_array_equal(das["dis"], numpy.nan)
numpy.testing.assert_array_equal(das["amp"], numpy.nan)
numpy.testing.assert_array_equal(das["rms_obs"], numpy.nan)
| 33.293333 | 119 | 0.665999 |
02f6d5351b6d28ac6a5a83e1bce309686a5a07fc | 833 | py | Python | src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py | tejpratap545/E-Commerce-Application | c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a | [
"MIT"
] | null | null | null | src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py | tejpratap545/E-Commerce-Application | c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a | [
"MIT"
] | 7 | 2021-08-13T23:05:47.000Z | 2022-02-27T10:23:46.000Z | src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py | tejpratap545/E-Commerce-Application | c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-28 14:38
from django.db import migrations, models
| 26.03125 | 80 | 0.57503 |
02f729610f64d4759bc9416f6b95eedcf29070ca | 1,804 | py | Python | aoc/event2019/day19/solve.py | rjbatista/AoC | 5c6ca4bcb376c24ec730eb12fd7044f5326ee473 | [
"MIT"
] | null | null | null | aoc/event2019/day19/solve.py | rjbatista/AoC | 5c6ca4bcb376c24ec730eb12fd7044f5326ee473 | [
"MIT"
] | null | null | null | aoc/event2019/day19/solve.py | rjbatista/AoC | 5c6ca4bcb376c24ec730eb12fd7044f5326ee473 | [
"MIT"
] | null | null | null | from event2019.day13.computer_v4 import Computer_v4
########
# PART 1
computer = Computer_v4([])
computer.load_code("event2019/day19/input.txt")
answer = sum([width for x, width in get_area() if x != None])
print("Part 1 =", answer)
assert answer == 203 # check with accepted answer
########
# PART 2
x, y = get_top_right_in_beam()
answer = 10000 * x + y
print("Part 2 =", 10000 * x + y)
assert answer == 8771057 # check with accepted answer
| 21.73494 | 114 | 0.471729 |
02f79e3624d623adc544da46b4a6554d6c1bfa3b | 849 | py | Python | fileo/accounts/forms.py | Tiqur/Fileo | 0c663f3bb28985d2d7b4cb475a95b1592cfb2013 | [
"MIT"
] | null | null | null | fileo/accounts/forms.py | Tiqur/Fileo | 0c663f3bb28985d2d7b4cb475a95b1592cfb2013 | [
"MIT"
] | null | null | null | fileo/accounts/forms.py | Tiqur/Fileo | 0c663f3bb28985d2d7b4cb475a95b1592cfb2013 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm
from .models import FileoUser
User = FileoUser()
| 29.275862 | 82 | 0.69258 |
02f7dfdc4c7be780ca3def3290b1d78bbe909246 | 959 | py | Python | setup.py | jnsgruk/lightkube-models | 7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d | [
"MIT"
] | 1 | 2021-10-14T08:49:10.000Z | 2021-10-14T08:49:10.000Z | setup.py | jnsgruk/lightkube-models | 7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d | [
"MIT"
] | 2 | 2021-10-14T18:09:31.000Z | 2021-10-14T18:09:52.000Z | setup.py | jnsgruk/lightkube-models | 7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d | [
"MIT"
] | 1 | 2021-10-13T15:08:58.000Z | 2021-10-13T15:08:58.000Z | from setuptools import setup
from pathlib import Path
from lightkube.models import __version__
setup(
name='lightkube-models',
version=__version__,
description='Models and Resources for lightkube module',
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
author='Giuseppe Tribulato',
author_email='gtsystem@gmail.com',
license='Apache Software License',
url='https://github.com/gtsystem/lightkube-models',
packages=['lightkube.models', 'lightkube.resources'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
)
| 33.068966 | 60 | 0.667362 |
02f8318053016bd127b7feb86e89f4c704276dce | 465 | py | Python | kagi/upper/west/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | kagi/upper/west/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | kagi/upper/west/_capital/four.py | jedhsu/kagi | 1301f7fc437bb445118b25ca92324dbd58d6ad2d | [
"MIT"
] | null | null | null | """
*Upper-West Capital 4*
The upper-west capital four gi.
"""
from dataclasses import dataclass
from ....._gi import Gi
from ....capital import CapitalGi
from ...._gi import StrismicGi
from ....west import WesternGi
from ...._number import FourGi
from ..._gi import UpperGi
__all__ = ["UpperWestCapital4"]
| 15 | 33 | 0.668817 |
02f87c91bee648002483bc9254e7698d4ec9f8f2 | 5,626 | py | Python | tests/test_dictattr.py | atsuoishimoto/jashin | 6705839461dd9fdfe50cbc6f93fe9ba2da889f0a | [
"MIT"
] | 1 | 2020-06-04T23:44:48.000Z | 2020-06-04T23:44:48.000Z | tests/test_dictattr.py | sojin-project/jashin | 6705839461dd9fdfe50cbc6f93fe9ba2da889f0a | [
"MIT"
] | null | null | null | tests/test_dictattr.py | sojin-project/jashin | 6705839461dd9fdfe50cbc6f93fe9ba2da889f0a | [
"MIT"
] | null | null | null | import enum
from typing import Any, Dict
from jashin.dictattr import *
| 24.25 | 80 | 0.545325 |
02f8b65e136d03ceacb32c0a454b3d2ad573a0cb | 191 | py | Python | acmicpc/5612.py | juseongkr/BOJ | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 7 | 2020-02-03T10:00:19.000Z | 2021-11-16T11:03:57.000Z | acmicpc/5612.py | juseongkr/Algorithm-training | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 1 | 2021-01-03T06:58:24.000Z | 2021-01-03T06:58:24.000Z | acmicpc/5612.py | juseongkr/Algorithm-training | 8f10a2bf9a7d695455493fbe7423347a8b648416 | [
"Apache-2.0"
] | 1 | 2020-01-22T14:34:03.000Z | 2020-01-22T14:34:03.000Z | n = int(input())
m = int(input())
r = m
for i in range(n):
a, b = map(int, input().split())
m += a
m -= b
if m < 0:
print(0)
exit()
r = max(r, m)
print(r)
| 14.692308 | 36 | 0.418848 |
02f9422687e1cf10a5083c7345c12d1a45915872 | 66,679 | py | Python | tests/function/test_func_partition.py | ddimatos/zhmc-ansible-modules | 6eb29056052f499021a4bab26539872b25050640 | [
"Apache-2.0"
] | null | null | null | tests/function/test_func_partition.py | ddimatos/zhmc-ansible-modules | 6eb29056052f499021a4bab26539872b25050640 | [
"Apache-2.0"
] | null | null | null | tests/function/test_func_partition.py | ddimatos/zhmc-ansible-modules | 6eb29056052f499021a4bab26539872b25050640 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017-2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Function tests for the 'zhmc_partition' Ansible module.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import mock
import re
from zhmcclient import Client
from zhmcclient_mock import FakedSession
from plugins.modules import zhmc_partition
from .func_utils import mock_ansible_module
# FakedSession() init arguments
FAKED_SESSION_KWARGS = dict(
host='fake-host',
hmc_name='faked-hmc-name',
hmc_version='2.13.1',
api_version='1.8'
)
# Faked Console that is used for all tests
# (with property names as specified in HMC data model)
FAKED_CONSOLE_URI = '/api/console'
FAKED_CONSOLE = {
'object-uri': FAKED_CONSOLE_URI,
'class': 'console',
'name': 'hmc-1',
'description': 'Console HMC1',
'version': '2.13.0',
}
# Faked CPC in DPM mode that is used for all tests
# (with property names as specified in HMC data model)
FAKED_CPC_1_OID = 'fake-cpc-1'
FAKED_CPC_1_URI = '/api/cpcs/' + FAKED_CPC_1_OID
FAKED_CPC_1 = {
'object-id': FAKED_CPC_1_OID,
'object-uri': FAKED_CPC_1_URI,
'class': 'cpc',
'name': 'cpc-name-1',
'description': 'CPC #1 in DPM mode',
'status': 'active',
'dpm-enabled': True,
'is-ensemble-member': False,
'iml-mode': 'dpm',
}
# Faked partition that is used for these tests. Most properties are set to
# their default values. Note, we are prepping a faked partition; we are not
# passing these properties to PartitionManager.create().
FAKED_PARTITION_1_NAME = 'part-name-1'
FAKED_PARTITION_1_OID = 'fake-part-1'
FAKED_PARTITION_1_URI = '/api/partitions/' + FAKED_PARTITION_1_OID
FAKED_PARTITION_1 = {
'object-id': FAKED_PARTITION_1_OID,
'object-uri': FAKED_PARTITION_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'partition',
'name': FAKED_PARTITION_1_NAME,
'description': 'Partition #1',
'short-name': 'PART1',
'partition-id': '4F',
'ifl-processors': 1,
'initial-memory': 1024,
'maximum-memory': 2048,
'status': 'stopped',
'acceptable-status': ['active', 'stopped'],
'has-unacceptable-status': False,
# The remaining properties get their default values:
'is-locked': False,
'type': 'linux',
'autogenerate-partition-id': True,
'os-name': '',
'os-type': '',
'os-version': '',
'reserve-resources': False,
'degraded-adapters': [],
'processor-mode': 'shared',
'cp-processors': 0,
'ifl-absolute-processor-capping': False,
'cp-absolute-processor-capping': False,
'ifl-absolute-processor-capping-value': 1.0,
'cp-absolute-processor-capping-value': 1.0,
'ifl-processing-weight-capped': False,
'cp-processing-weight-capped': False,
'minimum-ifl-processing-weight': 1,
'minimum-cp-processing-weight': 1,
'initial-ifl-processing-weight': 100,
'initial-cp-processing-weight': 100,
'current-ifl-processing-weight': 42,
'current-cp-processing-weight': 100,
'maximum-ifl-processing-weight': 999,
'maximum-cp-processing-weight': 999,
'processor-management-enabled': False,
'reserved-memory': 1024,
'auto-start': False,
'boot-device': 'none',
'boot-network-device': None,
'boot-ftp-host': None,
'boot-ftp-username': None,
'boot-ftp-password': None,
'boot-ftp-insfile': None,
'boot-removable-media': None,
'boot-removable-media-type': None,
'boot-timeout': 60,
'boot-storage-device': None,
'boot-logical-unit-number': '',
'boot-world-wide-port-name': '',
'boot-configuration-selector': 0,
'boot-record-lba': None,
'boot-os-specific-parameters': None,
'boot-iso-image-name': None,
'boot-iso-ins-file': None,
'access-global-performance-data': False,
'permit-cross-partition-commands': False,
'access-basic-counter-set': False,
'access-problem-state-counter-set': False,
'access-crypto-activity-counter-set': False,
'access-extended-counter-set': False,
'access-coprocessor-group-set': False,
'access-basic-sampling': False,
'access-diagnostic-sampling': False,
'permit-des-key-import-functions': True,
'permit-aes-key-import-functions': True,
'threads-per-processor': 0,
'virtual-function-uris': [],
'nic-uris': [],
'hba-uris': [],
'storage-group-uris': [],
'crypto-configuration': None,
# SSC-only properties; they are not present for type='linux'
# 'ssc-host-name': None,
# 'ssc-boot-selection': None,
# 'ssc-ipv4-gateway': None,
# 'ssc-dns-servers': None,
# 'ssc-master-userid': None,
# 'ssc-master-pw': None,
}
# Faked HBA that is used for these tests (for partition boot from storage).
# Most properties are set to their default values.
FAKED_HBA_1_NAME = 'hba-1'
FAKED_HBA_1_OID = 'fake-hba-1'
FAKED_HBA_1_URI = FAKED_PARTITION_1_URI + '/hbas/' + FAKED_HBA_1_OID
FAKED_HBA_1 = {
'element-id': FAKED_HBA_1_OID,
'element-uri': FAKED_HBA_1_URI,
'parent': FAKED_PARTITION_1_URI,
'class': 'hba',
'name': FAKED_HBA_1_NAME,
'description': 'HBA #1',
'device_number': '012F',
'wwpn': 'abcdef0123456789',
'adapter-port-uri': 'faked-adapter-port-uri',
}
# Faked adapter, port and vswitch used for the OSA NIC.
FAKED_ADAPTER_1_NAME = 'osa adapter #1'
FAKED_ADAPTER_1_OID = 'fake-osa-adapter-1'
FAKED_ADAPTER_1_URI = '/api/adapters/' + FAKED_ADAPTER_1_OID
FAKED_ADAPTER_1_ID = '110'
FAKED_PORT_1_INDEX = 0
FAKED_PORT_1_NAME = 'Port #1'
FAKED_PORT_1_OID = 'fake-port-1'
FAKED_PORT_1_URI = '/api/adapters/' + FAKED_ADAPTER_1_OID + '/ports/' + \
FAKED_PORT_1_OID
FAKED_VSWITCH_1_NAME = 'vswitch-1'
FAKED_VSWITCH_1_OID = 'fake-vswitch-1'
FAKED_VSWITCH_1_URI = '/api/virtual-switches/' + FAKED_VSWITCH_1_OID
FAKED_ADAPTER_1 = {
'object-id': FAKED_ADAPTER_1_OID,
'object-uri': FAKED_ADAPTER_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'adapter',
'name': FAKED_ADAPTER_1_NAME,
'description': 'OSA adapter #1',
'type': 'osd',
'adapter-family': 'osa',
'port-count': 1,
'network-port-uris': [FAKED_PORT_1_URI],
'adapter-id': FAKED_ADAPTER_1_ID,
}
FAKED_PORT_1 = {
'element-id': FAKED_PORT_1_OID,
'element-uri': FAKED_PORT_1_URI,
'parent': FAKED_ADAPTER_1_URI,
'class': 'network-port',
'name': FAKED_PORT_1_NAME,
'description': 'Port #1 of OSA adapter #1',
'index': FAKED_PORT_1_INDEX,
}
FAKED_VSWITCH_1 = {
'object-id': FAKED_VSWITCH_1_OID,
'object-uri': FAKED_VSWITCH_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'virtual-switch',
'name': FAKED_VSWITCH_1_NAME,
'description': 'vswitch for OSA adapter #1',
'type': 'osd',
'backing-adapter-uri': FAKED_ADAPTER_1_URI,
'port': FAKED_PORT_1_INDEX,
}
# Faked OSA NIC that is used for these tests (for partition boot from storage).
# Most properties are set to their default values.
FAKED_NIC_1_NAME = 'nic-1'
FAKED_NIC_1_OID = 'fake-nic-1'
FAKED_NIC_1_URI = FAKED_PARTITION_1_URI + '/nics/' + FAKED_NIC_1_OID
FAKED_NIC_1 = {
'element-id': FAKED_NIC_1_OID,
'element-uri': FAKED_NIC_1_URI,
'parent': FAKED_PARTITION_1_URI,
'class': 'nic',
'name': FAKED_NIC_1_NAME,
'description': 'NIC #1',
'device_number': '022F',
'virtual-switch-uri': FAKED_VSWITCH_1_URI,
'type': 'osd',
'ssc-management-nic': False,
'mac-address': 'fa:ce:da:dd:6e:55',
}
# Faked crypto adapters
# (with property names as specified in HMC data model)
FAKED_CRYPTO_ADAPTER_1 = {
'object-id': 'crypto-adapter-oid-1',
# We need object-uri for the assertions
'object-uri': '/api/cpcs/cpc-oid-1/adapters/crypto-adapter-oid-1',
'parent': '/api/cpcs/cpc-oid-1',
'class': 'adapter',
'name': 'crypto-adapter-name-1',
'crypto-number': 1,
'crypto-type': 'ep11-coprocessor',
'udx-loaded': True,
'description': 'Crypto adapter #1',
'status': 'active',
'type': 'crypto',
'adapter-id': '02A',
'adapter-family': 'crypto',
'detected-card-type': 'crypto-express-5s',
'card-location': 'vvvv-wwww',
'state': 'online',
'physical-channel-status': 'operating',
}
FAKED_CRYPTO_ADAPTER_2 = {
'object-id': 'crypto-adapter-oid-2',
# We need object-uri for the assertions
'object-uri': '/api/cpcs/cpc-oid-1/adapters/crypto-adapter-oid-2',
'parent': '/api/cpcs/cpc-oid-1',
'class': 'adapter',
'name': 'crypto-adapter-name-2',
'crypto-number': 2,
'crypto-type': 'cca-coprocessor',
'udx-loaded': True,
'description': 'Crypto adapter #2',
'status': 'active',
'type': 'crypto',
'adapter-id': '02B',
'adapter-family': 'crypto',
'detected-card-type': 'crypto-express-5s',
'card-location': 'vvvv-wwww',
'state': 'online',
'physical-channel-status': 'operating',
}
# Translation table from 'state' module input parameter to corresponding
# desired partition 'status' property value. 'None' means the partition
# does not exist.
PARTITION_STATUS_FROM_STATE = {
'absent': None,
'stopped': 'stopped',
'active': 'active',
}
def get_failure_msg(mod_obj):
    """
    Return the module failure message, as a string (i.e. the 'msg' argument
    of the call to fail_json()).

    If the module succeeded, return None.
    """
    if not mod_obj.fail_json.called:
        return None
    call_args = mod_obj.fail_json.call_args

    # The following makes sure we get the arguments regardless of whether they
    # were specified as positional or keyword arguments.
    # Note: the original body called an undefined name 'func'; the helper it
    # relied on is restored here.
    def func(msg):
        return msg

    return func(*call_args[0], **call_args[1])
def get_module_output(mod_obj):
    """
    Return the module output as a tuple (changed, partition_properties) (i.e.
    the arguments of the call to exit_json()).

    If the module failed, return None.
    """
    if not mod_obj.exit_json.called:
        return None
    call_args = mod_obj.exit_json.call_args

    # The following makes sure we get the arguments regardless of whether they
    # were specified as positional or keyword arguments.
    # Note: the original body called an undefined name 'func'; the helper is
    # restored here. Parameter names follow the exit_json() call of the
    # zhmc_partition module ('changed', 'partition') -- verify against caller.
    def func(changed, partition):
        return changed, partition

    return func(*call_args[0], **call_args[1])
CRYPTO_CONFIG_SUCCESS_TESTCASES = [
(
"No_change_to_empty_config",
# adapters:
[],
# initial_config:
None,
# input_props:
None,
# exp_config:
None,
# exp_changed:
False
),
(
"Add adapter to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
],
},
# exp_changed:
True
),
(
"Add domain to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Add adapter+domain to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Change access mode of domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"No change to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
False
),
(
"Add adapter to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
FAKED_CRYPTO_ADAPTER_2['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Add domain to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
dict(domain_index=3, access_mode='control'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# exp_changed:
True
),
(
"Add adapter+domain to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
FAKED_CRYPTO_ADAPTER_2['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
dict(domain_index=3, access_mode='control'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# exp_changed:
True
),
(
"Remove adapter+domain from adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
],
crypto_domain_configurations=[
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
],
'crypto-domain-configurations': [
],
},
# exp_changed:
True
),
(
"Remove adapter+domain from 2 adapters + 2 domains",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Check domain index numbers provided as strings",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
# Here we provide the domain index as a string:
dict(domain_index="2", access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
]
| 36.238587 | 79 | 0.558182 |
02f942ae72f558610fdbd2e0d719bb8a1bc37d6c | 1,849 | py | Python | users/models.py | uoe-compsci-grp30/campusgame | d2d7ba99210f352a7b45a1db06cea0a09e3b8c31 | [
"MIT"
] | null | null | null | users/models.py | uoe-compsci-grp30/campusgame | d2d7ba99210f352a7b45a1db06cea0a09e3b8c31 | [
"MIT"
] | null | null | null | users/models.py | uoe-compsci-grp30/campusgame | d2d7ba99210f352a7b45a1db06cea0a09e3b8c31 | [
"MIT"
] | null | null | null | import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
"""
The user model that represents a user participating in the game.
Implemented using the built-in Django user model: AbstractUser.
"""
| 52.828571 | 449 | 0.760411 |
02fa655762a8c5f87ff87bed426342d23902e763 | 4,743 | py | Python | slidingwindow_generator/slidingwindow_generator.py | flashspys/SlidingWindowGenerator | bdcefd9506732ea9c9734bd4e8e81a884b78f08c | [
"Apache-2.0"
] | 3 | 2021-03-27T12:50:36.000Z | 2022-01-16T15:30:22.000Z | slidingwindow_generator/slidingwindow_generator.py | flashspys/SlidingWindowGenerator | bdcefd9506732ea9c9734bd4e8e81a884b78f08c | [
"Apache-2.0"
] | 3 | 2020-10-07T05:28:46.000Z | 2020-11-05T08:32:01.000Z | slidingwindow_generator/slidingwindow_generator.py | flashspys/SlidingWindowGenerator | bdcefd9506732ea9c9734bd4e8e81a884b78f08c | [
"Apache-2.0"
] | 1 | 2020-11-08T23:39:20.000Z | 2020-11-08T23:39:20.000Z | import numpy as np
import tensorflow as tf
| 37.346457 | 79 | 0.59709 |
02fb4db8ebfb72289be41e8479130a4d82ec14a9 | 1,737 | py | Python | carla/util.py | dixantmittal/intelligent-autonomous-vehicle-controller | 7ccebabe8ecb972780a492c36f48ef8f1671be71 | [
"MIT"
] | 1 | 2019-12-18T06:23:19.000Z | 2019-12-18T06:23:19.000Z | carla/util.py | dixantmittal/intelligent-autonomous-vehicle-controller | 7ccebabe8ecb972780a492c36f48ef8f1671be71 | [
"MIT"
] | null | null | null | carla/util.py | dixantmittal/intelligent-autonomous-vehicle-controller | 7ccebabe8ecb972780a492c36f48ef8f1671be71 | [
"MIT"
] | null | null | null | # Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB), and the INTEL Visual Computing Lab.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import datetime
import sys
from contextlib import contextmanager
def to_hex_str(header):
return ':'.join('{:02x}'.format(ord(c)) for c in header)
if sys.version_info >= (3, 3):
import shutil
else:
# Workaround for older Python versions.
print_over_same_line._last_line_length = 0
| 28.016129 | 80 | 0.663788 |
02fc1e3721895fe496443e7ceaa950d900683542 | 3,002 | py | Python | examples/session2-fi/start2.py | futurice/PythonInBrowser | 066ab28ffad265efc7968b87f33dab2c68216d9d | [
"MIT"
] | 4 | 2015-12-08T19:34:49.000Z | 2019-09-08T22:11:05.000Z | examples/session2-fi/start2.py | futurice/PythonInBrowser | 066ab28ffad265efc7968b87f33dab2c68216d9d | [
"MIT"
] | 18 | 2016-10-14T13:48:39.000Z | 2019-10-11T12:14:21.000Z | examples/session2-fi/start2.py | futurice/PythonInBrowser | 066ab28ffad265efc7968b87f33dab2c68216d9d | [
"MIT"
] | 4 | 2015-11-18T15:18:43.000Z | 2018-03-02T09:36:23.000Z | # Kydn lpi mit opimme viime viikolla (ja mys jotakin uutta)
# Jos jokin asia mietitytt, kysy vain rohkeasti apua!
##### INFO #####
# Viime viikon trkeimmt asiat olivat:
# 1. print-komento
# 2. muuttujan kytt
# 3. kilpikonnan kyttminen piirtmiseen
# Ohelmointi vaatii usein tiedon etsimist muista lhteist
# ja uuden tiedon soveltamista omaan ohjelmaasi.
# Kytnnss tietoa ohjelmoinnista lyt hyvin Internetist.
# Kyt viime viikon tehtvi lhteen tehdesssi seuraavia tehtvi
##### TEHTVT #####
##### TEHTV 1 #####
# 1. kirjoita koodinptk joka printtaa kaksi rivi
# Ensimmisell rivill tulee olla teksti:
# "Minun lempivrini on 'lempivrisi'"
# Toisella rivill pit olla yhtl joka laskee
# kuukauden jljell olevat pivt
# VINKKI: tarkista tietokoneelta kuinka monesko piv tnn on ja kuinka monta piv tss kuussa on.
# Printtauksen tulee sislt vain yksi numero: yhtln ratkaisu
# <------ kirjoita koodisi thn (ja klikkaa 'Run' printataksesi)------->
##### TEHTV 2 #####
# Yhten pivn lempivrisi saattaa olla vihre ja toisena oranssi.
# Luo muuttuja nimelt lempivari ja anna sille arvoksi lempivrisi
# <------ kirjoita muuttuja thn ------->
# Kirjoita sitten koodi joka printtaa tekstin "Lempivrini in 'lempivrisi'"
# Kyt tll kertaa muuttujaa lempivari ilmaisemaan lempivrisi
# <------ kirjoita koodisi thn (ja klikkaa 'Run' printataksesi)------->
# Tarkistuksena muuta lempiVari muuttujan arvoa ja klikkaa 'Run'
# tarkista ett lempivri on muuttunut printtauksessa
##### TEHTV 3 #####
# Pystyksemme piirtmn viereiselle piirtoalueelle, meidn tytyy kytt kilpikonnaa
# Tt varten meidn tulee tuoda (importtaa) kilpikonna ja asettaa se muuttujaan.
# <------ Tuo (import) kilpikonna tss ------->
# nin: import turtle
# <------ aseta kilpikonna muuttujaan 'jane', muistatko? ------>
# Piirr seuraava kuvia
#
# eteenpin 50 pikseli, knn 135 astetta oikealle
# eteenpin 100 pikseli, knn 135 astetta oikealle, eteenpin 100 pikseli,
# knn 135 astetta oikealla ja siirr 50 pikseli eteenpin.
#
# Pystytk arvaamaan mink kuvion kilpikonna piirt?
# <------ kirjoita koodisi thn ------->
# On mahdollista piirt mys muilla vreill. Musta on vain oletusvri.
# Kilpikonnan vrin voi muuttaa lismll seuraavan rivin ennen piirtmist:
# jane.color("pink")
# Voit mys kytt muuttujaa mrittksesi piirroksen vrin.
# Muuta muuttujan lempivari arvo englanniksi esim. "green" (vihre), "blue" (sininen) tai "yellow" (keltainen)
# ja korvaa vri vaihtava koodi seuraavalla rivill
#
# jane.color(lempivari)
#
# Muista ett kyttesssi muuttujia et tarvitse lainausmerkkej
# Onnittelut! Olet kynyt lpi viime viikon trkeimmt asiat
# ja oppinut piirtmn eri vreill
##### LISTEHTVT #####
# Mik olisi helpoin tapa piirt kolmio loppuun?
# Muuta muuttujan lempivari arvoa ja kokeile ett se toimii.
# Miten voisit piirt toisen kolmion eri suuntaan ja eri vrill
| 37.525 | 110 | 0.758161 |
02fcd2548a49becf32a01085ecf16e34635af225 | 32,807 | py | Python | train.py | EdwardLeeMacau/PFFNet | dfa6e45062627ce6ab7a1b1a37bada5cccae7167 | [
"MIT"
] | null | null | null | train.py | EdwardLeeMacau/PFFNet | dfa6e45062627ce6ab7a1b1a37bada5cccae7167 | [
"MIT"
] | null | null | null | train.py | EdwardLeeMacau/PFFNet | dfa6e45062627ce6ab7a1b1a37bada5cccae7167 | [
"MIT"
] | null | null | null | """
FileName [ train.py ]
PackageName [ PFFNet ]
Synopsis [ Train the model ]
Usage:
>>> python train.py --normalized --cuda
"""
import argparse
import os
import shutil
from datetime import date
import matplotlib
import numpy as np
import pandas as pd
import torch
import torchvision
import torchvision.models
from torchvision import transforms
from matplotlib import pyplot as plt
from matplotlib import gridspec
from skimage.measure import compare_psnr, compare_ssim
from torch import nn, optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision.transforms import (
CenterCrop, Compose, Normalize, RandomCrop, Resize, ToTensor)
from torchvision.utils import make_grid
import cmdparser
import graphs
import utils
from model import lossnet
from data import DatasetFromFolder
from model.rpnet import Net
from model.rpnet_improve import ImproveNet
from model.lossnet import LossNetwork
# Select Device
device = utils.selectDevice()
cudnn.benchmark = True
# Normalization(Mean Shift)
mean = torch.Tensor([0.485, 0.456, 0.406]).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).to(device)
def getDataset(opt, transform):
    """
    Build the training and validation data loaders.

    Parameters
    ----------
    opt : namespace
        Command line options; opt.train / opt.val are the dataset folders,
        opt.threads the worker count and opt.batchsize the batch size.

    transform : torchvision.transform
        Transformation applied to the images loaded from disk.

    Return
    ------
    train_loader, val_loader : torch.utils.data.DataLoader
    """
    def makeLoader(folder):
        # Both loaders share the exact same configuration; only the
        # source folder differs.
        return DataLoader(
            dataset=DatasetFromFolder(folder, transform=transform),
            num_workers=opt.threads,
            batch_size=opt.batchsize,
            pin_memory=True,
            shuffle=True
        )

    return makeLoader(opt.train), makeLoader(opt.val)
def getOptimizer(model, opt):
    """
    Return the optimizer for the trainable parameters of the model.

    Only parameters with requires_grad=True are handed to the optimizer,
    so frozen layers are left untouched.

    Parameters
    ----------
    model : torch.nn.Module
        The network whose parameters should be optimized.

    opt : namespace
        Command line options; opt.optimizer selects the algorithm, and
        opt.lr / opt.weight_decay (plus opt.b1 / opt.b2 for the Adam
        variants) provide its hyperparameters.

    Return
    ------
    optimizer : torch.optim.Optimizer

    Raises
    ------
    ValueError
        If opt.optimizer names an unsupported algorithm.
    """
    # A filter object can only be consumed once, so build it lazily per branch.
    def trainable():
        return filter(lambda p: p.requires_grad, model.parameters())

    # Note: the original function repeated the Adam/SGD/ASGD/Adadelta/Adagrad
    # branches twice; the duplicates were unreachable dead code and have been
    # removed.
    if opt.optimizer == "Adam":
        optimizer = optim.Adam(
            trainable(), lr=opt.lr, weight_decay=opt.weight_decay)
    elif opt.optimizer == "SGD":
        optimizer = optim.SGD(
            trainable(), lr=opt.lr, weight_decay=opt.weight_decay)
    elif opt.optimizer == "ASGD":
        optimizer = optim.ASGD(
            trainable(), lr=opt.lr, lambd=1e-4, alpha=0.75, t0=1000000.0,
            weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adadelta":
        optimizer = optim.Adadelta(
            trainable(), lr=opt.lr, rho=0.9, eps=1e-06,
            weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adagrad":
        optimizer = optim.Adagrad(
            trainable(), lr=opt.lr, lr_decay=0,
            weight_decay=opt.weight_decay, initial_accumulator_value=0)
    elif opt.optimizer == "SparseAdam":
        optimizer = optim.SparseAdam(
            trainable(), lr=opt.lr, betas=(opt.b1, opt.b2), eps=1e-08)
    elif opt.optimizer == "Adamax":
        # Fixed: the original read 'opt.weight_dacay' here, which raised
        # AttributeError whenever the Adamax branch was taken.
        optimizer = optim.Adamax(
            trainable(), lr=opt.lr, betas=(opt.b1, opt.b2), eps=1e-08,
            weight_decay=opt.weight_decay)
    else:
        raise ValueError(opt.optimizer, " doesn't exist.")

    return optimizer
# TODO: Developing
def logMsg(epoch, iteration, train_loader, perceptual, trainloss, perceloss):
    """
    Compose the per-iteration training log message.

    Fixed: the original 'def' line was missing its trailing colon, which made
    the whole module fail to parse.

    Parameters
    ----------
    epoch, iteration : int
        Current epoch and iteration index within the epoch.

    train_loader : sized iterable
        Only its length is used, to show the iteration count per epoch.

    perceptual : object or None
        When not None, perceptual-loss statistics are appended to the message.

    trainloss, perceloss : sequence of float
        Per-batch image losses and perceptual losses accumulated so far.

    Return
    ------
    msg : str
    """
    msg = "===> [Epoch {}] [{:4d}/{:4d}] ImgLoss: (Mean: {:.6f}, Std: {:.6f})".format(
        epoch, iteration, len(train_loader), np.mean(trainloss), np.std(trainloss)
    )

    if perceptual is not None:
        msg = "\t".join([msg, "PerceptualLoss: (Mean: {:.6f}, Std: {:.6f})".format(np.mean(perceloss), np.std(perceloss))])

    return msg
def getFigureSpec(iteration: int, perceptual: bool):
    """
    Build the 2x2 monitoring figure used during training.

    The four panels are: image loss (linear), image loss (log scale),
    average PSNR, and learning rate (log scale).

    Parameters
    ----------
    iteration : int
        Shown in every x-axis label.

    perceptual : bool
        When True, twin y-axes for the perceptual loss are appended to the
        two loss panels (returned as axis[4] and axis[5]).

    Return
    ------
    fig, axis : matplotlib.figure.Figure, list of matplotlib.axes.Axes
    """
    fig = plt.figure(figsize=(19.2, 10.8))
    layout = gridspec.GridSpec(2, 2)

    # Create the four subplots and label their shared x-axis.
    axis = []
    for cell in layout:
        ax = fig.add_subplot(cell)
        ax.set_xlabel("Epoch(s) / Iteration: {}".format(iteration))
        axis.append(ax)

    loss_ax, logloss_ax, psnr_ax, lr_ax = axis

    # Panel 1: image loss, linear scale.
    loss_ax.set_ylabel("Image Loss")
    loss_ax.set_title("Loss")

    # Panel 2: image loss, log scale.
    logloss_ax.set_yscale("log")
    logloss_ax.set_ylabel("Image Loss")
    logloss_ax.set_title("Loss (Log scale)")

    # Panel 3: PSNR.
    psnr_ax.set_title("Average PSNR")

    # Panel 4: learning rate, log scale.
    lr_ax.set_yscale('log')
    lr_ax.set_title("Learning Rate")

    # Optional twin axes for the perceptual loss on both loss panels.
    if perceptual:
        twin_linear = loss_ax.twinx()
        twin_linear.set_ylabel("Perceptual Loss")
        axis.append(twin_linear)

        twin_log = logloss_ax.twinx()
        twin_log.set_ylabel("Perceptual Loss")
        axis.append(twin_log)

    return fig, axis
def getPerceptualModel(model):
    """
    Return the Perceptual Model

    Fixed: the original body tested the global 'opt.perceptual' instead of
    its own 'model' parameter (the caller passes opt.perceptual in, so the
    observable behavior is unchanged), and the resnet50 branch misspelled
    'pretrained' as 'pertrained'.

    Parameters
    ----------
    model : str
        The name of the perceptual Model.

    Return
    ------
    perceptual : {nn.Module, None}
        Not None if the perceptual model is supported.
    """
    perceptual = None

    if model == 'vgg16':
        print("==========> Using VGG16 as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.vgg16(pretrained=True),
            lossnet.VGG16_Layer
        )

    if model == 'vgg16_bn':
        print("==========> Using VGG16 with Batch Normalization as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.vgg16_bn(pretrained=True),
            lossnet.VGG16_bn_Layer
        )

    if model == 'vgg19':
        print("==========> Using VGG19 as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.vgg19(pretrained=True),
            lossnet.VGG19_Layer
        )

    if model == 'vgg19_bn':
        print("==========> Using VGG19 with Batch Normalization as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.vgg19_bn(pretrained=True),
            lossnet.VGG19_bn_Layer
        )

    if model == "resnet18":
        print("==========> Using Resnet18 as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.resnet18(pretrained=True),
            lossnet.Resnet18_Layer
        )

    if model == "resnet34":
        print("==========> Using Resnet34 as Perceptual Loss Model")

        perceptual = LossNetwork(
            torchvision.models.resnet34(pretrained=True),
            lossnet.Resnet34_Layer
        )

    if model == "resnet50":
        print("==========> Using Resnet50 as Perceptual Loss Model")

        # Fixed: 'pertrained=True' was a typo that raised TypeError.
        perceptual = LossNetwork(
            torchvision.models.resnet50(pretrained=True),
            lossnet.Resnet50_Layer
        )

    return perceptual
# TODO: Developing
def getTrainSpec(opt):
    """
    Initialize every object needed for training.

    Mirrors the setup phase of main() (intended to eventually replace it):
    seeds the RNG, builds the datasets, model, losses, optimizer and
    scheduler, optionally restores weights, moves everything to the chosen
    device, and allocates the NaN-filled statistic containers.

    Parameters
    ----------
    opt : namespace
        Training options (hyperparameters, dataset paths, device flags).

    Return
    ------
    tuple
        (model, optimizer, criterion, perceptual, train_loader, val_loader,
         scheduler, epoch, loss_iter, perc_iter, mse_iter, psnr_iter,
         ssim_iter, lr_iter, iterations, opt, name, fig, axis,
         saveCheckpoint)
    """
    if opt.fixrandomseed:
        # Fixed seed for reproducible runs (CPU and, when enabled, GPU).
        seed = 1334
        torch.manual_seed(seed)
        if opt.cuda: torch.cuda.manual_seed(seed)
    print("==========> Loading datasets")
    # ImageNet mean/std normalization is optional (opt.normalize).
    img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()
    # Dataset
    train_loader, val_loader = getDataset(opt, img_transform)
    # TODO: Parameters Selection
    # TODO: Mean shift Layer Handling
    # Load Model
    print("==========> Building model")
    model = ImproveNet(opt.rb)
    # ----------------------------------------------- #
    #   Loss: L1 Norm / L2 Norm                       #
    #   Perceptual Model (Optional)                   #
    #   TODO Append Layer (Optional)                  #
    # ----------------------------------------------- #
    criterion = nn.MSELoss(reduction='mean')
    perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()
    # ----------------------------------------------- #
    #   Optimizer and learning rate scheduler         #
    # ----------------------------------------------- #
    print("==========> Setting Optimizer: {}".format(opt.optimizer))
    optimizer = getOptimizer(model, opt)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
    # ----------------------------------------------- #
    # Option: resume training process from checkpoint #
    # ----------------------------------------------- #
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
        else:
            raise Exception("=> no checkpoint found at '{}'".format(opt.resume))
    # ----------------------------------------------- #
    # Option: load weights from a pretrain network    #
    # ----------------------------------------------- #
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading pretrained model '{}'".format(opt.pretrained))
            model = utils.loadModel(opt.pretrained, model, True)
        else:
            raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))
    # Select training device
    if opt.cuda:
        print("==========> Setting GPU")
        model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()
        if perceptual is not None: perceptual = perceptual.cuda()
    else:
        print("==========> Setting CPU")
        model = model.cpu()
        criterion = criterion.cpu()
        if perceptual is not None: perceptual = perceptual.cpu()
    # Create containers for the per-validation-interval statistics.
    # Pre-filled with NaN so never-written slots are ignored by the plots;
    # np.full replaces the original np.empty + NaN-fill pairs.
    length = opt.epochs * len(train_loader) // opt.val_interval
    loss_iter  = np.full(length, np.nan, dtype=float)
    perc_iter  = np.full(length, np.nan, dtype=float)
    psnr_iter  = np.full(length, np.nan, dtype=float)
    ssim_iter  = np.full(length, np.nan, dtype=float)
    mse_iter   = np.full(length, np.nan, dtype=float)
    lr_iter    = np.full(length, np.nan, dtype=float)
    iterations = np.full(length, np.nan, dtype=float)
    # Set plotter to plot the loss curves (twin axes only when a perceptual
    # loss curve has to share the panels).
    twinx = (opt.perceptual is not None)
    fig, axis = getFigureSpec(len(train_loader), twinx)
    # Set Model Saving Function
    if opt.save_item == "model":
        print("==========> Save Function: saveModel()")
        saveCheckpoint = utils.saveModel
    elif opt.save_item == "checkpoint":
        print("==========> Save Function: saveCheckpoint()")
        saveCheckpoint = utils.saveCheckpoint
    else:
        raise ValueError("Save Checkpoint Function Error")
    # FIX: `epoch` and `name` were previously returned without ever being
    # assigned in this function (guaranteed NameError). Define them the
    # same way the __main__ entry point does.
    epoch = opt.starts
    name = "{}_{}".format(opt.tag, date.today().strftime("%Y%m%d"))
    return (
        model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch,
        loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, opt,
        name, fig, axis, saveCheckpoint
    )
def main(opt):
    """
    Main training entry point.

    Seeds the RNG, builds datasets/model/losses/optimizer/scheduler,
    optionally restores weights, moves everything to the chosen device,
    allocates the statistic containers, then runs the epoch loop and saves
    a final resume checkpoint after every epoch.

    NOTE(review): reads the module-level global ``name`` (set in the
    ``__main__`` section) for checkpoint/detail paths — confirm this file
    is only run as a script, not imported.

    Parameters
    ----------
    opt : namespace
        The option (hyperparameters) of these model
    """
    if opt.fixrandomseed:
        # Fixed seed for reproducible runs (CPU and, when enabled, GPU).
        seed = 1334
        torch.manual_seed(seed)
        if opt.cuda:
            torch.cuda.manual_seed(seed)
    print("==========> Loading datasets")
    # ImageNet mean/std normalization is optional (opt.normalize).
    img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()
    # Dataset
    train_loader, val_loader = getDataset(opt, img_transform)
    # TODO: Parameters Selection
    # TODO: Mean shift Layer Handling
    # Load Model
    print("==========> Building model")
    model = ImproveNet(opt.rb)
    # ----------------------------------------------- #
    #   Loss: L1 Norm / L2 Norm                       #
    #   Perceptual Model (Optional)                   #
    #   TODO Append Layer (Optional)                  #
    # ----------------------------------------------- #
    criterion = nn.MSELoss(reduction='mean')
    # Perceptual network is frozen in eval mode; only used to compute loss.
    perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()
    # ----------------------------------------------- #
    #   Optimizer and learning rate scheduler         #
    # ----------------------------------------------- #
    print("==========> Setting Optimizer: {}".format(opt.optimizer))
    optimizer = getOptimizer(model, opt)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
    # ----------------------------------------------- #
    # Option: resume training process from checkpoint #
    # ----------------------------------------------- #
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
        else:
            raise Exception("=> no checkpoint found at '{}'".format(opt.resume))
    # ----------------------------------------------- #
    # Option: load weights from a pretrain network    #
    # ----------------------------------------------- #
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading pretrained model '{}'".format(opt.pretrained))
            model = utils.loadModel(opt.pretrained, model, True)
        else:
            raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))
    # Select training device
    if opt.cuda:
        print("==========> Setting GPU")
        model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()
        if perceptual is not None:
            perceptual = perceptual.cuda()
    else:
        print("==========> Setting CPU")
        model = model.cpu()
        criterion = criterion.cpu()
        if perceptual is not None:
            perceptual = perceptual.cpu()
    # Create container
    # One slot per validation interval; NaN marks not-yet-reached slots so
    # the plotting code can skip them.
    length = opt.epochs * len(train_loader) // opt.val_interval
    loss_iter = np.empty(length, dtype=float)
    perc_iter = np.empty(length, dtype=float)
    psnr_iter = np.empty(length, dtype=float)
    ssim_iter = np.empty(length, dtype=float)
    mse_iter = np.empty(length, dtype=float)
    lr_iter = np.empty(length, dtype=float)
    iterations = np.empty(length, dtype=float)
    loss_iter[:] = np.nan
    perc_iter[:] = np.nan
    psnr_iter[:] = np.nan
    ssim_iter[:] = np.nan
    mse_iter[:] = np.nan
    lr_iter[:] = np.nan
    iterations[:] = np.nan
    # Set plotter to plot the loss curves
    twinx = (opt.perceptual is not None)
    fig, axis = getFigureSpec(len(train_loader), twinx)
    # Set Model Saving Function
    if opt.save_item == "model":
        print("==========> Save Function: saveModel()")
        saveCheckpoint = utils.saveModel
    elif opt.save_item == "checkpoint":
        print("==========> Save Function: saveCheckpoint()")
        saveCheckpoint = utils.saveCheckpoint
    else:
        raise ValueError("Save Checkpoint Function Error")
    # Start Training
    print("==========> Training")
    for epoch in range(opt.starts, opt.epochs + 1):
        loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, _, _ = train(
            model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch,
            loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations,
            opt, name, fig, axis, saveCheckpoint
        )
        scheduler.step()
        # Save the last checkpoint for resume training
        utils.saveCheckpoint(os.path.join(opt.checkpoints, name, "final.pth"), model, optimizer, scheduler, epoch, len(train_loader))
    # TODO: Fine tuning
    return
def train(model, optimizer, criterion, perceptual, train_loader, val_loader,
          scheduler: optim.lr_scheduler.MultiStepLR, epoch: int, loss_iter,
          perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iters, opt, name,
          fig: matplotlib.figure.Figure, ax: matplotlib.axes.Axes,
          saveCheckpoint=utils.saveCheckpoint):
    """
    Main function of training and validation for a single epoch.

    Parameters
    ----------
    model, optimizer, criterion : nn.Module, optim.Optimizer, nn.Module
        The main elements of the Neural Network
    perceptual : {nn.Module, None} optional
        Pass None or a pretrained Neural Network to calculate perceptual loss
    train_loader, val_loader : DataLoader
        The training and validation dataset
    scheduler : optim.lr_scheduler.MultiStepLR
        Learning rate scheduler
    epoch : int
        The processing train epoch (1-based)
    loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iters : 1D-Array like
        The containers to record the training performance, one slot per
        validation interval
    opt : namespace
        The training option
    name : str
        Run identifier; used to build checkpoint and detail paths
    fig, ax : matplotlib.figure.Figure, matplotlib.axes.Axes
        Figure and axes list produced by getFigureSpec()
    saveCheckpoint : callable
        Function used to persist the model at opt.save_interval

    Return
    ------
    tuple
        (loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter,
         iters, fig, ax) — the updated statistic containers and plot objects.
    """
    trainloss, perceloss = [], []
    for iteration, (data, label) in enumerate(train_loader, 1):
        # Global step counter across epochs.
        steps = len(train_loader) * (epoch - 1) + iteration
        model.train()
        # ----------------------------------------------------- #
        # Handling:                                             #
        #   1. Perceptual Loss                                  #
        #   2. Multiscaling                                     #
        #      2.0 Without Multiscaling (multiscaling = [1.0])  #
        #      2.1 Regular Multiscaling                         #
        #      2.2 Random Multiscaling                          #
        # ----------------------------------------------------- #
        # 2.0 Without Multiscaling
        if opt.multiscale == [1.0]:
            optimizer.zero_grad()
            data, label = data.to(device), label.to(device)
            output = model(data)
            # Calculate loss
            image_loss = criterion(output, label)
            if perceptual is not None: perceptual_loss = perceptual(output, label)
            # Backpropagation
            # FIX: was `percuptual_loss` (NameError whenever a perceptual
            # model is enabled) — same fix applied in branches 2.1 and 2.2.
            loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
            loss.backward()
            optimizer.step()
            # Record the training loss
            trainloss.append(image_loss.item())
            if perceptual is not None: perceloss.append(perceptual_loss.item())
        # TODO: Efficient Issue
        # TODO: Resizing Loss
        # 2.1 Regular Multiscaling
        elif not opt.multiscaleShuffle:
            data, label = data.to(device), label.to(device)
            # NOTE(review): for NCHW tensors shape[1:3] is (channels, height),
            # not (width, height) — confirm the intended layout.
            originWidth, originHeight = data.shape[1:3]
            for scale in opt.multiscale:
                optimizer.zero_grad()
                if scale != 1.0:
                    newSize = (int(originWidth * scale), int(originHeight * scale))
                    data, label = Resize(size=newSize)(data), Resize(size=newSize)(label)
                output = model(data)
                # Calculate loss
                image_loss = criterion(output, label)
                if perceptual is not None: perceptual_loss = perceptual(output, label)
                # Backpropagation
                loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
                loss.backward()
                optimizer.step()
                # Record the training loss
                trainloss.append(image_loss.item())
                if perceptual is not None: perceloss.append(perceptual_loss.item())
        # TODO: Check Usage
        # 2.2 Random Multiscaling: one randomly chosen scale per batch
        else:
            optimizer.zero_grad()
            data, label = data.to(device), label.to(device)
            originWidth, originHeight = data.shape[1:3]
            scale = np.random.choice(opt.multiscale, 1)
            if scale != 1.0:
                newSize = (int(originWidth * scale), int(originHeight * scale))
                data, label = Resize(size=newSize)(data), Resize(size=newSize)(label)
            output = model(data)
            # Calculate loss
            image_loss = criterion(output, label)
            if perceptual is not None: perceptual_loss = perceptual(output, label)
            # Backpropagation
            loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
            loss.backward()
            optimizer.step()
            # Record the training loss
            trainloss.append(image_loss.item())
            if perceptual is not None: perceloss.append(perceptual_loss.item())
        # ----------------------------------------------------- #
        # Execute for a period                                  #
        #   1. Print the training message                       #
        #   2. Plot the gradient of each layer (Deprecated)     #
        #   3. Validate the model                               #
        #   4. Saving the network                               #
        # ----------------------------------------------------- #
        # 1. Print the training message
        if steps % opt.log_interval == 0:
            msg = "===> [Epoch {}] [{:4d}/{:4d}] ImgLoss: (Mean: {:.6f}, Std: {:.6f})".format(
                epoch, iteration, len(train_loader), np.mean(trainloss), np.std(trainloss)
            )
            if not perceptual is None:
                msg = "\t".join([msg, "PerceptualLoss: (Mean: {:.6f}, Std: {:.6f})".format(np.mean(perceloss), np.std(perceloss))])
            print(msg)
        # 2. Print the gradient statistic message for each layer
        # graphs.draw_gradient()
        # 3. Save the model
        if steps % opt.save_interval == 0:
            checkpoint_path = os.path.join(opt.checkpoints, name, "{}.pth".format(steps))
            saveCheckpoint(checkpoint_path, model, optimizer, scheduler, epoch, iteration)
        # 4. Validating the network
        if steps % opt.val_interval == 0:
            mse, psnr = validate(model, val_loader, criterion, epoch, iteration, normalize=opt.normalize)
            idx = steps // opt.val_interval - 1
            loss_iter[idx] = np.mean(trainloss)
            mse_iter[idx] = mse
            psnr_iter[idx] = psnr
            lr_iter[idx] = optimizer.param_groups[0]["lr"]
            iters[idx] = steps / len(train_loader)
            if perceptual is not None: perc_iter[idx] = np.mean(perceloss)
            # Clean up the lists so the next interval's statistics start fresh.
            # FIX: was `trainloss, preceloss = [], []` — the typo meant
            # `perceloss` was never cleared and its mean accumulated across
            # intervals.
            trainloss, perceloss = [], []
            # Save the loss
            df = pd.DataFrame(data={
                'Iterations': iters * len(train_loader),
                'TrainL2Loss': loss_iter,
                'TrainPerceptual': perc_iter,
                'ValidationLoss': mse_iter,
                'ValidationPSNR': psnr_iter
            })
            # Loss (Training Curve) Message: top-5 PSNR rows first, then all.
            # NOTE(review): DataFrame.append was removed in pandas 2.0 —
            # switch to pd.concat if the environment upgrades.
            df = df.nlargest(5, 'ValidationPSNR').append(df)
            df.to_excel(os.path.join(opt.detail, name, "statistical.xlsx"))
            # Show images in grid with validation set
            # graphs.grid_show()
            # Plot TrainLoss, ValidationLoss
            fig, ax = training_curve(
                loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, iters, lr_iter,
                epoch, len(train_loader), fig, ax
            )
            plt.tight_layout()
            plt.savefig(os.path.join(opt.detail, name, "loss.png"))
    return loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iters, fig, ax
def training_curve(train_loss, perc_iter, val_loss, psnr, ssim, x, lr, epoch, iters_per_epoch,
                   fig: matplotlib.figure.Figure, axis: matplotlib.axes.Axes, linewidth=0.25):
    """
    Redraw the four training-curve panels: loss (linear), loss (log),
    validation PSNR, and learning rate.

    Parameters
    ----------
    train_loss, perc_iter, val_loss, psnr, ssim, x, lr : 1D-array like
        Statistic series indexed by validation interval; x is in epochs.
        Perceptual curves are drawn only when perc_iter holds any non-NaN.
    epoch : int
        Current epoch (unused, kept for interface compatibility)
    iters_per_epoch : int
        Shown in the x-axis labels
    fig, axis : matplotlib.figure.Figure, matplotlib.axes.Axes
        Matplotlib plotting objects; axis[4]/axis[5] are the twin axes for
        the perceptual curves when present.
    linewidth : float
        Default linewidth

    Return
    ------
    fig, axis : matplotlib.figure.Figure, matplotlib.axes.Axes
        The training curve
    """
    has_perceptual = not np.isnan(perc_iter).all()
    best_val = np.repeat(np.amin(val_loss), len(x))

    def _draw_loss_panel(panel_idx, twin_idx, log_scale):
        # Shared drawing for the linear (axis[0]) and log (axis[1]) loss
        # panels; the perceptual curve goes on the paired twin axis.
        panel = axis[panel_idx]
        panel.clear()
        val_line, = panel.plot(x, val_loss, label="Validation Loss", color='red', linewidth=linewidth)
        train_line, = panel.plot(x, train_loss, label="Train Loss", color='blue', linewidth=linewidth)
        panel.plot(x, best_val, linestyle=':', linewidth=linewidth)
        panel.set_xlabel("Epoch(s) / Iteration: {}".format(iters_per_epoch))
        if log_scale:
            panel.set_yscale('log')
            panel.set_title("Loss(Log scale)")
        else:
            panel.set_ylabel("Image Loss")
            panel.set_title("Loss")
        if has_perceptual:
            twin = axis[twin_idx]
            twin.clear()
            perc_line, = twin.plot(x, perc_iter, label="Perceptual Loss", color='green', linewidth=linewidth)
            twin.set_ylabel("Perceptual Loss")
            twin.legend(handles=(val_line, train_line, perc_line, ))
        else:
            panel.legend(handles=(val_line, train_line, ))

    # Loss curves: linear and log scale.
    _draw_loss_panel(0, 4, log_scale=False)
    _draw_loss_panel(1, 5, log_scale=True)

    # Validation PSNR with a dotted best-so-far reference line.
    psnr_panel = axis[2]
    psnr_panel.clear()
    psnr_line, = psnr_panel.plot(x, psnr, label="PSNR", color='blue', linewidth=linewidth)
    psnr_panel.plot(x, np.repeat(np.amax(psnr), len(x)), linestyle=':', linewidth=linewidth)
    psnr_panel.set_xlabel("Epochs(s) / Iteration: {}".format(iters_per_epoch))
    psnr_panel.set_ylabel("Average PSNR")
    psnr_panel.set_title("Validation Performance")
    psnr_panel.legend(handles=(psnr_line, ))

    # Learning-rate schedule on a log scale.
    lr_panel = axis[3]
    lr_panel.clear()
    lr_line, = lr_panel.plot(x, lr, label="Learning Rate", color='cyan', linewidth=linewidth)
    lr_panel.set_xlabel("Epochs(s) / Iteration: {}".format(iters_per_epoch))
    lr_panel.set_title("Learning Rate")
    lr_panel.set_yscale('log')
    lr_panel.legend(handles=(lr_line, ))
    return fig, axis
def validate(model: nn.Module, loader: DataLoader, criterion: nn.Module, epoch, iteration, normalize=False):
    """
    Run the model over the validation set and report average MSE / PSNR.

    NOTE(review): relies on module-level globals ``device`` and, when
    ``normalize`` is True, the ``mean`` / ``std`` tensors matching the
    dataset transform — confirm they are defined before this is called.

    Parameters
    ----------
    model : nn.Module
        The neural network to evaluate
    loader : torch.utils.data.DataLoader
        The validation data
    criterion : nn.Module
        Loss function used as the image metric (MSE)
    epoch, iteration : int
        Current training position; used only in the log message
    normalize : bool
        If True, undo the dataset normalization before measuring so the
        metrics are computed on [0, 1] pixel values.

    Return
    ------
    mse, psnr : np.float
        np.mean(mses) and np.mean(psnrs)
    """
    psnrs, mses = [], []
    model.eval()
    with torch.no_grad():
        for index, (data, label) in enumerate(loader, 1):
            data, label = data.to(device), label.to(device)
            output = model(data)
            if normalize:
                # Undo Normalize(mean, std) on every tensor before measuring.
                data = data * std[:, None, None] + mean[:, None, None]
                label = label * std[:, None, None] + mean[:, None, None]
                output = output * std[:, None, None] + mean[:, None, None]
            # FIX: the original appended the pre-denormalization MSE *and*
            # the post-denormalization MSE (two entries per batch), which
            # skewed the reported mean. Each batch now contributes exactly
            # one MSE/PSNR pair, measured after optional denormalization.
            mse = criterion(output, label).item()
            psnr = 10 * np.log10(1.0 / mse)
            mses.append(mse)
            psnrs.append(psnr)
    print("===> [Epoch {}] [ Vaild ] MSE: {:.6f}, PSNR: {:.4f}".format(epoch, np.mean(mses), np.mean(psnrs)))
    return np.mean(mses), np.mean(psnrs)
if __name__ == "__main__":
    # Clean up OS screen
    os.system('clear')
    # Cmd Parser
    parser = cmdparser.parser
    opt = parser.parse_args()
    # Check arguments
    if opt.cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    # FIX: reworded garbled messages below (was "should not be True in the
    # same time" / "doesn't not exists").
    if opt.resume and opt.pretrained:
        raise ValueError("opt.resume and opt.pretrained must not both be set at the same time.")
    if opt.resume and (not os.path.isfile(opt.resume)):
        raise ValueError("{} doesn't exist".format(opt.resume))
    if opt.pretrained and (not os.path.isfile(opt.pretrained)):
        raise ValueError("{} doesn't exist".format(opt.pretrained))
    # Check training dataset directory
    for path in opt.train:
        if not os.path.exists(path):
            raise ValueError("{} doesn't exist".format(path))
    # Check validation dataset directory
    for path in opt.val:
        if not os.path.exists(path):
            raise ValueError("{} doesn't exist".format(path))
    # Make checkpoint storage directory, tagged with today's date
    name = "{}_{}".format(opt.tag, date.today().strftime("%Y%m%d"))
    os.makedirs(os.path.join(opt.checkpoints, name), exist_ok=True)
    # Snapshot the model source next to the logs and checkpoints so the run
    # can be reproduced later; remove stale copies first because
    # shutil.copytree refuses to overwrite an existing destination.
    if os.path.exists(os.path.join(opt.detail, name, 'model')):
        shutil.rmtree(os.path.join(opt.detail, name, 'model'))
    if os.path.exists(os.path.join(opt.checkpoints, name, 'model')):
        shutil.rmtree(os.path.join(opt.checkpoints, name, 'model'))
    shutil.copytree('./model', os.path.join(opt.detail, name, 'model'))
    shutil.copytree('./model', os.path.join(opt.checkpoints, name, 'model'))
    # Also keep a copy of this training script beside the logs.
    shutil.copyfile(__file__, os.path.join(opt.detail, name, os.path.basename(__file__)))
    # Show Detail
    print('==========> Training setting')
    utils.details(opt, os.path.join(opt.detail, name, 'args.txt'))
    # Execute main process
    main(opt)
| 33.648205 | 140 | 0.572073 |
02fe1589d692043102c05d5d014222183830f3c7 | 45,373 | py | Python | clients/python/core_pb2.py | cloudwheels/grpc-test-gateway | 5fe6564804cc1dfd2761138977d9282519b8ffc6 | [
"MIT"
] | 3 | 2020-05-01T15:27:18.000Z | 2020-05-28T15:11:34.000Z | clients/python/core_pb2.py | cloudwheels/grpc-test-gateway | 5fe6564804cc1dfd2761138977d9282519b8ffc6 | [
"MIT"
] | null | null | null | clients/python/core_pb2.py | cloudwheels/grpc-test-gateway | 5fe6564804cc1dfd2761138977d9282519b8ffc6 | [
"MIT"
] | 3 | 2020-09-15T17:24:52.000Z | 2021-07-07T10:01:25.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: core.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='core.proto',
package='org.dash.platform.dapi.v0',
syntax='proto3',
serialized_pb=_b('\n\ncore.proto\x12\x19org.dash.platform.dapi.v0\"\x12\n\x10GetStatusRequest\"\xe5\x01\n\x11GetStatusResponse\x12\x14\n\x0c\x63ore_version\x18\x01 \x01(\r\x12\x18\n\x10protocol_version\x18\x02 \x01(\r\x12\x0e\n\x06\x62locks\x18\x03 \x01(\r\x12\x13\n\x0btime_offset\x18\x04 \x01(\r\x12\x13\n\x0b\x63onnections\x18\x05 \x01(\r\x12\r\n\x05proxy\x18\x06 \x01(\t\x12\x12\n\ndifficulty\x18\x07 \x01(\x01\x12\x0f\n\x07testnet\x18\x08 \x01(\x08\x12\x11\n\trelay_fee\x18\t \x01(\x01\x12\x0e\n\x06\x65rrors\x18\n \x01(\t\x12\x0f\n\x07network\x18\x0b \x01(\t\"<\n\x0fGetBlockRequest\x12\x10\n\x06height\x18\x01 \x01(\rH\x00\x12\x0e\n\x04hash\x18\x02 \x01(\tH\x00\x42\x07\n\x05\x62lock\"!\n\x10GetBlockResponse\x12\r\n\x05\x62lock\x18\x01 \x01(\x0c\"]\n\x16SendTransactionRequest\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\x12\x17\n\x0f\x61llow_high_fees\x18\x02 \x01(\x08\x12\x15\n\rbypass_limits\x18\x03 \x01(\x08\"1\n\x17SendTransactionResponse\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\"#\n\x15GetTransactionRequest\x12\n\n\x02id\x18\x01 \x01(\t\"-\n\x16GetTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"x\n!BlockHeadersWithChainLocksRequest\x12\x19\n\x0f\x66rom_block_hash\x18\x01 \x01(\x0cH\x00\x12\x1b\n\x11\x66rom_block_height\x18\x02 \x01(\rH\x00\x12\r\n\x05\x63ount\x18\x03 \x01(\rB\x0c\n\nfrom_block\"\xd3\x01\n\"BlockHeadersWithChainLocksResponse\x12@\n\rblock_headers\x18\x01 \x01(\x0b\x32\'.org.dash.platform.dapi.v0.BlockHeadersH\x00\x12^\n\x1d\x63hain_lock_signature_messages\x18\x02 \x01(\x0b\x32\x35.org.dash.platform.dapi.v0.ChainLockSignatureMessagesH\x00\x42\x0b\n\tresponses\"\x1f\n\x0c\x42lockHeaders\x12\x0f\n\x07headers\x18\x01 \x03(\x0c\".\n\x1a\x43hainLockSignatureMessages\x12\x10\n\x08messages\x18\x01 \x03(\x0c\"3\n!GetEstimatedTransactionFeeRequest\x12\x0e\n\x06\x62locks\x18\x01 \x01(\r\"1\n\"GetEstimatedTransactionFeeResponse\x12\x0b\n\x03\x66\x65\x65\x18\x01 
\x01(\x01\x32\x89\x06\n\x04\x43ore\x12\x66\n\tgetStatus\x12+.org.dash.platform.dapi.v0.GetStatusRequest\x1a,.org.dash.platform.dapi.v0.GetStatusResponse\x12\x63\n\x08getBlock\x12*.org.dash.platform.dapi.v0.GetBlockRequest\x1a+.org.dash.platform.dapi.v0.GetBlockResponse\x12x\n\x0fsendTransaction\x12\x31.org.dash.platform.dapi.v0.SendTransactionRequest\x1a\x32.org.dash.platform.dapi.v0.SendTransactionResponse\x12u\n\x0egetTransaction\x12\x30.org.dash.platform.dapi.v0.GetTransactionRequest\x1a\x31.org.dash.platform.dapi.v0.GetTransactionResponse\x12\x99\x01\n\x1agetEstimatedTransactionFee\x12<.org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest\x1a=.org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse\x12\xa6\x01\n%subscribeToBlockHeadersWithChainLocks\x12<.org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest\x1a=.org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse0\x01\x62\x06proto3')
)
_GETSTATUSREQUEST = _descriptor.Descriptor(
name='GetStatusRequest',
full_name='org.dash.platform.dapi.v0.GetStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=59,
)
_GETSTATUSRESPONSE = _descriptor.Descriptor(
name='GetStatusResponse',
full_name='org.dash.platform.dapi.v0.GetStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='core_version', full_name='org.dash.platform.dapi.v0.GetStatusResponse.core_version', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol_version', full_name='org.dash.platform.dapi.v0.GetStatusResponse.protocol_version', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blocks', full_name='org.dash.platform.dapi.v0.GetStatusResponse.blocks', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time_offset', full_name='org.dash.platform.dapi.v0.GetStatusResponse.time_offset', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='connections', full_name='org.dash.platform.dapi.v0.GetStatusResponse.connections', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='proxy', full_name='org.dash.platform.dapi.v0.GetStatusResponse.proxy', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='difficulty', full_name='org.dash.platform.dapi.v0.GetStatusResponse.difficulty', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='testnet', full_name='org.dash.platform.dapi.v0.GetStatusResponse.testnet', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relay_fee', full_name='org.dash.platform.dapi.v0.GetStatusResponse.relay_fee', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='errors', full_name='org.dash.platform.dapi.v0.GetStatusResponse.errors', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='network', full_name='org.dash.platform.dapi.v0.GetStatusResponse.network', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=291,
)
_GETBLOCKREQUEST = _descriptor.Descriptor(
name='GetBlockRequest',
full_name='org.dash.platform.dapi.v0.GetBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='org.dash.platform.dapi.v0.GetBlockRequest.height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hash', full_name='org.dash.platform.dapi.v0.GetBlockRequest.hash', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='block', full_name='org.dash.platform.dapi.v0.GetBlockRequest.block',
index=0, containing_type=None, fields=[]),
],
serialized_start=293,
serialized_end=353,
)
_GETBLOCKRESPONSE = _descriptor.Descriptor(
name='GetBlockResponse',
full_name='org.dash.platform.dapi.v0.GetBlockResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='block', full_name='org.dash.platform.dapi.v0.GetBlockResponse.block', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=388,
)
_SENDTRANSACTIONREQUEST = _descriptor.Descriptor(
name='SendTransactionRequest',
full_name='org.dash.platform.dapi.v0.SendTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_high_fees', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.allow_high_fees', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bypass_limits', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.bypass_limits', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=390,
serialized_end=483,
)
_SENDTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='SendTransactionResponse',
full_name='org.dash.platform.dapi.v0.SendTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='org.dash.platform.dapi.v0.SendTransactionResponse.transaction_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=485,
serialized_end=534,
)
_GETTRANSACTIONREQUEST = _descriptor.Descriptor(
name='GetTransactionRequest',
full_name='org.dash.platform.dapi.v0.GetTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.dash.platform.dapi.v0.GetTransactionRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=536,
serialized_end=571,
)
_GETTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='GetTransactionResponse',
full_name='org.dash.platform.dapi.v0.GetTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='org.dash.platform.dapi.v0.GetTransactionResponse.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=573,
serialized_end=618,
)
_BLOCKHEADERSWITHCHAINLOCKSREQUEST = _descriptor.Descriptor(
name='BlockHeadersWithChainLocksRequest',
full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='from_block_hash', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='from_block_height', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block_height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='count', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.count', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='from_block', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block',
index=0, containing_type=None, fields=[]),
],
serialized_start=620,
serialized_end=740,
)
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE = _descriptor.Descriptor(
name='BlockHeadersWithChainLocksResponse',
full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='block_headers', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.block_headers', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chain_lock_signature_messages', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.chain_lock_signature_messages', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='responses', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.responses',
index=0, containing_type=None, fields=[]),
],
serialized_start=743,
serialized_end=954,
)
_BLOCKHEADERS = _descriptor.Descriptor(
name='BlockHeaders',
full_name='org.dash.platform.dapi.v0.BlockHeaders',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='headers', full_name='org.dash.platform.dapi.v0.BlockHeaders.headers', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=956,
serialized_end=987,
)
_CHAINLOCKSIGNATUREMESSAGES = _descriptor.Descriptor(
name='ChainLockSignatureMessages',
full_name='org.dash.platform.dapi.v0.ChainLockSignatureMessages',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='messages', full_name='org.dash.platform.dapi.v0.ChainLockSignatureMessages.messages', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=989,
serialized_end=1035,
)
_GETESTIMATEDTRANSACTIONFEEREQUEST = _descriptor.Descriptor(
name='GetEstimatedTransactionFeeRequest',
full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='blocks', full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest.blocks', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1037,
serialized_end=1088,
)
_GETESTIMATEDTRANSACTIONFEERESPONSE = _descriptor.Descriptor(
name='GetEstimatedTransactionFeeResponse',
full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fee', full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse.fee', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1139,
)
_GETBLOCKREQUEST.oneofs_by_name['block'].fields.append(
_GETBLOCKREQUEST.fields_by_name['height'])
_GETBLOCKREQUEST.fields_by_name['height'].containing_oneof = _GETBLOCKREQUEST.oneofs_by_name['block']
_GETBLOCKREQUEST.oneofs_by_name['block'].fields.append(
_GETBLOCKREQUEST.fields_by_name['hash'])
_GETBLOCKREQUEST.fields_by_name['hash'].containing_oneof = _GETBLOCKREQUEST.oneofs_by_name['block']
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_hash'])
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_hash'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block']
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_height'])
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_height'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block']
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'].message_type = _BLOCKHEADERS
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'].message_type = _CHAINLOCKSIGNATUREMESSAGES
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'])
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses']
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'])
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses']
DESCRIPTOR.message_types_by_name['GetStatusRequest'] = _GETSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GetStatusResponse'] = _GETSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['GetBlockRequest'] = _GETBLOCKREQUEST
DESCRIPTOR.message_types_by_name['GetBlockResponse'] = _GETBLOCKRESPONSE
DESCRIPTOR.message_types_by_name['SendTransactionRequest'] = _SENDTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SendTransactionResponse'] = _SENDTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetTransactionRequest'] = _GETTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['GetTransactionResponse'] = _GETTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['BlockHeadersWithChainLocksRequest'] = _BLOCKHEADERSWITHCHAINLOCKSREQUEST
DESCRIPTOR.message_types_by_name['BlockHeadersWithChainLocksResponse'] = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE
DESCRIPTOR.message_types_by_name['BlockHeaders'] = _BLOCKHEADERS
DESCRIPTOR.message_types_by_name['ChainLockSignatureMessages'] = _CHAINLOCKSIGNATUREMESSAGES
DESCRIPTOR.message_types_by_name['GetEstimatedTransactionFeeRequest'] = _GETESTIMATEDTRANSACTIONFEEREQUEST
DESCRIPTOR.message_types_by_name['GetEstimatedTransactionFeeResponse'] = _GETESTIMATEDTRANSACTIONFEERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetStatusRequest = _reflection.GeneratedProtocolMessageType('GetStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSTATUSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetStatusRequest)
))
_sym_db.RegisterMessage(GetStatusRequest)
GetStatusResponse = _reflection.GeneratedProtocolMessageType('GetStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSTATUSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetStatusResponse)
))
_sym_db.RegisterMessage(GetStatusResponse)
GetBlockRequest = _reflection.GeneratedProtocolMessageType('GetBlockRequest', (_message.Message,), dict(
DESCRIPTOR = _GETBLOCKREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetBlockRequest)
))
_sym_db.RegisterMessage(GetBlockRequest)
GetBlockResponse = _reflection.GeneratedProtocolMessageType('GetBlockResponse', (_message.Message,), dict(
DESCRIPTOR = _GETBLOCKRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetBlockResponse)
))
_sym_db.RegisterMessage(GetBlockResponse)
SendTransactionRequest = _reflection.GeneratedProtocolMessageType('SendTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _SENDTRANSACTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.SendTransactionRequest)
))
_sym_db.RegisterMessage(SendTransactionRequest)
SendTransactionResponse = _reflection.GeneratedProtocolMessageType('SendTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _SENDTRANSACTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.SendTransactionResponse)
))
_sym_db.RegisterMessage(SendTransactionResponse)
GetTransactionRequest = _reflection.GeneratedProtocolMessageType('GetTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTRANSACTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetTransactionRequest)
))
_sym_db.RegisterMessage(GetTransactionRequest)
GetTransactionResponse = _reflection.GeneratedProtocolMessageType('GetTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _GETTRANSACTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetTransactionResponse)
))
_sym_db.RegisterMessage(GetTransactionResponse)
BlockHeadersWithChainLocksRequest = _reflection.GeneratedProtocolMessageType('BlockHeadersWithChainLocksRequest', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERSWITHCHAINLOCKSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest)
))
_sym_db.RegisterMessage(BlockHeadersWithChainLocksRequest)
BlockHeadersWithChainLocksResponse = _reflection.GeneratedProtocolMessageType('BlockHeadersWithChainLocksResponse', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse)
))
_sym_db.RegisterMessage(BlockHeadersWithChainLocksResponse)
BlockHeaders = _reflection.GeneratedProtocolMessageType('BlockHeaders', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeaders)
))
_sym_db.RegisterMessage(BlockHeaders)
ChainLockSignatureMessages = _reflection.GeneratedProtocolMessageType('ChainLockSignatureMessages', (_message.Message,), dict(
DESCRIPTOR = _CHAINLOCKSIGNATUREMESSAGES,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.ChainLockSignatureMessages)
))
_sym_db.RegisterMessage(ChainLockSignatureMessages)
GetEstimatedTransactionFeeRequest = _reflection.GeneratedProtocolMessageType('GetEstimatedTransactionFeeRequest', (_message.Message,), dict(
DESCRIPTOR = _GETESTIMATEDTRANSACTIONFEEREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest)
))
_sym_db.RegisterMessage(GetEstimatedTransactionFeeRequest)
GetEstimatedTransactionFeeResponse = _reflection.GeneratedProtocolMessageType('GetEstimatedTransactionFeeResponse', (_message.Message,), dict(
DESCRIPTOR = _GETESTIMATEDTRANSACTIONFEERESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse)
))
_sym_db.RegisterMessage(GetEstimatedTransactionFeeResponse)
_CORE = _descriptor.ServiceDescriptor(
name='Core',
full_name='org.dash.platform.dapi.v0.Core',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1142,
serialized_end=1919,
methods=[
_descriptor.MethodDescriptor(
name='getStatus',
full_name='org.dash.platform.dapi.v0.Core.getStatus',
index=0,
containing_service=None,
input_type=_GETSTATUSREQUEST,
output_type=_GETSTATUSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getBlock',
full_name='org.dash.platform.dapi.v0.Core.getBlock',
index=1,
containing_service=None,
input_type=_GETBLOCKREQUEST,
output_type=_GETBLOCKRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='sendTransaction',
full_name='org.dash.platform.dapi.v0.Core.sendTransaction',
index=2,
containing_service=None,
input_type=_SENDTRANSACTIONREQUEST,
output_type=_SENDTRANSACTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getTransaction',
full_name='org.dash.platform.dapi.v0.Core.getTransaction',
index=3,
containing_service=None,
input_type=_GETTRANSACTIONREQUEST,
output_type=_GETTRANSACTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getEstimatedTransactionFee',
full_name='org.dash.platform.dapi.v0.Core.getEstimatedTransactionFee',
index=4,
containing_service=None,
input_type=_GETESTIMATEDTRANSACTIONFEEREQUEST,
output_type=_GETESTIMATEDTRANSACTIONFEERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='subscribeToBlockHeadersWithChainLocks',
full_name='org.dash.platform.dapi.v0.Core.subscribeToBlockHeadersWithChainLocks',
index=5,
containing_service=None,
input_type=_BLOCKHEADERSWITHCHAINLOCKSREQUEST,
output_type=_BLOCKHEADERSWITHCHAINLOCKSRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_CORE)
DESCRIPTOR.services_by_name['Core'] = _CORE
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
def add_CoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'getStatus': grpc.unary_unary_rpc_method_handler(
servicer.getStatus,
request_deserializer=GetStatusRequest.FromString,
response_serializer=GetStatusResponse.SerializeToString,
),
'getBlock': grpc.unary_unary_rpc_method_handler(
servicer.getBlock,
request_deserializer=GetBlockRequest.FromString,
response_serializer=GetBlockResponse.SerializeToString,
),
'sendTransaction': grpc.unary_unary_rpc_method_handler(
servicer.sendTransaction,
request_deserializer=SendTransactionRequest.FromString,
response_serializer=SendTransactionResponse.SerializeToString,
),
'getTransaction': grpc.unary_unary_rpc_method_handler(
servicer.getTransaction,
request_deserializer=GetTransactionRequest.FromString,
response_serializer=GetTransactionResponse.SerializeToString,
),
'getEstimatedTransactionFee': grpc.unary_unary_rpc_method_handler(
servicer.getEstimatedTransactionFee,
request_deserializer=GetEstimatedTransactionFeeRequest.FromString,
response_serializer=GetEstimatedTransactionFeeResponse.SerializeToString,
),
'subscribeToBlockHeadersWithChainLocks': grpc.unary_stream_rpc_method_handler(
servicer.subscribeToBlockHeadersWithChainLocks,
request_deserializer=BlockHeadersWithChainLocksRequest.FromString,
response_serializer=BlockHeadersWithChainLocksResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'org.dash.platform.dapi.v0.Core', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
def beta_create_Core_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockRequest.FromString,
('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeRequest.FromString,
('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusRequest.FromString,
('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionRequest.FromString,
('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionRequest.FromString,
('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksRequest.FromString,
}
response_serializers = {
('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockResponse.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeResponse.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusResponse.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionResponse.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionResponse.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksResponse.SerializeToString,
}
method_implementations = {
('org.dash.platform.dapi.v0.Core', 'getBlock'): face_utilities.unary_unary_inline(servicer.getBlock),
('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): face_utilities.unary_unary_inline(servicer.getEstimatedTransactionFee),
('org.dash.platform.dapi.v0.Core', 'getStatus'): face_utilities.unary_unary_inline(servicer.getStatus),
('org.dash.platform.dapi.v0.Core', 'getTransaction'): face_utilities.unary_unary_inline(servicer.getTransaction),
('org.dash.platform.dapi.v0.Core', 'sendTransaction'): face_utilities.unary_unary_inline(servicer.sendTransaction),
('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): face_utilities.unary_stream_inline(servicer.subscribeToBlockHeadersWithChainLocks),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Core_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockRequest.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeRequest.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusRequest.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionRequest.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionRequest.SerializeToString,
('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksRequest.SerializeToString,
}
response_deserializers = {
('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockResponse.FromString,
('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeResponse.FromString,
('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusResponse.FromString,
('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionResponse.FromString,
('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionResponse.FromString,
('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksResponse.FromString,
}
cardinalities = {
'getBlock': cardinality.Cardinality.UNARY_UNARY,
'getEstimatedTransactionFee': cardinality.Cardinality.UNARY_UNARY,
'getStatus': cardinality.Cardinality.UNARY_UNARY,
'getTransaction': cardinality.Cardinality.UNARY_UNARY,
'sendTransaction': cardinality.Cardinality.UNARY_UNARY,
'subscribeToBlockHeadersWithChainLocks': cardinality.Cardinality.UNARY_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'org.dash.platform.dapi.v0.Core', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| 42.885633 | 2,847 | 0.760518 |
02fe97635bdf12eb93fa73109a7854ea036f69bf | 546 | py | Python | python_high/chapter_3/3.1.py | Rolling-meatballs/deepshare | 47c1e599c915ccd0a123fa9ab26e1f20738252ef | [
"MIT"
] | null | null | null | python_high/chapter_3/3.1.py | Rolling-meatballs/deepshare | 47c1e599c915ccd0a123fa9ab26e1f20738252ef | [
"MIT"
] | null | null | null | python_high/chapter_3/3.1.py | Rolling-meatballs/deepshare | 47c1e599c915ccd0a123fa9ab26e1f20738252ef | [
"MIT"
] | null | null | null | name = " alberT"
one = name.rsplit()
print("one:", one)
two = name.index('al', 0)
print("two:", two)
three = name.index('T', -1)
print("three:", three)
four = name.replace('l', 'p')
print("four:", four)
five = name.split('l')
print("five:", five)
six = name.upper()
print("six:", six)
seven = name.lower()
print("seven:", seven)
eight = name[1]
print("eight:", eight )
nine = name[:3]
print("nine:", nine)
ten = name[-2:]
print("ten:", ten)
eleven = name.index("e")
print("eleven:", eleven)
twelve = name[:-1]
print("twelve:", twelve) | 14.756757 | 29 | 0.598901 |
02feb42fde4ca975bc72c9c78d9e0931c5f1d4a2 | 384 | py | Python | src/views/simplepage/models.py | svenvandescheur/svenv.nl-new | c448714853d96ad31d26c825d8b35c4890be40a1 | [
"MIT"
] | null | null | null | src/views/simplepage/models.py | svenvandescheur/svenv.nl-new | c448714853d96ad31d26c825d8b35c4890be40a1 | [
"MIT"
] | null | null | null | src/views/simplepage/models.py | svenvandescheur/svenv.nl-new | c448714853d96ad31d26c825d8b35c4890be40a1 | [
"MIT"
] | null | null | null | from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool
from django.utils.translation import ugettext as _
from filer.fields.image import FilerImageField
extension_pool.register(SimplePageExtension)
| 25.6 | 56 | 0.796875 |
f301917c422d9318495feced737c153caa8bd9a9 | 290 | py | Python | baekjoon/not-classified/10844/10844.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | 2 | 2019-02-08T01:23:07.000Z | 2020-11-19T12:23:52.000Z | baekjoon/not-classified/10844/10844.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | baekjoon/not-classified/10844/10844.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | n = int(input())
s = [[0] * 10 for _ in range(n + 1)]
s[1] = [0] + [1] * 9
mod = 1000 ** 3
for i in range(2, n + 1):
for j in range(0, 9 + 1):
if j >= 1:
s[i][j] += s[i - 1][j - 1]
if j <= 8:
s[i][j] += s[i - 1][j + 1]
print(sum(s[n]) % mod) | 19.333333 | 38 | 0.358621 |
f302cba30df57e2c4fa0a9201628774e666043a8 | 3,021 | py | Python | Ideas/cricket-umpire-assistance-master/visualization/test2.py | hsspratt/Nott-Hawkeye1 | 178f4f0fef62e8699f6057d9d50adfd61a851047 | [
"MIT"
] | null | null | null | Ideas/cricket-umpire-assistance-master/visualization/test2.py | hsspratt/Nott-Hawkeye1 | 178f4f0fef62e8699f6057d9d50adfd61a851047 | [
"MIT"
] | 1 | 2021-11-11T22:15:36.000Z | 2021-11-11T22:15:36.000Z | Ideas/cricket-umpire-assistance-master/visualization/test2.py | hsspratt/Nott-Hawkeye1 | 178f4f0fef62e8699f6057d9d50adfd61a851047 | [
"MIT"
] | null | null | null | ### INITIALIZE VPYTHON
# -----------------------------------------------------------------------
from __future__ import division
from visual import *
from physutil import *
from visual.graph import *
### SETUP ELEMENTS FOR GRAPHING, SIMULATION, VISUALIZATION, TIMING
# ------------------------------------------------------------------------
# Set window title
scene.title = "Projectile Motion Particle Model"
# Make scene background black
scene.background = color.black
# Define scene objects (units are in meters)
field = box(pos = vector(0, 0, 0), size = (300, 10, 100), color = color.green, opacity = 0.3)
ball = sphere(radius = 5, color = color.blue)
# Define axis marks the field with a specified number of tick marks
xaxis = PhysAxis(field, 10) # 10 tick marks
yaxis = PhysAxis(field, 5, # 5 tick marks
axisType = "y",
labelOrientation = "left",
startPos = vector(-150, 0, 0), # start the y axis at the left edge of the scene
length = 100) # units are in meters
# Set up graph with two plots
posgraph = PhysGraph(2)
# Set up trail to mark the ball's trajectory
trail = curve(color = color.yellow, radius = 1) # units are in meters
# Set up motion map for ball
motionMap = MotionMap(ball, 8.163, # expected end time in seconds
10, # number of markers to draw
labelMarkerOffset = vector(0, -20, 0),
dropTime = False)
# Set timer in top right of screen
timerDisplay = PhysTimer(140, 150) # timer position (units are in meters)
### SETUP PARAMETERS AND INITIAL CONDITIONS
# ----------------------------------------------------------------------------------------
# Define parameters
ball.m = 0.6 # mass of ball in kg
ball.pos = vector(-150, 0, 0) # initial position of the ball in(x, y, z) form, units are in meters
ball.v = vector(30, 40, 0) # initial velocity of car in (vx, vy, vz) form, units are m/s
g = vector(0, -9.8, 0) # acceleration due to gravity; units are m/s/s
# Define time parameters
t = 0 # starting time
deltat = 0.001 # time step units are s
### CALCULATION LOOP; perform physics updates and drawing
# ------------------------------------------------------------------------------------
while ball.pos.y >= 0 : #while the ball's y-position is greater than 0 (above the ground)
# Required to make animation visible / refresh smoothly (keeps program from running faster
# than 1000 frames/s)
rate(1000)
# Compute Net Force
Fnet = ball.m * g
# Newton's 2nd Law
ball.v = ball.v + (Fnet/ball.m * deltat)
# Position update
ball.pos = ball.pos + ball.v * deltat
# Update motion map, graph, timer, and trail
motionMap.update(t, ball.v)
posgraph.plot(t, ball.pos.x, ball.pos.y) # plot x and y position vs. time
trail.append(pos = ball.pos)
timerDisplay.update(t)
# Time update
t = t + deltat
### OUTPUT
# --------------------------------------------------------------------------------------
# Print the final time and the ball's final position
print t
print ball.pos | 32.138298 | 98 | 0.589209 |
f3041c623ca233066149adf01d25baef21dbb909 | 727 | py | Python | parking_systems/models.py | InaraShalfei/parking_system | f1b326f12037808ab80e3b1d6b305235ba59a0db | [
"MIT"
] | null | null | null | parking_systems/models.py | InaraShalfei/parking_system | f1b326f12037808ab80e3b1d6b305235ba59a0db | [
"MIT"
] | null | null | null | parking_systems/models.py | InaraShalfei/parking_system | f1b326f12037808ab80e3b1d6b305235ba59a0db | [
"MIT"
] | null | null | null | from django.db import models
| 33.045455 | 119 | 0.671252 |
f30518d94f19b9e7816aaf41734cf24e7b19c736 | 4,875 | py | Python | sktime/classification/kernel_based/_rocket_classifier.py | ltoniazzi/sktime | 0ea07803115c1ec7463dde99f049b131d639f4a7 | [
"BSD-3-Clause"
] | 1 | 2021-11-02T18:56:12.000Z | 2021-11-02T18:56:12.000Z | sktime/classification/kernel_based/_rocket_classifier.py | ltoniazzi/sktime | 0ea07803115c1ec7463dde99f049b131d639f4a7 | [
"BSD-3-Clause"
] | null | null | null | sktime/classification/kernel_based/_rocket_classifier.py | ltoniazzi/sktime | 0ea07803115c1ec7463dde99f049b131d639f4a7 | [
"BSD-3-Clause"
] | 1 | 2021-04-30T08:12:18.000Z | 2021-04-30T08:12:18.000Z | # -*- coding: utf-8 -*-
"""RandOm Convolutional KErnel Transform (ROCKET)."""
__author__ = "Matthew Middlehurst"
__all__ = ["ROCKETClassifier"]
import numpy as np
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.utils.multiclass import class_distribution
from sktime.classification.base import BaseClassifier
from sktime.transformations.panel.rocket import Rocket
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
| 30.85443 | 84 | 0.611487 |
f3052e2208b42e9e168f9e6bcc11e27d4f1b41d3 | 9,922 | py | Python | mc/opcodes.py | iximeow/binja-m16c | debf368e5df90a96d6c8b0bc128626a9d6834bb4 | [
"0BSD"
] | 12 | 2020-01-15T00:51:06.000Z | 2021-10-02T12:45:50.000Z | mc/opcodes.py | iximeow/binja-m16c | debf368e5df90a96d6c8b0bc128626a9d6834bb4 | [
"0BSD"
] | 2 | 2020-02-03T08:26:26.000Z | 2020-07-01T19:51:44.000Z | mc/opcodes.py | iximeow/binja-m16c | debf368e5df90a96d6c8b0bc128626a9d6834bb4 | [
"0BSD"
] | 4 | 2020-02-03T07:51:12.000Z | 2021-02-14T19:13:07.000Z | import re
from . import tables
from .instr import Instruction
from .instr.nop import *
from .instr.alu import *
from .instr.bcd import *
from .instr.bit import *
from .instr.flag import *
from .instr.mov import *
from .instr.smov import *
from .instr.ld_st import *
from .instr.stack import *
from .instr.jmp import *
from .instr.call import *
from .instr.ctx import *
from .instr.trap import *
enumerations = {
'R': tables.rx_ax,
'I': tables.dsp8_dsp16_abs16,
'6': tables.dsp8_abs16,
'7': tables.r0x_r0y_dsp8_abs16,
'8': tables.r0x_dsp8_abs16,
'A': tables.reg16_dsp8_dsp16_dsp20_abs16,
'E': tables.reg8l_dsp8_dsp16_abs16,
'N': tables.reg8_dsp8_dsp16_abs16,
'C': tables.creg,
'J': tables.cnd_j3,
'K': tables.cnd_j4,
'M': tables.cnd_bm4,
}
encodings = {
'0111_011z_1111_dddd': AbsReg,
'0111_011z_0110_dddd': AdcImm,
'1011_000z_ssss_dddd': AdcReg,
'0111_011z_1110_dddd': Adcf,
'0111_011z_0100_dddd': AddImm,
'1100_100z_iiii_dddd': AddImm4,
'1000_0DDD;8': AddImm8,
'1010_000z_ssss_dddd': AddReg,
'0010_0DSS;7': AddReg8,
'0111_110z_1110_1011': AddImmSP,
'0111_1101_1011_iiii': AddImm4SP,
'1111_100z_iiii_dddd': Adjnz,
'0111_011z_0010_dddd': AndImm,
'1001_0DDD;8': AndImm8,
'1001_000z_ssss_dddd': AndReg,
'0001_0DSS;7': AndReg8,
'0111_1110_0100_ssss': Band,
'0111_1110_1000_dddd': Bclr,
'0100_0bbb': BclrSB,
'0111_1110_0010_dddd': Bmcnd,
'0111_1101_1101_CCCC;M': BmcndC,
'0111_1110_0101_ssss': Bnand,
'0111_1110_0111_ssss': Bnor,
'0111_1110_1010_dddd': Bnot,
'0101_0bbb': BnotSB,
'0111_1110_0011_ssss': Bntst,
'0111_1110_1101_ssss': Bnxor,
'0111_1110_0110_ssss': Bor,
'0111_1110_1001_dddd': Bset,
'0100_1bbb': BsetSB,
'0111_1110_1011_ssss': Btst,
'0101_1bbb': BtstSB,
'0111_1110_0000_dddd': Btstc,
'0111_1110_0001_dddd': Btsts,
'0111_1110_1100_ssss': Bxor,
'0000_0000': Brk,
'0111_011z_1000_dddd': CmpImm,
'1101_000z_iiii_dddd': CmpImm4,
'1110_0DDD;8': CmpImm8,
'1100_000z_ssss_dddd': CmpReg,
'0011_1DSS;7': CmpReg8,
'0111_1100_1110_1110': DadcImm8,
'0111_1101_1110_1110': DadcImm16,
'0111_1100_1110_0110': DadcReg8,
'0111_1101_1110_0110': DadcReg16,
'0111_1100_1110_1100': DaddImm8,
'0111_1101_1110_1100': DaddImm16,
'0111_1100_1110_0100': DaddReg8,
'0111_1101_1110_0100': DaddReg16,
'1010_1DDD;8': Dec,
'1111_d010': DecAdr,
'0111_110z_1110_0001': DivImm,
'0111_011z_1101_ssss': DivReg,
'0111_110z_1110_0000': DivuImm,
'0111_011z_1100_ssss': DivuReg,
'0111_110z_1110_0011': DivxImm,
'0111_011z_1001_ssss': DivxReg,
'0111_1100_1110_1111': DsbbImm8,
'0111_1101_1110_1111': DsbbImm16,
'0111_1100_1110_0111': DsbbReg8,
'0111_1101_1110_0111': DsbbReg16,
'0111_1100_1110_1101': DsubImm8,
'0111_1101_1110_1101': DsubImm16,
'0111_1100_1110_0101': DsubReg8,
'0111_1101_1110_0101': DsubReg16,
'0111_1100_1111_0010': Enter,
'0111_1101_1111_0010': Exitd,
'0111_1100_0110_DDDD;E': Exts,
'0111_1100_1111_0011': ExtsR0,
'1110_1011_0fff_0101': Fclr,
'1110_1011_0fff_0100': Fset,
'1010_0DDD;8': Inc,
'1011_d010': IncAdr,
'1110_1011_11ii_iiii': Int,
'1111_0110': Into,
'0110_1CCC;J': Jcnd1,
'0111_1101_1100_CCCC;K': Jcnd2,
'0110_0iii': Jmp3,
'1111_1110': Jmp8,
'1111_0100': Jmp16,
'1111_1100': JmpAbs,
'0111_1101_0010_ssss': Jmpi,
'0111_1101_0000_SSSS;A': JmpiAbs,
'1110_1110': Jmps,
'1111_0101': Jsr16,
'1111_1101': JsrAbs,
'0111_1101_0011_ssss': Jsri,
'0111_1101_0001_SSSS;A': JsriAbs,
'1110_1111': Jsrs,
'1110_1011_0DDD;C_0000': LdcImm,
'0111_1010_1DDD;C_ssss': LdcReg,
'0111_1100_1111_0000': Ldctx,
'0111_010z_1000_dddd': Lde,
'0111_010z_1001_dddd': LdeA0,
'0111_010z_1010_dddd': LdeA1A0,
'0111_1101_1010_0iii': Ldipl,
'0111_010z_1100_dddd': MovImmReg,
'1101_100z_iiii_dddd': MovImm4Reg,
'1100_0DDD;8': MovImm8Reg,
'1110_d010': MovImm8Adr,
'1010_d010': MovImm16Adr,
'1011_0DDD;8': MovZero8Reg,
'0111_001z_ssss_dddd': MovRegReg,
'0011_0dss': MovRegAdr,
'0000_0sDD;6': MovReg8Reg,
'0000_1DSS;7': MovRegReg8,
'0111_010z_1011_dddd': MovIndSPReg,
'0111_010z_0011_ssss': MovRegIndSP,
'1110_1011_0DDD;R_SSSS;I': Mova,
'0111_1100_10rr_DDDD;N': MovdirR0LReg,
'0111_1100_00rr_SSSS;N': MovdirRegR0L,
'0111_110z_0101_dddd': MulImm,
'0111_100z_ssss_dddd': MulReg,
'0111_110z_0100_dddd': MuluImm,
'0111_000z_ssss_dddd': MuluReg,
'0111_010z_0101_dddd': NegReg,
'0000_0100': Nop,
'0111_010z_0111_dddd': NotReg,
'1011_1DDD;8': NotReg8,
'0111_011z_0011_dddd': OrImm,
'1001_1DDD;8': OrImm8,
'1001_100z_ssss_dddd': OrReg,
'0001_1DSS;7': OrReg8,
'0111_010z_1101_dddd': Pop,
'1001_d010': PopReg8,
'1101_d010': PopAdr,
'1110_1011_0DDD;C_0011': Popc,
'1110_1101': Popm,
'0111_110z_1110_0010': PushImm,
'0111_010z_0100_ssss': Push,
'1000_s010': PushReg8,
'1100_s010': PushAdr,
'0111_1101_1001_SSSS;I': Pusha,
'1110_1011_0SSS;C_0010': Pushc,
'1110_1100': Pushm,
'1111_1011': Reit,
'0111_110z_1111_0001': Rmpa,
'1110_000z_iiii_dddd': RotImm4,
'0111_010z_0110_dddd': RotR1H,
'0111_011z_1010_dddd': Rolc,
'0111_011z_1011_dddd': Rorc,
'1111_0011': Rts,
'0111_011z_0111_dddd': SbbImm,
'1011_100z_ssss_dddd': SbbReg,
'1111_000z_iiii_dddd': ShaImm4,
'0111_010z_1111_dddd': ShaR1H,
'1110_1011_101d_iiii': Sha32Imm4,
'1110_1011_001d_0001': Sha32R1H,
'1110_100z_iiii_dddd': ShlImm4,
'0111_010z_1110_dddd': ShlR1H,
'1110_1011_100d_iiii': Shl32Imm4,
'1110_1011_000d_0001': Shl32R1H,
'0111_110z_1110_1001': Smovb,
'0111_110z_1110_1000': Smovf,
'0111_110z_1110_1010': Sstr,
'0111_1011_1SSS;C_dddd': StcReg,
'0111_1100_1100_DDDD;A': StcPc,
'0111_1101_1111_0000': Stctx,
'0111_010z_0000_ssss': Ste,
'0111_010z_0001_ssss': SteA0,
'0111_010z_0010_ssss': SteA1A0,
'1101_0DDD;8': Stnz,
'1100_1DDD;8': Stz,
'1101_1DDD;8': Stzx,
'0111_011z_0101_dddd': SubImm,
'1000_1DDD;8': SubImm8,
'1010_100z_ssss_dddd': SubReg,
'0010_1DSS;7': SubReg8,
'0111_011z_0000_dddd': TstImm,
'1000_000z_ssss_dddd': TstReg,
'1111_1111': Und,
'0111_1101_1111_0011': Wait,
'0111_101z_00ss_dddd': Xchg,
'0111_011z_0001_dddd': XorImm,
'1000_100z_ssss_dddd': XorReg,
}
generate_tables()
# print_assigned()
# print_unassigned()
| 28.429799 | 99 | 0.621951 |
f30593af5391112f0f58041cdf450a938ae282be | 797 | py | Python | class16.py | SamratAdhikari/Python_class_files | 47053e39b81c0d8f7485790fea8711aa25727caf | [
"MIT"
] | null | null | null | class16.py | SamratAdhikari/Python_class_files | 47053e39b81c0d8f7485790fea8711aa25727caf | [
"MIT"
] | null | null | null | class16.py | SamratAdhikari/Python_class_files | 47053e39b81c0d8f7485790fea8711aa25727caf | [
"MIT"
] | null | null | null | # import calculate
# import calculate as cal
# from calculate import diff as df
# from calculate import *
# print(cal.pi)
# pi = 3.1415
# print(diff(5,2))
# print(pi)
# print(calculate.pi)
# print(calculate.sum(3))
# print(calculate.div(2,1))
# print(abs(-23.21))
# print(math.ceil(5.23))
# print(dir(math))
# # print(dir(calculate))
# print(calculate.area_peri.__doc__)
import random as rd
# content = dir(rd)
# print(content)
txt = str(input("Enter a string: "))
jumble(txt)
| 20.435897 | 43 | 0.604768 |
f30618f542da8cbd2c4223847a99725100131374 | 901 | py | Python | hsir/law.py | WenjieZ/wuhan-pneumonia | 3d26955daa2deedec57cdd3effb3118531bbea7f | [
"BSD-3-Clause"
] | 6 | 2020-01-26T07:33:41.000Z | 2020-02-25T22:15:43.000Z | hsir/law.py | WenjieZ/wuhan-pneumonia | 3d26955daa2deedec57cdd3effb3118531bbea7f | [
"BSD-3-Clause"
] | 2 | 2020-02-17T16:12:50.000Z | 2020-02-29T21:31:17.000Z | hsir/law.py | WenjieZ/wuhan-pneumonia | 3d26955daa2deedec57cdd3effb3118531bbea7f | [
"BSD-3-Clause"
] | 1 | 2020-03-07T00:13:05.000Z | 2020-03-07T00:13:05.000Z | from abc import ABCMeta, abstractmethod
import numpy as np
__all__ = ['Law', 'Bin', 'Poi', 'Gau']
| 20.022222 | 89 | 0.54828 |
f30640fd7966c16ad8a70aa7a32537803f35f977 | 3,172 | py | Python | src/dummy/toga_dummy/widgets/canvas.py | Donyme/toga | 2647c7dc5db248025847e3a60b115ff51d4a0d4a | [
"BSD-3-Clause"
] | null | null | null | src/dummy/toga_dummy/widgets/canvas.py | Donyme/toga | 2647c7dc5db248025847e3a60b115ff51d4a0d4a | [
"BSD-3-Clause"
] | null | null | null | src/dummy/toga_dummy/widgets/canvas.py | Donyme/toga | 2647c7dc5db248025847e3a60b115ff51d4a0d4a | [
"BSD-3-Clause"
] | null | null | null | import re
from .base import Widget
| 31.72 | 165 | 0.573455 |
f3075ca7074510343a47f280f9ff997c85f925fa | 3,815 | py | Python | tests/unit/schemas/test_base_schema_class.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 421 | 2015-06-02T16:29:59.000Z | 2021-06-03T18:44:42.000Z | tests/unit/schemas/test_base_schema_class.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 404 | 2015-06-02T20:23:42.000Z | 2019-08-21T16:59:41.000Z | tests/unit/schemas/test_base_schema_class.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 16 | 2015-06-16T17:21:02.000Z | 2020-03-27T02:27:09.000Z | from unittest import TestCase
from schemer import Schema, Array, ValidationException
from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase
| 42.865169 | 96 | 0.636173 |
f309247f76f7d18c28aea4b2f1973377cd29af7f | 5,470 | py | Python | Objected-Oriented Systems/Python_OOP_SDA/Task1.py | syedwaleedhyder/Freelance_Projects | 7e2b85fc968850fc018014667b5ce9af0f00cb09 | [
"MIT"
] | 1 | 2020-08-13T17:26:13.000Z | 2020-08-13T17:26:13.000Z | Objected-Oriented Systems/Python_OOP_SDA/Task1.py | syedwaleedhyder/Freelance_Projects | 7e2b85fc968850fc018014667b5ce9af0f00cb09 | [
"MIT"
] | null | null | null | Objected-Oriented Systems/Python_OOP_SDA/Task1.py | syedwaleedhyder/Freelance_Projects | 7e2b85fc968850fc018014667b5ce9af0f00cb09 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod, abstractproperty
from datetime import datetime, date
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer"
else:
offer = "(No Offer)"
string = self.item_code + " " + self.item_name + " Availalbe= " + str(self.quantity_on_hand) + " " + offer
return string
class Perishable(Item):
class NonPerishable(Item):
class Grocer:
def __init__(self):
self.items_list = []
perishable = Perishable("P101", "Real Raisins", 10, 2, "Yes", date(2018,12, 10))
non_perishable = NonPerishable("NP210", "Tan Baking Paper", 25, 2, "No")
perishable2 = Perishable("P105", "Eggy Soup Tofu", 14, 1.85, "Yes", date(2018,11, 26))
grocer = Grocer()
grocer.add_to_list(perishable)
grocer.add_to_list(non_perishable)
grocer.add_to_list(perishable2)
grocer.print_items()
grocer.update_quantity_on_hand("P105", 10)
print()
grocer.print_items()
####################################################################
#DISCUSSION
"""
Single Responsibility Principle:
1) IN Perishable clas.
2) In NonPersishable class.
Open Closed Principle
1) Abstract class Item is open to be extended
2) Abstract class Item is closed for modification
Interface Segregation Principle
1) For using Perishable items, user don't have to know anything about Non-perishable items.
2) For using Non-perishable items, users don't have to know tha details of Perishable items.
Hence users are not forced to use methods they don't require.
"""
#################################################################### | 31.988304 | 233 | 0.609506 |
f30949586393ae32e93e9cb38a2df996aa7486fd | 1,116 | py | Python | compose/production/mongodb_backup/scripts/list_dbs.py | IMTEK-Simulation/mongodb-backup-container-image | b0e04c03cab9321d6b4277ee88412938fec95726 | [
"MIT"
] | null | null | null | compose/production/mongodb_backup/scripts/list_dbs.py | IMTEK-Simulation/mongodb-backup-container-image | b0e04c03cab9321d6b4277ee88412938fec95726 | [
"MIT"
] | null | null | null | compose/production/mongodb_backup/scripts/list_dbs.py | IMTEK-Simulation/mongodb-backup-container-image | b0e04c03cab9321d6b4277ee88412938fec95726 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
host = 'mongodb'
port = 27017
ssl_ca_cert='/run/secrets/rootCA.pem'
ssl_certfile='/run/secrets/tls_cert.pem'
ssl_keyfile='/run/secrets/tls_key.pem'
# don't turn these signal into exceptions, just die.
# necessary for integrating into bash script pipelines seamlessly.
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# get administrator credentials
with open('/run/secrets/username','r') as f:
username = f.read()
with open('/run/secrets/password','r') as f:
password = f.read()
from pymongo import MongoClient
client = MongoClient(host, port,
ssl=True,
username=username,
password=password,
authSource=username, # assume admin database and admin user share name
ssl_ca_certs=ssl_ca_cert,
ssl_certfile=ssl_certfile,
ssl_keyfile=ssl_keyfile,
tlsAllowInvalidHostnames=True)
# Within the container environment, mongod runs on host 'mongodb'.
# That hostname, however, is not mentioned within the host certificate.
dbs = client.list_database_names()
for db in dbs:
print(db)
client.close()
| 27.9 | 74 | 0.750896 |
f309f375f4df1f396c2fac2fda0007631441102b | 1,087 | py | Python | host.py | KeePinnnn/social_media_analytic | d13580c7dcfc87699bf42c0f870fefccc2f4c78b | [
"MIT"
] | 1 | 2019-09-13T13:08:28.000Z | 2019-09-13T13:08:28.000Z | host.py | KeePinnnn/social_media_analytic | d13580c7dcfc87699bf42c0f870fefccc2f4c78b | [
"MIT"
] | null | null | null | host.py | KeePinnnn/social_media_analytic | d13580c7dcfc87699bf42c0f870fefccc2f4c78b | [
"MIT"
] | null | null | null | from flask import Flask, send_from_directory, request, Response, render_template, jsonify
from test import demo
import subprocess
import os
app = Flask(__name__, static_folder='static')
if __name__ == "__main__":
app.run(debug=True) | 26.512195 | 89 | 0.620055 |
f30ad04d785ff96d12b9344dbb04adb8373f99e0 | 5,985 | py | Python | venv/lib/python3.6/site-packages/torch/_jit_internal.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | venv/lib/python3.6/site-packages/torch/_jit_internal.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/torch/_jit_internal.py | databill86/HyperFoods | 9267937c8c70fd84017c0f153c241d2686a356dd | [
"MIT"
] | null | null | null | """
The weak_script annotation needs to be here instead of inside torch/jit/ so it
can be used in other places in torch/ (namely torch.nn) without running into
circular dependency problems
"""
import weakref
import inspect
try:
import builtins # PY3
except Exception:
import __builtin__ as builtins # PY2
# Tracks standalone weak script functions
_compiled_weak_fns = weakref.WeakKeyDictionary()
# Tracks which methods should be converted to strong methods
_weak_script_methods = weakref.WeakKeyDictionary()
# Converted modules and their corresponding WeakScriptModuleProxy objects
_weak_modules = weakref.WeakKeyDictionary()
# Types that have been declared as weak modules
_weak_types = weakref.WeakKeyDictionary()
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
_boolean_dispatched = weakref.WeakKeyDictionary()
COMPILATION_PENDING = object()
COMPILED = object()
def createResolutionCallback(frames_up=0):
"""
Creates a function which, given a string variable name,
returns the value of the variable in the scope of the caller of
the function which called createResolutionCallback (by default).
This is used to enable access in-scope Python variables inside
TorchScript fragments.
frames_up is number of additional frames to go up on the stack.
The default value is 0, which correspond to the frame of the caller
of createResolutionCallback. Also for example, if frames_up is set
to 1, then the frame of the caller's caller of createResolutionCallback
will be taken.
For example, the following program prints 2::
def bar():
cb = createResolutionCallback(1)
print(cb("foo"))
def baz():
foo = 2
bar()
baz()
"""
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
frame = frame.f_back
i += 1
f_locals = frame.f_locals
f_globals = frame.f_globals
return env
def weak_script(fn, _frames_up=0):
"""
Marks a function as a weak script function. When used in a script function
or ScriptModule, the weak script function will be lazily compiled and
inlined in the graph. When not used in a script function, the weak script
annotation has no effect.
"""
_compiled_weak_fns[fn] = {
"status": COMPILATION_PENDING,
"compiled_fn": None,
"rcb": createResolutionCallback(_frames_up + 1)
}
return fn
def boolean_dispatch(arg_name, arg_index, default, if_true, if_false):
"""
Dispatches to either of 2 weak script functions based on a boolean argument.
In Torch Script, the boolean argument must be constant so that the correct
function to use can be determined at compile time.
"""
if _compiled_weak_fns.get(if_true) is None or _compiled_weak_fns.get(if_false) is None:
raise RuntimeError("both functions must be weak script")
if if_true.__doc__ is None and if_false.__doc__ is not None:
doc = if_false.__doc__
if_true.__doc__ = doc
elif if_false.__doc__ is None and if_true.__doc__ is not None:
doc = if_true.__doc__
if_false.__doc__ = doc
else:
raise RuntimeError("only one function can have a docstring")
fn.__doc__ = doc
_boolean_dispatched[fn] = {
"if_true": if_true,
"if_false": if_false,
"index": arg_index,
"default": default,
"arg_name": arg_name
}
return fn
try:
import typing
from typing import Tuple, List
except ImportError:
# A minimal polyfill for versions of Python that don't have typing.
# Note that this means that they also don't support the fancy annotation syntax, so
# those instances will only be used in our tiny `type: ` comment interpreter.
# The __getitem__ in typing is implemented using metaclasses, but I'm too lazy for that.
Tuple = TupleCls()
List = ListCls()
# allows BroadcastingList instance to be subscriptable
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
globals()["BroadcastingList{}".format(i)] = BroadcastingList1
| 29.628713 | 92 | 0.671846 |
f30afc0871d71087c3fea4199baf57d7f3c9c853 | 706 | py | Python | examples/qiushi.py | qDonl/Spider | ec7e7519b173b004314fc41cf1a65c2a662eb8d5 | [
"Unlicense"
] | null | null | null | examples/qiushi.py | qDonl/Spider | ec7e7519b173b004314fc41cf1a65c2a662eb8d5 | [
"Unlicense"
] | null | null | null | examples/qiushi.py | qDonl/Spider | ec7e7519b173b004314fc41cf1a65c2a662eb8d5 | [
"Unlicense"
] | null | null | null | import re, requests
if __name__ == '__main__':
main()
| 30.695652 | 138 | 0.589235 |
f30c9db8e27b84a58028614e5f7dd98149676ac3 | 4,267 | py | Python | benchmark/python/benchmark/benchmark_main.py | toschmidt/pg-cv | 897909fdb2a7824137f2128c6bd98151f6ed3cf4 | [
"MIT"
] | 3 | 2021-03-19T04:52:26.000Z | 2021-09-13T14:11:44.000Z | benchmark/python/benchmark/benchmark_main.py | toschmidt/pg-cv | 897909fdb2a7824137f2128c6bd98151f6ed3cf4 | [
"MIT"
] | null | null | null | benchmark/python/benchmark/benchmark_main.py | toschmidt/pg-cv | 897909fdb2a7824137f2128c6bd98151f6ed3cf4 | [
"MIT"
] | null | null | null | from benchmark_query import BenchmarkQuery
from clear import ClearViews, ClearQuery, ClearPublic
from compare_query import CompareQuery
from database import Database
from setup import SetupPublic, SetupViews, SetupQuery
from timing import Timing
# remove all possible side effects of a query
# setup query and corresponding auxiliary tables needed for the maintenance approach
# benchmark a query and clear the result after that
# check if the result of all maintenance approaches is identical
| 33.865079 | 85 | 0.644715 |
f30dee16b7aab145441edae420bc159552e96a76 | 3,787 | py | Python | nelpy/plotting/decoding.py | shayokdutta/nelpy_modified | 8f3bd505beed570bfe917ed0a7f1d8c13f31b69a | [
"MIT"
] | null | null | null | nelpy/plotting/decoding.py | shayokdutta/nelpy_modified | 8f3bd505beed570bfe917ed0a7f1d8c13f31b69a | [
"MIT"
] | null | null | null | nelpy/plotting/decoding.py | shayokdutta/nelpy_modified | 8f3bd505beed570bfe917ed0a7f1d8c13f31b69a | [
"MIT"
] | null | null | null | __all__ = ['plot_cum_error_dist']
import numpy as np
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import itertools
from . import palettes
# colors = itertools.cycle(npl.palettes.color_palette(palette="sweet", n_colors=15))
# from ..core import *
# from ..auxiliary import *
from .. import decoding
# from . import utils # import plotting/utils
def plot_cum_error_dist(*, cumhist=None, bincenters=None,
bst=None, extern=None, decodefunc=None,
k=None, transfunc=None, n_extern=None,
n_bins = None, extmin=None, extmax=None,
sigma=None, lw=None, ax=None, inset=True,
inset_ax=None, color=None, **kwargs):
"""Plot (and optionally compute) the cumulative distribution of
decoding errors, evaluated using a cross-validation procedure.
See Fig 3.(b) of "Analysis of Hippocampal Memory Replay Using Neural
Population Decoding", Fabian Kloosterman, 2012.
Parameters
----------
Returns
-------
"""
if ax is None:
ax = plt.gca()
if lw is None:
lw=1.5
if decodefunc is None:
decodefunc = decoding.decode1D
if k is None:
k=5
if n_extern is None:
n_extern=100
if n_bins is None:
n_bins = 200
if extmin is None:
extmin=0
if extmax is None:
extmax=100
if sigma is None:
sigma = 3
# Get the color from the current color cycle
if color is None:
line, = ax.plot(0, 0.5)
color = line.get_color()
line.remove()
# if cumhist or bincenters are NOT provided, then compute them
if cumhist is None or bincenters is None:
assert bst is not None, "if cumhist and bincenters are not given, then bst must be provided to recompute them!"
assert extern is not None, "if cumhist and bincenters are not given, then extern must be provided to recompute them!"
cumhist, bincenters = \
decoding.cumulative_dist_decoding_error_using_xval(
bst=bst,
extern=extern,
decodefunc=decoding.decode1D,
k=k,
transfunc=transfunc,
n_extern=n_extern,
extmin=extmin,
extmax=extmax,
sigma=sigma,
n_bins=n_bins)
# now plot results
ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
ax.set_xlim(bincenters[0], bincenters[-1])
ax.set_xlabel('error [cm]')
ax.set_ylabel('cumulative probability')
ax.set_ylim(0)
if inset:
if inset_ax is None:
inset_ax = inset_axes(parent_axes=ax,
width="60%",
height="50%",
loc=4,
borderpad=2)
inset_ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
# annotate inset
thresh1 = 0.7
bcidx = np.asscalar(np.argwhere(cumhist>thresh1)[0]-1)
inset_ax.hlines(thresh1, 0, bincenters[bcidx], color=color, alpha=0.9, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh1, color=color, alpha=0.9, linestyle='--')
inset_ax.set_xlim(0,12*np.ceil(bincenters[bcidx]/10))
thresh2 = 0.5
bcidx = np.asscalar(np.argwhere(cumhist>thresh2)[0]-1)
inset_ax.hlines(thresh2, 0, bincenters[bcidx], color=color, alpha=0.6, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh2, color=color, alpha=0.6, linestyle='--')
inset_ax.set_yticks((0,thresh1, thresh2, 1))
inset_ax.set_ylim(0)
return ax, inset_ax
return ax | 32.930435 | 125 | 0.601532 |
f30ee9cbdc128ebb414011f1922779899d37a824 | 77 | py | Python | code/abc122_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/abc122_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/abc122_a_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | b=input()
print("A" if b=="T" else "T" if b=="A" else "G" if b=="C" else "C") | 38.5 | 67 | 0.506494 |
f30f0fecb3a5195d2294443d51e5048fb142c4a9 | 847 | py | Python | setup.py | carrasquel/wikipit | b8d2f870406eef866f68a4f7e5caca5398a671c2 | [
"MIT"
] | 1 | 2020-05-17T14:53:23.000Z | 2020-05-17T14:53:23.000Z | setup.py | carrasquel/wikipit | b8d2f870406eef866f68a4f7e5caca5398a671c2 | [
"MIT"
] | 1 | 2020-05-18T21:58:06.000Z | 2020-05-18T21:58:06.000Z | setup.py | carrasquel/wikipit | b8d2f870406eef866f68a4f7e5caca5398a671c2 | [
"MIT"
] | 1 | 2020-05-17T18:15:48.000Z | 2020-05-17T18:15:48.000Z | """Setup specifications for gitignore project."""
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="wikipit",
version="1.0.4",
description="A Command Line Tool to Search Wikipedia in the terminal.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/carrasquel/wikipit",
author="Nelson Carrasquel",
license='MIT',
author_email="carrasquel@outlook.com",
py_modules=["wikipit"],
entry_points={
"console_scripts": [
"wikipit = wikipit:wiki"
]
},
install_requires=[
"wikipedia",
"Click"
]
) | 24.911765 | 75 | 0.662338 |
f31108a183ca826267db22b5fdc9dd872d8b503e | 1,469 | py | Python | samples.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | null | null | null | samples.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | 9 | 2018-11-15T00:44:11.000Z | 2019-03-01T02:52:34.000Z | samples.py | daimeng/py-geode | a4146804e4def71a6b430e5a16f6e0b1a65deefe | [
"MIT"
] | null | null | null | import aiohttp
import time
import ujson
import asyncio
import prettyprinter
import numpy as np
import pandas as pd
from geode.dispatcher import AsyncDispatcher
prettyprinter.install_extras(include=['dataclasses'])
pd.set_option('display.float_format', '{:.4f}'.format)
if __name__ == '__main__':
asyncio.run(main())
| 27.203704 | 76 | 0.538462 |
f3126093965615fe8a8564523762df648831f740 | 171 | py | Python | functional_tests.py | idanmel/soccer_friends | db370c384e99308c5f6a39a18eac1556b83cc786 | [
"MIT"
] | null | null | null | functional_tests.py | idanmel/soccer_friends | db370c384e99308c5f6a39a18eac1556b83cc786 | [
"MIT"
] | null | null | null | functional_tests.py | idanmel/soccer_friends | db370c384e99308c5f6a39a18eac1556b83cc786 | [
"MIT"
] | null | null | null | from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
try:
assert 'Django' in browser.title
finally:
browser.close()
| 17.1 | 36 | 0.730994 |
f3128d3872baa827767bd09bf278c2956175ee90 | 963 | py | Python | lorenzsj/blog/views.py | lorenzsj/lorenzsj | 631c6632f8fe70a021836c52aafd8746e13fc8a8 | [
"MIT"
] | null | null | null | lorenzsj/blog/views.py | lorenzsj/lorenzsj | 631c6632f8fe70a021836c52aafd8746e13fc8a8 | [
"MIT"
] | null | null | null | lorenzsj/blog/views.py | lorenzsj/lorenzsj | 631c6632f8fe70a021836c52aafd8746e13fc8a8 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.response import Response
from blog.models import Post
from blog.serializers import PostSerializer
from blog.serializers import UserSerializer
from blog.permissions import IsAuthorOrReadOnly
| 31.064516 | 74 | 0.764278 |
f3133d707d13f1d41040304efdb1e48fd46e0e3f | 4,270 | py | Python | src/piminder_service/resources/db_autoinit.py | ZAdamMac/pyminder | 059f57cb7cea4f517f77b1bbf391ce99f25d83bb | [
"MIT"
] | null | null | null | src/piminder_service/resources/db_autoinit.py | ZAdamMac/pyminder | 059f57cb7cea4f517f77b1bbf391ce99f25d83bb | [
"MIT"
] | 3 | 2021-05-05T21:08:24.000Z | 2021-06-23T10:47:40.000Z | src/piminder_service/resources/db_autoinit.py | ZAdamMac/pyminder | 059f57cb7cea4f517f77b1bbf391ce99f25d83bb | [
"MIT"
] | null | null | null | """
This script is a component of Piminder's back-end controller.
Specifically, it is a helper utility to be used to intialize a database for the user and message tables.
Author: Zac Adam-MacEwen (zadammac@kenshosec.com)
An Arcana Labs utility.
Produced under license.
Full license and documentation to be found at:
https://github.com/ZAdamMac/Piminder
"""
import bcrypt
import getpass
import os
import pymysql
__version__ = "1.0.0" # This is the version of service that we can init, NOT the version of the script itself.
spec_tables = [
"""CREATE TABLE `messages` (
`id` CHAR(36) NOT NULL,
`name` VARCHAR(255) NOT NULL,
`message` TEXT DEFAULT NULL,
`errorlevel` CHAR(5) DEFAULT NULL,
`time_raised` TIMESTAMP,
`read_flag` BIT DEFAULT 0,
PRIMARY KEY (`id`)
)""",
"""CREATE TABLE `users` (
`username` CHAR(36) NOT NULL,
`password` VARCHAR(255) NOT NULL,
`permlevel` INT(1) DEFAULT 1,
`memo` TEXT DEFAULT NULL,
PRIMARY KEY (`username`)
)"""
]
def connect_to_db():
"""Detects if it is necessary to prompt for the root password, and either way,
establishes the db connection, returning it.
:return:
"""
print("We must now connect to the database.")
try:
db_user = os.environ['PIMINDER_DB_USER']
except KeyError:
print("Missing envvar: Piminder_DB_USER")
exit(1)
root_password = None
try:
root_password = os.environ['PIMINDER_DB_PASSWORD']
except KeyError:
print("Missing envvar: Piminder_DB_PASSWORD")
exit(1)
try:
db_host = os.environ['PIMINDER_DB_HOST']
except KeyError:
print("Missing envvar: Piminder_DB_HOST")
exit(1)
finally:
conn = pymysql.connect(host=db_host, user=db_user,
password=root_password, db='Piminder',
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
return conn
def create_tables(list_tables, connection):
"""Accepts a list of create statements for tables and pushes them to the DB.
:param list_tables: A list of CREATE statements in string form.
:param connection: a pymysql.connect() object, such as returned by connect_to_db
:return:
"""
cursor = connection.cursor()
connection.begin()
for table in list_tables:
try:
cursor.execute(table)
except pymysql.err.ProgrammingError:
print("Error in the following statement; table was skipped.")
print(table)
except pymysql.err.OperationalError as error:
if str(error.args[0]) == 1050: # This table already exists
print("%s, skipping" % error.args[1])
else:
print(error)
connection.commit()
def create_administrative_user(connection):
"""Creates an administrative user if it does not already exist.
:param connection:
:return:
"""
print("Validating an admin user exists:")
try:
admin_name = os.environ['PIMINDER_ADMIN_USER']
except KeyError:
print("Missing envvar: Piminder_ADMIN_USER")
exit(1)
cur = connection.cursor()
command = "SELECT count(username) AS howmany FROM users WHERE permlevel like 3;"
# Wait, how many admins are there?
cur.execute(command)
count = cur.fetchone()["howmany"]
if count < 1: # Only do this if no more than 0 exists.
command = "INSERT INTO users (username, password, memo, permlevel) VALUES (%s, %s, 'Default User', 3);"
try:
root_password = os.environ['PIMINDER_ADMIN_PASSWORD']
except KeyError:
print("Missing envvar: Piminder_ADMIN_PASSWORD")
exit(1)
hashed_rootpw = bcrypt.hashpw(root_password.encode('utf8'), bcrypt.gensalt())
cur.execute(command, (admin_name, hashed_rootpw))
print("Created administrative user: %s" % admin_name)
else:
print("Administrative user already exists, skipping.")
connection.commit()
| 31.865672 | 111 | 0.646136 |
f314e1c52a7971b18107dd68a650e6479dbddda8 | 7,455 | py | Python | conftest.py | jirikuncar/renku-python | 69df9ea1d5db3c63fd2ea3537c7e46d079360c8f | [
"Apache-2.0"
] | null | null | null | conftest.py | jirikuncar/renku-python | 69df9ea1d5db3c63fd2ea3537c7e46d079360c8f | [
"Apache-2.0"
] | null | null | null | conftest.py | jirikuncar/renku-python | 69df9ea1d5db3c63fd2ea3537c7e46d079360c8f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between cole Polytechnique Fdrale de Lausanne (EPFL) and
# Eidgenssische Technische Hochschule Zrich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest configuration."""
import json
import os
import shutil
import sys
import tempfile
import time
import types
import pytest
import responses
from click.testing import CliRunner
| 27.921348 | 78 | 0.643997 |
f315277c03047d954514d2d9908c6f026aae74fa | 624 | py | Python | kuchinawa/Logger.py | threemeninaboat3247/kuchinawa | 81094e358e4dad9529a15fa526f2307caaceb82e | [
"MIT"
] | 4 | 2017-11-29T04:14:19.000Z | 2022-01-21T13:00:23.000Z | kuchinawa/Logger.py | threemeninaboat3247/kuchinawa | 81094e358e4dad9529a15fa526f2307caaceb82e | [
"MIT"
] | 3 | 2018-05-07T14:49:29.000Z | 2018-05-08T11:49:17.000Z | kuchinawa/Logger.py | threemeninaboat3247/kuchinawa | 81094e358e4dad9529a15fa526f2307caaceb82e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" --- Description ---
Module:
Logger.py
Abstract:
A module for logging
Modified:
threemeninaboat3247 2018/04/30
--- End ---
"""
# Standard library imports
import logging
logger = logging.getLogger('Kuchinawa Log')
#
logger.setLevel(10)
#
fh = logging.FileHandler('kuchinawa.log')
logger.addHandler(fh)
#
sh = logging.StreamHandler()
logger.addHandler(sh)
#
formatter = logging.Formatter('%(asctime)s:%(lineno)d:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter) | 20.129032 | 81 | 0.674679 |
f3159c44193bd89a772b6f2bca9dbffb2ffaa8bc | 5,933 | py | Python | test/search/capacity.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | test/search/capacity.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | test/search/capacity.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | """ Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo, SpotType
import simplejson as json
from django.test.utils import override_settings
from mock import patch
from django.core import cache
from spotseeker_server import models
| 36.398773 | 98 | 0.532783 |
f3163b561595dcd3e021c0a5f070a6337bbb8499 | 1,745 | py | Python | model/k1_clustering_pre-processing.py | not-a-hot-dog/spotify_project | b928fecb136cffdd62c650b054ca543047800f11 | [
"MIT"
] | null | null | null | model/k1_clustering_pre-processing.py | not-a-hot-dog/spotify_project | b928fecb136cffdd62c650b054ca543047800f11 | [
"MIT"
] | 1 | 2019-12-08T17:23:49.000Z | 2019-12-08T17:23:49.000Z | model/k1_clustering_pre-processing.py | not-a-hot-dog/spotify_project | b928fecb136cffdd62c650b054ca543047800f11 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from model.helper_functions import build_playlist_features
print('Reading data into memory')
pid_list = np.genfromtxt('../data/train_pids.csv', skip_header=1, dtype=int)
playlistfile = '../data/playlists.csv'
playlist_df = pd.read_csv(playlistfile)
trackfile = '../data/songs_100000_feat_cleaned.csv'
track_df = pd.read_csv(trackfile, index_col='track_uri')
print('Finding playlist features')
playlist_features = build_playlist_features(pid_list, playlist_df, track_df)
playlist_features.to_csv('../data/playlist_features_train.csv')
print('Finding top artists')
# Find the top artists who dominate playlists
top_playlist_defining_artists = playlist_features.artist_uri_top.value_counts(normalize=False)
top_playlist_defining_artists.to_csv('../data/top_playlist_defining_artists_train_all.csv', header=True)
top_playlist_defining_artists = playlist_features.artist_uri_top.value_counts().index.values[:50]
np.savetxt('../data/top_playlist_defining_artists_train.csv', top_playlist_defining_artists, delimiter=',', fmt="%s")
# Keep only those artists who dominate playlists and one hot encode
artists_to_keep = playlist_features.artist_uri_top.isin(top_playlist_defining_artists)
playlist_features.artist_uri_top = playlist_features.artist_uri_top[artists_to_keep]
playlist_features.artist_uri_freq = playlist_features.artist_uri_freq[artists_to_keep]
playlist_features.artist_uri_freq.fillna(0, inplace=True)
top_artist_dummies = pd.get_dummies(playlist_features.artist_uri_top)
playlist_features = pd.concat([playlist_features, top_artist_dummies], axis=1)
playlist_features.drop(['artist_uri_top'], axis=1, inplace=True)
playlist_features.to_csv('../data/playlist_features_with_artists_train.csv')
| 52.878788 | 117 | 0.837249 |
f3166c7800fb37b00a35784025071d85b46a881a | 731 | py | Python | app/main/__init__.py | a2hsh/udacity-fsnd-capstone | 545f78111784756f469127bcb4a656306a7fe242 | [
"MIT"
] | null | null | null | app/main/__init__.py | a2hsh/udacity-fsnd-capstone | 545f78111784756f469127bcb4a656306a7fe242 | [
"MIT"
] | null | null | null | app/main/__init__.py | a2hsh/udacity-fsnd-capstone | 545f78111784756f469127bcb4a656306a7fe242 | [
"MIT"
] | null | null | null | # routes Blueprint
from flask import Blueprint, jsonify, request, redirect, render_template
from flask_cors import CORS
from os import environ
# initializing the blueprint
main = Blueprint('main', __name__)
CORS(main, resources={r'*': {'origins': '*'}})
# importing routes
from . import actors, movies, errors
| 31.782609 | 73 | 0.675787 |
f316cbca5e61cde2ebe07f8eb9690a7626e13407 | 497 | py | Python | agenda/tests/test_models.py | migueleichler/django-tdd | 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | [
"MIT"
] | null | null | null | agenda/tests/test_models.py | migueleichler/django-tdd | 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | [
"MIT"
] | null | null | null | agenda/tests/test_models.py | migueleichler/django-tdd | 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | [
"MIT"
] | null | null | null | from django.test import TestCase
from agenda.models import Compromisso
from model_mommy import mommy
| 27.611111 | 75 | 0.750503 |
f31b214b07d8c2680f0f9e730882cb62c105cf97 | 1,868 | py | Python | tests/test_crypto/test_registry/test_misc.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | tests/test_crypto/test_registry/test_misc.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | tests/test_crypto/test_registry/test_misc.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains misc tests for the registry (crypto/ledger_api/contract)."""
import logging
import pytest
from aea.crypto.registries.base import Registry
from aea.exceptions import AEAException
logger = logging.getLogger(__name__)
| 31.133333 | 85 | 0.599036 |
f31c461ea88a83b782769751389f56772c713d60 | 1,457 | py | Python | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | [
"MIT"
] | null | null | null | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | [
"MIT"
] | null | null | null | pyof/v0x05/asynchronous/table_status.py | mhaji007/python-openflow | 25f032d660e648501d1e732969b6f91357ef5b66 | [
"MIT"
] | null | null | null | """Defines an Table Status Message."""
# System imports
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericMessage, GenericStruct
from pyof.foundation.basic_types import BinaryData, FixedTypeList, UBInt16, UBInt8, UBInt32, UBInt64, Pad
from pyof.v0x05.common.header import Header, Type
from pyof.v0x05.controller2switch.multipart_reply import TableDesc
# Third-party imports
__all__ = ('TableStatus', 'TableReason')
# Enums
# Classes
| 26.490909 | 105 | 0.691833 |
f31ce1a1719984d1cf324a95ea4f226d430436e1 | 361 | py | Python | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | [
"MIT"
] | 548 | 2019-09-05T04:25:21.000Z | 2022-03-22T01:49:35.000Z | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | [
"MIT"
] | 21 | 2019-10-04T16:36:05.000Z | 2022-03-24T02:20:28.000Z | DEQModel/utils/debug.py | JunLi-Galios/deq | 80eb6b598357e8e01ad419126465fa3ed53b12c7 | [
"MIT"
] | 75 | 2019-09-05T22:40:32.000Z | 2022-03-31T09:40:44.000Z | import torch
from torch.autograd import Function
| 24.066667 | 70 | 0.65928 |
f31cf93ef20fe7554b80d699b5aa26fadaf86834 | 13,629 | py | Python | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | [
"MIT"
] | null | null | null | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | [
"MIT"
] | null | null | null | feature_track_visualizer/visualizer.py | jfvilaro/rpg_feature_tracking_analysis | 4c29a64cc07db44b43c12ff66c71d5c7da062c79 | [
"MIT"
] | null | null | null | from os.path import isfile
import os
import cv2
from os.path import join
import numpy as np
import tqdm
import random
from big_pun.tracker_utils import filter_first_tracks, getTrackData
| 37.035326 | 131 | 0.556387 |
f31e643bb5106928ddc94996a97d51a1aa497458 | 12,163 | py | Python | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 20 | 2015-02-26T15:55:42.000Z | 2021-07-30T00:19:31.000Z | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 1 | 2018-04-02T12:13:30.000Z | 2021-10-04T00:59:38.000Z | Polygon2-2.0.7/Polygon/IO.py | tangrams/landgrab | 217699e4730a1bdb7c9e03bfd9c2c0c31950eb7c | [
"MIT"
] | 5 | 2015-03-03T23:31:39.000Z | 2018-01-17T03:13:34.000Z | # -*- coding: utf-8 -*-
"""
This module provides functions for reading and writing Polygons in different
formats.
The following write-methods will accept different argument types for the
output. If ofile is None, the method will create and return a StringIO-object.
If ofile is a string, a file with that name will be created. If ofile is a
file, it will be used for writing.
The following read-methods will accept different argument types for the
output. An file or StringIO object will be used directly. If the argument is a
string, the function tries to read a file with that name. If it fails, it
will evaluate the string directly.
"""
from cPolygon import Polygon
from types import StringTypes
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from xml.dom.minidom import parseString, Node
from struct import pack, unpack, calcsize
try:
import reportlab
hasPDFExport = True
except:
hasPDFExport = False
try:
import Imaging
hasPILExport = True
except:
hasPILExport = False
## some helpers
def __unpack(f, b):
s = calcsize(f)
return unpack(f, b[:s]), b[s:]
def getWritableObject(ofile):
"""try to make a writable file-like object from argument"""
if ofile is None:
return StringIO(), False
elif type(ofile) in StringTypes:
return open(ofile, 'w'), True
elif type(ofile) in (file, StringIO):
return ofile, False
else:
raise Exception("Can't make a writable object from argument!")
def getReadableObject(ifile):
"""try to make a readable file-like object from argument"""
if type(ifile) in StringTypes:
try:
return open(ifile, 'r'), True
except:
return StringIO(ifile), True
elif type(ifile) in (file, StringIO):
return ifile, False
else:
raise Exception("Can't make a readable object from argument!")
def decodeBinary(bin):
"""
Create Polygon from a binary string created with encodeBinary(). If the string
is not valid, the whole thing may break!
:Arguments:
- s: string
:Returns:
new Polygon
"""
nC, b = __unpack('!I', bin)
p = Polygon()
for i in range(nC[0]):
x, b = __unpack('!l', b)
if x[0] < 0:
isHole = 1
s = -2*x[0]
else:
isHole = 0
s = 2*x[0]
flat, b = __unpack('!%dd' % s, b)
p.addContour(tuple(__couples(flat)), isHole)
return p
def encodeBinary(p):
"""
Encode Polygon p to a binary string. The binary string will be in a standard
format with network byte order and should be rather machine independant.
There's no redundancy in the string, any damage will make the whole polygon
information unusable.
:Arguments:
- p: Polygon
:Returns:
string
"""
l = [pack('!I', len(p))]
for i, c in enumerate(p):
l.append(pack('!l', len(c)*(1,-1)[p.isHole(i)]))
l.append(pack('!%dd' %(2*len(c)), *__flatten(c)))
return "".join(l)
def writeGnuplot(ofile, polylist):
"""
Write a list of Polygons to a gnuplot file, which may be plotted using the
command ``plot "ofile" with lines`` from gnuplot.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
for p in polylist:
for vl in p:
for j in vl:
f.write('%g %g\n' % tuple(j))
f.write('%g %g\n\n' % tuple(vl[0]))
if cl: f.close()
return f
def writeGnuplotTriangles(ofile, polylist):
"""
Converts a list of Polygons to triangles and write the tringle data to a
gnuplot file, which may be plotted using the command
``plot "ofile" with lines`` from gnuplot.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
for p in polylist:
for vl in p.triStrip():
j = 0
for j in range(len(vl)-2):
f.write('%g %g \n %g %g \n %g %g \n %g %g\n\n' %
tuple(vl[j]+vl[j+1]+vl[j+2]+vl[j]))
f.write('\n')
if cl: f.close()
f.close()
def writeSVG(ofile, polylist, width=None, height=None, fill_color=None,
fill_opacity=None, stroke_color=None, stroke_width=None):
"""
Write a SVG representation of the Polygons in polylist, width and/or height
will be adapted if not given. fill_color, fill_opacity, stroke_color and
stroke_width can be sequences of the corresponding SVG style attributes to use.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional width: float
- optional height: height
- optional fill_color: sequence of colors (3-tuples of floats: RGB)
- optional fill_opacity: sequence of colors
- optional stroke_color: sequence of colors
- optional stroke_width: sequence of floats
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
pp = [Polygon(p) for p in polylist] # use clones only
[p.flop(0.0) for p in pp] # adopt to the SVG coordinate system
bbs = [p.boundingBox() for p in pp]
bbs2 = zip(*bbs)
minx = min(bbs2[0])
maxx = max(bbs2[1])
miny = min(bbs2[2])
maxy = max(bbs2[3])
xdim = maxx-minx
ydim = maxy-miny
if not (xdim or ydim):
raise Error("Polygons have no extent in one direction!")
a = ydim / xdim
if not width and not height:
if a < 1.0:
width = 300
else:
height = 300
if width and not height:
height = width * a
if height and not width:
width = height / a
npoly = len(pp)
fill_color = __RingBuffer(fill_color or ((255,0,0), (0,255,0), (0,0,255), (255,255,0)))
fill_opacity = __RingBuffer(fill_opacity or (1.0,))
stroke_color = __RingBuffer(stroke_color or ((0,0,0),))
stroke_width = __RingBuffer(stroke_width or (1.0,))
s = ['<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>',
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">',
'<svg xmlns="http://www.w3.org/2000/svg" width="%d" height="%d">' % (width, height)]
for i in range(npoly):
p = pp[i]
bb = bbs[i]
p.warpToBox(width*(bb[0]-minx)/xdim, width*(bb[1]-minx)/xdim,
height*(bb[2]-miny)/ydim, height*(bb[3]-miny)/ydim)
subl = ['<path style="fill:rgb%s;fill-opacity:%s;fill-rule:evenodd;stroke:rgb%s;stroke-width:%s;" d="' %
(fill_color(), fill_opacity(), stroke_color(), stroke_width())]
for c in p:
subl.append('M %g, %g %s z ' % (c[0][0], c[0][1], ' '.join([("L %g, %g" % (a,b)) for a,b in c[1:]])))
subl.append('"/>')
s.append(''.join(subl))
s.append('</svg>')
f.write('\n'.join(s))
if cl: f.close()
return f
def writeXML(ofile, polylist, withHeader=False):
"""
Write a readable representation of the Polygons in polylist to a XML file.
A simple header can be added to make the file parsable.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional withHeader: bool
:Returns:
ofile object
"""
f, cl = getWritableObject(ofile)
if withHeader:
f.write('<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>\n')
for p in polylist:
l = ['<polygon contours="%d" area="%g" xMin="%g" xMax="%g" yMin="%g" yMax="%g">' % ((len(p), p.area())+p.boundingBox())]
for i, c in enumerate(p):
l.append(' <contour points="%d" isHole="%d" area="%g" xMin="%g" xMax="%g" yMin="%g" yMax="%g">' \
% ((len(c), p.isHole(i), p.area(i))+p.boundingBox(i)))
for po in c:
l.append(' <p x="%g" y="%g"/>' % po)
l.append(' </contour>')
l.append('</polygon>\n')
f.write('\n'.join(l))
if cl: f.close()
return f
def readXML(ifile):
"""
Read a list of Polygons from a XML file which was written with writeXML().
:Arguments:
- ofile: see above
:Returns:
list of Polygon objects
"""
f, cl = getReadableObject(ifile)
d = parseString(f.read())
if cl: f.close()
plist = []
for pn in d.getElementsByTagName('polygon'):
p = Polygon()
plist.append(p)
for sn in pn.childNodes:
if not sn.nodeType == Node.ELEMENT_NODE:
continue
assert sn.tagName == 'contour'
polist = []
for pon in sn.childNodes:
if not pon.nodeType == Node.ELEMENT_NODE:
continue
polist.append((float(pon.getAttribute('x')), float(pon.getAttribute('y'))))
assert int(sn.getAttribute('points')) == len(polist)
p.addContour(polist, int(sn.getAttribute('isHole')))
assert int(pn.getAttribute('contours')) == len(p)
return plist
if hasPDFExport:
def writePDF(ofile, polylist, pagesize=None, linewidth=0, fill_color=None):
"""
*This function is only available if the reportlab package is installed!*
Write a the Polygons in polylist to a PDF file.
:Arguments:
- ofile: see above
- polylist: sequence of Polygons
- optional pagesize: 2-tuple of floats
- optional linewidth: float
- optional fill_color: color
:Returns:
ofile object
"""
from reportlab.pdfgen import canvas
from reportlab.lib.colors import red, green, blue, yellow, black, white
if not pagesize:
from reportlab.lib.pagesizes import A4
pagesize = A4
can = canvas.Canvas(ofile, pagesize=pagesize)
can.setLineWidth(linewidth)
pp = [Polygon(p) for p in polylist] # use clones only
bbs = [p.boundingBox() for p in pp]
bbs2 = zip(*bbs)
minx = min(bbs2[0])
maxx = max(bbs2[1])
miny = min(bbs2[2])
maxy = max(bbs2[3])
xdim = maxx-minx
ydim = maxy-miny
if not (xdim or ydim):
raise Error("Polygons have no extent in one direction!")
a = ydim / xdim
width, height = pagesize
if a > (height/width):
width = height / a
else:
height = width * a
npoly = len(pp)
fill_color = __RingBuffer(fill_color or (red, green, blue, yellow))
for i in range(npoly):
p = pp[i]
bb = bbs[i]
p.warpToBox(width*(bb[0]-minx)/xdim, width*(bb[1]-minx)/xdim,
height*(bb[2]-miny)/ydim, height*(bb[3]-miny)/ydim)
for poly in pp:
solids = [poly[i] for i in range(len(poly)) if poly.isSolid(i)]
can.setFillColor(fill_color())
for c in solids:
p = can.beginPath()
p.moveTo(c[0][0], c[0][1])
for i in range(1, len(c)):
p.lineTo(c[i][0], c[i][1])
p.close()
can.drawPath(p, stroke=1, fill=1)
holes = [poly[i] for i in range(len(poly)) if poly.isHole(i)]
can.setFillColor(white)
for c in holes:
p = can.beginPath()
p.moveTo(c[0][0], c[0][1])
for i in range(1, len(c)):
p.lineTo(c[i][0], c[i][1])
p.close()
can.drawPath(p, stroke=1, fill=1)
can.showPage()
can.save()
| 31.840314 | 128 | 0.564499 |
f3200d5d53315321e6ef6c3cef5d42425590c96b | 743 | py | Python | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | null | null | null | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | null | null | null | strings/reverse_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | 3 | 2020-10-07T20:24:45.000Z | 2020-12-16T04:53:19.000Z | from typing import List, Optional
| 28.576923 | 93 | 0.643338 |
f3212e189d04ba2e4747e03dc77f4721f12f30e5 | 14,706 | py | Python | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/prep/tokens/realm.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | # Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import collections
import os
import unicodedata
from ...tokens.utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...tokens.base import BatchEncoding
from ...utils import PaddingStrategy
VOCAB_FS = {"vocab_file": "vocab.txt"}
VOCAB_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
}
}
INPUT_CAPS = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
class BasicTokenizer(object):
class WordpieceTokenizer(object):
| 37.707692 | 141 | 0.589963 |
f323bb4c6d1d42af8adea82f66966d109724eba9 | 29,495 | py | Python | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | [
"MIT"
] | 5 | 2021-05-12T18:18:49.000Z | 2022-01-06T12:35:35.000Z | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | [
"MIT"
] | null | null | null | api/fileupload.py | subhendu01/Audio-FIle-Server | 6c7f9a093e41f0750a0a8c4c1f0e48608215c8a6 | [
"MIT"
] | null | null | null | import datetime, os, base64
from flask import Flask, jsonify, request, Blueprint
from dbstore import dbconf
import json
from bson import json_util
# process kill
# lsof -i tcp:3000
file_upload = Blueprint('uploadAPI', __name__)
app = Flask(__name__)
| 45.376923 | 137 | 0.398644 |
f324f6cba05e902a8556f523455c852d7fd15d3d | 2,542 | py | Python | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | [
"Unlicense"
] | null | null | null | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | [
"Unlicense"
] | null | null | null | dna/zfec/zfec/cmdline_zunfec.py | bobbae/examples | 6c998e2af9a48f7173a0b6b1ff0176df7edceda5 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# zfec -- a fast C implementation of Reed-Solomon erasure coding with
# command-line, C, and Python interfaces
from __future__ import print_function
import os, sys, argparse
from zfec import filefec
from zfec import __version__ as libversion
__version__ = libversion
# zfec -- fast forward error correction library with Python interface
#
# Copyright (C) 2007 Allmydata, Inc.
# Author: Zooko Wilcox-O'Hearn
#
# This file is part of zfec.
#
# See README.rst for licensing information.
| 35.305556 | 149 | 0.663257 |
f327633efe0ce2c9e557f60f7f82ada184c4948d | 576 | py | Python | bottomline/blweb/migrations/0012_vehicleconfig_color.py | mcm219/BottomLine | db82eef403c79bffa3864c4db6bc336632abaca5 | [
"MIT"
] | null | null | null | bottomline/blweb/migrations/0012_vehicleconfig_color.py | mcm219/BottomLine | db82eef403c79bffa3864c4db6bc336632abaca5 | [
"MIT"
] | 1 | 2021-06-14T02:20:40.000Z | 2021-06-14T02:20:40.000Z | bottomline/blweb/migrations/0012_vehicleconfig_color.py | mcm219/BottomLine | db82eef403c79bffa3864c4db6bc336632abaca5 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-07-10 03:16
from django.db import migrations, models
import django.db.models.deletion
| 28.8 | 211 | 0.663194 |
b823df535990bd76d900f1381be1d7cc948408cf | 11,634 | py | Python | src/acs_3dpsf.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 2 | 2019-11-18T12:51:09.000Z | 2019-12-11T03:13:51.000Z | src/acs_3dpsf.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 5 | 2017-06-09T10:06:27.000Z | 2019-07-19T11:28:18.000Z | src/acs_3dpsf.py | davidharvey1986/rrg | 26b4658f14279af21af1a61d57e9936daf315a71 | [
"MIT"
] | 2 | 2017-07-19T15:48:33.000Z | 2017-08-09T16:07:20.000Z | import numpy as np
from . import acs_map_xy as acs_map
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
| 39.979381 | 122 | 0.565068 |
b824108791760c3044be86fca8557a92a30f2d41 | 27,400 | py | Python | gsf/function_class.py | mtakahiro/gsf | c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278 | [
"MIT"
] | 9 | 2019-08-23T19:00:54.000Z | 2022-02-23T17:57:41.000Z | gsf/function_class.py | mtakahiro/gsf | c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278 | [
"MIT"
] | 17 | 2020-05-22T17:41:15.000Z | 2022-03-20T03:32:48.000Z | gsf/function_class.py | mtakahiro/gsf | c09c5d32a45b0277c469d2d3cb2f8c11f1fc0278 | [
"MIT"
] | 1 | 2020-02-01T22:55:37.000Z | 2020-02-01T22:55:37.000Z | import numpy as np
import sys
import scipy.interpolate as interpolate
import asdf
from .function import *
from .basic_func import Basic
| 31.823461 | 123 | 0.464964 |
b825f9f00f6901c5d7cf23cfa47cb3197933eecd | 1,855 | py | Python | loadbalanceRL/utils/exceptions.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 11 | 2018-10-29T06:50:43.000Z | 2022-03-28T14:26:09.000Z | loadbalanceRL/utils/exceptions.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 1 | 2022-03-01T13:46:25.000Z | 2022-03-01T13:46:25.000Z | loadbalanceRL/utils/exceptions.py | fqzhou/LoadBalanceControl-RL | 689eec3b3b27e121aa45d2793e411f1863f6fc0b | [
"MIT"
] | 6 | 2019-02-05T20:01:53.000Z | 2020-09-04T12:30:00.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Definition of all Rainman2 exceptions
"""
__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
__date__ = 'Wednesday, February 14th 2018, 11:38:08 am'
| 21.079545 | 77 | 0.698652 |
b826697289acc6bb7f13171d32f3b15f39b8d6bc | 411 | py | Python | mundo-1/ex-014.py | guilhermesm28/python-curso-em-video | 50ab4e76b1903e62d4daa579699c5908329b26c8 | [
"MIT"
] | null | null | null | mundo-1/ex-014.py | guilhermesm28/python-curso-em-video | 50ab4e76b1903e62d4daa579699c5908329b26c8 | [
"MIT"
] | null | null | null | mundo-1/ex-014.py | guilhermesm28/python-curso-em-video | 50ab4e76b1903e62d4daa579699c5908329b26c8 | [
"MIT"
] | null | null | null | # Escreva um programa que converta uma temperatura digitando em graus Celsius e converta para graus Fahrenheit.
print('-' * 100)
print('{: ^100}'.format('EXERCCIO 014 - CONVERSOR DE TEMPERATURAS'))
print('-' * 100)
c = float(input('Informe a temperatura em C: '))
f = ((9 * c) / 5) + 32
print(f'A temperatura de {c:.2f}C corresponde a {f:.2f}F.')
print('-' * 100)
input('Pressione ENTER para sair...')
| 27.4 | 111 | 0.6691 |
b828874e2b78ad751bb04188c59615f7f159fd1a | 848 | py | Python | access_apps/controllers/main.py | aaltinisik/access-addons | 933eef8b7abd5d2ac0b07b270271cb5aed3b23b6 | [
"MIT"
] | null | null | null | access_apps/controllers/main.py | aaltinisik/access-addons | 933eef8b7abd5d2ac0b07b270271cb5aed3b23b6 | [
"MIT"
] | null | null | null | access_apps/controllers/main.py | aaltinisik/access-addons | 933eef8b7abd5d2ac0b07b270271cb5aed3b23b6 | [
"MIT"
] | 1 | 2021-02-15T03:14:52.000Z | 2021-02-15T03:14:52.000Z | from odoo import SUPERUSER_ID, http
from odoo.http import request
from odoo.addons.web_settings_dashboard.controllers.main import WebSettingsDashboard
| 44.631579 | 105 | 0.740566 |
b829ed55de73d723e9907e52986b8d92ed93231d | 686 | py | Python | dev/test.py | SmartBadge/SmartBadge | 7bddc1ec230bcf5fa6185999b0b0c0e448528629 | [
"MIT"
] | null | null | null | dev/test.py | SmartBadge/SmartBadge | 7bddc1ec230bcf5fa6185999b0b0c0e448528629 | [
"MIT"
] | null | null | null | dev/test.py | SmartBadge/SmartBadge | 7bddc1ec230bcf5fa6185999b0b0c0e448528629 | [
"MIT"
] | null | null | null | import game as g
import time as t
r = g.Game(6,6, debugger = False)
player1 = g.Sprite("Player", 1, 2)
player2 = g.Sprite("Player", 1, 2)
ball = g.Sprite("ball", 1, 1)
start_game()
wait(4)
r.move_sprite(ball,-1,-1)
r.move_sprite(player1, 0,-2)
r.move_sprite(player1, 0, 3)
r.print()
while(ball.x < 7):
r.move_sprite(ball, 1,1)
print("oi")
wait(4)
| 17.589744 | 47 | 0.610787 |
b82a954625c33b4891411d888f3fa383b4a7acc9 | 662 | py | Python | itermembers.py | hanshuaigithub/pyrogram_project | 539ebbfa00d5381b4495450580f9c77ee8be9d11 | [
"MIT"
] | null | null | null | itermembers.py | hanshuaigithub/pyrogram_project | 539ebbfa00d5381b4495450580f9c77ee8be9d11 | [
"MIT"
] | null | null | null | itermembers.py | hanshuaigithub/pyrogram_project | 539ebbfa00d5381b4495450580f9c77ee8be9d11 | [
"MIT"
] | null | null | null | from pyrogram import Client
import json
api_id = 2763716
api_hash = "d4c2d2e53efe8fbb71f0d64deb84b3da"
app = Client("+639277144517", api_id, api_hash)
target = "cnsex8" # Target channel/supergroup sigui588 cnsex8
with app:
members = app.iter_chat_members(target)
print(f"Chanel members counts : {len(members)}")
members_arr = []
for i in range(0,len(members)):
member = members[i]
members_arr.append({'id':member.user.id, 'first_name':member.user.first_name})
members_json_str = json.dumps(members_arr)
members_open = open('members.json', 'w')
members_open.write(members_json_str)
members_open.close()
| 26.48 | 86 | 0.706949 |
b82b18f5c487a5e8f40d5acca12f69514df44f14 | 590 | py | Python | FisherExactTest/__version__.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | FisherExactTest/__version__.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | FisherExactTest/__version__.py | Ae-Mc/Fisher | 166e3ac68e304ed7418393d6a7717dd6f7032c15 | [
"MIT"
] | null | null | null | __title__ = "FisherExactTest"
__version__ = "1.0.1"
__author__ = "Ae-Mc"
__author_email__ = "ae_mc@mail.ru"
__description__ = "Two tailed Fisher's exact test wrote in pure Python"
__url__ = "https://github.com/Ae-Mc/Fisher"
__classifiers__ = [
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Utilities"
]
| 34.705882 | 71 | 0.661017 |
b82b81bc5dbddba7f6dc9e8f6bf26affa5968f16 | 875 | py | Python | mimosa/pylib/mimosa_reader.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | mimosa/pylib/mimosa_reader.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | mimosa/pylib/mimosa_reader.py | rafelafrance/traiter_mimosa | 7a248b610747d5d0e5ce5473953cbdc90d336aae | [
"MIT"
] | null | null | null | """Parse PDFs about mimosas."""
from tqdm import tqdm
from . import mimosa_pipeline
from . import sentence_pipeline
from .parsed_data import Datum
| 25 | 82 | 0.584 |
b82ba735b06701323afbbc1adb2108b231b98638 | 1,647 | py | Python | CxMetrics/calcMetrics.py | Danielhiversen/pyCustusx | 5a7fca51d885ad30f4db46ab725485d86fb2d17a | [
"MIT"
] | null | null | null | CxMetrics/calcMetrics.py | Danielhiversen/pyCustusx | 5a7fca51d885ad30f4db46ab725485d86fb2d17a | [
"MIT"
] | null | null | null | CxMetrics/calcMetrics.py | Danielhiversen/pyCustusx | 5a7fca51d885ad30f4db46ab725485d86fb2d17a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 24 11:39:42 2015
@author: dahoiv
"""
import numpy as np
if __name__ == '__main__':
filePath1="/home/dahoiv/disk/data/brainshift/079_Tumor.cx3/Logs/metrics_a.txt"
(mr_points_1,us_points_1)=loadMetrics(filePath1)
calcDist(mr_points_1,us_points_1)
filePath2="/home/dahoiv/disk/data/brainshift/079_Tumor.cx3/Logs/metrics_b.txt"
(mr_points_2,us_points_2)=loadMetrics(filePath2)
calcDist(mr_points_2,us_points_2)
| 32.294118 | 82 | 0.571342 |
b82dae5c13359feb72d2a0825f3801687d516058 | 118 | py | Python | twodspec/extern/__init__.py | hypergravity/songcn | e2b071c932720d02e5f085884c83c46baba7802d | [
"MIT"
] | null | null | null | twodspec/extern/__init__.py | hypergravity/songcn | e2b071c932720d02e5f085884c83c46baba7802d | [
"MIT"
] | null | null | null | twodspec/extern/__init__.py | hypergravity/songcn | e2b071c932720d02e5f085884c83c46baba7802d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__all__ = ['interpolate', 'polynomial', 'SmoothSpline']
from .interpolate import SmoothSpline
| 29.5 | 55 | 0.70339 |
b82f6fabf22a5cbcfa7dd2e7ea076e9e772feb3f | 3,286 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/Transport/tube.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Methods-Weights-Correlations-Tube_Wing
# tube.py
#
# Created: Jan 2014, A. Wendorff
# Modified: Feb 2014, A. Wendorff
# Feb 2016, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units
# ----------------------------------------------------------------------
# Tube
# ----------------------------------------------------------------------
## @ingroup Methods-Weights-Correlations-Tube_Wing
def tube(vehicle, fuse, wt_wing, wt_propulsion):
""" Calculate the weight of a fuselage in the state tube and wing configuration
Assumptions:
fuselage in a standard wing and tube configuration
Source:
N/A
Inputs:
fuse.areas.wetted - fuselage wetted area [meters**2]
fuse.differential_pressure- Maximum fuselage pressure differential [Pascal]
fuse.width - width of the fuselage [meters]
fuse.heights.maximum - height of the fuselage [meters]
fuse.lengths.total - length of the fuselage [meters]
vehicle.envelope.limit_load - limit load factor at zero fuel weight of the aircraft [dimensionless]
vehicle.mass_properties.max_zero_fuel - zero fuel weight of the aircraft [kilograms]
wt_wing - weight of the wing of the aircraft [kilograms]
wt_propulsion - weight of the entire propulsion system of the aircraft [kilograms]
vehicle.wings.main_wing.chords.root - wing root chord [meters]
Outputs:
weight - weight of the fuselage [kilograms]
Properties Used:
N/A
"""
# unpack inputs
diff_p = fuse.differential_pressure / (Units.force_pound / Units.ft ** 2) # Convert Pascals to lbs/ square ft
width = fuse.width / Units.ft # Convert meters to ft
height = fuse.heights.maximum / Units.ft # Convert meters to ft
# setup
length = fuse.lengths.total - vehicle.wings.main_wing.chords.root / 2.
length = length / Units.ft # Convert meters to ft
weight = (vehicle.mass_properties.max_zero_fuel - wt_wing - wt_propulsion) / Units.lb # Convert kg to lbs
area = fuse.areas.wetted / Units.ft ** 2 # Convert square meters to square ft
# process
# Calculate fuselage indices
I_p = 1.5 * 10 ** -3. * diff_p * width
I_b = 1.91 * 10 ** -4. * vehicle.envelope.limit_load * weight * length / height ** 2.
if I_p > I_b:
I_f = I_p
else:
I_f = (I_p ** 2. + I_b ** 2.) / (2. * I_b)
# Calculate weight of wing for traditional aircraft vertical tail without rudder
fuselage_weight = ((1.051 + 0.102 * I_f) * area) * Units.lb # Convert from lbs to kg
return fuselage_weight
| 45.013699 | 123 | 0.5 |
b82ff818b8e67f8cae3f7360326180bd7e14f756 | 3,837 | py | Python | Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py | bognikol/Eleusis | ee518ede31893689eb6d3c5539e0bd757aeb0294 | [
"MIT"
] | 4 | 2019-05-31T19:55:23.000Z | 2020-10-27T10:00:32.000Z | Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py | bognikol/Eleusis | ee518ede31893689eb6d3c5539e0bd757aeb0294 | [
"MIT"
] | null | null | null | Dependencies/02_macOS/40_gtk+/x64/lib/gobject-introspection/giscanner/annotationmain.py | bognikol/Eleusis | ee518ede31893689eb6d3c5539e0bd757aeb0294 | [
"MIT"
] | 3 | 2019-04-29T14:09:38.000Z | 2020-10-27T10:00:33.000Z | # -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2010 Johan Dahlin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import optparse
import codecs
from contextlib import contextmanager
from giscanner import message
from giscanner.annotationparser import GtkDocCommentBlockParser, GtkDocCommentBlockWriter
from giscanner.scannermain import (get_preprocessor_option_group,
create_source_scanner,
process_packages)
| 36.542857 | 89 | 0.65963 |
b830ed284183da0f588ffc8416e532df6cb6e5aa | 1,219 | py | Python | src/tools/json2db.py | chobocho/ChoboMemo2 | d3883e20d7c69c48477d1178120e0e32c062b27f | [
"MIT"
] | null | null | null | src/tools/json2db.py | chobocho/ChoboMemo2 | d3883e20d7c69c48477d1178120e0e32c062b27f | [
"MIT"
] | null | null | null | src/tools/json2db.py | chobocho/ChoboMemo2 | d3883e20d7c69c48477d1178120e0e32c062b27f | [
"MIT"
] | null | null | null | import os
import sys
import json
from manager import dbmanager
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Usage: json2db json_file db_file")
else:
main(sys.argv[1:])
| 23 | 56 | 0.525021 |
b8317e86fff68e0107933de518fdf61bc7534d00 | 171 | py | Python | Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# This modifier sets replaces the default pattern recognition with mkFit for tobTecStep
trackingMkFitTobTecStep = cms.Modifier()
| 34.2 | 87 | 0.836257 |
b8327398b4c50fa047db432d9765d37a5dd0095d | 3,772 | py | Python | config/config.py | rossja/Docker-Minecraft-Overviewer | bb2285f5af723a74b1365bbcbe284b9e5ce85245 | [
"MIT"
] | 12 | 2019-12-14T13:58:44.000Z | 2022-03-12T10:43:43.000Z | config/config.py | rossja/Docker-Minecraft-Overviewer | bb2285f5af723a74b1365bbcbe284b9e5ce85245 | [
"MIT"
] | 2 | 2019-02-04T09:46:10.000Z | 2019-02-05T10:05:56.000Z | config/config.py | rossja/Docker-Minecraft-Overviewer | bb2285f5af723a74b1365bbcbe284b9e5ce85245 | [
"MIT"
] | 5 | 2020-01-29T20:38:35.000Z | 2021-12-18T19:56:49.000Z | # My config.py script for overviewer:
worlds["pudel"] = "/tmp/server/world/"
worlds["pudel_nether"] = "/tmp/server/world_nether/"
texturepath = "/tmp/overviewer/client.jar"
processes = 2
outputdir = "/tmp/export/"
my_cave = [Base(), EdgeLines(), Cave(only_lit=True), DepthTinting()]
my_nowater = [Base(), EdgeLines(), NoFluids()]
defaultzoom = 5
my_crop = (-1200, -1600, 900, 400)
thingsToMaker = [
dict(name="Players", filterFunction=playerIcons),
dict(name="Beds", filterFunction=playerSpawns),
dict(name="Signs", filterFunction=signFilter),
#dict(name="Chests", filterFunction=chestFilter)
]
renders["day_complete_smooth"] = {
'world': 'pudel',
'title': 'Day',
'rendermode': 'smooth_lighting',
"dimension": "overworld",
'markers': thingsToMaker
}
renders["night_complete"] = {
'world': 'pudel',
'title': 'Night',
'rendermode': 'smooth_night',
"dimension": "overworld",
'markers': thingsToMaker
}
renders["cave_complete"] = {
'world': 'pudel',
'title': 'Cave',
'rendermode': my_cave,
"dimension": "overworld",
'markers': thingsToMaker
}
# Railoverlay
renders["rails"] = {
'world': 'pudel',
'title': 'Rails',
"dimension": "overworld",
'rendermode': [ClearBase(),
MineralOverlay(minerals=[
(66, (255,0,0)),
(27, (255,0,0)),
(28, (255,0,0))
]), EdgeLines()],
"overlay": ["day_complete_smooth","night_complete","cave_complete"]
}
'''
# Pistons and Observer
renders["farms"] = {
'world': 'pudel',
'title': 'Farms',
"dimension": "overworld",
'rendermode': [ClearBase(),
MineralOverlay(minerals=[
(29, (255,0,0)),
(33, (255,0,0)),
(34, (255,0,0)),
(154, (255,0,0)),
(218, (255,0,0))
]), EdgeLines()],
"overlay": ["day_complete_smooth","night_complete","cave_complete"]
}
'''
'''
renders["nether"] = {
"world": "pudel_nether",
"title": "Nether",
"rendermode": "nether",
"dimension": "nether",
'crop': (-200, -200, 200, 200)
}
'''
# Import the Observers
from .observer import MultiplexingObserver, ProgressBarObserver, JSObserver
# Construct the ProgressBarObserver
pbo = ProgressBarObserver()
# Construct a basic JSObserver
jsObserver = JSObserver(outputdir, 30)
# Set the observer to a MultiplexingObserver
observer = MultiplexingObserver(pbo, jsObserver)
'''
renders["day_smooth"] = {
'world': 'pudel',
'title': 'Day',
'rendermode': 'smooth_lighting',
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
renders["night_smooth"] = {
'world': 'pudel',
'title': 'Night',
'rendermode': 'smooth_night',
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
renders["cave"] = {
'world': 'pudel',
'title': 'Cave',
'rendermode': my_cave,
"dimension": "overworld",
'crop': my_crop,
'markers': thingsToMaker
}
'''
| 26.013793 | 82 | 0.593054 |
b832db34004caeef160a328496546197b3b692d7 | 1,764 | py | Python | SurveyManager/survey/models.py | javiervar/SurveyManager | bbe2ed356654c32586c587f58c609c8ce014e96b | [
"MIT"
] | null | null | null | SurveyManager/survey/models.py | javiervar/SurveyManager | bbe2ed356654c32586c587f58c609c8ce014e96b | [
"MIT"
] | null | null | null | SurveyManager/survey/models.py | javiervar/SurveyManager | bbe2ed356654c32586c587f58c609c8ce014e96b | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 30.413793 | 92 | 0.786848 |
b8361f78932036e9f23fbe61c22ab2ba8ac449f7 | 3,150 | py | Python | pythainlp/corpus/__init__.py | petetanru/pythainlp | 83fa999336ce8c7f7b5431fc2fc41c53c5cb7604 | [
"Apache-2.0"
] | 1 | 2018-10-10T19:01:43.000Z | 2018-10-10T19:01:43.000Z | pythainlp/corpus/__init__.py | Khawoat6/pythainlp | 05979c0ac9a596bb7957fb8a050a32c87ea098e8 | [
"Apache-2.0"
] | null | null | null | pythainlp/corpus/__init__.py | Khawoat6/pythainlp | 05979c0ac9a596bb7957fb8a050a32c87ea098e8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import,unicode_literals
from pythainlp.tools import get_path_db,get_path_data
from tinydb import TinyDB,Query
from future.moves.urllib.request import urlopen
from tqdm import tqdm
import requests
import os
import math
import requests
from nltk.corpus import names
#__all__ = ["thaipos", "thaiword","alphabet","tone","country","wordnet"]
path_db_=get_path_db()
def download_(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(urlopen(url).info().get('Content-Length', -1))
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=url.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(get_path_data(dst), 'wb')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
#return file_size | 40.384615 | 124 | 0.586032 |
b83a4b8131231e8ffeccb27881d8404fa73c602e | 649 | py | Python | dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py | wenxinjie/leetcode | c459a01040c8fe0783e15a16b8d7cca4baf4612a | [
"Apache-2.0"
] | null | null | null | dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py | wenxinjie/leetcode | c459a01040c8fe0783e15a16b8d7cca4baf4612a | [
"Apache-2.0"
] | null | null | null | dynamic programming/python/leetcode303_Range_Sum_Query_Immutable.py | wenxinjie/leetcode | c459a01040c8fe0783e15a16b8d7cca4baf4612a | [
"Apache-2.0"
] | null | null | null | # Given an integer array nums, find the sum of the elements between indices i and j (i j), inclusive.
# Example:
# Given nums = [-2, 0, 3, -5, 2, -1]
# sumRange(0, 2) -> 1
# sumRange(2, 5) -> -1
# sumRange(0, 5) -> -3
# Time: O(n)
# Space: O(n)
# Difficulty: easy | 20.935484 | 103 | 0.497689 |
b83d0a4d0944019fd7f267fd6043e0bc64496350 | 8,257 | py | Python | py/garage/garage/asyncs/messaging/reqrep.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/garage/garage/asyncs/messaging/reqrep.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/garage/garage/asyncs/messaging/reqrep.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | __all__ = [
'Terminated',
'Unavailable',
'client',
'server',
]
import logging
import time
import curio
import nanomsg as nn
from garage import asyncs
from garage.assertions import ASSERT
from garage.asyncs import futures
from garage.asyncs import queues
LOG = logging.getLogger(__name__)
def _transform_error(exc):
if isinstance(exc, curio.TaskTimeout):
new_exc = Unavailable()
new_exc.__cause__ = exc
return new_exc
elif isinstance(exc, (nn.EBADF, queues.Closed)):
new_exc = Terminated()
new_exc.__cause__ = exc
return new_exc
else:
return exc
| 33.294355 | 76 | 0.608696 |
b83f80c89541762b358261a94161b094315b1f52 | 1,412 | py | Python | fasm_utils/segbits.py | antmicro/quicklogic-fasm-utils | 83c867e3269e1186b9bcd71767bb810c82b3905d | [
"Apache-2.0"
] | null | null | null | fasm_utils/segbits.py | antmicro/quicklogic-fasm-utils | 83c867e3269e1186b9bcd71767bb810c82b3905d | [
"Apache-2.0"
] | 1 | 2021-06-25T15:38:43.000Z | 2021-06-25T15:38:43.000Z | fasm_utils/segbits.py | antmicro/quicklogic-fasm-utils | 83c867e3269e1186b9bcd71767bb810c82b3905d | [
"Apache-2.0"
] | 1 | 2020-05-18T12:04:40.000Z | 2020-05-18T12:04:40.000Z | from collections import namedtuple
Bit = namedtuple('Bit', 'x y isset')
def parsebit(val: str):
"""Parses bit notation for .db files to Bit class.
Parameters
----------
val: str
A string containing .db bit notation, i.e. "!012_23" => (12, 23, False)
Returns
-------
Bit: A named tuple Bit with parsed word column, word bit and value
"""
isset = True
# Default is 0. Skip explicit call outs
if val[0] == '!':
isset = False
val = val[1:]
# 28_05 => 28, 05
seg_word_column, word_bit_n = val.split('_')
return Bit(
x=int(seg_word_column),
y=int(word_bit_n),
isset=isset,
)
def read_segbits_line(line: str):
'''Parses segbits from line.'''
linestrip = line.strip()
if linestrip:
parts = linestrip.split(' ')
assert len(parts) > 1
return parts[0], [parsebit(val) for val in parts[1:]]
def read_segbits_file(filepath: str):
"""Parses bits from the lines of the .db file.
Parameters
----------
f: str
A path to .db file.
Returns
-------
dict of str: Bit: Dictionary containing parsed .db file.
"""
segbits = {}
with open(filepath, 'r') as f:
for l in f:
# CLBLM_L.SLICEL_X1.ALUT.INIT[10] 29_14
name, bits = read_segbits_line(l)
segbits[name] = bits
return segbits
| 21.723077 | 79 | 0.563739 |
b84015aceb9a117ef3d45102bccf99b010e44535 | 927 | py | Python | docs/_api/_build/delira/logging/visdom_backend.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | 1 | 2019-10-03T21:00:20.000Z | 2019-10-03T21:00:20.000Z | docs/_api/_build/delira/logging/visdom_backend.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | null | null | null | docs/_api/_build/delira/logging/visdom_backend.py | gedoensmax/delira | 545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6 | [
"BSD-2-Clause"
] | null | null | null | import tensorboardX
from threading import Event
from queue import Queue
from delira.logging.writer_backend import WriterLoggingBackend
| 23.769231 | 65 | 0.593312 |
b842118c3400dc6b3842e04f1499ebec381bda43 | 7,706 | py | Python | node/substitute.py | treverson/coin-buildimage | a868250733f65140a6d11a5fbd3b4a7e1509f8d5 | [
"MIT"
] | 1 | 2018-09-28T11:51:06.000Z | 2018-09-28T11:51:06.000Z | node/substitute.py | treverson/coin-buildimage | a868250733f65140a6d11a5fbd3b4a7e1509f8d5 | [
"MIT"
] | null | null | null | node/substitute.py | treverson/coin-buildimage | a868250733f65140a6d11a5fbd3b4a7e1509f8d5 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3.5
# vim:ts=4:sw=4:ai:et:si:sts=4
import argparse
import json
import re
import os
import uuid
import shutil
import sys
import requests
filterRe = re.compile(r'(?P<block>^%=(?P<mode>.)?\s+(?P<label>.*?)\s+(?P<value>[^\s\n$]+)(?:\s*.*?)?^(?P<section>.*?)^=%.*?$)', re.M | re.S)
subItemRe = re.compile(r'@_@')
parser = argparse.ArgumentParser(description="Substitute in variables")
parser.add_argument('--coin', '-c', required=True, help="Which coin")
parser.add_argument('--nodaemon', '-D', action="store_false", dest="daemon",
help="Don't copy daemon")
parser.add_argument('--pool', '-p', action="store_true",
help="Grab pool wallet")
parser.add_argument('--explorer', '-e', action="store_true",
help="Use explorer")
args = parser.parse_args()
buildDir = os.path.join("build", args.coin)
# First read the config file
with open("config/%s.json" % args.coin, "r") as f:
config = json.load(f)
config = {key.lower(): value for (key, value) in config.items()}
if args.pool:
config["poolnode"] = 1
config.pop("grabwallet", None)
if args.explorer:
config['useexplorer'] = 1
else:
config['useexplorer'] = 0
subst = convertConfig(config)
if args.coin == 'coiniumserv' or args.coin == 'yiimp':
result = requests.get("http://169.254.169.254/latest/meta-data/local-ipv4")
subst.update(convertConfig({"hostip": result.text}))
else:
# Create a config file
outconfig = {
"daemon": 1,
"dns": 1,
"server": 1,
"listen": 1,
"rpcport": config['rpcport'],
"rpcuser": "%srpc" % config['coinname'],
}
if not args.pool:
rpcallowip = "127.0.0.1"
rpcpassword = str(uuid.uuid4())
else:
rpcallowip = ["127.0.0.1", "172.17.0.*"]
rpcpassword = "pool-%s" % args.coin
outconfig["rpcallowip"] = rpcallowip
outconfig["rpcpassword"] = rpcpassword
addnodes = config.get('addnodes', [])
if not isinstance(addnodes, list):
addnodes = [addnodes]
if addnodes:
outconfig['addnode'] = addnodes
# Add the config setting to the mapping
subst.update(convertConfig(outconfig))
conffile = os.path.join(buildDir, "%s.conf" % config['coinname'])
with open(conffile, "w") as f:
for (key, values) in sorted(outconfig.items()):
if not isinstance(values, list):
values = [values]
for value in values:
f.write("%s=%s\n" % (key, value))
# Create the Dockerfile
if args.coin == 'coiniumserv':
infile = "Dockerfile.coiniumserv.in"
elif args.coin == 'yiimp':
infile = "Dockerfile.yiimp.in"
else:
infile = "Dockerfile.in"
outfile = os.path.join(buildDir, "Dockerfile")
substituteFile(infile, outfile, subst)
# Create the node run Dockerfile
infile = "Dockerfile.node.in"
if args.pool:
outfile = os.path.join(buildDir, "Dockerfile.pool")
elif args.explorer:
outfile = os.path.join(buildDir, "Dockerfile.explorer")
else:
outfile = os.path.join(buildDir, "Dockerfile.node")
substituteFile(infile, outfile, subst)
# Create the startup script
if args.coin == 'coiniumserv':
infile = "startup.sh-coiniumserv.in"
elif args.coin == 'yiimp':
infile = "startup.sh-yiimp.in"
else:
infile = "startup.sh.in"
if args.pool:
suffix = "-pool.sh"
else:
suffix = "-node.sh"
outfile = os.path.join(buildDir, "startup%s" % suffix)
substituteFile(infile, outfile, subst)
# Create the ports file
ports = []
port = config.get('p2pport', None)
if port:
ports.append(port)
port = config.get('explorerport', None)
useexplorer = config.get('useexplorer', None)
if port and useexplorer:
ports.append(port)
port = config.get('p2poolport', None)
usep2pool = config.get('usep2pool', None)
if port and usep2pool:
ports.append(port)
port = config.get('poolport', None)
if port:
ports.append(port)
if args.pool:
port = config.get("rpcport", None)
if port:
ports.append(port)
poolports = config.get('stratumports', None)
if poolports:
if not isinstance(poolports, list):
poolports = [poolports]
ports.extend(poolports)
ports = list(map(lambda x: "-p %s:%s" % (x, x), ports))
links = config.get('links', None)
if links:
links = list(map(lambda x: "--link %s" % x, links))
ports.extend(links)
ports = " ".join(ports)
outfile = os.path.join(buildDir, "ports.txt")
with open(outfile, "w") as f:
f.write(ports)
# Copy over the daemon
if args.daemon and args.coin != 'coiniumserv' and args.coin != 'yiimp':
infile = os.path.join("..", "build", "artifacts", config["coinname"],
"linux", config['daemonname'])
copyfile(args.coin, infile, config['daemonname'])
if config.get('installexplorer', False):
# Create the Explorer settings file
infile = "explorer-settings.json.in"
outfile = os.path.join(buildDir, "explorer-settings.json")
substituteFile(infile, outfile, subst)
# Create the Explorer layout template
infile = "explorer-layout.jade.in"
outfile = os.path.join(buildDir, "explorer-layout.jade")
substituteFile(infile, outfile, subst)
# Copy over the mongo init script and the crontab for explorer
copyfile(args.coin, "explorer.mongo")
copyfile(args.coin, "explorer-crontab")
## Copy the nodejs archive
copyfile(args.coin, "build/cache/node-v8.7.0-linux-x64.tar.xz",
"node-v8.7.0-linux-x64.tar.xz")
# Copy the sudoers.d file
copyfile(args.coin, "sudoers-coinnode")
# Copy the coin-cli script
copyfile(args.coin, "coin-cli")
if config.get('copyawscreds', False):
copyfile(args.coin, os.path.expanduser("~/.aws/credentials"),
"aws-credentials")
| 29.189394 | 140 | 0.616922 |
b842ca4df0f85a27ac428ca98c508bc0fd8473bb | 379 | py | Python | pages/page1.py | kalimuthu123/dash-app | 90bf4c570abb1770ea0f082989e8f97d62b98346 | [
"MIT"
] | null | null | null | pages/page1.py | kalimuthu123/dash-app | 90bf4c570abb1770ea0f082989e8f97d62b98346 | [
"MIT"
] | null | null | null | pages/page1.py | kalimuthu123/dash-app | 90bf4c570abb1770ea0f082989e8f97d62b98346 | [
"MIT"
] | null | null | null | import dash_html_components as html
from utils import Header | 25.266667 | 72 | 0.564644 |
b8431428845abd267d2447bb2c266f7ad3458a5b | 318 | py | Python | polrev/offices/admin/office_admin.py | polrev-github/polrev-django | 99108ace1a5307b14c3eccb424a9f9616e8c02ae | [
"MIT"
] | 1 | 2021-12-10T05:54:16.000Z | 2021-12-10T05:54:16.000Z | polrev/offices/admin/office_admin.py | polrev-github/polrev-django | 99108ace1a5307b14c3eccb424a9f9616e8c02ae | [
"MIT"
] | null | null | null | polrev/offices/admin/office_admin.py | polrev-github/polrev-django | 99108ace1a5307b14c3eccb424a9f9616e8c02ae | [
"MIT"
] | null | null | null | from django.contrib import admin
from offices.models import OfficeType, Office
admin.site.register(OfficeType, OfficeTypeAdmin)
'''
class OfficeAdmin(admin.ModelAdmin):
search_fields = ['title']
admin.site.register(Office, OfficeAdmin)
''' | 22.714286 | 48 | 0.77044 |
b84346e9d501185aa45dba40c444e9fe20860224 | 6,511 | py | Python | tests/spec/test_schema_parser.py | tclh123/aio-openapi | 7c63eb628b7735501508aea6c83e458715fb070b | [
"BSD-3-Clause"
] | 19 | 2019-03-04T22:50:38.000Z | 2022-03-02T09:28:17.000Z | tests/spec/test_schema_parser.py | tclh123/aio-openapi | 7c63eb628b7735501508aea6c83e458715fb070b | [
"BSD-3-Clause"
] | 4 | 2019-03-04T23:03:08.000Z | 2022-01-16T11:32:54.000Z | tests/spec/test_schema_parser.py | tclh123/aio-openapi | 7c63eb628b7735501508aea6c83e458715fb070b | [
"BSD-3-Clause"
] | 3 | 2020-05-20T17:43:08.000Z | 2021-10-06T10:47:41.000Z | from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Dict, List
import pytest
from openapi.data.fields import (
as_field,
bool_field,
data_field,
date_time_field,
number_field,
)
from openapi.exc import InvalidSpecException, InvalidTypeException
from openapi.spec import SchemaParser
| 30.283721 | 85 | 0.587775 |