hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce3a314613ced74eaa69ae3cdf828f6c6582b325 | 6,875 | py | Python | scripts/jenkins_console_log_search.py | hrajput89/kv_engine | 33fb1ab2c9787f55555e5f7edea38807b3dbc371 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T07:33:09.000Z | 2019-06-13T07:33:09.000Z | scripts/jenkins_console_log_search.py | paolococchi/kv_engine | 40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45 | [
"BSD-3-Clause"
] | null | null | null | scripts/jenkins_console_log_search.py | paolococchi/kv_engine | 40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45 | [
"BSD-3-Clause"
] | 1 | 2020-01-15T16:52:37.000Z | 2020-01-15T16:52:37.000Z | #!/usr/bin/env python3
"""
Copyright 2018 Couchbase, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This short script uses curl requests to search the last 100 builds of
a jenkins job to find recurring errors, written in Python3.
It results in printing a list of links to builds that match the search
As the requests package is not included within kv, you will need to either
download this package yourself or reference the one included inside
couchbase-cli.
This is currently limited to searching for log patterns contained within
one line of the logs, as the search checks line-by-line.
Usage: python jenkins_console_log_search.py -j <job-name> -s <RegEx Search term>
"""
import argparse
import re
import requests
import sys
import time
# Search for searchParameter in logText, handling either a string or a RegEx inside
# searchPattern depending on whether the regex flag is True, and assuming that logText
# is line separated by \n's
def search(logText, searchPattern, isRegex):
    """Search logText for searchPattern and return the matching terms.

    Args:
        logText (str): full console log text, lines separated by '\n'.
        searchPattern: a compiled regular expression (when isRegex is
            True) or a plain string (when isRegex is False).
        isRegex (bool): selects regex matching against the whole text,
            or a case-sensitive substring search performed line by line.

    Returns:
        list: for a regex, each occurrence's captured groups (or the
        whole match when the pattern has no groups); for a plain string,
        every matching line with the search term wrapped in ASCII bold
        formatting.
    """
    output = []
    if isRegex:
        # Check regex against whole text.  Use the `searchPattern`
        # parameter here -- the original referenced the module-level
        # `pattern` global instead, which only worked by accident
        # because callers passed that same object in.
        for find in re.finditer(searchPattern, logText):
            if find.groups():
                # Report each captured group individually.
                output.extend(find.groups())
            else:
                # No capture groups: report the whole match.
                output.append(find.group(0))
    else:  # Not a RegEx: plain substring search, line by line
        for line in logText.split('\n'):
            if line.find(searchPattern) != -1:
                # Wrap the search term in ASCII formatting to make it bold
                output.append(line.replace(searchPattern, ASCIIFormat.BOLD
                                           + searchPattern + ASCIIFormat.END))
    return output
# --- Start Main Script ---
# Create argparser so the user can specify which job to search
argParser = argparse.ArgumentParser()
argParser.add_argument('--job', '-j', type=str,
help='The cv job to query. '
"Common jobs are: 'kv_engine-ASan-UBSan-master', "
"'kv_engine-clang_analyzer-master', "
"'kv_engine-linux-master', "
"'kv_engine-threadsanitizer-master', "
"'kv_engine-windows-master', "
"'kv_engine-clang_format', "
"'kv-engine-cv-perf'", required=True)
argParser.add_argument('--search', '-s', type=str, required=True,
help='The string to search the logs for in a RegEx format')
argParser.add_argument('--build-no', '-b', type=int,
help='The build number of cv job to check backwards from. '
'0 (default) fetches latest build number', default=0)
argParser.add_argument('--no-of-builds', '-n', type=int,
help='The number of builds to check back', default=100)
argParser.add_argument('--format', '-f', default="plain", type=str,
help="Select the format to print results. "
"Available formats are: "
"plain (default), log-line, jira")
argParser.add_argument('--url-prefix', '-u', type=str, default='cv',
help='Determine the endpoint of logs to check, '
'http://<url-prefix>.jenkins.couchbase.com')
args = argParser.parse_args()
job = 'job/' + args.job + '/'
serverURL = 'http://' + str(args.url_prefix) + '.jenkins.couchbase.com/'
# Control the eventual output format of the findings
availableFormats = ["plain", "log-line", "jira"]
outputFormat = args.format.lower()
assert outputFormat in availableFormats, "%r format is not supported" % outputFormat
consoleText = '/consoleText/'
resultURLs = {}
failedBuildNums = []
if args.build_no == 0:
# Need to fetch the latest build number
r = requests.get(serverURL + job + 'lastBuild/api/json')
j = r.json()
args.build_no = j['number']
# Determine whether the inputted search parameter is a regex
isRegex = True
try:
pattern = re.compile(args.search)
searchingFor = 'RegEx "' + args.search + '"'
except re.error:
isRegex = False
pattern = args.search
searchingFor = '"' + args.search + '"'
print("Searching for", searchingFor, "in console logs of job:",
args.job, "between build", args.build_no - (args.no_of_builds - 1),
"and", args.build_no, file=sys.stderr)
# Trigger timing check start
start_time = time.time()
for i in range(0, args.no_of_builds):
print('\r >>> Current progress: {} '.format(str(i)), end='',
flush=True, file=sys.stderr)
# Get the console log text from the jenkins job
r = requests.get(serverURL + job + str(args.build_no-i) + consoleText)
if r.status_code != 200:
failedBuildNums.append(args.build_no-i)
# Perform Search
output = []
output.extend(search(r.text, pattern, isRegex))
if output:
resultURLs[serverURL + job + str(args.build_no-i) + '/console/'] = output
# Finish timing
print('\r Completed search in', (time.time() - start_time), 's', file=sys.stderr)
if failedBuildNums:
print("Failed log request on build(s) no:", failedBuildNums, file=sys.stderr)
# Ensure above prints actually print before results (and not mangled inside results)
sys.stderr.flush()
# Result output
if not resultURLs:
# Empty results, did not find any matches
print("No matches found")
elif outputFormat == 'jira':
# Print in a JIRA format
print("{panel:title=Search for", searchingFor,
"in console logs of job", args.job, "between build no",
args.build_no - (args.no_of_builds - 1), "and", args.build_no, '}')
for url in resultURLs:
print('[', url, ']', sep="")
print('{noformat}')
for line in resultURLs[url]:
print(line.replace(ASCIIFormat.BOLD, '').replace(ASCIIFormat.END, ''))
print('{noformat}')
print("{panel}")
elif outputFormat == "log-line":
# Print findings with log line attached
for url in resultURLs:
print(url, ':')
for line in resultURLs[url]:
print('\t', line)
else: # outputFormat == "plain"
# Print findings normally
for url in resultURLs:
print(url)
| 37.162162 | 86 | 0.6336 |
ce3ac2a462ca934025f075aabb0be44931935eba | 542 | py | Python | geokey/projects/migrations/0004_auto_20150123_1507.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | geokey/projects/migrations/0004_auto_20150123_1507.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | geokey/projects/migrations/0004_auto_20150123_1507.py | universityofsussex/geokey | 25e161dbc81841c57c148053dbe99facc81e84b8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| 25.809524 | 139 | 0.608856 |
ce3b5d59730c0d6fb21fce8076ca9f2a4f217a30 | 2,506 | py | Python | hr_attendance_ex/models/sql_ser_config.py | alexhong121/odoo_model | 4eff41c672bd03084eaa6eae81c8f3d359c2fb8d | [
"MIT"
] | null | null | null | hr_attendance_ex/models/sql_ser_config.py | alexhong121/odoo_model | 4eff41c672bd03084eaa6eae81c8f3d359c2fb8d | [
"MIT"
] | null | null | null | hr_attendance_ex/models/sql_ser_config.py | alexhong121/odoo_model | 4eff41c672bd03084eaa6eae81c8f3d359c2fb8d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
# import pyodbc
from odoo import models, fields, api, _
from odoo.exceptions import UserError, AccessError, MissingError
_logger = logging.getLogger(__name__)
| 32.973684 | 111 | 0.602554 |
ce3bab3735a9a905747cfb1ff78c996de02c146a | 223 | py | Python | core/templatetags/my_custom_tags.py | SubhanRzayev/E-commerce-Tmart | 239218397f4ee55ab6ae4ef1798fbc83bc7d1159 | [
"MIT"
] | 2 | 2021-08-13T14:23:34.000Z | 2021-09-18T08:48:29.000Z | core/templatetags/my_custom_tags.py | SubhanRzayev/E-commerce-Tmart | 239218397f4ee55ab6ae4ef1798fbc83bc7d1159 | [
"MIT"
] | null | null | null | core/templatetags/my_custom_tags.py | SubhanRzayev/E-commerce-Tmart | 239218397f4ee55ab6ae4ef1798fbc83bc7d1159 | [
"MIT"
] | null | null | null | from blog.models import Category
from django.template import Library
from core.models import *
register = Library()
| 14.866667 | 35 | 0.735426 |
ce3c2d8194ace948fc686ddfcb1f37ff3e1e1403 | 4,476 | py | Python | Object.py | LeenJooken/RFMCollaborationMiner | 5e8b2933bc9977dcc1707474f8163964dc29ea9d | [
"MIT"
] | null | null | null | Object.py | LeenJooken/RFMCollaborationMiner | 5e8b2933bc9977dcc1707474f8163964dc29ea9d | [
"MIT"
] | null | null | null | Object.py | LeenJooken/RFMCollaborationMiner | 5e8b2933bc9977dcc1707474f8163964dc29ea9d | [
"MIT"
] | null | null | null | #Represents an object
| 33.402985 | 107 | 0.629133 |
ce3e44815e1657902dc5c20dbf4073f8b104c4db | 4,336 | py | Python | centraloffice/src/ngconfiginterface/nginterface.py | dmazzer/CogRIoT | a2d71916b0f1bd79d0f5b444865279530eb6b836 | [
"MIT"
] | null | null | null | centraloffice/src/ngconfiginterface/nginterface.py | dmazzer/CogRIoT | a2d71916b0f1bd79d0f5b444865279530eb6b836 | [
"MIT"
] | null | null | null | centraloffice/src/ngconfiginterface/nginterface.py | dmazzer/CogRIoT | a2d71916b0f1bd79d0f5b444865279530eb6b836 | [
"MIT"
] | null | null | null | """
nginterface.py: NovaGenesis Interface
"""
__author__ = "Daniel Mazzer"
__copyright__ = "Copyright 2016, CogRIoT Project"
__credits__ = "Antonio Marcos Alberti"
__license__ = "MIT"
__maintainer__ = "Daniel Mazzer"
__email__ = "dmazzer@gmail.com"
import sys
import zmq
import threading
from bzrlib.plugins.launchpad.lp_api_lite import json
sys.path.append("../../")
from utils.logmsgs import logger
| 34.688 | 99 | 0.592943 |
ce3f6405d41b2f32d5fb0b9dca8c2d47c32a7949 | 2,625 | py | Python | tests/test_connect.py | mkniewallner/edgedb-python | 2086b866d3c87c215eecf644b2393ddd857457e0 | [
"Apache-2.0"
] | 214 | 2019-01-19T03:56:10.000Z | 2022-03-31T01:37:33.000Z | tests/test_connect.py | mkniewallner/edgedb-python | 2086b866d3c87c215eecf644b2393ddd857457e0 | [
"Apache-2.0"
] | 120 | 2019-03-19T23:01:52.000Z | 2022-03-14T08:41:27.000Z | tests/test_connect.py | mkniewallner/edgedb-python | 2086b866d3c87c215eecf644b2393ddd857457e0 | [
"Apache-2.0"
] | 24 | 2019-04-29T22:41:10.000Z | 2021-11-15T00:28:01.000Z | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import edgedb
from edgedb import _testbase as tb
| 32.8125 | 74 | 0.632 |
ce3f92dd86129583471cf90aca5f82b89a2e5147 | 19,947 | py | Python | .venv/Lib/site-packages/pdoc/cli.py | JohanK91/MethodDice | 73a8962c762ff48da331c9212f10676f066ed940 | [
"MIT"
] | null | null | null | .venv/Lib/site-packages/pdoc/cli.py | JohanK91/MethodDice | 73a8962c762ff48da331c9212f10676f066ed940 | [
"MIT"
] | null | null | null | .venv/Lib/site-packages/pdoc/cli.py | JohanK91/MethodDice | 73a8962c762ff48da331c9212f10676f066ed940 | [
"MIT"
] | 1 | 2021-02-22T13:55:32.000Z | 2021-02-22T13:55:32.000Z | #!/usr/bin/env python3
"""pdoc's CLI interface and helper functions."""
import argparse
import ast
import importlib
import inspect
import os
import os.path as path
import json
import re
import sys
import warnings
from contextlib import contextmanager
from functools import lru_cache
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict, List, Sequence
from warnings import warn
import pdoc
parser = argparse.ArgumentParser(
description="Automatically generate API docs for Python modules.",
epilog="Further documentation is available at <https://pdoc3.github.io/pdoc/doc>.",
)
aa = parser.add_argument
mode_aa = parser.add_mutually_exclusive_group().add_argument
aa(
'--version', action='version', version='%(prog)s ' + pdoc.__version__)
aa(
"modules",
type=str,
metavar='MODULE',
nargs="+",
help="The Python module name. This may be an import path resolvable in "
"the current environment, or a file path to a Python module or "
"package.",
)
aa(
"-c", "--config",
type=str,
metavar='OPTION=VALUE',
action='append',
default=[],
help="Override template options. This is an alternative to using "
"a custom config.mako file in --template-dir. This option "
"can be specified multiple times.",
)
aa(
"--filter",
type=str,
metavar='STRING',
default=None,
help="Comma-separated list of filters. When specified, "
"only identifiers containing the specified string "
"will be shown in the output. Search is case sensitive. "
"Has no effect when --http is set.",
)
aa(
"-f", "--force",
action="store_true",
help="Overwrite any existing generated (--output-dir) files.",
)
mode_aa(
"--html",
action="store_true",
help="When set, the output will be HTML formatted.",
)
mode_aa(
"--pdf",
action="store_true",
help="When set, the specified modules will be printed to standard output, "
"formatted in Markdown-Extra, compatible with most "
"Markdown-(to-HTML-)to-PDF converters.",
)
aa(
"--html-dir",
type=str,
help=argparse.SUPPRESS,
)
aa(
"-o", "--output-dir",
type=str,
metavar='DIR',
help="The directory to output generated HTML/markdown files to "
"(default: ./html for --html).",
)
aa(
"--html-no-source",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--overwrite",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--external-links",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--template-dir",
type=str,
metavar='DIR',
default=None,
help="Specify a directory containing Mako templates "
"(html.mako, text.mako, config.mako and/or any templates they include). "
"Alternatively, put your templates in $XDG_CONFIG_HOME/pdoc and "
"pdoc will automatically find them.",
)
aa(
"--link-prefix",
type=str,
help=argparse.SUPPRESS,
)
aa(
"--close-stdin",
action="store_true",
help="When set, stdin will be closed before importing, to account for "
"ill-behaved modules that block on stdin."
)
DEFAULT_HOST, DEFAULT_PORT = 'localhost', 8080
aa(
"--http",
default='',
type=_check_host_port,
metavar='HOST:PORT',
help="When set, pdoc will run as an HTTP server providing documentation "
"for specified modules. If you just want to use the default hostname "
"and port ({}:{}), set the parameter to :.".format(DEFAULT_HOST, DEFAULT_PORT),
)
aa(
"--skip-errors",
action="store_true",
help="Upon unimportable modules, warn instead of raising."
)
args = argparse.Namespace()
def _generate_lunr_search(modules: List[pdoc.Module],
                          index_docstrings: bool,
                          template_config: dict):
    """Generate index.js for search"""
    # NOTE(review): `recursive_add_to_index` and `_open_write_file` are not
    # defined in this excerpt -- presumably a nested helper and a module-level
    # helper lost in extraction; confirm against the full source.
    # `index_docstrings` is not referenced in the visible code either.
    index = []  # type: List[Dict]
    url_cache = {}  # type: Dict[str, int]
    for top_module in modules:
        recursive_add_to_index(top_module)
    # Order the collected URLs by the insertion index recorded in url_cache.
    urls = sorted(url_cache.keys(), key=url_cache.__getitem__)
    main_path = args.output_dir
    # Emit a small JS file assigning the URL list and the search index to
    # globals consumed by the client-side lunr search page.
    with _open_write_file(path.join(main_path, 'index.js')) as f:
        f.write("URLS=")
        json.dump(urls, f, indent=0, separators=(',', ':'))
        f.write(";\nINDEX=")
        json.dump(index, f, indent=0, separators=(',', ':'))

    # Generate search.html
    with _open_write_file(path.join(main_path, 'doc-search.html')) as f:
        rendered_template = pdoc._render_template('/search.mako', **template_config)
        f.write(rendered_template)
def main(_args=None):
""" Command-line entry point """
global args
args = _args or parser.parse_args()
warnings.simplefilter("once", DeprecationWarning)
if args.close_stdin:
sys.stdin.close()
if (args.html or args.http) and not args.output_dir:
args.output_dir = 'html'
if args.html_dir:
_warn_deprecated('--html-dir', '--output-dir')
args.output_dir = args.html_dir
if args.overwrite:
_warn_deprecated('--overwrite', '--force')
args.force = args.overwrite
template_config = {}
for config_str in args.config:
try:
key, value = config_str.split('=', 1)
value = ast.literal_eval(value)
template_config[key] = value
except Exception:
raise ValueError(
'Error evaluating --config statement "{}". '
'Make sure string values are quoted?'
.format(config_str)
)
if args.html_no_source:
_warn_deprecated('--html-no-source', '-c show_source_code=False', True)
template_config['show_source_code'] = False
if args.link_prefix:
_warn_deprecated('--link-prefix', '-c link_prefix="foo"', True)
template_config['link_prefix'] = args.link_prefix
if args.external_links:
_warn_deprecated('--external-links')
template_config['external_links'] = True
if args.template_dir is not None:
if not path.isdir(args.template_dir):
print('Error: Template dir {!r} is not a directory'.format(args.template_dir),
file=sys.stderr)
sys.exit(1)
pdoc.tpl_lookup.directories.insert(0, args.template_dir)
# Support loading modules specified as python paths relative to cwd
sys.path.append(os.getcwd())
# Virtual environment handling for pdoc script run from system site
try:
venv_dir = os.environ['VIRTUAL_ENV']
except KeyError:
pass # pdoc was not invoked while in a virtual environment
else:
from glob import glob
from distutils.sysconfig import get_python_lib
libdir = get_python_lib(prefix=venv_dir)
sys.path.append(libdir)
# Resolve egg-links from `setup.py develop` or `pip install -e`
# XXX: Welcome a more canonical approach
for pth in glob(path.join(libdir, '*.egg-link')):
try:
with open(pth) as f:
sys.path.append(path.join(libdir, f.readline().rstrip()))
except IOError:
warn('Invalid egg-link in venv: {!r}'.format(pth))
if args.http:
template_config['link_prefix'] = "/"
# Run the HTTP server.
_WebDoc.args = args # Pass params to HTTPServer xP
_WebDoc.template_config = template_config
host, _, port = args.http.partition(':')
host = host or DEFAULT_HOST
port = int(port or DEFAULT_PORT)
print('Starting pdoc server on {}:{}'.format(host, port), file=sys.stderr)
httpd = HTTPServer((host, port), _WebDoc)
print("pdoc server ready at http://%s:%d" % (host, port), file=sys.stderr)
# Allow tests to perform `pdoc.cli._httpd.shutdown()`
global _httpd
_httpd = httpd
try:
httpd.serve_forever()
finally:
httpd.server_close()
sys.exit(0)
docfilter = None
if args.filter and args.filter.strip():
modules = [pdoc.Module(module, docfilter=docfilter,
skip_errors=args.skip_errors)
for module in args.modules]
pdoc.link_inheritance()
if args.pdf:
_print_pdf(modules, **template_config)
import textwrap
print("""
PDF-ready markdown written to standard output.
^^^^^^^^^^^^^^^
Convert this file to PDF using e.g. Pandoc:
{PANDOC_CMD}
or using Python-Markdown and Chrome/Chromium/WkHtmlToPDF:
markdown_py --extension=meta \\
--extension=abbr \\
--extension=attr_list \\
--extension=def_list \\
--extension=fenced_code \\
--extension=footnotes \\
--extension=tables \\
--extension=admonition \\
--extension=smarty \\
--extension=toc \\
pdf.md > pdf.html
chromium --headless --disable-gpu --print-to-pdf=pdf.pdf pdf.html
wkhtmltopdf --encoding utf8 -s A4 --print-media-type pdf.html pdf.pdf
or similar, at your own discretion.""".format(PANDOC_CMD=textwrap.indent(_PANDOC_COMMAND, ' ')),
file=sys.stderr)
sys.exit(0)
for module in modules:
if args.html:
_quit_if_exists(module, ext='.html')
recursive_write_files(module, ext='.html', **template_config)
elif args.output_dir: # Generate text files
_quit_if_exists(module, ext='.md')
recursive_write_files(module, ext='.md', **template_config)
else:
sys.stdout.write(module.text(**template_config))
# Two blank lines between two modules' texts
sys.stdout.write(os.linesep * (1 + 2 * int(module != modules[-1])))
lunr_config = pdoc._get_config(**template_config).get('lunr_search')
if lunr_config is not None:
_generate_lunr_search(
modules, lunr_config.get("index_docstrings", True), template_config)
_PANDOC_COMMAND = '''\
pandoc --metadata=title:"MyProject Documentation" \\
--from=markdown+abbreviations+tex_math_single_backslash \\
--pdf-engine=xelatex --variable=mainfont:"DejaVu Sans" \\
--toc --toc-depth=4 --output=pdf.pdf pdf.md\
'''
if __name__ == "__main__":
main(parser.parse_args())
| 33.41206 | 99 | 0.587657 |
ce40a683df91507328100c3fd2d4f4e66c206aad | 4,981 | py | Python | application/helper/connection_check.py | HarshadKavathiya/acciom | 10e4d813c897bcf0078ab350d9432117cb708d1a | [
"MIT"
] | null | null | null | application/helper/connection_check.py | HarshadKavathiya/acciom | 10e4d813c897bcf0078ab350d9432117cb708d1a | [
"MIT"
] | 9 | 2019-07-23T09:55:15.000Z | 2022-02-19T01:45:12.000Z | application/helper/connection_check.py | accionlabs/acciom | 889958c0f8ec1d74db1958d0a6473c4678eaab3f | [
"MIT"
] | 21 | 2019-07-20T04:47:23.000Z | 2020-01-07T06:55:42.000Z | import cx_Oracle
import psycopg2
import pymysql
import pyodbc
from application.common.constants import APIMessages, SupportedDBType, \
GenericStrings
def connection_check(db_type_id, db_hostname, db_username, db_password,
                     db_name):
    """
    Helper method to check the database connectivity for the given database
    details.

    Args:
        db_type_id(int): type of the database
        db_hostname(str): database hostname
        db_username(str): database username
        db_password(str): database password
        db_name(str): database name

    Returns(str):
        APIMessages.SUCCESS when a connection and cursor can be
        established, otherwise a human readable error message (or the
        raw exception object for unrecognised errors, as before).
        Returns None for an unsupported db_type_id -- behavior kept
        from the original implementation.
    """
    if db_type_id == SupportedDBType().get_db_id_by_name("mysql"):
        try:
            cnxn = pymysql.connect(host=db_hostname, user=db_username,
                                   password=db_password, db=db_name)
        except pymysql.err.InternalError as e:
            # pymysql places the human readable message in e.args[1]
            if GenericStrings.UNKNOWN_DATABASE_MYSQL in e.args[1]:
                return APIMessages.UNKNOWN_DATABASE.format(db_name)
            elif GenericStrings.CANNOT_CONNECT_TO_REMOTE_SERVER_MYSQL in \
                    e.args[1]:
                return APIMessages.CANNOT_CONNECT_TO_REMOTE_SERVER_MYSQL
            else:
                return e.args[1]
        except pymysql.err.OperationalError as e:
            if GenericStrings.AUTHENTICATION_FAILED_MYSQL in e.args[1]:
                return APIMessages.AUTHENTICATION_FAILED.format(db_username)
            elif GenericStrings.CANNOT_CONNECT_TO_SERVER_MYSQL in e.args[1]:
                return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
                    SupportedDBType().get_db_name_by_id(db_type_id),
                    db_hostname)
            else:
                return e.args[1]
        return _verify_and_close(cnxn)
    elif db_type_id == SupportedDBType().get_db_id_by_name("mssql"):
        # This code can handle Oracle Driver 17
        # If other version 13 is given, code will fail
        # TODO: Need to implement an approach that takes driver version
        # based on user input
        try:
            cnxn = pyodbc.connect(
                'DRIVER={0}'.format(GenericStrings.ORACLE_DRIVER) +
                ';SERVER=' + db_hostname +
                ';DATABASE=' + db_name +
                ';UID=' + db_username + ';PWD=' + db_password)
        except pyodbc.ProgrammingError:
            return APIMessages.UNKNOWN_DATABASE.format(db_name)
        except pyodbc.InterfaceError:
            return APIMessages.AUTHENTICATION_FAILED.format(db_username)
        except pyodbc.OperationalError:
            return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
                SupportedDBType().get_db_name_by_id(db_type_id),
                db_hostname)
        return _verify_and_close(cnxn)
    elif db_type_id == SupportedDBType().get_db_id_by_name("postgresql"):
        try:
            cnxn = psycopg2.connect(host=db_hostname, database=db_name,
                                    user=db_username,
                                    password=db_password)
        except psycopg2.OperationalError as e:
            # psycopg2 only exposes the message via str(e)
            if GenericStrings.UNKNOWN_DATABASE_POSTGRES in str(e):
                return APIMessages.UNKNOWN_DATABASE.format(db_name)
            elif GenericStrings.AUTHENTICATION_FAILED_POSTGRES in str(e):
                return APIMessages.AUTHENTICATION_FAILED.format(db_username)
            elif GenericStrings.CANNOT_CONNECT_TO_SERVER_POSTGRES in str(e):
                return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
                    SupportedDBType().get_db_name_by_id(db_type_id),
                    db_hostname)
            else:
                return e
        return _verify_and_close(cnxn)
    elif db_type_id == SupportedDBType().get_db_id_by_name("oracle"):
        try:
            cnxn = cx_Oracle.connect(
                "{0}/{1}@{2}/{3}".format(db_username, db_password,
                                         db_hostname, db_name))
        except cx_Oracle.DatabaseError as e:
            if GenericStrings.UNKNOWN_DB_AUTHENTICATION_FAILED_ORACLE in \
                    str(e):
                return APIMessages.UNKNOWN_DB_AUTHENTICATION_FAILED.format(
                    db_name, db_username)
            elif GenericStrings.CANNOT_CONNECT_TO_SERVER_ORACLE in str(e):
                return APIMessages.CANNOT_CONNECT_TO_SERVER.format(
                    SupportedDBType().get_db_name_by_id(db_type_id),
                    db_hostname)
            else:
                return e
        return _verify_and_close(cnxn)
    # Unsupported db_type_id falls through and returns None (unchanged
    # from the original implementation).


def _verify_and_close(cnxn):
    """Open (and close) a cursor to confirm the connection is usable,
    then always close the connection.

    The original implementation leaked both the cursor and the connection
    and returned None implicitly if cursor creation produced a falsy
    value; this helper closes the resources and returns SUCCESS
    explicitly.
    """
    try:
        cursor = cnxn.cursor()
        cursor.close()
    finally:
        cnxn.close()
    return APIMessages.SUCCESS
| 43.313043 | 79 | 0.609717 |
ce40f79ba52230bce534975d34f03a0b62be130e | 701 | py | Python | src/db/alembic/tests/add_problems.py | furea2/ProofGame | 787f9be7f616c53eb9ce5a677660aee7cc824a14 | [
"MIT"
] | null | null | null | src/db/alembic/tests/add_problems.py | furea2/ProofGame | 787f9be7f616c53eb9ce5a677660aee7cc824a14 | [
"MIT"
] | null | null | null | src/db/alembic/tests/add_problems.py | furea2/ProofGame | 787f9be7f616c53eb9ce5a677660aee7cc824a14 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///db.sqlite3')
Session = sessionmaker(engine)
import sys
sys.path.append("D:\\Users\\furea2\\NodejsProjects\\login_sample\\src\\db\\alembic\\app\\models")
from problem import Problem
# Seed data: three trivial Lean proof exercises, each at difficulty 1.
# NOTE(review): the variable is named `userList` but holds Problem rows --
# presumably copied from a user-seeding script; consider renaming upstream.
userList=[
    Problem(title='zero_le_one', body='theorem zero_le_one : 0 < 1 := sorry', difficulty=1),
    Problem(title='zero_le_two', body='theorem zero_le_two : 0 < 2 := sorry', difficulty=1),
    Problem(title='one_le_two', body='theorem one_le_two : 1 < 2 := sorry', difficulty=1),
]

if __name__=='__main__':
    # Insert all seed problems in a single transaction.
    with Session() as session:
        session.add_all(userList)
        session.commit()
cbe97f3cb389489740f1e42249ec7c347020db47 | 30 | py | Python | otscrape/core/extractor/nested/__init__.py | SSripilaipong/otscrape | 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | [
"MIT"
] | null | null | null | otscrape/core/extractor/nested/__init__.py | SSripilaipong/otscrape | 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | [
"MIT"
] | null | null | null | otscrape/core/extractor/nested/__init__.py | SSripilaipong/otscrape | 73ad2ea3d20841cf5d81b37180a1f21c48e87480 | [
"MIT"
] | null | null | null | from .zip_dict import ZipDict
| 15 | 29 | 0.833333 |
cbea98388f135a070422bda42a79198d77ccf817 | 546 | py | Python | 10_Exceptions_and_Errors/internal.py | MANOJPATRA1991/Python-Beyond-the-Basics | aed7bfd35e33c2b1759b48e1c89314aa149c56d0 | [
"MIT"
] | null | null | null | 10_Exceptions_and_Errors/internal.py | MANOJPATRA1991/Python-Beyond-the-Basics | aed7bfd35e33c2b1759b48e1c89314aa149c56d0 | [
"MIT"
] | null | null | null | 10_Exceptions_and_Errors/internal.py | MANOJPATRA1991/Python-Beyond-the-Basics | aed7bfd35e33c2b1759b48e1c89314aa149c56d0 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # NOTE(review): `modulus_four` is not defined in this excerpt --
    # presumably defined earlier in the original file; confirm before use.
    print(modulus_four(5))
cbeae155ad896dc6fd2c6c3e36347da77b95da7e | 17,038 | py | Python | ml_studio/visualate/dashboards/data_explorer.py | john-james-ai/ml-studio | 2230fcd6579d2291c761e559ec93b18ddd7a96e6 | [
"BSD-3-Clause"
] | 1 | 2020-01-30T09:37:00.000Z | 2020-01-30T09:37:00.000Z | ml_studio/visualate/dashboards/data_explorer.py | john-james-ai/ml-studio | 2230fcd6579d2291c761e559ec93b18ddd7a96e6 | [
"BSD-3-Clause"
] | 3 | 2019-12-05T19:37:59.000Z | 2020-03-31T05:49:53.000Z | ml_studio/visualate/dashboards/data_explorer.py | john-james-ai/ml-studio | 2230fcd6579d2291c761e559ec93b18ddd7a96e6 | [
"BSD-3-Clause"
] | null | null | null | # =========================================================================== #
# DATA EXPLORER #
# =========================================================================== #
# =========================================================================== #
# Project: ML Studio #
# Version: 0.1.14 #
# File: \data_explorer.py #
# Python Version: 3.7.3 #
# --------------- #
# Author: John James #
# Company: Decision Scients #
# Email: jjames@decisionscients.com #
# --------------- #
# Create Date: Friday December 6th 2019, 9:12:28 pm #
# Last Modified: Friday December 6th 2019, 9:12:35 pm #
# Modified By: John James (jjames@decisionscients.com) #
# --------------- #
# License: Modified BSD #
# Copyright (c) 2019 Decision Scients #
# =========================================================================== #
"""Data Explorer - A dash powered web app for analyzing and preparing data.
This module provides a dashboard application that supports:
- Data Audit : Missing values and outliers
- Data Analysis : Exploration of data vis-a-vis statistical
assumptions of independence, linearity, normality,
and homoscedasticity
- Data Preparation : Missing values, and outliers
- Feature Selection : Identifying the features that most
influence the dependent variable
- Features Engineering : Feature transformation, Binning
One-Hot Encoding, Features Split and Scaling
- Dimensionality Reduction : PCA,
t-Distributed Stochastic Neighbor Embedding (t-SNE)
see https://www.analyticsvidhya.com/blog/2018/08/dimensionality-reduction-techniques-python/
Note: This module was highly inspired by the plotly dash-svm
at https://github.com/plotly/dash-svm.
"""
#%%
import os
import sys
sys.path.append('ml_studio')
sys.path.append('ml_studio/utils/visual')
import time
from textwrap import dedent
import warnings
import dash
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing, make_regression
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.svm import SVC
from ml_studio.visualate.classification.figures import serve_prediction_plot, serve_roc_curve, \
serve_pie_confusion_matrix
import ml_studio
from ml_studio.utils.model import get_model_name
from ml_studio.utils.data_manager import sampler, data_split, StandardScaler
from ml_studio.utils.misc import proper
import ml_studio.utils.visual as drc
# --------------------------------------------------------------------------- #
external_scripts = [
# Normalize the CSS
"https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css",
# Fonts
"https://fonts.googleapis.com/css?family=Open+Sans|Roboto",
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"
]
app = dash.Dash(__name__,
external_scripts=external_scripts)
app.scripts.config.serve_locally = False
server = app.server
# --------------------------------------------------------------------------- #
# Generate Data #
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Define Tabs #
# --------------------------------------------------------------------------- #
tabs_styles = {
'height': '44px'
}
tab_style = {
'border': '1px solid #282b38',
'borderBottom': '1px solid #282b38',
'backgroundColor': '#282b38',
'padding': '6px',
'fontWeight': 'bold'
}
tab_selected_style = {
'border': '1px solid #282b38',
'borderBottom': '1px solid #31459E',
'backgroundColor': '#282b38',
'color': 'white',
'padding': '6px'
}
app.layout = html.Div(children=[
# .container class is fixed, .container.scalable is scalable
html.Div(className="banner", children=[
# Change App Name here
html.Div(className='container scalable', children=[
# Change App Name here
html.H2(html.A(
'ML Studio Data Explorer',
href='https://github.com/decisionscients/ml-studio',
style={
'text-decoration': 'none',
'color': 'inherit'
}
)),
html.A(
# TODO: Create logo
html.Img(src="https://s3-us-west-1.amazonaws.com/plotly-tutorials/logo/new-branding/dash-logo-by-plotly-stripe-inverted.png"),
href='https://plot.ly/products/dash/'
)
]),
]),
html.Div(id='body', className='container scalable', children=[
html.Div(
id="app-container",
children=[
build_tabs()
],
),
html.Div(className='row', children=[
html.Div(
id='div-graphs',
children=dcc.Graph(
id='graph-sklearn-svm',
style={'display': 'none'}
)
),
html.Div(
className='three columns',
style={
'min-width': '24.5%',
'max-height': 'calc(100vh - 85px)',
'overflow-y': 'auto',
'overflow-x': 'hidden',
},
children=[
drc.Card([
drc.NamedDropdown(
name='Select Data Type',
id='dropdown-select-datatype',
options=[
{'label': 'Regression', 'value': 'regression'},
{'label': 'Binary Classification','value': 'binary'},
{'label': 'Multiclass Classification','value': 'multiclass'}
],
clearable=False,
searchable=False,
value='regression'
),
drc.NamedDropdown(
name='Select Dataset',
id='dropdown-select-dataset',
options=[
{'label': 'California Housing', 'value': 'california'},
{'label': 'Million Song Dataset','value': 'msd'},
{'label': 'Online News Popularity','value': 'online_news'},
{'label': 'Speed Dating', 'value': 'speed_dating'},
{'label': 'Regression', 'value': 'regression'},
{'label': 'Binary', 'value': 'binary'}
],
clearable=False,
searchable=False,
value='california'
),
]),
html.Div(
dcc.Markdown(dedent("""
[Click here](https://github.com/decisionscients/ml-studio) to visit the project repo, and learn about how to use the app.
""")),
style={'margin': '20px 0px', 'text-align': 'center'}
),
]
),
]),
])
])
# @app.callback(Output('div-graphs', 'children'),
# Input('dropdown-select-dataset', 'value'),
# Input('slider-threshold', 'value')
# def update_svm_graph(kernel,
# degree,
# C_coef,
# C_power,
# gamma_coef,
# gamma_power,
# dataset,
# noise,
# shrinking,
# threshold,
# sample_size):
# t_start = time.time()
# h = .3 # step size in the mesh
# # Data Pre-processing
# X, y = generate_data(dataset=dataset)
# StandardScaler().fit(X)
# X = StandardScaler().transform(X)
# X_train, X_test, y_train, y_test = \
# data_split(X, y, test_size=.4, seed=42)
# x_min = X[:, 0].min() - .5
# x_max = X[:, 0].max() + .5
# y_min = X[:, 1].min() - .5
# y_max = X[:, 1].max() + .5
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
# np.arange(y_min, y_max, h))
# C = C_coef * 10 ** C_power
# gamma = gamma_coef * 10 ** gamma_power
# # Train SVM
# clf = SVC(
# C=C,
# kernel=kernel,
# degree=degree,
# gamma=gamma,
# shrinking=shrinking
# )
# clf.fit(X_train, y_train)
# # Plot the decision boundary. For that, we will assign a color to each
# # point in the mesh [x_min, x_max]x[y_min, y_max].
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# prediction_figure = serve_prediction_plot(
# model=clf,
# X_train=X_train,
# X_test=X_test,
# y_train=y_train,
# y_test=y_test,
# Z=Z,
# xx=xx,
# yy=yy,
# mesh_step=h,
# threshold=threshold
# )
# roc_figure = serve_roc_curve(
# model=clf,
# X_test=X_test,
# y_test=y_test
# )
# confusion_figure = serve_pie_confusion_matrix(
# model=clf,
# X_test=X_test,
# y_test=y_test,
# Z=Z,
# threshold=threshold
# )
# print(
# f"Total Time Taken: {time.time() - t_start:.3f} sec")
# return [
# html.Div(
# className='three columns',
# style={
# 'min-width': '24.5%',
# 'height': 'calc(100vh - 90px)',
# 'margin-top': '5px',
# # Remove possibility to select the text for better UX
# 'user-select': 'none',
# '-moz-user-select': 'none',
# '-webkit-user-select': 'none',
# '-ms-user-select': 'none'
# },
# children=[
# dcc.Graph(
# id='graph-line-roc-curve',
# style={'height': '40%'},
# figure=roc_figure
# ),
# dcc.Graph(
# id='graph-pie-confusion-matrix',
# figure=confusion_figure,
# style={'height': '60%'}
# )
# ]),
# html.Div(
# className='six columns',
# style={'margin-top': '5px'},
# children=[
# dcc.Graph(
# id='graph-sklearn-svm',
# figure=prediction_figure,
# style={'height': 'calc(100vh - 90px)'}
# )
# ])
# ]
# Running the server
if __name__ == '__main__':
app.run_server(debug=True)
# %%
| 37.862222 | 145 | 0.435204 |
cbeb6bdd865a57de9bfabcbd439111e0ae5d40b5 | 1,080 | py | Python | bot.py | m2Link/YouTube-Video-Search | 0512ea220af271dc1853925026f31c32990fa4ff | [
"MIT"
] | 9 | 2021-09-30T06:25:03.000Z | 2022-02-10T05:45:23.000Z | bot.py | m2Link/YouTube-Video-Search | 0512ea220af271dc1853925026f31c32990fa4ff | [
"MIT"
] | null | null | null | bot.py | m2Link/YouTube-Video-Search | 0512ea220af271dc1853925026f31c32990fa4ff | [
"MIT"
] | 7 | 2021-09-30T06:24:56.000Z | 2022-02-10T04:52:10.000Z | from pyrogram import Client ,filters
import os
from py_youtube import Data, Search
from pyrogram.types import *
TOKEN = os.environ.get("TOKEN", "")
APP_ID = int(os.environ.get("APP_ID", ""))
API_HASH = os.environ.get("API_HASH", "")
app = Client( "yt-search",
bot_token = TOKEN, api_id =API_ID , api_hash = API_HASH)
app.run()
| 22.5 | 78 | 0.609259 |
cbebd1e68288c77af1b90def0eca795dc3029718 | 7,582 | py | Python | steam_review_sentiments/cnn_model.py | joshuamegnauth54/aapor_scholars_2021 | 1848083203714d2c0a205e538e91990983b3320e | [
"MIT"
] | null | null | null | steam_review_sentiments/cnn_model.py | joshuamegnauth54/aapor_scholars_2021 | 1848083203714d2c0a205e538e91990983b3320e | [
"MIT"
] | 1 | 2021-11-03T15:33:31.000Z | 2021-11-03T15:33:31.000Z | steam_review_sentiments/cnn_model.py | joshuamegnauth54/steam_user_reviews | 1848083203714d2c0a205e538e91990983b3320e | [
"MIT"
] | null | null | null | import numpy as np
import keras
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.exceptions import NotFittedError
from keras.models import Sequential
from keras.layers import BatchNormalization, Conv1D, Dense, Embedding
from keras.layers.pooling import GlobalMaxPooling1D
from keras.initializers import Constant
from utilities import null_preproc, transform_string,\
transform_all, tokenize_all
# This class is badly designed. I wanted to leverage spaCy, but I combined
# tools in a very poor way...
| 33.254386 | 78 | 0.587048 |
cbec06c90522fab416454e28ed3f8f1ea15d10d0 | 96 | py | Python | startup.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | startup.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | startup.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | from paste.deploy import loadapp
app = loadapp("config:/home/webenmr/WebENMR/development.ini")
| 24 | 61 | 0.791667 |
cbef0e085fbba4e6b5fa308476e408eed61f8acc | 2,548 | py | Python | dataset/components.py | mikhailkin/dataset | 7417483fdbe2e3743af4d614cb9036fd5b1375c0 | [
"Apache-2.0"
] | null | null | null | dataset/components.py | mikhailkin/dataset | 7417483fdbe2e3743af4d614cb9036fd5b1375c0 | [
"Apache-2.0"
] | null | null | null | dataset/components.py | mikhailkin/dataset | 7417483fdbe2e3743af4d614cb9036fd5b1375c0 | [
"Apache-2.0"
] | null | null | null | """ Contains classes to handle batch data components """
| 34.432432 | 105 | 0.594192 |
cbefd7cba52260caad3d20e4693a2870bae5c60c | 708 | py | Python | app/models.py | owen-rpx/RainGod | ba20023c1191519edec7f12fb488c942a2e05627 | [
"MIT"
] | 7 | 2019-04-11T09:45:37.000Z | 2019-04-19T01:40:03.000Z | app/models.py | Owenzh/RainGod | ba20023c1191519edec7f12fb488c942a2e05627 | [
"MIT"
] | 5 | 2021-03-18T23:43:45.000Z | 2022-03-11T23:44:29.000Z | app/models.py | owen-rpx/RainGod | ba20023c1191519edec7f12fb488c942a2e05627 | [
"MIT"
] | 2 | 2019-04-11T09:45:39.000Z | 2019-04-19T01:39:02.000Z | #-*- coding:utf-8 -*-
from .apps import db
| 29.5 | 57 | 0.686441 |
cbf1db6303b75bf9bb5a1fdfc15b60807174510e | 834 | py | Python | fingerExercises/fingerExercises-03/03.6-finger.how-many.py | sodaPhix/MITx-6.00.1x | 8629e227d250cf6c2d5ca56944668b5796ce78cf | [
"MIT"
] | 1 | 2019-10-06T22:58:39.000Z | 2019-10-06T22:58:39.000Z | fingerExercises/fingerExercises-03/03.6-finger.how-many.py | sodaPhix/MITx-6.00.1x | 8629e227d250cf6c2d5ca56944668b5796ce78cf | [
"MIT"
] | null | null | null | fingerExercises/fingerExercises-03/03.6-finger.how-many.py | sodaPhix/MITx-6.00.1x | 8629e227d250cf6c2d5ca56944668b5796ce78cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 03:26:16 2019
@author: sodatab
MITx: 6.00.1x
"""
"""
03.6-Finger How Many
---------------------
Consider the following sequence of expressions:
animals = { 'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati']}
animals['d'] = ['donkey']
animals['d'].append('dog')
animals['d'].append('dingo')
We want to write some simple procedures that work on dictionaries to return information.
First, write a procedure, called how_many, which returns the sum of the number of values associated with a dictionary.
"""
"""Answer Script:"""
def how_many(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
'''
sum = 0
for i in aDict.values():
sum += len(i)
return sum
| 22.540541 | 118 | 0.631894 |
cbf29fc594fa3d410506fc9b2b10ddf99a2f2899 | 1,569 | py | Python | test/const.py | DaniFdezAlvarez/shexerp3 | 80c3bdaac856a88d53359f5996477994774d34e2 | [
"Apache-2.0"
] | 3 | 2019-06-24T18:13:06.000Z | 2020-08-06T03:08:23.000Z | test/const.py | DaniFdezAlvarez/shexerp3 | 80c3bdaac856a88d53359f5996477994774d34e2 | [
"Apache-2.0"
] | 109 | 2019-05-22T11:53:05.000Z | 2021-03-15T11:09:18.000Z | test/const.py | DaniFdezAlvarez/shexerp3 | 80c3bdaac856a88d53359f5996477994774d34e2 | [
"Apache-2.0"
] | 2 | 2019-10-23T13:06:31.000Z | 2020-07-31T09:59:15.000Z | BASE_FILES = "C:\\Users\\Dani\\repos-git\\shexerp3\\test\\t_files\\"
BASE_FILES_GENERAL = BASE_FILES + "general\\"
G1 = BASE_FILES + "t_graph_1.ttl"
G1_NT = BASE_FILES + "t_graph_1.nt"
G1_TSVO_SPO = BASE_FILES + "t_graph_1.tsv"
G1_JSON_LD = BASE_FILES + "t_graph_1.json"
G1_XML = BASE_FILES + "t_graph_1.xml"
G1_N3 = BASE_FILES + "t_graph_1.n3"
G1_ALL_CLASSES_NO_COMMENTS = BASE_FILES_GENERAL + "g1_all_classes_no_comments.shex"
# PREFIX xml: <http://www.w3.org/XML/1998/namespace/>
# PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
# PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
# PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
# PREFIX foaf: <http://xmlns.com/foaf/0.1/>
# NAMESPACES_WITH_FOAF_AND_EX = {"http://example.org/" : "ex",
# "http://www.w3.org/XML/1998/namespace/" : "xml",
# "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
# "http://www.w3.org/2000/01/rdf-schema#" : "rdfs",
# "http://www.w3.org/2001/XMLSchema#": "xsd",
# "http://xmlns.com/foaf/0.1/": "foaf"
# }
| 42.405405 | 86 | 0.560867 |
cbf2a3881275e0a82374f52818602abe974fb113 | 23,265 | py | Python | src/lookoutequipment/evaluation.py | dast1/amazon-lookout-for-equipment-python-sdk | 37213819c46b2dd3bcd4844235bececeabca8f12 | [
"Apache-2.0"
] | 3 | 2021-09-28T19:53:53.000Z | 2022-02-14T17:50:59.000Z | src/lookoutequipment/evaluation.py | dast1/amazon-lookout-for-equipment-python-sdk | 37213819c46b2dd3bcd4844235bececeabca8f12 | [
"Apache-2.0"
] | null | null | null | src/lookoutequipment/evaluation.py | dast1/amazon-lookout-for-equipment-python-sdk | 37213819c46b2dd3bcd4844235bececeabca8f12 | [
"Apache-2.0"
] | 1 | 2021-11-11T18:15:14.000Z | 2021-11-11T18:15:14.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import gridspec
from scipy.stats import wasserstein_distance
from tqdm import tqdm | 40.744308 | 105 | 0.571545 |
cbf2dc049f1ccb6bed778490aa18d0ea3f007439 | 793 | py | Python | src/opserver/plugins/alarm_process_connectivity/setup.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | [
"Apache-2.0"
] | 3 | 2019-01-11T06:16:40.000Z | 2021-02-24T23:48:21.000Z | src/opserver/plugins/alarm_process_connectivity/setup.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | [
"Apache-2.0"
] | null | null | null | src/opserver/plugins/alarm_process_connectivity/setup.py | biswajit-mandal/contrail-controller | 80c4a7e8515f7296b18ba4c21a439bd3daefcc4a | [
"Apache-2.0"
] | 18 | 2017-01-12T09:28:44.000Z | 2019-04-18T20:47:42.000Z | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup, find_packages
setup(
name='alarm_process_connectivity',
version='0.1dev',
packages=find_packages(),
entry_points = {
'contrail.analytics.alarms': [
'ObjectCollectorInfo = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectVRouter = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectConfigNode = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectBgpRouter = alarm_process_connectivity.main:ProcessConnectivity',
'ObjectDatabaseInfo = alarm_process_connectivity.main:ProcessConnectivity',
],
},
zip_safe=False,
long_description="ProcessConnectivity alarm"
)
| 34.478261 | 88 | 0.717528 |
cbf3c9c5fb96524cab1cc99ebc8311885fc1db2a | 813 | py | Python | app/tools/dbg_queue_graph.py | samelamin/kylinmonitorbot | 00a38ca9513dceadf23bfdb19b6daab77a368e76 | [
"MIT"
] | null | null | null | app/tools/dbg_queue_graph.py | samelamin/kylinmonitorbot | 00a38ca9513dceadf23bfdb19b6daab77a368e76 | [
"MIT"
] | null | null | null | app/tools/dbg_queue_graph.py | samelamin/kylinmonitorbot | 00a38ca9513dceadf23bfdb19b6daab77a368e76 | [
"MIT"
] | null | null | null | import asyncio
import os
from localization import LocalizationManager
from services.dialog.queue_picture import queue_graph
from services.lib.config import Config
from services.lib.db import DB
from services.lib.depcont import DepContainer
if __name__ == '__main__':
d = DepContainer()
d.loc_man = LocalizationManager()
d.loop = asyncio.get_event_loop()
d.cfg = Config()
d.db = DB(d.loop)
d.loop.run_until_complete(test_plots(d))
| 22.583333 | 64 | 0.693727 |
cbf41ad432dd52c7fa96aae20328389d8a8a2749 | 8,482 | py | Python | fenpei/job.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | null | null | null | fenpei/job.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | 2 | 2016-11-17T12:10:36.000Z | 2017-02-08T09:06:37.000Z | fenpei/job.py | mverleg/fenpei | 2142e4fe4a1e2897d8d8185ef4b86adc7323e1eb | [
"BSD-3-Clause"
] | null | null | null |
"""
Base class for fenpei job; this should be considered abstract.
Your custom job(s) should inherit from this job and extend the relevant methods, such as::
* is_prepared
* is_complete
* prepare
* start
* result
* summary
"""
from re import match
from sys import stdout
from bardeen.system import mkdirp
from time import time
from os import remove
from os.path import join, isdir
from shutil import rmtree
from .utils import CALC_DIR
| 29.451389 | 157 | 0.67543 |
cbf53d52cd9777aefd5d176bd11a75c4a1b54abc | 303 | py | Python | Aula 07/ex6.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | Aula 07/ex6.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | Aula 07/ex6.py | diegorafaelvieira/Programacao-1 | 657a974f1215cec4aed68603e738d9a135131545 | [
"MIT"
] | null | null | null | val = int(input("Valor:"))
soma = val
maior = val
menor = val
for i in range(0,9):
val = int(input("Valor:"))
if val>maior:
maior = val
if val<menor:
menor=val
soma+=val
print("O maior valor :",maior)
print("O menor valor :",menor)
print("A mdia :",(soma/10))
| 16.833333 | 31 | 0.570957 |
cbf5f6dcb21e20b6bc3a2e8d76fc27d3087ec7c4 | 746 | py | Python | waterbutler/providers/github/exceptions.py | KakeruMizuno/RDM-waterbutler | 58ecd801385a7572d1ed56568a31f701291c4e3e | [
"Apache-2.0"
] | 1 | 2019-05-08T02:32:17.000Z | 2019-05-08T02:32:17.000Z | waterbutler/providers/github/exceptions.py | KakeruMizuno/RDM-waterbutler | 58ecd801385a7572d1ed56568a31f701291c4e3e | [
"Apache-2.0"
] | null | null | null | waterbutler/providers/github/exceptions.py | KakeruMizuno/RDM-waterbutler | 58ecd801385a7572d1ed56568a31f701291c4e3e | [
"Apache-2.0"
] | null | null | null | from http import HTTPStatus
from waterbutler.core.exceptions import ProviderError
| 46.625 | 99 | 0.672922 |
cbf60a5f54499551d07c8764354e2a5053355b82 | 899 | py | Python | buildencyclopedia.py | ZhenyuZ/gdc-docs | f024d5d4cd86dfa2c9e7d63850eee94d975b7948 | [
"Apache-2.0"
] | 67 | 2016-06-09T14:11:51.000Z | 2022-03-16T07:54:44.000Z | buildencyclopedia.py | ZhenyuZ/gdc-docs | f024d5d4cd86dfa2c9e7d63850eee94d975b7948 | [
"Apache-2.0"
] | 19 | 2016-06-21T15:51:11.000Z | 2021-06-07T09:22:20.000Z | buildencyclopedia.py | ZhenyuZ/gdc-docs | f024d5d4cd86dfa2c9e7d63850eee94d975b7948 | [
"Apache-2.0"
] | 32 | 2016-07-15T01:24:19.000Z | 2019-03-25T10:42:28.000Z | """updates the encyclopedia section in the mkdocs.yml
should be run whenever a file is removed or added into the directory"""
import os
import yaml
ABSFILEPATH = os.path.dirname(os.path.realpath(__file__))
FILEARRAY = os.listdir(ABSFILEPATH + '/docs/Encyclopedia/pages')
FILEARRAY = sorted(FILEARRAY, key=str.lower)
with open(ABSFILEPATH + '/mkdocs.yml', 'r') as f:
doc = yaml.load(f)
encycdict = next(d for (index, d) in enumerate(doc['pages']) \
if d.get('EncyclopediaEntries', False) != False)
newlist = []
for x in range(len(FILEARRAY)):
if FILEARRAY[x][-3:] == ".md":
tempdict = {FILEARRAY[x][:-3].replace("_"," "):"".join(['Encyclopedia/pages/', FILEARRAY[x][:-3], '.md'])}
newlist.append(tempdict)
encycdict['EncyclopediaEntries'] = newlist
with open(ABSFILEPATH + '/mkdocs.yml', 'w+') as f:
f.write(yaml.dump(doc, default_flow_style=False))
| 32.107143 | 114 | 0.670745 |
cbf64e56908db17c3f6f03defc1efa0087875a63 | 3,056 | py | Python | nucypher/tests/config/test_firstula_circumstances.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | nucypher/tests/config/test_firstula_circumstances.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | nucypher/tests/config/test_firstula_circumstances.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | """
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
from functools import partial
import maya
import pytest
import pytest_twisted
from twisted.internet.threads import deferToThread
from nucypher.network.middleware import RestMiddleware
from nucypher.utilities.sandbox.ursula import make_federated_ursulas
| 39.688312 | 87 | 0.729058 |
cbf6bbc96905dc1f309f486dc863edc389cd8386 | 1,550 | py | Python | anchore/anchore-modules/queries/show-familytree.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 401 | 2016-06-16T15:29:48.000Z | 2022-03-24T10:05:16.000Z | anchore/anchore-modules/queries/show-familytree.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 63 | 2016-06-16T21:10:27.000Z | 2020-07-01T06:57:27.000Z | anchore/anchore-modules/queries/show-familytree.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 64 | 2016-06-16T13:05:57.000Z | 2021-07-16T10:03:45.000Z | #!/usr/bin/env python
import sys
import os
import re
import json
import traceback
import anchore.anchore_utils
# main routine
try:
config = anchore.anchore_utils.init_query_cmdline(sys.argv, "params: all\nhelp: shows dockerfile lines.")
except Exception as err:
print str(err)
sys.exit(1)
if not config:
sys.exit(0)
if len(config['params']) <= 0:
print "Query requires input: all"
warns = list()
outlist = list()
outlist.append(["Image_Id", "Repo_Tags", "Image Type"])
try:
idata = anchore.anchore_utils.load_image_report(config['imgid'])
ftree = idata['familytree']
for fid in ftree:
tags = "unknown"
itype = "unknown"
try:
fdata = anchore.anchore_utils.load_image_report(fid)
tags = ','.join(fdata['anchore_all_tags'])
if not tags:
tags = "none"
itype = fdata['meta']['usertype']
if not itype:
itype = "intermediate"
except:
warns.append("family tree id ("+str(fid)+") does not appear to have been analyzed, no data for this member of the tree")
outlist.append([fid, str(tags), str(itype)])
except Exception as err:
# handle the case where something wrong happened
import traceback
traceback.print_exc()
warns.append("query error: "+str(err))
pass
anchore.anchore_utils.write_kvfile_fromlist(config['output'], outlist)
if len(warns) > 0:
anchore.anchore_utils.write_plainfile_fromlist(config['output_warns'], warns)
sys.exit(0)
| 22.794118 | 132 | 0.645806 |
cbf70c92043ad32d0c5d7dec87ffaf9a8bdb7e8f | 2,258 | py | Python | spikeforest/spikeforestwidgets/templatewidget/templatewidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:19.000Z | 2021-09-23T01:07:19.000Z | spikeforest/spikeforestwidgets/templatewidget/templatewidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | null | null | null | spikeforest/spikeforestwidgets/templatewidget/templatewidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:21.000Z | 2021-09-23T01:07:21.000Z | import uuid
from spikeforest import mdaio
import io
import base64
import vdomr as vd
import os
import numpy as np
import mtlogging
import time
import traceback
source_path = os.path.dirname(os.path.realpath(__file__))
| 30.513514 | 110 | 0.623561 |
cbf7539dbf5f7fc02064b79ad7c95855899a2086 | 5,968 | py | Python | delsmm/smm.py | sisl/delsmm | 6baae49e6f7b31c817af5668972ba6c196b66e9c | [
"MIT"
] | 1 | 2021-09-21T08:08:44.000Z | 2021-09-21T08:08:44.000Z | delsmm/smm.py | albernsrya/delsmm | 11f2750356a7c7d8b196a67af747a9bc5f39b479 | [
"MIT"
] | null | null | null | delsmm/smm.py | albernsrya/delsmm | 11f2750356a7c7d8b196a67af747a9bc5f39b479 | [
"MIT"
] | 1 | 2021-07-02T13:23:32.000Z | 2021-07-02T13:23:32.000Z | import torch
from torch import nn
from torch.autograd import grad
from torch.autograd.functional import jacobian
from scipy.optimize import root
from ceem.dynamics import *
from ceem.nn import LNMLP
from ceem.utils import temp_require_grad
from tqdm import tqdm
from delsmm.lagsys import AbstractLagrangianSystem
import delsmm.utils as utils
| 34.298851 | 88 | 0.630697 |
cbf7a1ce96364e36588a482e13d4799ada06f5db | 16,642 | py | Python | src/speech/deep_model.py | dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | [
"MIT"
] | 6 | 2020-08-03T03:13:25.000Z | 2022-02-11T08:32:10.000Z | src/speech/deep_model.py | dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | [
"MIT"
] | 1 | 2020-09-08T16:10:38.000Z | 2020-09-08T16:10:38.000Z | src/speech/deep_model.py | dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture | a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | [
"MIT"
] | 2 | 2020-08-03T21:37:21.000Z | 2021-03-26T02:19:17.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence
import pdb
import math
torch.manual_seed(1)
| 42.671795 | 175 | 0.651785 |
cbf8a1ef0f33878d804eb957ddcbefc421928a1b | 40 | py | Python | problem/01000~09999/09498/9498.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/09498/9498.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/09498/9498.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | print(("F"*6+"DCBAA")[int(input())//10]) | 40 | 40 | 0.55 |
cbf9013b2e9891516c04252ba24b05ce5ea2d134 | 2,596 | py | Python | tests/netcdf_engine/test_utils.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 12 | 2021-06-07T16:51:32.000Z | 2022-03-10T12:48:00.000Z | tests/netcdf_engine/test_utils.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 72 | 2021-04-28T21:49:41.000Z | 2022-02-24T13:58:11.000Z | tests/netcdf_engine/test_utils.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 3 | 2021-08-11T16:33:37.000Z | 2021-12-01T20:31:12.000Z | # Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
from tiledb.cf.netcdf_engine._utils import get_netcdf_metadata, get_unpacked_dtype
netCDF4 = pytest.importorskip("netCDF4")
def test_unpacked_dtype_unsupported_dtype_error():
"""Tests attempting to unpack a NetCDF variable with a data type that does not
support packing/unpacking."""
with netCDF4.Dataset("tmp.nc", diskless=True, mode="w") as dataset:
variable = dataset.createVariable("x", dimensions=tuple(), datatype="S1")
with pytest.raises(ValueError):
get_unpacked_dtype(variable)
| 38.176471 | 87 | 0.678737 |
cbf916118eb5c3081ccd1fe9c5e35846ce4dd6b9 | 9,091 | py | Python | bib2mp3.py | ewquon/bib2mp3 | 6917f5223de7d2ae1ed9857c445015a05e64936c | [
"MIT"
] | null | null | null | bib2mp3.py | ewquon/bib2mp3 | 6917f5223de7d2ae1ed9857c445015a05e64936c | [
"MIT"
] | null | null | null | bib2mp3.py | ewquon/bib2mp3 | 6917f5223de7d2ae1ed9857c445015a05e64936c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import numpy as np
import html
from bs4 import BeautifulSoup
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
import eyed3
from tokenizer import MyTokenizer
# acronyms to spell out
acronyms = [
'LES',
'ALM',
'MYNN',
]
spelled_out_form = ['-'.join(list(acronym)) for acronym in acronyms]
#==============================================================================
if __name__ == '__main__':
import sys
if len(sys.argv) <= 1:
sys.exit('Specify bib file')
bib = BibtexLibrary(sys.argv[1])
bib.generate_descriptions()
bib.to_mp3()
| 36.657258 | 84 | 0.527995 |
cbf92713179f71318935e2ab443c7a93e35ceec1 | 529 | py | Python | build/create_tag_body.py | Nexusforge/Nexus.Extensions.RpcDataSource | e379243a1aca38c03e882759964d8bc008a7c8bd | [
"MIT"
] | null | null | null | build/create_tag_body.py | Nexusforge/Nexus.Extensions.RpcDataSource | e379243a1aca38c03e882759964d8bc008a7c8bd | [
"MIT"
] | null | null | null | build/create_tag_body.py | Nexusforge/Nexus.Extensions.RpcDataSource | e379243a1aca38c03e882759964d8bc008a7c8bd | [
"MIT"
] | null | null | null | import os
import re
import subprocess
tag = os.getenv('GITHUB_REF_NAME')
if tag is None:
raise Exception("GITHUB_REF_NAME is not defined")
with open("tag_body.txt", "w") as file:
output = subprocess.check_output(["git", "tag", "-l", "--format='%(contents)'", tag], stdin=None, stderr=None, shell=False)
match = re.search("'(.*)'", output.decode("utf8"), re.DOTALL)
if match is None:
raise Exception("Unable to extract the tag body")
tag_body = str(match.groups(1)[0])
file.write(tag_body)
| 24.045455 | 127 | 0.655955 |
cbf954558d8ce8ef179a3032c8e9bbe1051befb6 | 3,456 | py | Python | amos/django_orchestrator/api/views.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 2 | 2021-02-03T23:25:14.000Z | 2021-03-18T15:03:12.000Z | amos/django_orchestrator/api/views.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 2 | 2021-02-03T22:35:12.000Z | 2021-02-12T14:09:31.000Z | amos/django_orchestrator/api/views.py | amosproj/2020ws02-computer-vision-for-sights | 66641de397af77f16ee36aa9e860ca7249982cb1 | [
"MIT"
] | 1 | 2021-03-18T15:03:14.000Z | 2021-03-18T15:03:14.000Z | """This module contains the views exposed to the user."""
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.request import Request
from api.view_handlers import (
handle_get_trained_city_model,
handle_persist_sight_image,
handle_add_new_city,
handle_get_supported_cities,
HTTP_200_MESSAGE,
handle_get_latest_city_model_version,
)
| 25.984962 | 79 | 0.664063 |
cbf9db657ca3437e042cc26606350dba666d7720 | 1,154 | py | Python | samples/verify.py | ssmbct-netops/CyberSaucier | 58e965e7b37ad74563319cd6b2b5c68da2dbd6c3 | [
"MIT"
] | 17 | 2019-02-01T06:46:52.000Z | 2021-12-28T06:33:07.000Z | samples/verify.py | Melon-Tropics/CyberSaucier | 780fb3df8518a41ed2d14b9a4b33f5ae520c15d4 | [
"MIT"
] | 9 | 2020-05-01T00:36:24.000Z | 2022-03-29T17:04:21.000Z | samples/verify.py | Melon-Tropics/CyberSaucier | 780fb3df8518a41ed2d14b9a4b33f5ae520c15d4 | [
"MIT"
] | 4 | 2019-10-20T03:22:34.000Z | 2022-02-07T18:27:04.000Z | import requests, json, argparse, os
from termcolor import colored
parser = argparse.ArgumentParser(description="Verify the recipes by running them through CyberSaucier")
parser.add_argument('--rulefolder', help='Folder containing the json recipes')
parser.add_argument("--url", help="URL to CyberSaucier", default="http://localhost:7000")
args = parser.parse_args()
for root, dirs, files in os.walk(args.rulefolder):
path = root.split(os.sep)
for fname in files:
if fname.lower().endswith("json"):
file = os.path.join(root, fname)
with open(file, 'r') as f:
data=f.read()
rule = json.loads(data)
if "verify" in rule:
u = args.url + "/" + rule["name"]
resp = requests.post(url=u, data=rule["verify"]["originalInput"], headers={'Content-Type':'text/plain'})
resp = resp.json()
if resp["result"] == rule["verify"]["expectedOutput"]:
print(colored(rule["name"] + " : PASS", "green"))
else:
print(colored(rule["name"] + " : FAIL", "red"))
| 42.740741 | 120 | 0.57279 |
cbfa1107b8b7c29048f818cde663861f0e4ac256 | 761 | py | Python | tests/test_binary_tree.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 8 | 2021-08-08T06:06:39.000Z | 2022-02-04T18:30:38.000Z | tests/test_binary_tree.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 1 | 2022-01-04T02:01:36.000Z | 2022-01-04T02:01:36.000Z | tests/test_binary_tree.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 2 | 2021-08-18T12:28:40.000Z | 2022-01-03T23:56:41.000Z | import os
import pytest
from dsplot.errors import InputException
from dsplot.tree import BinaryTree
| 29.269231 | 79 | 0.628121 |
cbfb410cacd5080693f012f125e877edd266870a | 172 | py | Python | features/environment.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 6 | 2021-05-12T08:40:36.000Z | 2022-01-25T08:31:06.000Z | features/environment.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 14 | 2021-05-12T09:03:08.000Z | 2021-06-10T13:18:52.000Z | features/environment.py | geeksforsocialchange/imok | efb7189c13c398dbd5d4301ca496a2e583b0f5b7 | [
"MIT"
] | 1 | 2021-05-14T20:54:15.000Z | 2021-05-14T20:54:15.000Z | from django.conf import settings
settings.NOTIFY_EMAIL = 'root@localhost'
settings.DEBUG = True
| 17.2 | 40 | 0.72093 |
cbfc891317c3347008f8eaea66169ec8996add82 | 2,546 | py | Python | h2o-py/tests/testdir_jira/pyunit_pubdev_7353_reset_threshold.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_jira/pyunit_pubdev_7353_reset_threshold.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_jira/pyunit_pubdev_7353_reset_threshold.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.model_utils import reset_model_threshold
def test_reset_threshold():
"""
Test the model threshold can be reset.
Performance metric should be recalculated and also predictions should be changed based on the new threshold.
"""
# import data
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv"))
# convert columns to factors
airlines["Year"] = airlines["Year"].asfactor()
airlines["Month"] = airlines["Month"].asfactor()
airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
airlines["Cancelled"] = airlines["Cancelled"].asfactor()
airlines['FlightNum'] = airlines['FlightNum'].asfactor()
# set the predictor names and the response column name
predictors = ["Origin", "Dest", "Year", "UniqueCarrier", "DayOfWeek", "Month", "Distance", "FlightNum"]
response = "IsDepDelayed"
# split into train and validation sets
train, valid = airlines.split_frame(ratios = [.8], seed = 1234)
# initialize the estimator
model = H2OGradientBoostingEstimator(seed = 1234, ntrees=5)
# train the model
model.train(x=predictors, y=response, training_frame=train)
old_threshold = model._model_json['output']['default_threshold']
# predict
preds = model.predict(airlines)
# reset the threshold and get the old one
new_threshold = 0.6917189903082518
old_returned = reset_model_threshold(model, new_threshold)
reset_model = h2o.get_model(model.model_id)
reset_threshold = reset_model._model_json['output']['default_threshold']
# predict with reset model
preds_reset = reset_model.predict(airlines)
# compare thresholds
assert old_threshold == old_returned
assert new_threshold == reset_threshold
assert reset_threshold != old_threshold
# compare predictions
preds_local = preds.as_data_frame()
preds_reset_local = preds_reset.as_data_frame()
print("old threshold:", old_threshold, "new_threshold:", new_threshold)
for i in range(airlines.nrow):
if old_threshold <= preds_local.iloc[i, 2] < new_threshold:
assert preds_local.iloc[i, 0] != preds_reset_local.iloc[i, 0]
else:
assert preds_local.iloc[i, 0] == preds_reset_local.iloc[i, 0]
if __name__ == "__main__":
pyunit_utils.standalone_test(test_reset_threshold)
else:
test_reset_threshold()
| 35.361111 | 112 | 0.714061 |
cbfd7282e7bf8367942a36811a4c23c2043f6215 | 2,324 | py | Python | tests/datasets/TestV1/csv2sql.py | pvanderknyff/alibabacloud-adb-tableau-connector | 0280428bfc916530f9de26336631f6a6602c6804 | [
"MIT"
] | 1 | 2019-08-21T17:53:50.000Z | 2019-08-21T17:53:50.000Z | tests/datasets/TestV1/csv2sql.py | aliyun/aliyun-adb-tableau-connector | 0280428bfc916530f9de26336631f6a6602c6804 | [
"MIT"
] | 1 | 2020-06-29T08:38:54.000Z | 2020-06-29T08:38:54.000Z | tests/datasets/TestV1/csv2sql.py | aliyun/alibabacloud-adb-tableau-connector | 0280428bfc916530f9de26336631f6a6602c6804 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import argparse
import csv
import sys
'''
This script takes a CSV file with a mandatory header and a sql tablename and converts the data in the csv file into
an SQL INSERT statement.
'''
if __name__ == "__main__":
main()
| 31.835616 | 120 | 0.55809 |
cbff48d02931d3f7dcc779f4f74d3a26a84b6bb5 | 1,043 | py | Python | FlaskApp/app.py | Dec22gln/FlaskBlog | 114ca9fc39f039cbdf0f1ff613fb66e364cea171 | [
"MIT"
] | null | null | null | FlaskApp/app.py | Dec22gln/FlaskBlog | 114ca9fc39f039cbdf0f1ff613fb66e364cea171 | [
"MIT"
] | null | null | null | FlaskApp/app.py | Dec22gln/FlaskBlog | 114ca9fc39f039cbdf0f1ff613fb66e364cea171 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import render_template
app = Flask(__name__)
if __name__ == '__main__':
app.run()
| 21.729167 | 56 | 0.708533 |
cbffe9c4b5d1ee44110edbd0b422813f50993bf7 | 1,913 | py | Python | azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-10-16T13:08:23.000Z | 2018-10-16T13:08:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .replicator_status_py3 import ReplicatorStatus
| 39.854167 | 103 | 0.665447 |
0200db1441c66699ac789aeb7d02549ecd867f2a | 448 | py | Python | example/example/models.py | KnightConan/sspdatatables | 1179a11358734e5e472e5eee703e8d34fa49e9bf | [
"MIT"
] | 4 | 2018-11-23T16:17:38.000Z | 2018-11-26T16:08:49.000Z | example/example/models.py | zhiwei2017/sspdatatables | 1179a11358734e5e472e5eee703e8d34fa49e9bf | [
"MIT"
] | 8 | 2018-11-26T16:38:55.000Z | 2019-01-18T15:13:12.000Z | example/example/models.py | KnightConan/sspdatatables | 1179a11358734e5e472e5eee703e8d34fa49e9bf | [
"MIT"
] | null | null | null | from django.db import models
from django_countries.fields import CountryField
from django.db.models.deletion import CASCADE
| 26.352941 | 57 | 0.765625 |
020213a818c2a038dbd07a3442e4a8ae253739be | 4,805 | py | Python | workspace/baseline/midi_generator.py | SeungHeonDoh/EMOPIA | 0afb93a91c9226949d617894d6aa2d67c4de4eb6 | [
"MIT"
] | 69 | 2021-07-12T03:17:17.000Z | 2022-03-27T06:16:35.000Z | workspace/baseline/midi_generator.py | SeungHeonDoh/EMOPIA | 0afb93a91c9226949d617894d6aa2d67c4de4eb6 | [
"MIT"
] | 7 | 2021-07-27T09:10:15.000Z | 2022-02-07T05:15:56.000Z | workspace/baseline/midi_generator.py | SeungHeonDoh/EMOPIA | 0afb93a91c9226949d617894d6aa2d67c4de4eb6 | [
"MIT"
] | 7 | 2021-07-12T10:41:14.000Z | 2022-02-04T10:28:08.000Z | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "4"
import json
import argparse
import numpy as np
import tensorflow as tf
import midi_encoder as me
from train_generative import build_generative_model
from train_classifier import preprocess_sentence
GENERATED_DIR = './generated'
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser(description='midi_generator.py')
parser.add_argument('--model', type=str, default='./trained', help="Checkpoint dir.")
parser.add_argument('--ch2ix', type=str, default='./trained/char2idx.json', help="JSON file with char2idx encoding.")
parser.add_argument('--embed', type=int, default=256, help="Embedding size.")
parser.add_argument('--units', type=int, default=512, help="LSTM units.")
parser.add_argument('--layers', type=int, default=4, help="LSTM layers.")
parser.add_argument('--seqinit', type=str, default="\n", help="Sequence init.")
parser.add_argument('--seqlen', type=int, default=512, help="Sequence lenght.")
parser.add_argument('--cellix', type=int, default=4, help="LSTM layer to use as encoder.")
parser.add_argument('--override', type=str, default="./trained/neurons_Q1.json", help="JSON file with neuron values to override.")
opt = parser.parse_args()
# Load char2idx dict from json file
with open(opt.ch2ix) as f:
char2idx = json.load(f)
# Load override dict from json file
override = {}
try:
with open(opt.override) as f:
override = json.load(f)
except FileNotFoundError:
print("Override JSON file not provided.")
# Create idx2char from char2idx dict
idx2char = {idx:char for char,idx in char2idx.items()}
# Calculate vocab_size from char2idx dict
vocab_size = len(char2idx)
# Rebuild model from checkpoint
model = build_generative_model(vocab_size, opt.embed, opt.units, opt.layers, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(opt.model))
model.build(tf.TensorShape([1, None]))
if not os.path.exists(GENERATED_DIR):
os.makedirs(GENERATED_DIR)
# Generate 5 midis
for i in range(100):
# Generate a midi as text
print("Generate midi {}".format(i))
midi_txt = generate_midi(model, char2idx, idx2char, opt.seqinit, opt.seqlen, layer_idx=opt.cellix, override=override)
me.write(midi_txt, os.path.join(GENERATED_DIR, "generated_Q1_{}.mid".format(i)))
| 35.330882 | 134 | 0.678044 |
020563bca2febded13ab705cf7257f5af323ab0d | 1,616 | py | Python | holobot/sdk/chrono/interval_parser.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 1 | 2021-05-24T00:17:46.000Z | 2021-05-24T00:17:46.000Z | holobot/sdk/chrono/interval_parser.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | 41 | 2021-03-24T22:50:09.000Z | 2021-12-17T12:15:13.000Z | holobot/sdk/chrono/interval_parser.py | rexor12/holobot | 89b7b416403d13ccfeee117ef942426b08d3651d | [
"MIT"
] | null | null | null | from ..utils import pad_left, try_parse_int
from datetime import timedelta
from typing import Dict, List
TIME_PARTS: List[str] = [ "D", "H", "M", "S" ]
FIXED_INTERVALS: Dict[str, timedelta] = {
"WEEK": timedelta(weeks=1),
"DAY": timedelta(days=1),
"HOUR": timedelta(hours=1)
}
| 36.727273 | 91 | 0.633045 |
02063c864e384d1ba7ec730d4d03b03f063ebc1f | 80,245 | py | Python | pirates/ai/PiratesMagicWordManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/ai/PiratesMagicWordManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/ai/PiratesMagicWordManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.ai.PiratesMagicWordManager
from direct.showbase.ShowBaseGlobal import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from otp.avatar import Avatar
from otp.chat import ChatManager
import string
from direct.showbase import PythonUtil
from otp.otpbase import OTPGlobals
from direct.distributed.ClockDelta import *
from otp.ai import MagicWordManager
from pirates.pirate import DistributedPlayerPirate
from pirates.npc import DistributedNPCTownfolk
from direct.distributed import DistributedCartesianGrid
from pirates.piratesbase import PiratesGlobals
from pirates.piratesgui.RadarUtil import RadarUtil
from pirates.cutscene import Cutscene, CutsceneData
from pirates.effects.Fireflies import Fireflies
from pirates.effects.GroundFog import GroundFog
from pirates.effects.Bonfire import Bonfire
from pirates.effects.CeilingDust import CeilingDust
from pirates.effects.CeilingDebris import CeilingDebris
from pirates.effects.CameraShaker import CameraShaker
from pirates.effects.DarkWaterFog import DarkWaterFog
from pirates.ship import DistributedSimpleShip
from pirates.world import WorldGlobals
from pirates.effects.FireworkGlobals import *
from pirates.effects.FireworkShowManager import FireworkShowManager
from pirates.piratesbase import PLocalizer | 69.176724 | 993 | 0.408349 |
0207e1cd7c3433152b1e340e7f376f8049a8644d | 634 | bzl | Python | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | [
"Apache-2.0"
] | null | null | null | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | [
"Apache-2.0"
] | null | null | null | layers/bazel/deps.bzl | celentes/bazel-container-ubuntu1804 | 67c12c3f6db785909fa3695c80ebbdec1ff81b61 | [
"Apache-2.0"
] | null | null | null | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
| 39.625 | 116 | 0.62776 |
0208d63efe0cf495f00648e33345a8f7f3c257eb | 4,318 | py | Python | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | [
"BSD-3-Clause"
] | null | null | null | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | [
"BSD-3-Clause"
] | null | null | null | db.py | RecycledMedia/apprenticeship-app | 67eb18300163dedcc4f473883f20d992644af7b2 | [
"BSD-3-Clause"
] | null | null | null | import os
import sqlite3
| 32.712121 | 96 | 0.585456 |
02097fb19e8e97c98afe88f64252e859af37785e | 243 | py | Python | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | python/vars_test.py | runningforlife/CodingExamples | 808b12cdb996390225d40a687bf6215c4b7d1822 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
def test_vars():
"""test variables in python"""
int_var = 5
string_var = "hah"
assert int_var == 5
assert string_var == 'hah'
print("test vars is done")
if __name__ == "__main__":
test_vars()
| 15.1875 | 34 | 0.596708 |
020a172c0d9f7b9606628146aaa062d113a7182b | 7,199 | py | Python | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | [
"Apache-2.0"
] | null | null | null | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | [
"Apache-2.0"
] | null | null | null | src/data_preparation/tfrecords_and_queues.py | Zhenxingzhang/tiny_imagenet | f44512023ce52df30cdffd80d3cb7cc4e1426354 | [
"Apache-2.0"
] | null | null | null | """
Up to now we have held all data in memory. This is of course impossible with large datasets.
In this file we explore the use of TFRecords (binary files quickly loading data from disk) and Queues to store
asynchronously loading data.
In this example we the TinyImageNet-200 dataset which has 100,000 64x64 images for 200 classes
We will examine 2 options for reading from TFRecord files:
a) reading from the record directly one example at a time
b) reading from the record into a queue and sampling batches from that queue
For more info, consult the great documentation on this from Tensorflow at
https://www.tensorflow.org/versions/r0.12/how_tos/reading_data/index.html
"""
from tqdm import tqdm
import numpy as np
import tensorflow as tf
import matplotlib
# to remove issue with default matplotlib backend (causing runtime error "python is not installed as a framework")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from PIL import Image
import os
from src.common.paths import DATA_PATH
if __name__ == '__main__':
# create TFRecords from csv files if necessary
for set_name in ['train', 'val', 'test']:
tfrecord_path = os.path.join(DATA_PATH, "{}.tfrecord".format(set_name))
if not os.path.exists(tfrecord_path):
print('Creating TFRecord from csv files for set: {}'.format(set_name))
train_csv = os.path.join(DATA_PATH, "{}.csv".format(set_name))
csv_to_record(train_csv, tfrecord_path)
else:
print('TFRecord for {} exists, nothing to do'.format(set_name))
PLOT = 10 # number of images to plot (set == None to suppress plotting)
# read from record one at time
print('Reading from record one at a time')
val_tfrecord_file = os.path.join(DATA_PATH, "train.tfrecord")
# read_from_record(val_tfrecord_file, shapes={'label': 1, 'image': (64, 64, 3)},
# plot=PLOT)
# read from record into queue, shuffle and batch
print('Reading from record into queue, random sample from queue in batches')
read_record_to_queue(val_tfrecord_file, shapes={'label': 1, 'image': (64, 64, 3)},
plot=PLOT)
| 41.137143 | 114 | 0.633282 |
020a1a0bc964b8990c94fa3dbddf6619f8e10b21 | 2,906 | py | Python | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | [
"MIT"
] | null | null | null | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | [
"MIT"
] | null | null | null | relialok/SerialPort.py | jrhosk/relialok | 28d59dfd39296695ebec19387eda9b986ecdd60f | [
"MIT"
] | null | null | null | import serial
import serial.tools.list_ports
from PyQt5.QtCore import QObject
import relialok.Logger | 34.188235 | 114 | 0.604955 |
020a85d2b9268f0ad8b4e717c76fefae39beb819 | 339 | py | Python | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | [
"MIT"
] | 1 | 2021-03-09T09:12:02.000Z | 2021-03-09T09:12:02.000Z | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | [
"MIT"
] | null | null | null | Python/DDUtil.py | dalek7/umbrella | cabf0367940905ca5164d104d7aef6ff719ee166 | [
"MIT"
] | null | null | null | import os
import datetime | 19.941176 | 62 | 0.60767 |
020b56188f2411001ea02312adb3e4b3e9f8fcbc | 4,301 | py | Python | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | [
"MIT"
] | null | null | null | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | [
"MIT"
] | null | null | null | codes/modelTraining.py | jairock282/hatsi | ecb16fb99115c413e980855ae3d06433ced2260c | [
"MIT"
] | null | null | null | """
__| |_____________________________________________________________________________________| |__
(__ _____________________________________________________________________________________ __)
| | | |
| | modelTraining Module | |
| | | |
| | Trains the LSTM model with the sliding windows of 15 frames | |
__| |_____________________________________________________________________________________| |__
(__ _____________________________________________________________________________________ __)
| | | |
"""
import glob
import numpy as np
import pandas as pd
from tensorflow import keras
from keras.layers import LSTM,Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import multilabel_confusion_matrix, accuracy_score
## ------------------------------------------------ Loading Data ------------------------------------------------------------------
files = glob.glob(r'C:\Users\khmap\depthai-python\Ejemplos_Python\Datos_Completos_L/*.csv') ##Read all the CSV files
tam=len(files) ##Total of files
tTrain=(70*tam)/100 ##Gets 70% of the files to the train process
tTest=tam-tTrain ##Gets 30% of the files to the test process
## -------------- Data matrices --------------
x_train=np.zeros((int(tTrain), 15, 201))
x_test=np.zeros((int(tTest), 15, 201))
y_train=np.zeros(int(tTrain))
y_test=np.zeros(int(tTest))
## ----------------- Phrases -------------------
phrases=np.array(['A','B','C','Diarrea','DolordeCabeza','DolordeCuerpo','D','E','Fatiga','Fiebre','F','G','H','I','J','K','L','M','N','O','P','Q','R','Sin sena','S','Tos','T','U','V','W','X','Y','Z','']) ##Phrases
label_map = {label:num for num, label in enumerate(phrases)} ##Phrases mapping
cont=0 ##Counter to separate 70% of the data to the training process and 30% to the testing process
contNum=0 ##Counter to assign to ytest and ytrain
cont_x_tra=0 ##Counter of the vector x_train
cont_x_tes=0 ##Counter of the vector x_test
cont_y_tra=0 ##Counter of the vector y_train
cont_y_tes=0 ##Counter of the vector y_test
## Iterate over each CSV file
for i in range(0, tam):
fRead= pd.read_csv(files[i]) ##Read file
res= fRead.values ##Gets all the values
res = res[0:len(res), 1:len(res[1])]
if cont<70: ## Training data
x_train[cont_x_tra]=res
y_train[cont_y_tra]=contNum
cont=cont+1
cont_x_tra=cont_x_tra + 1
cont_y_tra = cont_y_tra + 1
else: ## Testing data
x_test[cont_x_tes] = res
y_test[cont_y_tes] = contNum
cont = cont + 1
cont_x_tes =cont_x_tes + 1
cont_y_tes = cont_y_tes + 1
if cont==100:
cont=0
contNum=contNum+1
##Converts to binary matrix
y_train=to_categorical (y_train).astype(int)
y_test=to_categorical (y_test).astype(int)
print("Datos Guardados")
## -------------------------------------- Model ------------------------------------------------
model=Sequential()
model.add(LSTM(3400,return_sequences=True,activation='relu',input_shape=(15,201))) ##Input layer
model.add(LSTM(400,return_sequences=True,activation='relu')) ##Hidden layers
model.add(LSTM(128,return_sequences=False,activation='relu'))
model.add(Dense(64,activation='relu'))
model.add(Dense(34,activation='softmax')) ##Output layer
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['categorical_accuracy'])
model.fit(x_train,y_train,epochs=200)
model.summary() ## Summary of the model results
print("Modelo entrenado")
resul=model.predict(x_test) ##Prediction
## ---------------- Model evaluation ------------------------
print("Evaluacion")
ytrue=np.argmax(y_test,axis=1).tolist()
yhat=np.argmax(resul,axis=1).tolist()
matriz=multilabel_confusion_matrix(ytrue,yhat)
ac = accuracy_score(ytrue,yhat)
model.save('Entrenamiento_ABC_Enf_1.h5') ##Saves the model
| 44.802083 | 215 | 0.601953 |
020c16a78df08433f5dc19175781c44bf2dcbb01 | 1,763 | py | Python | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | [
"Apache-2.0"
] | null | null | null | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | [
"Apache-2.0"
] | null | null | null | datasource/mylaps/tests.py | SphinxNZ/game-on | da10ea9303563cd91ccab13321ba15a927e703e5 | [
"Apache-2.0"
] | null | null | null | import datetime
from django.utils import timezone
from django.test import TestCase
from sport.models import Sport, Competition, Venue
from compete.models import CompetitionRound
from compete.motorsport.models import Race
from datasource.models import DataSource
from datasource.mylaps.scoreboard import ScoreboardHandler
| 43 | 115 | 0.676687 |
020c551868d4325ef446cf93f3e3b90f6e4e9908 | 1,697 | py | Python | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 34 | 2019-05-29T03:15:48.000Z | 2022-03-24T03:14:58.000Z | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 1 | 2020-05-21T11:44:22.000Z | 2020-05-21T11:44:22.000Z | scripts/generate_tests.py | alibaba/sionnx | 3f3e18826ddcc26402b4e2af96ca8aac15560456 | [
"Apache-2.0"
] | 4 | 2019-12-16T18:49:42.000Z | 2021-10-11T18:41:54.000Z | #*
#* Copyright (C) 2017-2019 Alibaba Group Holding Limited
#*
#* Licensed under the Apache License, Version 2.0 (the "License");
#* you may not use this file except in compliance with the License.
#* You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing, software
#* distributed under the License is distributed on an "AS IS" BASIS,
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#* See the License for the specific language governing permissions and
#* limitations under the License.
import sys
import os
import glob
import argparse
parser = argparse.ArgumentParser(description='Generate conformanc tests')
parser.add_argument("-profile_level", help="Specify the profile level: 0=smoke tests; 1=full tests", type=int)
parser.parse_args()
args = parser.parse_args()
option = "-gen-onnx-smoke-tests"
if args.profile_level:
option = "-gen-onnx-smoke-tests" if args.profile_level==0 else "-gen-onnx-tests"
print("======Generating tests with option " + option + "========")
if not os.path.exists("tests"):
os.makedirs("tests")
os.system("cp ../include/onnx_*.td -r . | cp ../include/*.algorithm -r .")
dir_path = os.path.dirname(os.path.realpath(__file__))
td_files = glob.glob(os.path.join(dir_path, '*.td'))
lens = len(td_files)
for k in range(lens):
base = os.path.basename(td_files[k])
out_file_name = os.path.splitext(base)[0]
os.system("../llvm/build/bin/llvm-tblgen " + option + " " + td_files[k] + " -I ./ -o ./tests/" + out_file_name + ".py")
print(out_file_name + ".py generated.")
os.system("rm onnx_*.td | rm *.algorithm")
| 36.891304 | 124 | 0.703595 |
020e09341ffea9ce59519650e80614b26a974b81 | 6,610 | py | Python | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | [
"BSD-3-Clause"
] | 879 | 2015-01-01T12:06:35.000Z | 2022-03-27T16:13:05.000Z | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | [
"BSD-3-Clause"
] | 418 | 2015-01-02T08:43:43.000Z | 2022-03-25T15:49:21.000Z | tests/mixins.py | jarkkorantala/sqlalchemy-utils | 7cee65f0a3074245b853425e19a732aa274bfa3e | [
"BSD-3-Clause"
] | 295 | 2015-01-06T14:19:33.000Z | 2022-03-26T16:20:50.000Z | import pytest
import sqlalchemy as sa
| 28.864629 | 79 | 0.522542 |
020e71ff56d4917b70bf98b950bcfa70c6d8e56c | 6,041 | py | Python | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | gbpservice/nfp/lib/rest_client_over_unix.py | ashutosh-mishra/my-test | 51c82af293f291b9182204392e7d21bda27786d1 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import exceptions
import httplib
import httplib2
import zlib
import six.moves.urllib.parse as urlparse
import socket
from oslo_serialization import jsonutils
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
def get(path):
"""Implements get method for unix restclient
Return:Http Response
"""
return UnixRestClient().send_request(path, 'GET')
def put(path, body):
"""Implements put method for unix restclient
Return:Http Response
"""
headers = {'content-type': 'application/octet-stream'}
return UnixRestClient().send_request(
path, 'PUT', headers=headers, body=body)
def post(path, body, delete=False):
"""Implements post method for unix restclient
Return:Http Response
"""
# Method-Type added here,as DELETE/CREATE
# both case are handled by post as delete also needs
# to send data to the rest-unix-server.
headers = {'content-type': 'application/octet-stream'}
if delete:
headers.update({'method-type': 'DELETE'})
else:
headers.update({'method-type': 'CREATE'})
return UnixRestClient().send_request(
path, 'POST', headers=headers, body=body)
| 35.327485 | 78 | 0.609833 |
020f39177cabbb0de46cc69acb4473e957930343 | 3,916 | py | Python | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | [
"MIT"
] | null | null | null | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | [
"MIT"
] | null | null | null | tk_sim.py | incherre/slam-bot | 8479aff8f595b2d602a83e9e922b64836ae64375 | [
"MIT"
] | null | null | null | '''Robot sim with a nicer display.'''
from sim_framework import *
from math import radians
import tkinter
BACKGROUND_COLOR = 'grey60'
ENTITY_COLOR = 'RoyalBlue1'
OBSTACLE_COLOR = 'black'
ENTITY_TAG = 'entity'
if __name__ == '__main__':
root = tkinter.Tk()
W = TKWorld(root, -500, 500, -500, 500)
W.add_obs(Box(-500, -250, 250, 500))
W.add_obs(Box(-450, -200, 200, 450))
W.add_obs(Box(-400, -150, 150, 400))
W.add_obs(Box(-350, -100, 100, 350))
bot = CircleBot(100, 0, 0, 0)
W.add_ent(bot)
theta = radians(0)
root.after(int(1000 / 60), update)
root.mainloop()
| 36.943396 | 112 | 0.565884 |
0210ff2439d9da24bc21178720c18eee48ba770a | 1,224 | py | Python | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 81 | 2015-01-18T22:31:42.000Z | 2022-03-14T12:34:33.000Z | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 67 | 2015-01-05T15:24:39.000Z | 2021-08-16T12:44:58.000Z | COT/tests/test_doctests.py | morneaup/cot | 3d4dc7079a33aa0c09216ec339b44f84ab69ff4b | [
"MIT"
] | 20 | 2015-07-09T14:20:25.000Z | 2021-09-18T17:59:57.000Z | #!/usr/bin/env python
#
# test_doctests.py - test runner for COT doctests
#
# July 2016, Glenn F. Matthews
# Copyright (c) 2016-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Test runner for COT doctest tests."""
import logging
from logging import NullHandler
from doctest import DocTestSuite
from unittest import TestSuite
logging.getLogger('COT').addHandler(NullHandler())
def load_tests(*_):
"""Load doctests as unittest test suite.
For the parameters, see :mod:`unittest`. The parameters are unused here.
"""
suite = TestSuite()
suite.addTests(DocTestSuite('COT.data_validation'))
suite.addTests(DocTestSuite('COT.utilities'))
return suite
| 33.081081 | 78 | 0.750817 |
021113c40a21b05029b6c6708d8e10e3927d9701 | 1,045 | py | Python | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | [
"Apache-2.0"
] | 3 | 2017-03-10T15:00:08.000Z | 2019-10-29T07:46:19.000Z | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | [
"Apache-2.0"
] | 31 | 2017-02-21T16:19:11.000Z | 2021-03-25T21:27:50.000Z | aws/etc/packer/tools/python/stardog/cluster/test_program.py | stardog-union/stardog-graviton | 652fa3e3bbb166e92ce165938ef2075831d26c04 | [
"Apache-2.0"
] | 6 | 2017-04-26T07:22:25.000Z | 2020-07-29T20:17:55.000Z | import logging
import subprocess
import sys
import stardog.cluster.utils as utils
| 29.857143 | 97 | 0.572249 |
0211dbc40a6aa48e66ae666cbc2afb8294c1a296 | 297 | py | Python | apps/core/urls.py | tayyabRazzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | [
"MIT"
] | 2 | 2019-04-03T04:04:53.000Z | 2019-04-28T16:13:56.000Z | apps/core/urls.py | tayyabRazzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | [
"MIT"
] | 8 | 2021-06-04T21:57:30.000Z | 2022-03-11T23:48:38.000Z | apps/core/urls.py | tayyab-razzaq/opl-platform | 37b0efdb9327253a144c50bfd192132fac732619 | [
"MIT"
] | 7 | 2019-03-12T19:39:08.000Z | 2021-04-15T05:25:59.000Z | """ Here all the blog's urls routes will be mapped """
from django.urls import path
from django.conf.urls import include, url
from . import views
app_name = 'core'
urlpatterns = [
# path('', views.home, name='home-page'),
url(r'^api/', include('apps.core.api.urls', namespace='api')),
]
| 24.75 | 66 | 0.670034 |
021267aeacfe0ae1c6472616df30ce20f8a2d09b | 24,270 | py | Python | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | [
"MIT"
] | null | null | null | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | [
"MIT"
] | null | null | null | picoCTF-web/tests/api/functional/common.py | MongYahHsieh/picoCTF | dd500ad9c59768137b33e2d2b102a089ddf0ad40 | [
"MIT"
] | null | null | null | """Utilities for functional tests."""
import datetime
import json
import re
import pymongo
import pytest
import api
RATE_LIMIT_BYPASS = "test_bypass"
TESTING_DB_NAME = 'ctf_test'
db = None
def decode_response(res):
"""Parse a WebSuccess or WebError response."""
decoded_dict = json.loads(res.data.decode('utf-8'))
return (decoded_dict['status'], decoded_dict['message'],
decoded_dict['data'])
def get_csrf_token(res):
"""Extract the CSRF token from a response."""
for header in res.headers:
m = re.search('token=(.+?);', header[1])
if m:
return m.group(1)
raise RuntimeError('Could not find CSRF token in response headers: ' + str(res.headers))
def get_conn():
"""Get a connection to the testing database."""
global db
if db is None:
client = pymongo.MongoClient(host='127.0.0.1', port=27018)
db = client[TESTING_DB_NAME]
return db
def clear_db():
"""Clear out the testing database."""
db = get_conn()
db.command('dropDatabase')
def app():
"""Create an instance of the Flask app for testing."""
app = api.create_app({
'TESTING': True,
'MONGO_DB_NAME': TESTING_DB_NAME,
'MONGO_PORT': 27018
})
return app
ADMIN_DEMOGRAPHICS = {
'username': 'adminuser',
'password': 'adminpw',
'firstname': 'Admin',
'lastname': 'User',
'email': 'admin@example.com',
'country': 'US',
'affiliation': 'Admin School',
'usertype': 'other',
'demo': {
'parentemail': 'admin@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
TEACHER_DEMOGRAPHICS = {
'username': 'teacheruser',
'password': 'teacherpw',
'firstname': 'Teacher',
'lastname': 'User',
'email': 'teacher@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'teacher',
'demo': {
'parentemail': 'teacher@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
STUDENT_DEMOGRAPHICS = {
'username': 'studentuser',
'password': 'studentpw',
'firstname': 'Student',
'lastname': 'User',
'email': 'student@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'student',
'demo': {
'parentemail': 'student@example.com',
'age': '13-17'
},
'gid': None,
'rid': None
}
STUDENT_2_DEMOGRAPHICS = {
'username': 'studentuser2',
'password': 'studentpw2',
'firstname': 'Student',
'lastname': 'Usertwo',
'email': 'student2@example.com',
'country': 'US',
'affiliation': 'Sample School',
'usertype': 'student',
'demo': {
'parentemail': 'student2@example.com',
'age': '18+'
},
'gid': None,
'rid': None
}
OTHER_USER_DEMOGRAPHICS = {
'username': 'otheruser',
'password': 'otherpw',
'firstname': 'Other',
'lastname': 'User',
'email': 'other@example.com',
'country': 'US',
'affiliation': 'Sample Organization',
'usertype': 'other',
'demo': {
'age': '18+'
},
'gid': None,
'rid': None
}
def register_test_accounts():
"""
Register an admin, teacher, and student account with known demographics.
Intended to be used, if needed, in conjunction with clear_db()
to set up a clean environment for each test.
"""
with app().app_context():
api.user.add_user(ADMIN_DEMOGRAPHICS)
api.user.add_user(TEACHER_DEMOGRAPHICS)
api.user.add_user(STUDENT_DEMOGRAPHICS)
api.user.add_user(STUDENT_2_DEMOGRAPHICS)
api.user.add_user(OTHER_USER_DEMOGRAPHICS)
sample_shellserver_publish_output = r'''
{
"problems": [
{
"name": "ECB 1",
"category": "Cryptography",
"description": "There is a crypto service running at {{server}}:{{port}}. We were able to recover the source code, which you can download at {{url_for(\"ecb.py\")}}.",
"hints": [],
"walkthrough": "Let me google that for you.",
"score": 70,
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"pip_requirements": [
"pycrypto"
],
"pip_python_version": "3",
"unique_name": "ecb-1-b06174a",
"instances": [
{
"user": "ecb-1_0",
"deployment_directory": "/problems/ecb-1_0_73a0108a98d2862a86f4b71534aaf7c3",
"service": "ecb-1_0",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:46981. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/fd59acc6b8d2359d48bd939a08ecb8ab/ecb.py'>ecb.py</a>.",
"flag": "49e56ea9bf2e2b60ba9af034b5b2a5fd",
"flag_sha1": "77cec418714d6eb0dc48afa6d6f38200402a83c0",
"instance_number": 0,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 46981
},
{
"user": "ecb-1_1",
"deployment_directory": "/problems/ecb-1_1_83b2ed9a1806c86219347bc4982a66de",
"service": "ecb-1_1",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.",
"flag": "85a32ccd05fa30e0efd8da555c1a101a",
"flag_sha1": "f28581a86561c885152f7622200057585787c063",
"instance_number": 1,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 21953
},
{
"user": "ecb-1_2",
"deployment_directory": "/problems/ecb-1_2_1998c2cc0f0d17ae54170200f5478b7f",
"service": "ecb-1_2",
"socket": null,
"server": "192.168.2.3",
"description": "There is a crypto service running at 192.168.2.3:17648. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/19e863cba0bf14ad676e4b4799eacc72/ecb.py'>ecb.py</a>.",
"flag": "f76d2f6b885255450ed2f7307d96e28e",
"flag_sha1": "43cf6f1dab026cf2100e2f663509512416112219",
"instance_number": 2,
"should_symlink": false,
"files": [
{
"path": "flag",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "key",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "ecb.py",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 17648
}
],
"sanitized_name": "ecb-1"
},
{
"name": "SQL Injection 1",
"category": "Web Exploitation",
"pkg_dependencies": [
"php7.2-sqlite3"
],
"description": "There is a website running at http://{{server}}:{{port}}. Try to see if you can login!",
"score": 40,
"hints": [],
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"unique_name": "sql-injection-1-0c436d0",
"instances": [
{
"user": "sql-injection-1_0",
"deployment_directory": "/problems/sql-injection-1_0_9e114b246c48eb158b16525f71ae2a00",
"service": "sql-injection-1_0",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:46984. Try to see if you can login!",
"flag": "9ac0a74de6bced3cdce8e7fd466f32d0",
"flag_sha1": "958416d52940e4948eca8d9fb1eca21e4cf7eda1",
"instance_number": 0,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 46984
},
{
"user": "sql-injection-1_1",
"deployment_directory": "/problems/sql-injection-1_1_10a4b1cdfd3a0f78d0d8b9759e6d69c5",
"service": "sql-injection-1_1",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:21955. Try to see if you can login!",
"flag": "28054fef0f362256c78025f82e6572c3",
"flag_sha1": "f57fa5d3861c22a657eecafe30a43bd4ad7a4a2a",
"instance_number": 1,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 21955
},
{
"user": "sql-injection-1_2",
"deployment_directory": "/problems/sql-injection-1_2_57a103ad26a005f69b4332e62d611372",
"service": "sql-injection-1_2",
"socket": null,
"server": "192.168.2.3",
"description": "There is a website running at http://192.168.2.3:17649. Try to see if you can login!",
"flag": "6ed19af4c4540d444ae08735aa5664af",
"flag_sha1": "19bbc88ca231ddfde8063acdda75a92b1e6fd993",
"instance_number": 2,
"should_symlink": false,
"files": [
{
"path": "webroot/index.html",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/login.phps",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "webroot/config.php",
"permissions": 436,
"user": null,
"group": null
},
{
"path": "users.db",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
},
{
"path": "xinet_startup.sh",
"permissions": 1517,
"user": null,
"group": null
}
],
"port": 17649
}
],
"sanitized_name": "sql-injection-1"
},
{
"name": "Buffer Overflow 1",
"category": "Binary Exploitation",
"description": "Exploit the {{url_for(\"vuln\", display=\"Buffer Overflow\")}} found here: {{directory}}.",
"score": 50,
"hints": [
"This is a classic buffer overflow with no modern protections."
],
"walkthrough": "PROTIP: Find the correct answer to get the points.",
"author": "Tim Becker",
"organization": "ForAllSecure",
"event": "Sample",
"unique_name": "buffer-overflow-1-35e6d9d",
"instances": [
{
"user": "buffer-overflow-1_0",
"deployment_directory": "/problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.",
"flag": "638608c79eca2165e7b241ff365df05b",
"flag_sha1": "4b97abef055a11ec19c14622eb31eb1168d98aca",
"instance_number": 0,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
},
{
"user": "buffer-overflow-1_1",
"deployment_directory": "/problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/c95410042007bb17f49b891a2a87afb2/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_1_f49b6bd5da29513569bd87f98a934fa6.",
"flag": "35013564b97b80d4fd3f2be45e5836ff",
"flag_sha1": "5675d2d5819084d4203c1ef314239527074938a9",
"instance_number": 1,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
},
{
"user": "buffer-overflow-1_2",
"deployment_directory": "/problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61",
"service": null,
"socket": null,
"server": "192.168.2.3",
"description": "Exploit the <a href='//192.168.2.3/static/dbeb4d34945e752ea988dcdb4454f57d/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_2_6c4daed04928f80dd29290060827be61.",
"flag": "8dfabcb5c4a18d03ad5ecea19eef27a6",
"flag_sha1": "aef4789685665a1bf4994d62ef10941dbce5647a",
"instance_number": 2,
"should_symlink": true,
"files": [
{
"path": "flag.txt",
"permissions": 288,
"user": null,
"group": null
},
{
"path": "vuln",
"permissions": 1517,
"user": null,
"group": null
}
]
}
],
"sanitized_name": "buffer-overflow-1"
}
],
"bundles": [
{
"name": "Challenge Sampler",
"author": "Christopher Ganas",
"description": "Dependency weightmap for the example challenges provided in the picoCTF-Problems repository.",
"dependencies": {
"ecb-1-b06174a": {
"threshold": 1,
"weightmap": {
"buffer-overflow-1-35e6d9d": 1
}
},
"sql-injection-1-0c436d0": {
"threshold": 1,
"weightmap": {
"buffer-overflow-1-35e6d9d": 1,
"ecb-1-b06174a": 1
}
}
}
}
],
"sid": "728f36885f7c4686805593b9e4988c30"
}
'''
problems_endpoint_response = [{'name': 'SQL Injection 1', 'category': 'Web Exploitation', 'description': 'There is a website running at http://192.168.2.3:17648. Try to see if you can login!', 'score': 40, 'hints': [], 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'sql-injection-1', 'disabled': False, 'pid': '4508167aa0b219fd9d131551d10aa58e', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'port': 17648, 'server_number': 1, 'solved': False, 'unlocked': True}, {'name': 'Buffer Overflow 1', 'category': 'Binary Exploitation', 'description': "Exploit the <a href='//192.168.2.3/static/bd08ee41f495f8bff378c13157d0f511/vuln'>Buffer Overflow</a> found here: /problems/buffer-overflow-1_0_bab40cd8ebd7845e1c4c2951c6f82e1f.", 'score': 50, 'hints': ['This is a classic buffer overflow with no modern protections.'], 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'buffer-overflow-1', 'disabled': False, 'pid': '1bef644c399e10a3f35fecdbf590bd0c', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'server_number': 1, 'solved': False, 'unlocked': True}, {'name': 'ECB 1', 'category': 'Cryptography', 'description': "There is a crypto service running at 192.168.2.3:21953. We were able to recover the source code, which you can download at <a href='//192.168.2.3/static/beb9874a05a1810fa8c9d79152ace1b3/ecb.py'>ecb.py</a>.", 'hints': [], 'score': 70, 'author': 'Tim Becker', 'organization': 'ForAllSecure', 'sanitized_name': 'ecb-1', 'disabled': False, 'pid': '7afda419da96e8471b49df9c2009e2ef', 'solves': 0, 'socket': None, 'server': '192.168.2.3', 'port': 21953, 'server_number': 1, 'solved': False, 'unlocked': True}]
def load_sample_problems():
"""Load the sample problems and bundle into the DB."""
with app().app_context():
db = get_conn()
db.shell_servers.insert_one({
'sid': '728f36885f7c4686805593b9e4988c30',
'name': 'Test shell server',
'host': 'testing.picoctf.com',
'port': '22',
'username': 'username',
'password': 'password',
'protocol': 'HTTPS',
'server_number': 1
})
api.problem.load_published(
json.loads(sample_shellserver_publish_output)
)
def enable_sample_problems():
"""Enable any sample problems in the DB."""
db = get_conn()
db.problems.update_many({}, {'$set': {'disabled': False}})
def ensure_within_competition():
"""Adjust the competition times so that protected methods are callable."""
db = get_conn()
db.settings.update_one({}, {'$set': {
'start_time': datetime.datetime.utcnow() - datetime.timedelta(1),
'end_time': datetime.datetime.utcnow() + datetime.timedelta(1),
}})
def ensure_before_competition():
"""Adjust the competition times so that @block_before_competition fails."""
db = get_conn()
db.settings.update_one({}, {'$set': {
'start_time': datetime.datetime.utcnow() + datetime.timedelta(11),
'end_time': datetime.datetime.utcnow() + datetime.timedelta(10),
}})
def ensure_after_competition():
"""Adjust the competition times so that @block_before_competition fails."""
db = get_conn()
db.settings.update_one({}, {'$set': {
'start_time': datetime.datetime.utcnow() - datetime.timedelta(11),
'end_time': datetime.datetime.utcnow() - datetime.timedelta(10),
}})
def get_problem_key(pid, team_name):
"""Get the flag for a given pid and team name."""
db = get_conn()
assigned_instance_id = db.teams.find_one({
'team_name': team_name
})['instances'][pid]
problem_instances = db.problems.find_one({
'pid': pid
})['instances']
assigned_instance = None
for instance in problem_instances:
if instance['iid'] == assigned_instance_id:
assigned_instance = instance
break
return assigned_instance['flag']
| 34.621969 | 1,680 | 0.486279 |
0212be2b426e881f46ce9b5faa0a4d6cd2b0e659 | 11 | py | Python | py2codes/py2_exec.py | rhabacker/lib2to3import | 36102fa844bf18234053d96f6b9b90f5c6068e87 | [
"MIT"
] | null | null | null | py2codes/py2_exec.py | rhabacker/lib2to3import | 36102fa844bf18234053d96f6b9b90f5c6068e87 | [
"MIT"
] | 1 | 2020-11-14T01:39:18.000Z | 2020-11-17T07:54:28.000Z | py2codes/py2_exec.py | rhabacker/lib2to3import | 36102fa844bf18234053d96f6b9b90f5c6068e87 | [
"MIT"
] | 2 | 2019-08-12T09:58:05.000Z | 2021-03-18T17:13:06.000Z | exec "123"
| 5.5 | 10 | 0.636364 |
02154f47c33721ccd238e5aa1dcf948b5ec4704f | 1,308 | py | Python | Tools/RaiseCheck.py | 17320692835RGF/buptoj | 3d1e4719d757b4f0199e4451be7c0bee28e7c3ca | [
"MIT"
] | null | null | null | Tools/RaiseCheck.py | 17320692835RGF/buptoj | 3d1e4719d757b4f0199e4451be7c0bee28e7c3ca | [
"MIT"
] | null | null | null | Tools/RaiseCheck.py | 17320692835RGF/buptoj | 3d1e4719d757b4f0199e4451be7c0bee28e7c3ca | [
"MIT"
] | null | null | null |
import MySQLdb
from queue import Queue
import socket
import json
from time import sleep
import threading
import os
queue = Queue() #
myjsonfile = open("./setting.json", 'r')
judgerjson = json.loads(myjsonfile.read())
if os.environ.get("DB_USER"):
judgerjson["db_ip"] = os.environ.get("DB_HOST")
judgerjson["db_pass"] = os.environ.get("DB_PASSWORD")
judgerjson["db_user"] = os.environ.get("DB_USER")
judgerjson["db_port"] = os.environ.get("DB_PORT")
try:
db = MySQLdb.connect(judgerjson["db_ip"], judgerjson["db_user"], judgerjson["db_pass"],
judgerjson["db_database"], int(judgerjson["db_port"]), charset='utf8')
except Exception as e:
print(e)
exit(1)
cursor = db.cursor()
cursor.execute("SELECT user, code from judgestatus_judgestatus")
data = cursor.fetchall()
raisenum = {}
for d in data:
id = str(d[0])
code = str(d[1])
raisenum[id] = 0
for d in data:
id = str(d[0])
code = str(d[1])
raisenum[id] = max(raisenum[id], code.count("raise"))
li = sorted(raisenum.items(), key=lambda item:item[1],reverse=True)
file = open("raisenum.txt", "w")
for l in li:
file.write(l[0]+" "+str(l[1])+'\n')
print(l[0]+" "+str(l[1]))
| 22.169492 | 96 | 0.603211 |
0216b8ad609381ab0fb91a808c2538b44b5d722d | 1,557 | py | Python | unit_test.py | LSTM-Kirigaya/MsnEnvironment | 29c6e02525c7671f304d0f9d7689942509f12a16 | [
"MIT"
] | null | null | null | unit_test.py | LSTM-Kirigaya/MsnEnvironment | 29c6e02525c7671f304d0f9d7689942509f12a16 | [
"MIT"
] | null | null | null | unit_test.py | LSTM-Kirigaya/MsnEnvironment | 29c6e02525c7671f304d0f9d7689942509f12a16 | [
"MIT"
] | null | null | null | from env import MsnDiscrete, MaplessNaviEnv
from robot_utils import *
from robot_utils.log import msn_debug
from robot_utils.scene import *
from env import *
from collections import Counter
MAX_FORCE = 10.
TARGET_VELOCITY = 5.
MULTIPLY = 2.0
# keyboard_control()
u_MsnDiscrete() | 28.833333 | 126 | 0.705202 |
0218bf8ae5e0f91bee4226c0b79fa035f5a60a3c | 12,358 | py | Python | meta-refkit-core/lib/ostree/ostreeupdate.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 36 | 2017-02-20T04:04:28.000Z | 2022-02-17T05:36:33.000Z | meta-refkit-core/lib/ostree/ostreeupdate.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 284 | 2017-02-06T08:51:52.000Z | 2021-11-03T16:52:16.000Z | meta-refkit-core/lib/ostree/ostreeupdate.py | kraj/intel-iot-refkit | 04cd5afec0c41deeb5e1a48b43a0a31e708295c1 | [
"MIT"
] | 65 | 2017-02-03T12:36:16.000Z | 2021-02-18T11:00:46.000Z | import bb
import oe.path
import glob
import hashlib
import os.path
import shutil
import string
import subprocess
VARIABLES = (
'IMAGE_ROOTFS',
'OSTREE_BRANCHNAME',
'OSTREE_COMMIT_SUBJECT',
'OSTREE_REPO',
'OSTREE_GPGDIR',
'OSTREE_GPGID',
'OSTREE_OS',
'OSTREE_REMOTE',
'OSTREE_BARE',
'OSTREE_ROOTFS',
'OSTREE_SYSROOT',
)
| 42.177474 | 149 | 0.606732 |
021a272ec30f97420b7269bd3ee1d988857ff0cb | 123 | py | Python | returns-the- value-to-the-variable.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | 1 | 2021-03-29T02:29:58.000Z | 2021-03-29T02:29:58.000Z | returns-the- value-to-the-variable.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | null | null | null | returns-the- value-to-the-variable.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | 1 | 2021-03-27T15:00:06.000Z | 2021-03-27T15:00:06.000Z | #returns the value to the variable #
x = 900
print(x)
#print will take the argument x as the value in the variable #
| 20.5 | 63 | 0.699187 |
021a57faf00fc6d4266f3268c12b51f08834cc6c | 1,453 | py | Python | app.py | alvaropp/interactive-fantasy-map | b75ebc734970790bc5779865ab5e786e50250709 | [
"MIT"
] | 4 | 2021-02-11T03:23:40.000Z | 2022-02-13T01:56:58.000Z | app.py | alvaropp/interactive-fantasy-map | b75ebc734970790bc5779865ab5e786e50250709 | [
"MIT"
] | null | null | null | app.py | alvaropp/interactive-fantasy-map | b75ebc734970790bc5779865ab5e786e50250709 | [
"MIT"
] | null | null | null | from glob import glob
from flask import flash, Flask, Markup, render_template, redirect, request, send_from_directory
from form import MapForm
from process_new_map import create_map_from_form
app = Flask(__name__)
with open("secret.txt", "r") as secret_f:
app.config["SECRET_KEY"] = secret_f.read()
if __name__ == "__main__":
app.run(debug=True)
| 29.653061 | 105 | 0.692361 |
021afdb076c4754aa3ba63a750975318ad4eba13 | 4,121 | py | Python | monai/deploy/core/execution_context.py | jlvahldiek/monai-deploy-app-sdk | 050aeabec581067a11566f59a2970b075d36ae7c | [
"Apache-2.0"
] | 28 | 2021-09-17T18:16:42.000Z | 2022-03-31T16:32:36.000Z | monai/deploy/core/execution_context.py | jlvahldiek/monai-deploy-app-sdk | 050aeabec581067a11566f59a2970b075d36ae7c | [
"Apache-2.0"
] | 109 | 2021-09-17T18:34:31.000Z | 2022-03-31T21:04:35.000Z | monai/deploy/core/execution_context.py | jlvahldiek/monai-deploy-app-sdk | 050aeabec581067a11566f59a2970b075d36ae7c | [
"Apache-2.0"
] | 11 | 2021-09-17T20:23:31.000Z | 2022-03-29T08:55:19.000Z | # Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from monai.deploy.core.domain.datapath import NamedDataPath
# To avoid "Cannot resolve forward reference" error
# : https://github.com/agronholm/sphinx-autodoc-typehints#dealing-with-circular-imports
from . import operator
from .datastores import Datastore, MemoryDatastore
from .io_context import InputContext, OutputContext
from .models import Model
| 32.448819 | 106 | 0.674351 |
021b5b2946a725db8a4879a92f48d89c65c21d97 | 11,698 | py | Python | LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-1728-Cat-and-Mouse-II.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-1728-Cat-and-Mouse-II.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-05-10
=================================================================="""
import sys
import time
from typing import List, Tuple
import collections
"""
LeetCode - 1728 - (Hard) - Cat and Mouse II
https://leetcode.com/problems/cat-and-mouse-ii/
Description:
A game is played by a cat and a mouse named Cat and Mouse.
The environment is represented by a grid of size rows x cols,
where each element is a wall, floor, player (Cat, Mouse), or food.
Players are represented by the characters 'C'(Cat),'M'(Mouse).
Floors are represented by the character '.' and can be walked on.
Walls are represented by the character '#' and cannot be walked on.
Food is represented by the character 'F' and can be walked on.
There is only one of each character 'C', 'M', and 'F' in grid.
Mouse and Cat play according to the following rules:
Mouse moves first, then they take turns to move.
During each turn, Cat and Mouse can jump in one of the four directions (left, right, up, down).
They cannot jump over the wall nor outside of the grid.
catJump, mouseJump are the maximum lengths Cat and Mouse can jump at a time, respectively.
Cat and Mouse can jump less than the maximum length.
Staying in the same position is allowed.
Mouse can jump over Cat.
The game can end in 4 ways:
If Cat occupies the same position as Mouse, Cat wins.
If Cat reaches the food first, Cat wins.
If Mouse reaches the food first, Mouse wins.
If Mouse cannot get to the food within 1000 turns, Cat wins.
Given a rows x cols matrix grid and two integers catJump and mouseJump,
return true if Mouse can win the game if both Cat and Mouse play optimally, otherwise return false.
Example 1:
Input: grid = ["####F","#C...","M...."], catJump = 1, mouseJump = 2
Output: true
Explanation: Cat cannot catch Mouse on its turn nor can it get the food before Mouse.
Example 2:
Input: grid = ["M.C...F"], catJump = 1, mouseJump = 4
Output: true
Example 3:
Input: grid = ["M.C...F"], catJump = 1, mouseJump = 3
Output: false
Constraints:
rows == grid.length
cols = grid[i].length
1 <= rows, cols <= 8
grid[i][j] consist only of characters 'C', 'M', 'F', '.', and '#'.
There is only one of each character 'C', 'M', and 'F' in grid.
1 <= catJump, mouseJump <= 8
"""
if __name__ == "__main__":
sys.exit(main())
| 44.310606 | 116 | 0.566422 |
021c36744a33f4725dc24d93c0aa09acf81e97bf | 2,193 | py | Python | tictac/tictac/cli.py | SteveDMurphy/tic_tac_go | 7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7 | [
"MIT"
] | null | null | null | tictac/tictac/cli.py | SteveDMurphy/tic_tac_go | 7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7 | [
"MIT"
] | null | null | null | tictac/tictac/cli.py | SteveDMurphy/tic_tac_go | 7e80dc1ec6fbeceb3c9879cee7fb32b7ecfe37a7 | [
"MIT"
] | null | null | null | import click
from random import randrange
from tictac import Tictac
if __name__ == "__main__":
tictac()
| 33.227273 | 96 | 0.645691 |
021d46262a81bc3bd29354a1c4c85f1ce3571b25 | 4,230 | py | Python | matchId.py | terryhahm/ARAM | bbaa6446aec6ad7141d492aef174832e627c7b74 | [
"MIT"
] | null | null | null | matchId.py | terryhahm/ARAM | bbaa6446aec6ad7141d492aef174832e627c7b74 | [
"MIT"
] | null | null | null | matchId.py | terryhahm/ARAM | bbaa6446aec6ad7141d492aef174832e627c7b74 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import riotConstant
import time
import requests
| 37.433628 | 108 | 0.607092 |
021d5769d36b572a0f2addec694597fefa3cfa6f | 158 | py | Python | Backend/order/urls.py | Bhavya0020/Readopolis | a0053e4fae97dc8291b50c746f3dc3e6b454ad95 | [
"MIT"
] | null | null | null | Backend/order/urls.py | Bhavya0020/Readopolis | a0053e4fae97dc8291b50c746f3dc3e6b454ad95 | [
"MIT"
] | null | null | null | Backend/order/urls.py | Bhavya0020/Readopolis | a0053e4fae97dc8291b50c746f3dc3e6b454ad95 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('checkout/', views.checkout),
path('orders/', views.OrdersList.as_view()),
] | 19.75 | 50 | 0.677215 |
02223351c3f6f455c742ce52e04a38d560dc3455 | 299 | py | Python | src/z3c/saconfig/__init__.py | zopefoundation/z3c.saconfig | 69a32e7f7617ec4a1f9667d673a1ddc00aff59c2 | [
"ZPL-2.1"
] | 2 | 2016-03-12T14:22:23.000Z | 2019-05-22T04:18:26.000Z | src/z3c/saconfig/__init__.py | zopefoundation/z3c.saconfig | 69a32e7f7617ec4a1f9667d673a1ddc00aff59c2 | [
"ZPL-2.1"
] | 13 | 2015-05-05T12:27:48.000Z | 2021-05-20T11:11:49.000Z | src/z3c/saconfig/__init__.py | zopefoundation/z3c.saconfig | 69a32e7f7617ec4a1f9667d673a1ddc00aff59c2 | [
"ZPL-2.1"
] | 4 | 2015-05-04T12:18:31.000Z | 2019-11-18T09:47:31.000Z | from z3c.saconfig.scopedsession import Session, named_scoped_session
from z3c.saconfig.utility import (
GloballyScopedSession, SiteScopedSession, EngineFactory)
__all__ = [
'Session',
'named_scoped_session',
'GloballyScopedSession',
'SiteScopedSession',
'EngineFactory',
]
| 23 | 68 | 0.752508 |
0223c05bd579183b627da44b67aca37eba1114e5 | 557 | py | Python | src/triage/experiments/singlethreaded.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 160 | 2017-06-13T09:59:59.000Z | 2022-03-21T22:00:35.000Z | src/triage/experiments/singlethreaded.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 803 | 2016-10-21T19:44:02.000Z | 2022-03-29T00:02:33.000Z | src/triage/experiments/singlethreaded.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 59 | 2017-01-31T22:10:22.000Z | 2022-03-19T12:35:03.000Z | from triage.experiments import ExperimentBase
| 34.8125 | 66 | 0.800718 |
022635491f2d2bfe0024464d83f72d0ff2d7769e | 11,374 | py | Python | Webspider.py | radiantbk/webspider | 62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b | [
"MIT"
] | 1 | 2019-11-09T01:36:39.000Z | 2019-11-09T01:36:39.000Z | Webspider.py | radiantbk/webspider | 62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b | [
"MIT"
] | null | null | null | Webspider.py | radiantbk/webspider | 62a9c71f8f3f39e5e07e0fb68682fc05a83edd5b | [
"MIT"
] | null | null | null |
import re
import os
| 31.076503 | 164 | 0.481976 |
022a8bafe44b23b7f0a6af1c6947a769d26527f0 | 4,909 | py | Python | QScrollAreaImages.py | ErwinSchotman/QT5-QScrollAreaImages | 053e06a3ff67311f753712902902c43b1f011d30 | [
"MIT"
] | 1 | 2019-11-29T00:37:31.000Z | 2019-11-29T00:37:31.000Z | QScrollAreaImages.py | ErwinSchotman/QT5-QScrollAreaImages | 053e06a3ff67311f753712902902c43b1f011d30 | [
"MIT"
] | null | null | null | QScrollAreaImages.py | ErwinSchotman/QT5-QScrollAreaImages | 053e06a3ff67311f753712902902c43b1f011d30 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2019 Erwin Schotman
#
# Licensed under MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from PyQt5.QtWidgets import QScrollArea, QWidget, QGridLayout
from QClickableImage import *
from PyQt5.QtCore import QRect
#=======================================================================================================================
| 48.127451 | 121 | 0.534528 |
022b9e68ba47723e01a95addbedb6c10c435b96e | 30,434 | py | Python | pyrax/fakes.py | jfreeman812/pyrax | dba18df916dcc3a9f539bd9c609b1bb68f3d9203 | [
"Apache-2.0"
] | null | null | null | pyrax/fakes.py | jfreeman812/pyrax | dba18df916dcc3a9f539bd9c609b1bb68f3d9203 | [
"Apache-2.0"
] | 1 | 2019-11-06T20:21:59.000Z | 2019-11-06T20:21:59.000Z | pyrax/fakes.py | jfreeman812/pyrax | dba18df916dcc3a9f539bd9c609b1bb68f3d9203 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import os
import random
import time
import uuid
import pyrax
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
from pyrax.client import BaseClient
from pyrax.clouddatabases import CloudDatabaseClient
from pyrax.clouddatabases import CloudDatabaseDatabaseManager
from pyrax.clouddatabases import CloudDatabaseInstance
from pyrax.clouddatabases import CloudDatabaseManager
from pyrax.clouddatabases import CloudDatabaseUser
from pyrax.clouddatabases import CloudDatabaseUserManager
from pyrax.clouddatabases import CloudDatabaseVolume
from pyrax.cloudblockstorage import CloudBlockStorageClient
from pyrax.cloudblockstorage import CloudBlockStorageManager
from pyrax.cloudblockstorage import CloudBlockStorageSnapshot
from pyrax.cloudblockstorage import CloudBlockStorageSnapshotManager
from pyrax.cloudblockstorage import CloudBlockStorageVolume
from pyrax.cloudloadbalancers import CloudLoadBalancer
from pyrax.cloudloadbalancers import CloudLoadBalancerManager
from pyrax.cloudloadbalancers import CloudLoadBalancerClient
from pyrax.cloudloadbalancers import Node
from pyrax.cloudloadbalancers import VirtualIP
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import CloudDNSPTRRecord
from pyrax.cloudnetworks import CloudNetwork
from pyrax.cloudnetworks import CloudNetworkClient
from pyrax.cloudmonitoring import CloudMonitorClient
from pyrax.cloudmonitoring import CloudMonitorEntity
from pyrax.cloudmonitoring import CloudMonitorCheck
from pyrax.cloudmonitoring import CloudMonitorNotification
from pyrax.image import Image
from pyrax.image import ImageClient
from pyrax.image import ImageManager
from pyrax.image import ImageMemberManager
from pyrax.image import ImageTagManager
from pyrax.object_storage import BulkDeleter
from pyrax.object_storage import Container
from pyrax.object_storage import ContainerManager
from pyrax.object_storage import FolderUploader
from pyrax.object_storage import StorageClient
from pyrax.object_storage import StorageObject
from pyrax.object_storage import StorageObjectManager
from pyrax.queueing import Queue
from pyrax.queueing import QueueClaim
from pyrax.queueing import QueueMessage
from pyrax.queueing import QueueClient
from pyrax.queueing import QueueManager
import pyrax.exceptions as exc
from pyrax.base_identity import BaseIdentity
from pyrax.base_identity import Endpoint
from pyrax.base_identity import Service
from pyrax.identity.rax_identity import RaxIdentity
from pyrax.identity.keystone_identity import KeystoneIdentity
import pyrax.utils as utils
example_uri = "http://example.com"
class FakeIterator(utils.ResultsIterator):
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
class FakeStorageClient(StorageClient):
class FakeContainerManager(ContainerManager):
class FakeContainer(Container):
class FakeStorageObjectManager(StorageObjectManager):
class FakeStorageObject(StorageObject):
def __init__(self, manager, name=None, total_bytes=None, content_type=None,
last_modified=None, etag=None, attdict=None):
"""
The object can either be initialized with individual params, or by
passing the dict that is returned by swiftclient.
"""
self.manager = manager
self.name = name
self.bytes = total_bytes or 0
self.content_type = content_type
self.last_modified = last_modified
self.hash = etag
if attdict:
self._read_attdict(attdict)
fake_attdict = {"name": "fake",
"content-length": 42,
"content-type": "text/html",
"etag": "ABC",
"last-modified": "Tue, 01 Jan 2013 01:02:03 GMT",
}
fake_config_file = """[settings]
identity_type = rackspace
keyring_username =
region = FAKE
custom_user_agent = FAKE
http_debug =
"""
# This will handle both singular and plural responses.
fake_identity_user_response = {
"users": [{"name": "fake", "id": "fake"},
{"name": "faker", "id": "faker"}],
"user": {"name": "fake", "id": "fake"},
"roles": [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}
fake_identity_tenant_response = {"name": "fake", "id": "fake",
"description": "fake", "enabled": True}
fake_identity_tenants_response = {
"tenants": [
{"name": "fake", "id": "fake", "description": "fake",
"enabled": True},
{"name": "faker", "id": "faker", "description": "faker",
"enabled": True},
]}
fake_identity_tokens_response = {"access":
{'metadata': {u'is_admin': 0,
'roles': [u'asdfgh',
'sdfghj',
'dfghjk']},
'serviceCatalog': [{u'endpoints': [
{u'adminURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'id': 'dddddddddd',
'publicURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'internalURL': 'http://10.0.0.0:8774/v2/qweqweqwe',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'nova',
'type': 'compute'},
{u'endpoints': [{u'adminURL': 'http://10.0.0.0:35357/v2.0',
'id': 'qweqweqwe',
'internalURL': 'http://10.0.0.0:5000/v2.0',
'publicURL': 'http://10.0.0.0:5000/v2.0',
'region': 'some_region'}],
'endpoints_links': [],
'name': 'keystone',
'type': 'identity'}],
'token': {u'expires': '1999-05-04T16:45:05Z',
'id': 'qweqweqwe',
'tenant': {u'description': 'admin Tenant',
'enabled': True,
'id': 'qweqweqwe',
'name': 'admin'}},
'user': {u'id': 'qweqweqwe',
'name': 'admin',
'roles': [{u'id': 'qweqweqwe', 'name': 'admin'},
{u'id': 'qweqweqwe', 'name': 'KeystoneAdmin'},
{u'id': 'qweqweqwe',
'name': 'KeystoneServiceAdmin'}],
'roles_links': [],
'username': 'admin'}}}
fake_identity_endpoints_response = {"access": {
"endpoints": ["fake", "faker", "fakest"]}}
fake_identity_response = {u'access':
{u'serviceCatalog': [
{u'endpoints': [{u'publicURL':
'https://ord.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'},
{u'publicURL':
'https://dfw.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.loadbalancers.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'}],
'name': 'cloudLoadBalancers',
'type': 'rax:load-balancer'},
{u'endpoints': [{u'internalURL':
'https://snet-aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.fake1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.dfw1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'},
{u'internalURL':
'https://snet-aa.syd1.clouddrive.com/v1/MossoCloudFS_abc',
'publicURL': 'https://aa.ord1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFiles',
'type': 'object-store'},
{u'endpoints': [{u'publicURL':
'https://dfw.servers.api.rackspacecloud.com/v2/000000',
'region': 'DFW',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://dfw.servers.api.rackspacecloud.com/v2',
'versionList': 'https://dfw.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://ord.servers.api.rackspacecloud.com/v2/000000',
'region': 'ORD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://ord.servers.api.rackspacecloud.com/v2',
'versionList': 'https://ord.servers.api.rackspacecloud.com/'},
{u'publicURL':
'https://syd.servers.api.rackspacecloud.com/v2/000000',
'region': 'SYD',
'tenantId': '000000',
'versionId': '2',
'versionInfo': 'https://syd.servers.api.rackspacecloud.com/v2',
'versionList': 'https://syd.servers.api.rackspacecloud.com/'}],
'name': 'cloudServersOpenStack',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://dns.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudDNS',
'type': 'rax:dns'},
{u'endpoints': [{u'publicURL':
'https://dfw.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'DFW',
'tenantId': '000000'},
{u'publicURL':
'https://syd.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'SYD',
'tenantId': '000000'},
{u'publicURL':
'https://ord.databases.api.rackspacecloud.com/v1.0/000000',
'region': 'ORD',
'tenantId': '000000'}],
'name': 'cloudDatabases',
'type': 'rax:database'},
{u'endpoints': [{u'publicURL':
'https://servers.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000',
'versionId': '1.0',
'versionInfo': 'https://servers.api.rackspacecloud.com/v1.0',
'versionList': 'https://servers.api.rackspacecloud.com/'}],
'name': 'cloudServers',
'type': 'compute'},
{u'endpoints': [{u'publicURL':
'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'FAKE',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn2.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'ORD',
'tenantId': 'MossoCloudFS_abc'}],
'name': 'cloudFilesCDN',
'type': 'rax:object-cdn'},
{u'endpoints': [{u'publicURL':
'https://monitoring.api.rackspacecloud.com/v1.0/000000',
'tenantId': '000000'}],
'name': 'cloudMonitoring',
'type': 'rax:monitor'}],
u'token': {u'expires': '2222-02-22T22:22:22.000-02:00',
'id': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
'tenant': {u'id': '000000', 'name': '000000'}},
u'user': {u'id': '123456',
'name': 'fakeuser',
'RAX-AUTH:defaultRegion': 'DFW',
'roles': [{u'description': 'User Admin Role.',
'id': '3',
'name': 'identity:user-admin'}],
}}}
| 33.554576 | 80 | 0.630676 |
022be07ba133b6de16720dad8708b355fc237656 | 2,869 | py | Python | ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 25 | 2019-12-04T03:09:55.000Z | 2022-03-08T10:52:06.000Z | ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 29 | 2019-12-04T03:00:39.000Z | 2022-03-02T06:25:44.000Z | ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/alerts/alert_logfeeder.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 33 | 2019-12-04T02:51:30.000Z | 2022-03-24T02:47:38.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import socket
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.exceptions import ComponentIsNotRunning
RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'
RESULT_CODE_UNKNOWN = 'UNKNOWN'
LOGFEEDER_PID_DIR = '{{logfeeder-env/logfeeder_pid_dir}}'
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (LOGFEEDER_PID_DIR,)
def is_logfeeder_process_live(pid_file):
"""
Gets whether the LogSearch Logfeeder represented by the specified file is running.
:param pid_file: the PID file of the Logfeeder to check
:return: True if the Logfeeder is running, False otherwise
"""
live = False
try:
check_process_status(pid_file)
live = True
except ComponentIsNotRunning:
pass
return live
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
if set([LOGFEEDER_PID_DIR]).issubset(configurations):
LOGFEEDER_PID_PATH = os.path.join(configurations[LOGFEEDER_PID_DIR], 'logfeeder.pid')
else:
return (RESULT_CODE_UNKNOWN, ['The logfeeder_pid_dir is a required parameter.'])
if host_name is None:
host_name = socket.getfqdn()
logfeeder_process_running = is_logfeeder_process_live(LOGFEEDER_PID_PATH)
alert_state = RESULT_CODE_OK if logfeeder_process_running else RESULT_CODE_CRITICAL
alert_label = 'LogFeeder is running on {0}' if logfeeder_process_running else 'LogFeeder is NOT running on {0}'
alert_label = alert_label.format(host_name)
return (alert_state, [alert_label]) | 33.752941 | 113 | 0.776577 |
022d316f00567159f07f5f66967da1595528de9a | 3,589 | py | Python | hack/scripts/ca_metrics_parser.py | nicdoye/autoscaler | ebadbda2b2417d7da6147fbc0c1b39f7f55aff22 | [
"Apache-2.0"
] | 17 | 2018-09-14T10:31:43.000Z | 2021-09-14T08:47:34.000Z | hack/scripts/ca_metrics_parser.py | nicdoye/autoscaler | ebadbda2b2417d7da6147fbc0c1b39f7f55aff22 | [
"Apache-2.0"
] | 12 | 2019-01-09T10:34:06.000Z | 2022-03-24T08:37:25.000Z | hack/scripts/ca_metrics_parser.py | nicdoye/autoscaler | ebadbda2b2417d7da6147fbc0c1b39f7f55aff22 | [
"Apache-2.0"
] | 3 | 2019-05-06T14:51:10.000Z | 2020-12-22T14:03:43.000Z | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script parses metrics from Cluster Autoscaler e2e tests.
'''
from __future__ import division
from __future__ import print_function
import argparse
import json
def upper_bound(buckets):
'''
Going from the rightmost bucket, find the first one that has some samples
and return its upper bound.
'''
for i in xrange(len(buckets) - 1, -1, -1):
le, count = buckets[i]
if i == 0:
return le
else:
le_prev, count_prev = buckets[i-1]
if count_prev < count:
return le
def parse_metrics_file(metrics_file):
'''
Return interesting metrics for all Cluster Autoscaler functions.
Merics are stored in a map keyed by function name and are expressed in
seconds. They include
* sum of all samples
* count of sumples
* average value of samples
* upper bound - all collected samples were smaller than this value
* buckets - list of tuples (# of samples, bucket upper bound)
'''
summary = {}
with open(metrics_file) as metrics_file:
summary = {}
metrics = json.load(metrics_file)
ca_metrics = metrics['ClusterAutoscalerMetrics']
total_sum = ca_metrics['cluster_autoscaler_function_duration_seconds_sum']
for sample in total_sum:
function = function_name(sample)
summary[function] = CAMetric(function)
summary[function].sum = float(metric_value(sample))
count = ca_metrics['cluster_autoscaler_function_duration_seconds_count']
for sample in count:
function = function_name(sample)
summary[function].count = int(metric_value(sample))
summary[function].avg = summary[function].sum / summary[function].count
buckets = ca_metrics['cluster_autoscaler_function_duration_seconds_bucket']
for sample in buckets:
function = function_name(sample)
summary[function].buckets.append(
(float(sample['metric']['le']), int(metric_value(sample))))
for value in summary.values():
value.upper_bound = upper_bound(value.buckets)
return summary
if __name__ == '__main__':
main()
| 28.712 | 96 | 0.713291 |
022e461176e9788379dfe2431986a89fcba4d6ae | 2,631 | py | Python | tests/test_cli.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | 4 | 2021-09-16T13:35:33.000Z | 2022-02-01T23:35:53.000Z | tests/test_cli.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | 46 | 2021-09-16T13:44:58.000Z | 2022-02-02T13:42:56.000Z | tests/test_cli.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | null | null | null | from typing import Iterable
import pytest
from click.testing import CliRunner, Result
from mock import Mock, patch
from mock.mock import create_autospec
from tickit.cli import main
from tickit.core.components.component import ComponentConfig
from tickit.core.management.schedulers.master import MasterScheduler
from tickit.core.typedefs import ComponentID, ComponentPort, PortID
def test_cli_set_loggging_level(patch_logging):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(main, args=["--log-level", "INFO"])
assert result.exit_code == 0
patch_logging.basicConfig.assert_called_with(level="INFO")
def test_component_command(
patch_run_all_forever,
patch_read_configs,
):
runner: CliRunner = CliRunner()
result: Result = runner.invoke(
main, args=["component", "fake_device", "path/to/fake_device.yaml"]
)
assert result.exit_code == 0
patch_run_all_forever.assert_called_once()
| 27.989362 | 88 | 0.72824 |
022e5e8924eb3bc3c0fcb9bc827782f367ea128d | 565 | py | Python | homework5/app/config.py | sakost/tinkoff_fintech | 64b9d5a2a818b4db7c438b0dc53a8f31882f95ba | [
"MIT"
] | null | null | null | homework5/app/config.py | sakost/tinkoff_fintech | 64b9d5a2a818b4db7c438b0dc53a8f31882f95ba | [
"MIT"
] | null | null | null | homework5/app/config.py | sakost/tinkoff_fintech | 64b9d5a2a818b4db7c438b0dc53a8f31882f95ba | [
"MIT"
] | 2 | 2021-08-29T15:01:39.000Z | 2022-02-23T18:48:21.000Z | from typing import Any
from pydantic import BaseSettings
from .utils import singleton_cache
| 20.178571 | 57 | 0.699115 |
022f6a23b370efd01d97a4fc32d332f4e763d78f | 2,158 | py | Python | nabu/story.py | sterlingbaldwin/nabu | 6f19a1b237cdab6ff2179c952f41e239e1a0a3e8 | [
"MIT"
] | null | null | null | nabu/story.py | sterlingbaldwin/nabu | 6f19a1b237cdab6ff2179c952f41e239e1a0a3e8 | [
"MIT"
] | 1 | 2022-02-14T12:15:45.000Z | 2022-02-14T12:15:45.000Z | nabu/story.py | sterlingbaldwin/nabu | 6f19a1b237cdab6ff2179c952f41e239e1a0a3e8 | [
"MIT"
] | null | null | null | from typing import ChainMap
import yaml
from pathlib import Path
from jinja2 import Template
from weasyprint import HTML, CSS
# from xhtml2pdf import pisa
| 37.206897 | 79 | 0.598239 |
022fd56061f4a128f54c059a42d1bbaadf434720 | 322 | py | Python | src/homework/models/__init__.py | nvo87/education-backend | 1f008bd396b5dde4483af611532826a9bca9fef5 | [
"MIT"
] | 62 | 2021-09-22T18:38:26.000Z | 2022-03-29T06:09:42.000Z | src/homework/models/__init__.py | nvo87/education-backend | 1f008bd396b5dde4483af611532826a9bca9fef5 | [
"MIT"
] | 50 | 2021-09-16T07:17:31.000Z | 2022-03-26T12:06:58.000Z | src/homework/models/__init__.py | nvo87/education-backend | 1f008bd396b5dde4483af611532826a9bca9fef5 | [
"MIT"
] | 16 | 2021-10-17T17:43:31.000Z | 2022-03-26T11:22:45.000Z | from homework.models.answer import Answer
from homework.models.answer_access_log_entry import AnswerAccessLogEntry
from homework.models.answer_cross_check import AnswerCrossCheck
from homework.models.question import Question
__all__ = [
'Answer',
'AnswerAccessLogEntry',
'AnswerCrossCheck',
'Question',
]
| 26.833333 | 72 | 0.801242 |
0230ced77fc05cfeb2ad94e5f316982b5ce418ba | 1,650 | py | Python | second workout/8B/A.py | paktusov/algorithms | b21e7ead2325f77a606dc53495866e359f2e24fe | [
"BSD-3-Clause"
] | null | null | null | second workout/8B/A.py | paktusov/algorithms | b21e7ead2325f77a606dc53495866e359f2e24fe | [
"BSD-3-Clause"
] | null | null | null | second workout/8B/A.py | paktusov/algorithms | b21e7ead2325f77a606dc53495866e359f2e24fe | [
"BSD-3-Clause"
] | null | null | null |
tree = []
with open('input.txt', 'r', encoding='utf-8') as file:
string = file.readline().strip()
while string != '':
line = [i for i in string.split()]
if line[0] == 'ADD':
add(tree, int(line[1]))
elif line[0] == 'SEARCH':
if find(tree, int(line[1])):
print('YES')
else:
print('NO')
elif line[0] == 'PRINTTREE':
printtree(tree)
string = file.readline().strip()
| 21.710526 | 54 | 0.434545 |
023179993902aa78bcb94918909fb230bdfcaedd | 5,502 | py | Python | fewshot/clis/score_simple.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 63 | 2021-07-01T23:40:55.000Z | 2022-03-15T21:56:57.000Z | fewshot/clis/score_simple.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 1 | 2022-03-04T11:15:55.000Z | 2022-03-28T09:33:54.000Z | fewshot/clis/score_simple.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 3 | 2021-07-31T05:06:14.000Z | 2022-02-28T12:45:06.000Z | import json
from typing import TextIO
from functools import partial
import click
import numpy as np
from scipy.stats import sem
import pandas as pd
from fewshot.bootstrap import bootstrap
from fewshot.bootstrap import ci
from fewshot.challenges.utils import get_gold_dataset
from . import score_utils as su
def statistics(a, estimator=np.mean, conf_interval=95, n_boot=1000, seed=0):
"""With 95% CI"""
[ci_lower, ci_upper] = ci(
bootstrap(
a,
func=estimator,
n_boot=n_boot,
seed=seed,
),
conf_interval
)
stat = estimator(a)
return {
'stat': stat,
'stat_ci_lower': stat - ci_lower,
'stat_ci_upper': ci_upper - stat,
'stat_ci_sem': sem(a, ddof=1) * 1.96,
'std': np.std(a),
'n': len(a),
}
| 38.746479 | 117 | 0.596692 |
0232a5792f409bc2541863dd10af6a3d5b55632c | 1,196 | py | Python | KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py | xrick/gotek_smic | 7655b6d7415b23c35810b8db48af7424f7dcdb06 | [
"MIT"
] | null | null | null | KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py | xrick/gotek_smic | 7655b6d7415b23c35810b8db48af7424f7dcdb06 | [
"MIT"
] | null | null | null | KWS/Dissection/tf_mfcc_from_log_mel_spectrogram_sample.py | xrick/gotek_smic | 7655b6d7415b23c35810b8db48af7424f7dcdb06 | [
"MIT"
] | null | null | null | batch_size, num_samples, sample_rate = 32, 32000, 16000.0
# A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1].
pcm = tf.random.normal([batch_size, num_samples], dtype=tf.float32)
# A 1024-point STFT with frames of 64 ms and 75% overlap.
stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256,
fft_length=1024)
spectrograms = tf.abs(stfts)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13] | 46 | 78 | 0.778428 |
0232d872e8633ddbe199a54a9b7cd036c696f627 | 458 | py | Python | user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | user/migrations/0017_auto_20200812_2149.py | Muia23/Grammer | dcc26937d88382c1da36a5f72306e6de367e90a3 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-08-12 18:49
from __future__ import unicode_literals
from django.db import migrations
import tinymce.models
| 20.818182 | 55 | 0.617904 |
0233159b2601985f539a68dd35218b81258f9ecc | 1,834 | py | Python | audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | [
"MIT"
] | 4 | 2021-07-22T15:32:13.000Z | 2022-01-25T08:13:45.000Z | audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | [
"MIT"
] | null | null | null | audio/loudness_normalization.py | Open-Speech-EkStep/common_scripts | 916f01444e028f9111d5499217abf4443bd24017 | [
"MIT"
] | 3 | 2021-04-12T05:04:55.000Z | 2021-08-25T06:55:42.000Z | from pydub import AudioSegment, effects
import glob
import os
from tqdm import tqdm
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Normalize')
parser.add_argument('-i', '--input', required=True, help='Input path')
parser.add_argument('-o', '--output', required=True, help='Output path')
args_local = parser.parse_args()
normalize_loudness(args_local.input, args_local.output)
| 36.68 | 95 | 0.698473 |
0233975ca46a04c5b097d1d82d0ed1a76059f352 | 12,308 | py | Python | libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 4 | 2017-11-14T17:24:12.000Z | 2020-10-30T01:46:02.000Z | libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 11 | 2017-01-29T08:59:21.000Z | 2018-07-02T09:17:47.000Z | libcloud/dns/drivers/nsone.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 4 | 2016-04-04T08:01:48.000Z | 2018-06-06T08:04:36.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
import simplejson as json
except ImportError:
import json
from libcloud.dns.types import Provider, ZoneDoesNotExistError, \
ZoneAlreadyExistsError, RecordDoesNotExistError, RecordAlreadyExistsError
from libcloud.utils.py3 import httplib
from libcloud.dns.base import DNSDriver, Zone, Record, RecordType
from libcloud.common.nsone import NsOneConnection, NsOneResponse, \
NsOneException
__all__ = [
'NsOneDNSDriver'
]
| 34.188889 | 79 | 0.537699 |
02339931b6a314a7b42357abbf8fe125695e6d76 | 533 | py | Python | ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | [
"MIT"
] | null | null | null | ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | [
"MIT"
] | 6 | 2021-03-18T20:56:22.000Z | 2022-03-11T23:28:10.000Z | ocr.py | PI2-Braille-printer/OCR | 25511596efbe5e408fe43a92c0d04e513d7fea39 | [
"MIT"
] | null | null | null | from PIL import Image, ImageEnhance
import pytesseract
import os
#image = Image.open('f_test.jpg')
#enhance = ImageEnhance.Contrast(image)
#new_image = enhance.enhance(1.5)
#new_image.save('f_test__c_2.jpg')
for x in range(0,3):
os.system('./textcleaner -g -s 2 -a 1 ./Images/test_crop_'+str(x)+'.jpg ./Images/test_crop_'+str(x)+'_r.jpg')
result_string = pytesseract.image_to_string(Image.open('./Images/test_crop_'+str(x)+'_r.jpg'),lang='por')
print(result_string)
#result_string = result_string.split()
#print(result_string)
| 31.352941 | 110 | 0.739212 |
0233f5b5066a471f59d0277aa64b3c981e22b913 | 2,090 | py | Python | processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | [
"Apache-2.0"
] | 1 | 2017-05-06T21:25:39.000Z | 2017-05-06T21:25:39.000Z | processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | [
"Apache-2.0"
] | null | null | null | processing/lua_file_builder.py | eubr-atmosphere/Spark-Log-Parser | 6f2025d50944b3603ce3e41ab09afcb38eab4e08 | [
"Apache-2.0"
] | 3 | 2018-10-19T12:35:56.000Z | 2019-05-09T08:09:54.000Z | #! /usr/bin/env python3
## Copyright 2018 Eugenio Gianniti <eugenio.gianniti@polimi.it>
## Copyright 2016 Giorgio Pea <giorgio.pea@mail.polimi.it>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import sys
if __name__ == '__main__':
main()
| 32.65625 | 75 | 0.623445 |
0236d15dce7606a0d8edbca50d378b142b6663f7 | 127 | py | Python | mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | [
"MIT"
] | null | null | null | mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | [
"MIT"
] | null | null | null | mynlp/__init__.py | Suneel123/mynlp | 9dcf6fb57df66ebd4a359b8cd866323f43bc8ec4 | [
"MIT"
] | null | null | null | """Top-level package for mynlp."""
__author__ = """Suneel Dondapati"""
__email__ = 'dsuneel1@gmail.com'
__version__ = '0.1.0'
| 21.166667 | 35 | 0.685039 |
0236d5c96173fb20b1c62f540c0341822dff9bf5 | 788 | py | Python | test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | [
"MIT"
] | null | null | null | test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | [
"MIT"
] | null | null | null | test/point_test.py | markupCode/computational-geometry | 9a0a63a0b0c86e0618c18f82283b41baded21c50 | [
"MIT"
] | null | null | null | import unittest
from geometry.point import Point
if __name__ == '__main__':
unittest.main()
| 23.878788 | 50 | 0.549492 |
0238ca053db973ce47447cd47778ddb364794224 | 2,183 | py | Python | scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | [
"Apache-2.0"
] | 5 | 2019-05-04T08:43:58.000Z | 2021-12-20T14:22:52.000Z | scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | [
"Apache-2.0"
] | 7 | 2017-10-30T22:53:51.000Z | 2022-02-06T18:03:32.000Z | scenarios/simpleBTSEdgeCloudIngestion/units/sensors.py | rdsea/IoTCloudSamples | 37a3550627682981aa7d2a4cf317f19a3b1a699c | [
"Apache-2.0"
] | 3 | 2018-12-17T17:04:04.000Z | 2021-09-23T07:07:01.000Z | import yaml
import os, errno
import json
| 30.319444 | 149 | 0.607879 |
0238ea3d027c6d41c055683ac6fc0e17e3bc821b | 879 | py | Python | array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 6 | 2019-09-16T01:50:44.000Z | 2020-09-17T08:52:25.000Z | array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | null | null | null | array/0018_4_sum/0018_4_sum.py | zdyxry/LeetCode | 33371285d0f3302158230f46e8b1b63b9f4639c4 | [
"Xnet",
"X11"
] | 4 | 2020-02-07T12:43:16.000Z | 2021-04-11T06:38:55.000Z | import collections
nums = [1,0,-1,0,-2,2]
target = 0
res = Solution().fourSum(nums, target)
print(res) | 35.16 | 83 | 0.480091 |
023b3b94e54c17d3e9f985c30a7d72a9e9d96bce | 573 | py | Python | Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | [
"Apache-2.0"
] | 38 | 2021-12-22T03:12:01.000Z | 2022-03-17T06:57:10.000Z | Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | [
"Apache-2.0"
] | null | null | null | Qcover/backends/__init__.py | BAQIS-Quantum/Qcover | ca3776ed73fefa0cfef08042143a8cf842f8dad5 | [
"Apache-2.0"
] | 13 | 2021-12-22T07:32:44.000Z | 2022-02-28T06:47:41.000Z | from .backend import Backend
from .circuitbyqiskit import CircuitByQiskit
from .circuitbyprojectq import CircuitByProjectq
from .circuitbycirq import CircuitByCirq
from .circuitbyqulacs import CircuitByQulacs
# from .circuitbytket import CircuitByTket
from .circuitbytensor import CircuitByTensor
from .circuitbyqton import CircuitByQton
import warnings
warnings.filterwarnings("ignore")
__all__ = [
'Backend',
'CircuitByCirq',
'CircuitByQiskit',
'CircuitByProjectq',
'CircuitByTensor',
'CircuitByQulacs',
'CircuitByQton'
]
| 27.285714 | 49 | 0.767888 |
023c2aec98d43d7652c64c1fee878f6de026330b | 766 | py | Python | python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | [
"MIT"
] | null | null | null | python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | [
"MIT"
] | null | null | null | python-files/dictionary-val.py | chirumist/Python-Practice | fc7d6447ca492989221904121321aaf762bb6b43 | [
"MIT"
] | null | null | null | """
User Get Key Value Input Dictionary Start
"""
dic = {
"google": "google is provide job and internship.",
"amezon": "amezon is e-commerce store and cloud computing provider.",
"zoom": "zoom is provide video call system to connecting meeating.",
"microsoft": "microsoft is owner of windows and office software.."
}
# For beginner
print("google")
print("amezon")
print("zoom")
print("microsoft")
key = input("search detail of dectionary! \n")
print(dic[key.lower()])
# For advance
while True:
for index, item in dic.items():
print(index)
key = input("search detail of dectionary! \n")
print(dic[key.lower()])
if int(input("Press 1 to exit 0 to continue \n")):
break
"""
User Get Key Value Input Dictionary End
"""
| 24.709677 | 73 | 0.663185 |