hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bea518b5d7670e7d0f948f9ce8eda34d0fa0bd36 | 20,671 | py | Python | evalml/tests/objective_tests/test_standard_metrics.py | sharshofski/evalml | f13dcd969e86b72ba01ca520247a16850030dcb0 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/objective_tests/test_standard_metrics.py | sharshofski/evalml | f13dcd969e86b72ba01ca520247a16850030dcb0 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/objective_tests/test_standard_metrics.py | sharshofski/evalml | f13dcd969e86b72ba01ca520247a16850030dcb0 | [
"BSD-3-Clause"
] | null | null | null | from itertools import product
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import matthews_corrcoef as sk_matthews_corrcoef
from evalml.objectives import (
F1,
MAPE,
MSE,
AccuracyBinary,
AccuracyMulticlass,
BalancedAccuracyBinary,
BalancedAccuracyMulticlass,
BinaryClassificationObjective,
CostBenefitMatrix,
ExpVariance,
F1Macro,
F1Micro,
F1Weighted,
LogLossBinary,
MCCBinary,
MCCMulticlass,
MeanSquaredLogError,
Precision,
PrecisionMacro,
PrecisionMicro,
PrecisionWeighted,
Recall,
RecallMacro,
RecallMicro,
RecallWeighted,
RootMeanSquaredError,
RootMeanSquaredLogError
)
from evalml.objectives.utils import (
_all_objectives_dict,
get_non_core_objectives
)
EPS = 1e-5
all_automl_objectives = _all_objectives_dict()
all_automl_objectives = {name: class_() for name, class_ in all_automl_objectives.items() if class_ not in get_non_core_objectives()}
def test_calculate_percent_difference_negative_and_equal_numbers():
assert CostBenefitMatrix.calculate_percent_difference(score=5, baseline_score=5) == 0
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=-10) == 50
assert CostBenefitMatrix.calculate_percent_difference(score=-10, baseline_score=-5) == -100
assert CostBenefitMatrix.calculate_percent_difference(score=-5, baseline_score=10) == -150
assert CostBenefitMatrix.calculate_percent_difference(score=10, baseline_score=-5) == 300
# These values are not possible for LogLossBinary but we need them for 100% coverage
# We might add an objective where lower is better that can take negative values in the future
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=-10) == -50
assert LogLossBinary.calculate_percent_difference(score=-10, baseline_score=-5) == 100
assert LogLossBinary.calculate_percent_difference(score=-5, baseline_score=10) == 150
assert LogLossBinary.calculate_percent_difference(score=10, baseline_score=-5) == -300
def test_calculate_percent_difference_small():
expected_value = 100 * -1 * np.abs(1e-9 / (1e-9))
assert np.isclose(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-9), expected_value, atol=1e-8)
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=1e-10))
assert pd.isna(ExpVariance.calculate_percent_difference(score=1e-9, baseline_score=0))
assert pd.isna(ExpVariance.calculate_percent_difference(score=0, baseline_score=0))
| 40.771203 | 133 | 0.592473 |
bea66694bcf52b9fffd500768ba31f40d22d16ce | 4,908 | py | Python | server-python3/server.py | Aaron-Ming/websocket_terminal | 42c24391d51c275eabf1f879fb312b9a3614f51e | [
"MIT"
] | 40 | 2016-11-20T09:48:27.000Z | 2021-04-02T00:29:14.000Z | server-python3/server.py | Aaron-Ming/websocket_terminal | 42c24391d51c275eabf1f879fb312b9a3614f51e | [
"MIT"
] | 6 | 2018-01-07T03:43:22.000Z | 2022-03-21T08:43:33.000Z | server-python3/server.py | glensc/websocket_terminal | 42c24391d51c275eabf1f879fb312b9a3614f51e | [
"MIT"
] | 20 | 2016-12-02T06:00:27.000Z | 2021-08-15T11:40:34.000Z | import os
import urllib.parse
import eventlet
import eventlet.green.socket
# eventlet.monkey_patch()
import eventlet.websocket
import eventlet.wsgi
import wspty.pipe
from flask import Flask, request, redirect
from wspty.EchoTerminal import EchoTerminal
from wspty.EncodedTerminal import EncodedTerminal
from wspty.WebsocketBinding import WebsocketBinding
import config
def make_parser():
import argparse
parser = argparse.ArgumentParser(description='Websocket Terminal server')
parser.add_argument('-l', '--listen', default='', help='Listen on interface (default all)')
parser.add_argument('-p', '--port', default=5002, type=int, help='Listen on port')
parser.add_argument('--unsafe', action='store_true', help='Allow unauthenticated connections to local machine')
return parser
def start(interface, port, root_app_handler):
conn = (interface, port)
listener = eventlet.listen(conn)
print('listening on {0}:{1}'.format(*conn))
try:
eventlet.wsgi.server(listener, root_app_handler)
except KeyboardInterrupt:
pass
def start_default(interface, port, allow_unsafe=False, root_app_cls=DefaultRootApp):
root_app = root_app_cls()
root_app.allow_unsafe = allow_unsafe
start(interface, port, root_app.handler)
app = make_app()
if __name__ == '__main__':
main()
| 31.261146 | 115 | 0.647514 |
bea71c525e82317994bbd637b8bebff771fe81eb | 3,406 | py | Python | tests/unit/test_roger_promote.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_roger_promote.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | 47 | 2016-05-26T22:09:56.000Z | 2018-08-08T20:33:39.000Z | tests/unit/test_roger_promote.py | seomoz/roger-mesos-tools | 88b4cb3550a4b49d0187cfb5e6a22246ff6b9765 | [
"Apache-2.0"
] | 3 | 2017-09-20T22:39:03.000Z | 2017-11-07T22:29:29.000Z | # -*- encoding: utf-8 -*-
"""
Unit test for roger_promote.py
"""
import tests.helper
import unittest
import os
import os.path
import pytest
import requests
from mockito import mock, Mock, when
from cli.roger_promote import RogerPromote
from cli.appconfig import AppConfig
from cli.settings import Settings
from cli.framework import Framework
from cli.frameworkUtils import FrameworkUtils
from cli.marathon import Marathon
from cli.chronos import Chronos
| 28.383333 | 76 | 0.645919 |
bea77828d8025fc0087d40bc8239898137482a39 | 7,097 | py | Python | data/collectors.py | papb/COVID-19 | 2dc8e683f55c494ca894727aca56f90e53b161f3 | [
"MIT"
] | 6 | 2020-03-24T22:03:34.000Z | 2020-03-25T21:08:02.000Z | data/collectors.py | papb/COVID-19 | 2dc8e683f55c494ca894727aca56f90e53b161f3 | [
"MIT"
] | null | null | null | data/collectors.py | papb/COVID-19 | 2dc8e683f55c494ca894727aca56f90e53b161f3 | [
"MIT"
] | 1 | 2020-03-27T20:25:03.000Z | 2020-03-27T20:25:03.000Z | import json
import pandas as pd
import requests
def load_jh_df(csv):
'''
Loads a CSV file from JH repository and make some transforms
'''
jh_data_path = (
'https://raw.githubusercontent.com/'
'CSSEGISandData/COVID-19/master/'
'csse_covid_19_data/csse_covid_19_time_series/'
)
return (
pd.read_csv(
jh_data_path
+ csv[1]
)
.drop(['Lat', 'Long'], axis=1)
.groupby('Country/Region')
.sum()
.reset_index()
.rename(
columns={'Country/Region':'country'}
)
.melt(
id_vars=['country'],
var_name='date',
value_name=csv[0]
)
.assign(
date=lambda x: pd.to_datetime(
x['date'],
format='%m/%d/%y'
)
)
)
def load_jh_data():
'''
Loads the latest COVID-19 global data from
Johns Hopkins University repository
'''
cases_csv = ('cases', 'time_series_19-covid-Confirmed.csv')
deaths_csv = ('deaths', 'time_series_19-covid-Deaths.csv')
recovered_csv = ('recoveries', 'time_series_19-covid-Recovered.csv')
return (
pd.merge(
pd.merge(
load_jh_df(cases_csv),
load_jh_df(deaths_csv)
),
load_jh_df(recovered_csv)
)
.reindex(
columns = ['date',
'cases',
'deaths',
'recoveries',
'country']
)
)
if __name__ == '__main__':
try:
load_dump_covid_19_data()
except Exception as e:
print(f'Error when collecting COVID-19 cases data: {repr(e)}')
try:
load_dump_uf_pop()
except Exception as e:
print(f'Error when collecting population data: {repr(e)}')
| 32.856481 | 121 | 0.479217 |
bea7f47e5f837c85a165df266359f1d2def3dfcd | 5,976 | py | Python | testsuite/testsuite_helpers.py | freingruber/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | 91 | 2022-01-24T07:32:34.000Z | 2022-03-31T23:37:15.000Z | testsuite/testsuite_helpers.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | null | null | null | testsuite/testsuite_helpers.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | 11 | 2022-01-24T14:21:12.000Z | 2022-03-31T23:37:23.000Z | import config as cfg
import utils
import native_code.executor as executor
number_performed_tests = 0
expectations_correct = 0
expectations_wrong = 0
# The expect functions don't throw an exception like the assert_* functions
# Instead, they just count how often the expected result was true
# The expect functions don't throw an exception like the assert_* functions
# Instead, they just count how often the expected result was true
| 34.947368 | 130 | 0.737784 |
bea8a3765c24aad74f039b0b081e005d38797cfe | 99 | py | Python | examples/my_configs/two.py | davidhyman/override | e34bd3c8676233439de5c002367b3bff5c1b88d6 | [
"MIT"
] | null | null | null | examples/my_configs/two.py | davidhyman/override | e34bd3c8676233439de5c002367b3bff5c1b88d6 | [
"MIT"
] | 1 | 2017-07-11T22:03:27.000Z | 2017-07-11T22:03:27.000Z | examples/my_configs/two.py | davidhyman/override | e34bd3c8676233439de5c002367b3bff5c1b88d6 | [
"MIT"
] | null | null | null | from .one import *
fruit = 'banana'
colour = 'orange'
sam['eggs'] = 'plenty'
sam.pop('ham')
| 14.142857 | 23 | 0.585859 |
bea8aa6132f2274610cc25a57ec0c74c8765342d | 371 | py | Python | students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | students/K33402/Komarov_Georgy/LAB2/elevennote/src/api/urls.py | aglaya-pill/ITMO_ICT_WebDevelopment_2021-2022 | a63691317a72fb9b29ae537bc3d7766661458c22 | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework.routers import DefaultRouter
from .views import NoteViewSet
app_name = 'api'
router = DefaultRouter(trailing_slash=False)
router.register('notes', NoteViewSet)
urlpatterns = [
path('jwt-auth/', obtain_jwt_token),
path('', include(router.urls)),
]
| 23.1875 | 53 | 0.77628 |
beaa8784fc43c71bc8bb5120744ac9a157c4e2a7 | 2,387 | py | Python | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | null | null | null | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | null | null | null | PathPlanning/run.py | CandleStein/VAlg | 43aecdd351954d316f132793cf069b70bf2e5cc2 | [
"MIT"
] | 1 | 2020-09-25T18:31:34.000Z | 2020-09-25T18:31:34.000Z | from planning_framework import path
import cv2 as cv
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Path Planning Visualisation")
parser.add_argument(
"-n",
"--n_heuristic",
default=2,
help="Heuristic for A* Algorithm (default = 2). 0 for Dijkstra's Algorithm",
)
args = parser.parse_args()
N_H = int(args.n_heuristic)
drawing = False # true if mouse is pressed
mode = "obs" # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
sx, sy = 0, 0
dx, dy = 50, 50
# mouse callback function
img = np.zeros((512, 512, 3), np.uint8)
inv_im = np.ones(img.shape) * 255
cv.namedWindow("Draw the Occupancy Map")
cv.setMouseCallback("Draw the Occupancy Map", draw)
while 1:
cv.imshow("Draw the Occupancy Map", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
mode = "src"
img_ = img
cv.namedWindow("Set the Starting Point")
cv.setMouseCallback("Set the Starting Point", draw)
while 1:
cv.imshow("Set the Starting Point", inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
# cv.waitKey(20)
cv.destroyAllWindows()
mode = "dst"
end = "Set the End Point"
cv.namedWindow(end)
cv.setMouseCallback(end, draw)
while cv.getWindowProperty(end, 0) >= 0:
cv.imshow(end, inv_im - img)
if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
img = cv.resize(img_, (50, 50), interpolation=cv.INTER_AREA)
inv_img = np.ones(img.shape)
np.savetxt("map.txt", np.array(img[:, :, 0]))
plt.imshow(inv_img - img)
start = np.array([sx, sy]) * 50 // 512
end = np.array([dx, dy]) * 50 // 512
path(start, end, N_H)
| 26.820225 | 86 | 0.607038 |
beaf6a34e9709a7f3a490a80d9b84b4126151d38 | 186 | py | Python | Codeforces/problems/0136/A/136A.py | object-oriented-human/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | 2 | 2021-07-27T10:46:47.000Z | 2021-07-27T10:47:57.000Z | Codeforces/problems/0136/A/136A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | Codeforces/problems/0136/A/136A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | n = int(input())
line = list(map(int, input().split()))
l = {}
res = ""
for i, j in enumerate(line):
l[j] = i+1
for k in range(n):
res += str(l[k+1]) + " "
print(res.rstrip()) | 15.5 | 38 | 0.516129 |
beb013240bc0b9610971205973878d44dedde94f | 323 | py | Python | generatey.py | YiLisa/DSCI560-hw2 | 9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4 | [
"Apache-2.0"
] | null | null | null | generatey.py | YiLisa/DSCI560-hw2 | 9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4 | [
"Apache-2.0"
] | null | null | null | generatey.py | YiLisa/DSCI560-hw2 | 9cf4a40a6e4755ea1b0b68248e553fb4b6b7fdf4 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
if __name__ == '__main__':
main()
print('generating y = 3x+6...') | 21.533333 | 56 | 0.582043 |
beb0a9e7bb5a51ebb9a999b6f45ac4bb5d9df106 | 1,002 | py | Python | setup.py | burn874/mtg | cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2 | [
"Apache-2.0"
] | null | null | null | setup.py | burn874/mtg | cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2 | [
"Apache-2.0"
] | null | null | null | setup.py | burn874/mtg | cef47f6ec0ca110bdcb885ec09d6f5aca517c3b2 | [
"Apache-2.0"
] | null | null | null | import re
from pkg_resources import parse_requirements
import pathlib
from setuptools import find_packages, setup
README_FILE = 'README.md'
REQUIREMENTS_FILE = 'requirements.txt'
VERSION_FILE = 'mtg/_version.py'
VERSION_REGEXP = r'^__version__ = \'(\d+\.\d+\.\d+)\''
r = re.search(VERSION_REGEXP, open(VERSION_FILE).read(), re.M)
if r is None:
raise RuntimeError(f'Unable to find version string in {VERSION_FILE}.')
version = r.group(1)
long_description = open(README_FILE, encoding='utf-8').read()
install_requires = [str(r) for r in parse_requirements(open(REQUIREMENTS_FILE, 'rt'))]
setup(
name='mtg',
version=version,
description='mtg is a collection of data science and ml projects for Magic:the Gathering',
long_description=long_description,
long_description_content_type='text/markdown',
author='Ryan Saxe',
author_email='ryancsaxe@gmail.com',
url='https://github.com/RyanSaxe/mtg',
packages=find_packages(),
install_requires=install_requires,
)
| 31.3125 | 94 | 0.739521 |
beb1a4b08f2fc3818a575158bc7a69b7e5f252c7 | 1,399 | py | Python | avilla/core/resource/interface.py | RF-Tar-Railt/Avilla | 0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04 | [
"MIT"
] | null | null | null | avilla/core/resource/interface.py | RF-Tar-Railt/Avilla | 0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04 | [
"MIT"
] | 1 | 2021-12-19T07:43:30.000Z | 2021-12-19T07:43:30.000Z | avilla/core/resource/interface.py | RF-Tar-Railt/Avilla | 0b6eff0e253d4c04a5c82f4f252b6a11b7d81e04 | [
"MIT"
] | null | null | null | from __future__ import annotations
from dataclasses import dataclass
from avilla.core.platform import Base
from avilla.core.resource import Resource, ResourceProvider
| 29.765957 | 97 | 0.647605 |
beb313eb5f64fc657c1686ad77dc2225b87a4889 | 570 | py | Python | viewer_examples/plugins/median_filter.py | atemysemicolon/scikit-image | a48cf5822f9539c6602b9327c18253aed14fa692 | [
"BSD-3-Clause"
] | null | null | null | viewer_examples/plugins/median_filter.py | atemysemicolon/scikit-image | a48cf5822f9539c6602b9327c18253aed14fa692 | [
"BSD-3-Clause"
] | null | null | null | viewer_examples/plugins/median_filter.py | atemysemicolon/scikit-image | a48cf5822f9539c6602b9327c18253aed14fa692 | [
"BSD-3-Clause"
] | null | null | null | from skimage import data
from skimage.filter.rank import median
from skimage.morphology import disk
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider, OKCancelButtons, SaveButtons
from skimage.viewer.plugins.base import Plugin
image = data.coins()
viewer = ImageViewer(image)
plugin = Plugin(image_filter=median_filter)
plugin += Slider('radius', 2, 10, value_type='int')
plugin += SaveButtons()
plugin += OKCancelButtons()
viewer += plugin
viewer.show()
| 25.909091 | 71 | 0.784211 |
beb317bf51c8d955452bb7ade64a00caeb647030 | 8,722 | py | Python | autotest/test_gwf_buy_lak01.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | 3 | 2019-07-10T21:16:57.000Z | 2021-10-08T00:56:20.000Z | autotest/test_gwf_buy_lak01.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | null | null | null | autotest/test_gwf_buy_lak01.py | scharlton2/modflow6 | 83ac72ee3b6f580aaffef6352cf15c1697d3ce66 | [
"CC0-1.0"
] | 3 | 2019-11-28T16:26:50.000Z | 2020-02-05T11:08:37.000Z | # Test the buoyancy package and the variable density flows between the lake
# and the gwf model. This model has 4 layers and a lake incised within it.
# The model is transient and has heads in the aquifer higher than the initial
# stage in the lake. As the model runs, the lake and aquifer equalize and
# should end up at the same level. The test ensures that the initial and
# final water volumes in the entire system are the same. There are three
# different cases:
# 1. No buoyancy package
# 2. Buoyancy package with lake and aquifer density = 1000.
# 3. Buoyancy package with lake and aquifer density = 1024.5
import os
import pytest
import sys
import numpy as np
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["buy_lak_01a"] # , 'buy_lak_01b', 'buy_lak_01c']
buy_on_list = [False] # , True, True]
concbuylist = [0.0] # , 0., 35.]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
# - No need to change any code below
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
| 29.073333 | 90 | 0.576015 |
beb37d345ad255de414b430caeba23a0fa10d2d1 | 441 | py | Python | lesson-08/roll_dice_v1.0.py | hemiaoio/pylearning | 4b3885ed7177db4e6e03da80dd9ed69719c8d866 | [
"MIT"
] | 1 | 2018-11-11T03:44:02.000Z | 2018-11-11T03:44:02.000Z | lesson-08/roll_dice_v1.0.py | hemiaoio/learn-python | 4b3885ed7177db4e6e03da80dd9ed69719c8d866 | [
"MIT"
] | null | null | null | lesson-08/roll_dice_v1.0.py | hemiaoio/learn-python | 4b3885ed7177db4e6e03da80dd9ed69719c8d866 | [
"MIT"
] | null | null | null | """
1.0
"""
import random
if __name__ == '__main__':
main()
| 15.206897 | 63 | 0.569161 |
beb557aa11e275e2f9691dee969a012dab3f26db | 759 | py | Python | composer/dataflow-python3/main.py | gxercavins/gcp-snippets | a90e4e9c922370face876aa7c56db610896e1a6f | [
"Apache-2.0"
] | 2 | 2022-02-07T07:53:35.000Z | 2022-02-23T18:46:03.000Z | composer/dataflow-python3/main.py | gxercavins/gcp-snippets | a90e4e9c922370face876aa7c56db610896e1a6f | [
"Apache-2.0"
] | 1 | 2019-10-26T19:03:34.000Z | 2019-10-26T19:03:48.000Z | composer/dataflow-python3/main.py | gxercavins/gcp-snippets | a90e4e9c922370face876aa7c56db610896e1a6f | [
"Apache-2.0"
] | 6 | 2020-03-19T23:58:46.000Z | 2022-02-07T07:53:37.000Z | import argparse
import logging
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def run(argv=None, save_main_session=True):
"""Dummy pipeline to test Python3 operator."""
parser = argparse.ArgumentParser()
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p = beam.Pipeline(options=pipeline_options)
# Just a simple test
p | 'Create Events' >> beam.Create([1, 2, 3])
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| 27.107143 | 78 | 0.777339 |
beb680071d94ed8dd93dc11b2e313714df1f9b83 | 1,727 | py | Python | dingtalk/message/conversation.py | kangour/dingtalk-python | b37b9dac3ca3ff9d727308fb120a8fd05e11eaa5 | [
"Apache-2.0"
] | 88 | 2017-12-28T05:23:15.000Z | 2021-12-20T13:44:18.000Z | dingtalk/message/conversation.py | niulinlnc/dingtalk-python | c4209658f88344e8f0890137ed7c887c8b740a6c | [
"Apache-2.0"
] | 8 | 2018-04-28T05:41:49.000Z | 2021-06-01T21:51:11.000Z | dingtalk/message/conversation.py | niulinlnc/dingtalk-python | c4209658f88344e8f0890137ed7c887c8b740a6c | [
"Apache-2.0"
] | 43 | 2017-12-07T09:43:48.000Z | 2021-12-03T01:19:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/30 3:02
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : messages.py
# @Software: PyCharm
import json
from ..foundation import *
from json import JSONDecodeError
__author__ = 'blackmatrix'
__all__ = ['async_send_msg', 'get_msg_send_result', 'get_msg_send_progress']
if __name__ == '__main__':
pass
| 31.981481 | 120 | 0.70469 |
beb69b31ba90004b6f8731fea0065e0f64e36216 | 1,412 | py | Python | backend/garpix_page/setup.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | backend/garpix_page/setup.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | backend/garpix_page/setup.py | griviala/garpix_page | 55f1d9bc6d1de29d18e15369bebcbef18811b5a4 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from os import path
here = path.join(path.abspath(path.dirname(__file__)), 'garpix_page')
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='garpix_page',
version='2.23.0',
description='',
long_description=long_description,
url='https://github.com/garpixcms/garpix_page',
author='Garpix LTD',
author_email='info@garpix.com',
license='MIT',
packages=find_packages(exclude=['testproject', 'testproject.*']),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
include_package_data=True,
zip_safe=False,
install_requires=[
'Django >= 1.11',
'django-polymorphic-tree-for-garpix-page >= 2.1.1',
'django-modeltranslation >= 0.16.2',
'django-multiurl >= 1.4.0',
'djangorestframework >= 3.12.4',
'garpix_utils >= 1.4.0',
'django-tabbed-admin >= 1.0.4',
'model-bakery >= 1.4.0'
],
)
| 32.090909 | 69 | 0.607649 |
beb76b3debe06f273a8ef3ec32c53943cd031a3b | 20,225 | py | Python | .kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.video.p2p-streams/resources/core/livestreams.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | # -*- coding: utf-8 -*-
""" p2p-streams (c) 2014 enen92 fightnight
This file contains the livestream addon engine. It is mostly based on divingmule work on livestreams addon!
Functions:
xml_lists_menu() -> main menu for the xml list category
addlista() -> add a new list. It'll ask for local or remote and processes the given input
remove_list(name) -> Remove a list
get_groups(url) -> First regex function to parse a given list. Sopcast type list
get_channels(name,url) -> Second regex function to parse a given list. Used to general livestreams xml type lists
getData(url,fanart) -> Get the item data such as iconimage, fanart, etc
getChannelItems(name,url,fanart) -> Function to grab the channel items
getItems(items,fanart) -> Function to grab the items from the xml
removeNonAscii(s) -> Function to remove non-ascii characters from the list
getSoup(url) -> uses beautifulsoup to parse a remote xml
addon_log(string) -> Simple log/print function
getRegexParsed(regexs, url) -> parse the regex expression
list_type(url) -> Checks if the list is xml or m3u
parse_m3u(url) -> Parses a m3u type list
"""
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
from peertopeerutils.pluginxbmc import *
from peertopeerutils.webutils import *
from peertopeerutils.directoryhandle import *
from peertopeerutils.iofile import *
"""
Main Menu
"""
"""
Add a new list function
"""
"""
Remove a List
"""
"""
Parsing functions
"""
| 38.745211 | 303 | 0.514907 |
beb77481d7d9ef64134079c15cf78aedfbcf66f2 | 187 | py | Python | RainIt/rain_it/ric/Procedure.py | luisgepeto/RainItPi | 47cb7228e9c584c3c4489ebc78abf6de2096b770 | [
"MIT"
] | null | null | null | RainIt/rain_it/ric/Procedure.py | luisgepeto/RainItPi | 47cb7228e9c584c3c4489ebc78abf6de2096b770 | [
"MIT"
] | null | null | null | RainIt/rain_it/ric/Procedure.py | luisgepeto/RainItPi | 47cb7228e9c584c3c4489ebc78abf6de2096b770 | [
"MIT"
] | null | null | null | from ric.RainItComposite import RainItComposite
| 18.7 | 47 | 0.705882 |
beb861661de128962032b82c144ceaca4b7cc85f | 81 | py | Python | 1067.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | 3 | 2020-11-25T19:05:31.000Z | 2021-03-29T07:29:36.000Z | 1067.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | 1067.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | valor = int(input())
for i in range(valor+1):
if(i%2 != 0):
print(i) | 16.2 | 24 | 0.506173 |
beb987a1f2b8198bf13096fe552301ac5d99117d | 889 | py | Python | api-reference-examples/python/te-tag-query/api-example-update.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 997 | 2015-03-13T18:04:03.000Z | 2022-03-30T12:09:10.000Z | api-reference-examples/python/te-tag-query/api-example-update.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 444 | 2015-03-26T17:28:49.000Z | 2022-03-28T19:34:05.000Z | api-reference-examples/python/te-tag-query/api-example-update.py | b-bold/ThreatExchange | 6f8d0dc803faccf576c9398569bb52d54a4f9a87 | [
"BSD-3-Clause"
] | 294 | 2015-03-13T22:19:43.000Z | 2022-03-30T08:42:45.000Z | #!/usr/bin/env python
# ================================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ================================================================
import sys
import json
import TE
TE.Net.setAppTokenFromEnvName("TX_ACCESS_TOKEN")
postParams = {
"descriptor_id": "4036655176350945", # ID of the descriptor to be updated
"reactions": "INGESTED,IN_REVIEW",
}
showURLs = False
dryRun = False
validationErrorMessage, serverSideError, responseBody = TE.Net.updateThreatDescriptor(
postParams, showURLs, dryRun
)
if validationErrorMessage != None:
sys.stderr.write(validationErrorMessage + "\n")
sys.exit(1)
if serverSideError != None:
sys.stderr.write(str(serverSideError) + "\n")
sys.stderr.write(json.dumps(responseBody) + "\n")
sys.exit(1)
print(json.dumps(responseBody))
| 26.147059 | 86 | 0.620922 |
beb9a541895990f03cef5c41fda543323a1a2725 | 12,362 | py | Python | loaner/web_app/backend/api/shelf_api_test.py | Bottom-Feeders/GrabNGO | 5a467362e423700a5a7276a7fa9a47040033cfcf | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/api/shelf_api_test.py | Bottom-Feeders/GrabNGO | 5a467362e423700a5a7276a7fa9a47040033cfcf | [
"Apache-2.0"
] | null | null | null | loaner/web_app/backend/api/shelf_api_test.py | Bottom-Feeders/GrabNGO | 5a467362e423700a5a7276a7fa9a47040033cfcf | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.api.shelf_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mock
from protorpc import message_types
from google.appengine.api import search
import endpoints
from loaner.web_app.backend.api import root_api # pylint: disable=unused-import
from loaner.web_app.backend.api import shelf_api
from loaner.web_app.backend.api.messages import shared_messages
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import
from loaner.web_app.backend.testing import loanertest
def test_get_shelf_urlsafe_key(self):
"""Test getting a shelf using the urlsafe key."""
request = shelf_messages.ShelfRequest(urlsafe_key=self.shelf.key.urlsafe())
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location(self):
"""Test getting a shelf using the location."""
request = shelf_messages.ShelfRequest(location=self.shelf.location)
shelf = shelf_api.get_shelf(request)
self.assertEqual(shelf, self.shelf)
def test_get_shelf_using_location_error(self):
"""Test getting a shelf with an invalid location."""
request = shelf_messages.ShelfRequest(location='Not_Valid')
with self.assertRaisesRegexp(
endpoints.NotFoundException,
shelf_api._SHELF_DOES_NOT_EXIST_MSG % request.location):
shelf_api.get_shelf(request)
if __name__ == '__main__':
loanertest.main()
| 41.905085 | 86 | 0.729494 |
beb9ee31926225eb2b3cd87871300007116d1d11 | 2,177 | py | Python | app/views/main.py | ArmandDS/ai_bert_resumes | 743f37049bbca67bcbbaf21a2ffecf0d093351df | [
"MIT"
] | 1 | 2020-02-28T12:17:38.000Z | 2020-02-28T12:17:38.000Z | app/views/main.py | ArmandDS/ai_bert_resumes | 743f37049bbca67bcbbaf21a2ffecf0d093351df | [
"MIT"
] | 1 | 2021-06-02T00:54:48.000Z | 2021-06-02T00:54:48.000Z | app/views/main.py | ArmandDS/ai_bert_resumes | 743f37049bbca67bcbbaf21a2ffecf0d093351df | [
"MIT"
] | null | null | null | from flask import render_template, jsonify, Flask, redirect, url_for, request
from app import app
import random
import os
# import tensorflow as tf
# import numpy as np
# import sys
# import spacy
# nlp = spacy.load('en')
# sys.path.insert(0, "/content/bert_experimental")
# from bert_experimental.finetuning.text_preprocessing import build_preprocessor
# from bert_experimental.finetuning.graph_ops import load_graph
# restored_graph = load_graph("models/frozen_graph.pb")
# graph_ops = restored_graph.get_operations()
# input_op, output_op = graph_ops[0].name, graph_ops[-1].name
# x = restored_graph.get_tensor_by_name(input_op + ':0')
# y = restored_graph.get_tensor_by_name(output_op + ':0')
# preprocessor = build_preprocessor("./uncased_L-12_H-768_A-12/vocab.txt", 256)
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32], name='preprocessor')
# py_func = tf.numpy_function(preprocessor, [x], [tf.int32, tf.int32, tf.int32])
# sess = tf.Session(graph=restored_graph)
# delimiter = " ||| "
| 33.492308 | 117 | 0.686266 |
bebb042aa5530a31d011f0dddb5b720502bac701 | 11,710 | py | Python | ahrs/filters/complementary.py | jaluebbe/ahrs | 4b4a33b1006e0d455a71ac8379a2697202361758 | [
"MIT"
] | null | null | null | ahrs/filters/complementary.py | jaluebbe/ahrs | 4b4a33b1006e0d455a71ac8379a2697202361758 | [
"MIT"
] | null | null | null | ahrs/filters/complementary.py | jaluebbe/ahrs | 4b4a33b1006e0d455a71ac8379a2697202361758 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Complementary Filter
====================
Attitude quaternion obtained with gyroscope and accelerometer-magnetometer
measurements, via complementary filter.
First, the current orientation is estimated at time :math:`t`, from a previous
orientation at time :math:`t-1`, and a given angular velocity,
:math:`\\omega`, in rad/s.
This orientation is computed by numerically integrating the angular velocity
and adding it to the previous orientation, which is known as an **attitude
propagation**.
.. math::
\\begin{array}{rcl}
\\mathbf{q}_\\omega &=& \\Big(\\mathbf{I}_4 + \\frac{\\Delta t}{2}\\boldsymbol\\Omega_t\\Big)\\mathbf{q}_{t-1} \\\\
&=&
\\begin{bmatrix}
1 & -\\frac{\\Delta t}{2}\\omega_x & -\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z \\\\
\\frac{\\Delta t}{2}\\omega_x & 1 & \\frac{\\Delta t}{2}\\omega_z & -\\frac{\\Delta t}{2}\\omega_y \\\\
\\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_z & 1 & \\frac{\\Delta t}{2}\\omega_x \\\\
\\frac{\\Delta t}{2}\\omega_z & \\frac{\\Delta t}{2}\\omega_y & -\\frac{\\Delta t}{2}\\omega_x & 1
\\end{bmatrix}
\\begin{bmatrix}q_w \\\\ q_x \\\\ q_y \\\\ q_z \\end{bmatrix} \\\\
&=&
\\begin{bmatrix}
q_w - \\frac{\\Delta t}{2} \\omega_x q_x - \\frac{\\Delta t}{2} \\omega_y q_y - \\frac{\\Delta t}{2} \\omega_z q_z\\\\
q_x + \\frac{\\Delta t}{2} \\omega_x q_w - \\frac{\\Delta t}{2} \\omega_y q_z + \\frac{\\Delta t}{2} \\omega_z q_y\\\\
q_y + \\frac{\\Delta t}{2} \\omega_x q_z + \\frac{\\Delta t}{2} \\omega_y q_w - \\frac{\\Delta t}{2} \\omega_z q_x\\\\
q_z - \\frac{\\Delta t}{2} \\omega_x q_y + \\frac{\\Delta t}{2} \\omega_y q_x + \\frac{\\Delta t}{2} \\omega_z q_w
\\end{bmatrix}
\\end{array}
Secondly, the *tilt* is computed from the accelerometer measurements as:
.. math::
\\begin{array}{rcl}
\\theta &=& \\mathrm{arctan2}(a_y, a_z) \\\\
\\phi &=& \\mathrm{arctan2}\\big(-a_x, \\sqrt{a_y^2+a_z^2}\\big)
\\end{array}
Only the pitch, :math:`\\phi`, and roll, :math:`\\theta`, angles are computed,
leaving the yaw angle, :math:`\\psi` equal to zero.
If a magnetometer sample is available, the yaw angle can be computed. First
compensate the measurement using the *tilt*:
.. math::
\\begin{array}{rcl}
\\mathbf{b} &=&
\\begin{bmatrix}
\\cos\\theta & \\sin\\theta\\sin\\phi & \\sin\\theta\\cos\\phi \\\\
0 & \\cos\\phi & -\\sin\\phi \\\\
-\\sin\\theta & \\cos\\theta\\sin\\phi & \\cos\\theta\\cos\\phi
\\end{bmatrix}
\\begin{bmatrix}m_x \\\\ m_y \\\\ m_z\\end{bmatrix} \\\\
\\begin{bmatrix}b_x \\\\ b_y \\\\ b_z\\end{bmatrix} &=&
\\begin{bmatrix}
m_x\\cos\\theta + m_y\\sin\\theta\\sin\\phi + m_z\\sin\\theta\\cos\\phi \\\\
m_y\\cos\\phi - m_z\\sin\\phi \\\\
-m_x\\sin\\theta + m_y\\cos\\theta\\sin\\phi + m_z\\cos\\theta\\cos\\phi
\\end{bmatrix}
\\end{array}
Then, the yaw angle, :math:`\\psi`, is obtained as:
.. math::
\\begin{array}{rcl}
\\psi &=& \\mathrm{arctan2}(-b_y, b_x) \\\\
&=& \\mathrm{arctan2}\\big(m_z\\sin\\phi - m_y\\cos\\phi, \\; m_x\\cos\\theta + \\sin\\theta(m_y\\sin\\phi + m_z\\cos\\phi)\\big)
\\end{array}
We transform the roll-pitch-yaw angles to a quaternion representation:
.. math::
\\mathbf{q}_{am} =
\\begin{pmatrix}q_w\\\\q_x\\\\q_y\\\\q_z\\end{pmatrix} =
\\begin{pmatrix}
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) - \\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big) + \\sin\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) \\\\
\\cos\\Big(\\frac{\\phi}{2}\\Big)\\cos\\Big(\\frac{\\theta}{2}\\Big)\\sin\\Big(\\frac{\\psi}{2}\\Big) - \\sin\\Big(\\frac{\\phi}{2}\\Big)\\sin\\Big(\\frac{\\theta}{2}\\Big)\\cos\\Big(\\frac{\\psi}{2}\\Big)
\\end{pmatrix}
Finally, after each orientation is estimated independently, they are fused with
the complementary filter.
.. math::
\\mathbf{q} = (1 - \\alpha) \\mathbf{q}_\\omega + \\alpha\\mathbf{q}_{am}
where :math:`\\mathbf{q}_\\omega` is the attitude estimated from the gyroscope,
:math:`\\mathbf{q}_{am}` is the attitude estimated from the accelerometer and
the magnetometer, and :math:`\\alpha` is the gain of the filter.
The filter gain must be a floating value within the range :math:`[0.0, 1.0]`.
It can be seen that when :math:`\\alpha=1`, the attitude is estimated entirely
with the accelerometer and the magnetometer. When :math:`\\alpha=0`, it is
estimated solely with the gyroscope. The values within the range decide how
much of each estimation is "blended" into the quaternion.
This is actually a simple implementation of `LERP
<https://en.wikipedia.org/wiki/Linear_interpolation>`_ commonly used to
linearly interpolate quaternions with small differences between them.
"""
import numpy as np
from ..common.orientation import ecompass
| 41.232394 | 219 | 0.566695 |
bebb3991fe53855d056c3141b393c1defb50e7e5 | 4,197 | py | Python | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/EditJobTemplateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data | 29.978571 | 74 | 0.751013 |
bebb7eff935771339795abf6b86ab3ed10b32cc3 | 3,550 | py | Python | tests/common/models/test_execution.py | angry-tony/ceph-lcm-decapod | 535944d3ee384c3a7c4af82f74041b0a7792433f | [
"Apache-2.0"
] | 41 | 2016-11-03T16:40:17.000Z | 2019-05-23T08:39:17.000Z | tests/common/models/test_execution.py | Mirantis/ceph-lcm | fad9bad0b94f2ef608362953583b10a54a841d24 | [
"Apache-2.0"
] | 30 | 2016-10-14T10:54:46.000Z | 2017-10-20T15:58:01.000Z | tests/common/models/test_execution.py | angry-tony/ceph-lcm-decapod | 535944d3ee384c3a7c4af82f74041b0a7792433f | [
"Apache-2.0"
] | 28 | 2016-09-17T01:17:36.000Z | 2019-07-05T03:32:54.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decapod_common.models.execution."""
import pytest
from decapod_common.models import execution
def test_getting_logfile(new_execution, execution_log_storage):
new_execution.logfile
execution_log_storage.get.assert_called_once_with(new_execution.model_id)
def test_create_logfile(new_execution, execution_log_storage):
new_execution.new_logfile.write("1")
execution_log_storage.delete.assert_called_once_with(
new_execution.model_id
)
execution_log_storage.new_file.assert_called_once_with(
new_execution.model_id,
filename="{0}.log".format(new_execution.model_id),
content_type="text/plain"
)
execution_log_storage.new_file().write.assert_called_once_with("1")
| 33.809524 | 77 | 0.719155 |
bebc4c58538a85c2ad00b34ebacde9538e3d0d9b | 1,613 | py | Python | board/models.py | Fahreeve/TaskManager | 7f0a16312b43867270eaade1fe153c07abc2c10e | [
"MIT"
] | null | null | null | board/models.py | Fahreeve/TaskManager | 7f0a16312b43867270eaade1fe153c07abc2c10e | [
"MIT"
] | null | null | null | board/models.py | Fahreeve/TaskManager | 7f0a16312b43867270eaade1fe153c07abc2c10e | [
"MIT"
] | 1 | 2020-09-15T09:15:13.000Z | 2020-09-15T09:15:13.000Z | from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
| 37.511628 | 118 | 0.695598 |
bebe436d87bb3f3a76cbb71e91dc6e70bb5b2e46 | 475 | py | Python | test/test_hex_line.py | bicobus/Hexy | e75d58e66546c278fb648af85e3f9dae53127826 | [
"MIT"
] | 72 | 2017-08-30T03:02:51.000Z | 2022-03-11T23:15:15.000Z | test/test_hex_line.py | bicobus/Hexy | e75d58e66546c278fb648af85e3f9dae53127826 | [
"MIT"
] | 10 | 2019-03-14T08:04:33.000Z | 2021-08-10T09:36:45.000Z | test/test_hex_line.py | bicobus/Hexy | e75d58e66546c278fb648af85e3f9dae53127826 | [
"MIT"
] | 15 | 2017-11-08T05:37:06.000Z | 2021-08-05T19:16:48.000Z | import numpy as np
import hexy as hx
if __name__ == "__main__":
test_get_hex_line()
| 21.590909 | 38 | 0.471579 |
bebe5670df71295bc98ec96c4bde4a3c31d4fb66 | 6,747 | py | Python | wofry/propagator/propagators2D/integral.py | PaNOSC-ViNYL/wofry | 779b5a738ee7738e959a58aafe01e7e49b03894a | [
"MIT"
] | null | null | null | wofry/propagator/propagators2D/integral.py | PaNOSC-ViNYL/wofry | 779b5a738ee7738e959a58aafe01e7e49b03894a | [
"MIT"
] | 1 | 2021-02-16T12:12:10.000Z | 2021-02-16T12:12:10.000Z | wofryimpl/propagator/propagators2D/integral.py | oasys-kit/wofryimpl | f300b714b038110987783c40d2c3af8dca7e54eb | [
"MIT"
] | null | null | null | # propagate_2D_integral: Simplification of the Kirchhoff-Fresnel integral. TODO: Very slow and give some problems
import numpy
from wofry.propagator.wavefront2D.generic_wavefront import GenericWavefront2D
from wofry.propagator.propagator import Propagator2D
# TODO: check resulting amplitude normalization (fft and srw likely agree, convolution gives too high amplitudes, so needs normalization)
| 49.977778 | 156 | 0.621165 |
bebf0f2b55c9070eb2aa8dd30568a2e408a3e498 | 842 | py | Python | Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | 1 | 2022-01-17T19:51:15.000Z | 2022-01-17T19:51:15.000Z | Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | null | null | null | Problems/Study Plans/Dynamic Programming/Dynamic Programming I/07_delete_and_earn.py | andor2718/LeetCode | 59874f49085818e6da751f1cc26867b31079d35d | [
"BSD-3-Clause"
] | null | null | null | # https://leetcode.com/problems/delete-and-earn/
| 42.1 | 76 | 0.609264 |
bebf8ceeebe9e29c2c913232279dc6462e901f90 | 334 | py | Python | Desafio051.py | GabrielSanchesRosa/Python | 3a129e27e076b2a91af03d68ede50b9c45c50217 | [
"MIT"
] | null | null | null | Desafio051.py | GabrielSanchesRosa/Python | 3a129e27e076b2a91af03d68ede50b9c45c50217 | [
"MIT"
] | null | null | null | Desafio051.py | GabrielSanchesRosa/Python | 3a129e27e076b2a91af03d68ede50b9c45c50217 | [
"MIT"
] | null | null | null | # Desenvolva um programa que leia o primeiro termo e a razo de uma PA. No final mostre, os 10 primeiros termos dessa prograsso.
primeiro = int(input("Primeiro Termo: "))
razao = int(input("Razo: "))
decimo = primeiro + (10 - 1) * razao
for c in range(primeiro, decimo + razao, razao):
print(f"{c}", end=" -> ")
print("Acabou")
| 33.4 | 129 | 0.679641 |
bebfe36afc8a169020e2b3f2d6602873133b4e74 | 884 | py | Python | tiddlyweb/filters/limit.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 57 | 2015-02-01T21:03:34.000Z | 2021-12-25T12:02:31.000Z | tiddlyweb/filters/limit.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 6 | 2016-02-05T11:43:32.000Z | 2019-09-05T13:38:49.000Z | tiddlyweb/filters/limit.py | tiddlyweb/tiddlyweb | 376bcad280e24d2de4d74883dc4d8369abcb2c28 | [
"BSD-3-Clause"
] | 17 | 2015-05-12T08:53:23.000Z | 2021-12-21T15:56:30.000Z | """
A :py:mod:`filter <tiddlyweb.filters>` type to limit a group of entities
using a syntax similar to SQL Limit::
limit=<index>,<count>
limit=<count>
"""
import itertools
def limit_parse(count='0'):
"""
Parse the argument of a ``limit`` :py:mod:`filter <tiddlyweb.filters>`
for a count and index argument, return a function which does the limiting.
Exceptions while parsing are passed up the stack.
"""
index = '0'
if ',' in count:
index, count = count.split(',', 1)
index = int(index)
count = int(count)
return limiter
def limit(entities, count=0, index=0):
"""
Make a slice of a list of entities based on a count and index.
"""
return itertools.islice(entities, index, index + count)
| 23.891892 | 78 | 0.64819 |
bec160209ec5f54fda5f5e3628b149076a57302b | 7,019 | py | Python | pytorch_keras_converter/API.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 17 | 2019-10-01T14:14:18.000Z | 2021-04-25T13:32:24.000Z | pytorch_keras_converter/API.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | null | null | null | pytorch_keras_converter/API.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 2 | 2019-10-01T14:02:43.000Z | 2019-10-01T14:14:19.000Z | """
Simple API to convert models between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
"""
from . import utility
from . import tests
from . import io_utils as utils
import tensorflow
def convert(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
save=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
If a progress bar and some messages should appear
-ignore_tests (bool):
If tests should be ignored.
If set to True, converted model will
still be tested by security. If models are not identical, it will
only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
Optionnal.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-save:
If model should be exported to a hdf5 file.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Raises:
-RuntimeWarning:
If converted and original model aren't identical, and ignore_tests
is False
Returns:
If model has been exported to a file, it will return the name of the
file
Else, it returns the converted model
"""
if (filename is not None or directory is not None) and save is None:
save = True
if save is None:
save = False
if weights == False:
ignore_tests = True
if not quiet:
print('\nConversion...')
# Converting:
newModel = utility.convert(model=utility.LayerRepresentation(model),
input_size=input_shape,
weights=weights,
quiet=quiet)
# Actually, newModel is a LayerRepresentation object
# Equivalents:
torchModel = newModel.equivalent['torch']
kerasModel = newModel.equivalent['keras']
if not quiet:
print('Automatically testing converted model reliability...\n')
# Checking converted model reliability
tested = False
try:
meanSquaredError = tests.comparison(model1=torchModel,
model2=kerasModel,
input_shape=input_shape,
input_range=input_range,
quiet=quiet)
tested = True
except tensorflow.errors.InvalidArgumentError:
print("Warning: tests unavailable!")
if tested and meanSquaredError > 0.0001:
if ignore_tests:
print("Warning: converted and original models aren't identical !\
(mean squared error: {})".format(meanSquaredError))
else:
raise RuntimeWarning("Original and converted model do not match !\
\nOn random input data, outputs showed a mean squared error of {} (if should \
be below 1e-10)".format(meanSquaredError))
elif not quiet and tested:
print('\n Original and converted models match !\nMean squared err\
or : {}'.format(meanSquaredError))
if save:
if not quiet:
print('Saving model...')
defaultName = 'conversion_{}'.format(newModel.name)
if filename is None:
filename = defaultName
# Formatting filename so that we don't overwrite any existing file
file = utils.formatFilename(filename,
directory)
# Freezing Keras model (trainable = False everywhere)
utils.freeze(kerasModel)
# Save the entire model
kerasModel.save(file + '.h5')
if not quiet:
print('Done !')
return file + '.h5'
if not quiet:
print('Done !')
return kerasModel
def convert_and_save(model,
input_shape,
weights=True,
quiet=True,
ignore_tests=False,
input_range=None,
filename=None,
directory=None):
"""
Conversion between PyTorch and Keras, and automatic save
(Conversions from Keras to PyTorch aren't implemented)
Arguments:
-model:
A Keras or PyTorch model or layer to convert
-input_shape:
Input shape (list, tuple or int), without batchsize.
-weights (bool):
Also convert weights. If set to false, only convert model
architecture
-quiet (bool):
If a progress bar and some messages should appear
-ignore_tests (bool):
If tests should be ignored.
If set to True, converted model will
still be tested by security. If models are not identical, it will
only print a warning.
If set to False, and models are not identical, RuntimeWarning will
be raised
If weights is False, tests are automatically ignored
-input_range:
Optionnal.
A list of 2 elements containing max and min values to give as
input to the model when performing the tests. If None, models will
be tested on samples from the "standard normal" distribution.
-filename:
Filename to give to model's hdf5 file. If filename is not None and
save is not False, then save will automatically be set to True
-directory:
Where to save model's hdf5 file. If directory is not None and
save is not False, then save will automatically be set to True
Returns:
Name of created hdf5 file
"""
return convert(model=model,
input_shape=input_shape,
weights=weights,
quiet=quiet,
ignore_tests=ignore_tests,
input_range=input_range,
save=True,
filename=filename,
directory=directory)
| 33.42381 | 78 | 0.581992 |
bec1a22fa74c5c4f594a0551d336d70522ed93f7 | 1,734 | py | Python | examples/enable_notifications.py | kjwill/bleak | 7e0fdae6c0f6a78713e5984c2840666e0c38c3f3 | [
"MIT"
] | null | null | null | examples/enable_notifications.py | kjwill/bleak | 7e0fdae6c0f6a78713e5984c2840666e0c38c3f3 | [
"MIT"
] | null | null | null | examples/enable_notifications.py | kjwill/bleak | 7e0fdae6c0f6a78713e5984c2840666e0c38c3f3 | [
"MIT"
] | 1 | 2021-09-15T18:53:58.000Z | 2021-09-15T18:53:58.000Z | # -*- coding: utf-8 -*-
"""
Notifications
-------------
Example showing how to add notifications to a characteristic and handle the responses.
Updated on 2019-07-03 by hbldh <henrik.blidh@gmail.com>
"""
import sys
import logging
import asyncio
import platform
from bleak import BleakClient
from bleak import _logger as logger
CHARACTERISTIC_UUID = "f000aa65-0451-4000-b000-000000000000" # <--- Change to the characteristic you want to enable notifications from.
ADDRESS = (
"24:71:89:cc:09:05" # <--- Change to your device's address here if you are using Windows or Linux
if platform.system() != "Darwin"
else "B9EA5233-37EF-4DD6-87A8-2A875E821C46" # <--- Change to your device's address here if you are using macOS
)
if len(sys.argv) == 3:
ADDRESS = sys.argv[1]
CHARACTERISTIC_UUID = sys.argv[2]
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
print("{0}: {1}".format(sender, data))
if __name__ == "__main__":
import os
os.environ["PYTHONASYNCIODEBUG"] = str(1)
loop = asyncio.get_event_loop()
# loop.set_debug(True)
loop.run_until_complete(run(ADDRESS, True))
| 27.52381 | 136 | 0.689158 |
bec28b12230a8be61261eca269a7854ba31ae9da | 820 | py | Python | src/15 listener_and_backdoor/listener_2.py | raminjafary/ethical-hacking | e76f74f4f23e1d8cb7f433d19871dcf966507dfc | [
"MIT"
] | null | null | null | src/15 listener_and_backdoor/listener_2.py | raminjafary/ethical-hacking | e76f74f4f23e1d8cb7f433d19871dcf966507dfc | [
"MIT"
] | null | null | null | src/15 listener_and_backdoor/listener_2.py | raminjafary/ethical-hacking | e76f74f4f23e1d8cb7f433d19871dcf966507dfc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import socket
my_listener = Listener("localhost",1234)
my_listener.run() | 25.625 | 70 | 0.734146 |
bec37dd307106b82f4f0bcaf14227eb2f2a4ba93 | 1,974 | py | Python | dialogflow/history2xls.py | ray-hrst/temi-tools | 8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341 | [
"MIT"
] | 1 | 2020-06-04T19:30:57.000Z | 2020-06-04T19:30:57.000Z | dialogflow/history2xls.py | ray-hrst/temi-tools | 8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341 | [
"MIT"
] | 1 | 2020-01-14T04:16:12.000Z | 2020-01-14T04:16:12.000Z | dialogflow/history2xls.py | ray-hrst/temi-tools | 8efb1e1af93a41bd98fe0ee8c1fd6fb44e788341 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Convert Dialogflow history to spreadsheet
User must manually copy the history from the browser and save this in a text file.
This reads the textfile, parses the data, and saves it to a spreadsheet.
Example training sample:
USER
Nov 4, 11:19 PM
AGENT
No matched intent
Nov 4, 11:19 PM
more_vert
"""
import argparse
import os
from simple_report import SimpleReport
# constants
FIELDS = ["Date", "User", "Agent"]
if __name__ == "__main__":
# collect arguments
PARSER = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
PARSER.add_argument("filename", help="History text file")
ARGS = PARSER.parse_args()
# generate report
filename, file_extension = os.path.splitext(ARGS.filename)
REPORT = SimpleReport(filename, FIELDS)
# step each line of history text file
with open(ARGS.filename, 'r') as fp:
num_lines = sum(1 for line in open(ARGS.filename))
rows = int(num_lines / 7)
print("Reading {} lines of text.".format(num_lines))
print("Writing {} rows.".format(rows))
for row in range(1, rows):
user_utterance = fp.readline().strip() # USER UTTERANCE
date = fp.readline().strip() # DATE
agent_intent = fp.readline().strip() # AGENT INTENT
date = fp.readline().strip() # DATE
_ = fp.readline().strip() # 'more_vert'
utterance = user_utterance.split("USER", 1)[1]
intent = agent_intent.split("AGENT", 1)[1]
if not intent:
intent = "Intent found"
print("[{}] {} {} {}".format(row, date, utterance, intent))
# add row to report
REPORT.add("Date", row, date, date)
REPORT.add("User", row, utterance)
REPORT.add("Agent", row, intent)
REPORT.close()
| 27.802817 | 82 | 0.609422 |
bec393e2a78f7bd8032716c650f164a7178aab68 | 25,176 | py | Python | recognition/views.py | usathe71-u/Attendance-System-Face-Recognition | c73f660a6089e8ca9dd5c473efcf2bc78f13a207 | [
"Apache-2.0"
] | 3 | 2021-05-31T21:11:38.000Z | 2021-07-22T18:29:47.000Z | recognition/views.py | usathe71-u/Attendance-System-Face-Recognition | c73f660a6089e8ca9dd5c473efcf2bc78f13a207 | [
"Apache-2.0"
] | null | null | null | recognition/views.py | usathe71-u/Attendance-System-Face-Recognition | c73f660a6089e8ca9dd5c473efcf2bc78f13a207 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render,redirect
from .forms import usernameForm,DateForm,UsernameAndDateForm, DateForm_2
from django.contrib import messages
from django.contrib.auth.models import User
import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream
from imutils.face_utils import rect_to_bb
from imutils.face_utils import FaceAligner
import time
from attendance_system_facial_recognition.settings import BASE_DIR
import os
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import pickle
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import numpy as np
from django.contrib.auth.decorators import login_required
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import datetime
from django_pandas.io import read_frame
from users.models import Present, Time
import seaborn as sns
import pandas as pd
from django.db.models import Count
#import mpld3
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from matplotlib import rcParams
import math
mpl.use('Agg')
#utility functions:
#used
#used
#used
#used
# Create your views here.
def mark_your_attendance(request):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat') #Add path to the shape predictor ######CHANGE TO RELATIVE PATH LATER
svc_save_path="face_recognition_data/svc.sav"
with open(svc_save_path, 'rb') as f:
svc = pickle.load(f)
fa = FaceAligner(predictor , desiredFaceWidth = 96)
encoder=LabelEncoder()
encoder.classes_ = np.load('face_recognition_data/classes.npy')
faces_encodings = np.zeros((1,128))
no_of_faces = len(svc.predict_proba(faces_encodings)[0])
count = dict()
present = dict()
log_time = dict()
start = dict()
for i in range(no_of_faces):
count[encoder.inverse_transform([i])[0]] = 0
present[encoder.inverse_transform([i])[0]] = False
vs = VideoStream(src=0).start()
sampleNum = 0
while(True):
frame = vs.read()
frame = imutils.resize(frame ,width = 800)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray_frame,0)
for face in faces:
print("INFO : inside for loop")
(x,y,w,h) = face_utils.rect_to_bb(face)
face_aligned = fa.align(frame,gray_frame,face)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)
(pred,prob)=predict(face_aligned,svc)
if(pred!=[-1]):
person_name=encoder.inverse_transform(np.ravel([pred]))[0]
pred=person_name
if count[pred] == 0:
start[pred] = time.time()
count[pred] = count.get(pred,0) + 1
if count[pred] == 4 and (time.time()-start[pred]) > 1.2:
count[pred] = 0
else:
#if count[pred] == 4 and (time.time()-start) <= 1.5:
present[pred] = True
log_time[pred] = datetime.datetime.now()
count[pred] = count.get(pred,0) + 1
print(pred, present[pred], count[pred])
cv2.putText(frame, str(person_name)+ str(prob), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
else:
person_name="unknown"
cv2.putText(frame, str(person_name), (x+6,y+h-6), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0),1)
#cv2.putText()
# Before continuing to the next loop, I want to give it a little pause
# waitKey of 100 millisecond
#cv2.waitKey(50)
#Showing the image in another window
#Creates a window with window name "Face" and with the image img
cv2.imshow("Mark Attendance - In - Press q to exit",frame)
#Before closing it we need to give a wait command, otherwise the open cv wont work
# @params with the millisecond of delay 1
#cv2.waitKey(1)
#To get out of the loop
key=cv2.waitKey(50) & 0xFF
if(key==ord("q")):
break
#Stoping the videostream
vs.stop()
# destroying all the windows
cv2.destroyAllWindows()
update_attendance_in_db_in(present)
return redirect('home')
def mark_your_attendance_out(request):
    """Django view: mark check-out attendance from the webcam feed.

    Opens the default camera, detects and aligns faces frame by frame,
    classifies each aligned face with a pre-trained SVC model, and marks a
    person as present (checked out) once they have been recognised
    consistently. Runs until the user presses 'q', then persists the
    results and redirects to the home page.

    NOTE(review): original source arrived with all indentation stripped;
    structure reconstructed from the parallel check-in variant of this view.

    :param request: incoming Django HTTP request (unused beyond routing).
    :return: redirect to the 'home' URL.
    """
    detector = dlib.get_frontal_face_detector()
    # Landmark predictor used by the face aligner.
    predictor = dlib.shape_predictor('face_recognition_data/shape_predictor_68_face_landmarks.dat')

    # Load the trained SVC classifier from disk.
    svc_save_path = "face_recognition_data/svc.sav"
    with open(svc_save_path, 'rb') as f:
        svc = pickle.load(f)

    fa = FaceAligner(predictor, desiredFaceWidth=96)
    encoder = LabelEncoder()
    encoder.classes_ = np.load('face_recognition_data/classes.npy')

    # Probe the classifier with a dummy 128-d encoding just to learn how
    # many classes (people) it was trained on.
    faces_encodings = np.zeros((1, 128))
    no_of_faces = len(svc.predict_proba(faces_encodings)[0])

    # Per-person bookkeeping: consecutive-recognition count, presence flag,
    # last recognition timestamp, and the wall-clock start of a count run.
    count = dict()
    present = dict()
    log_time = dict()
    start = dict()
    for i in range(no_of_faces):
        count[encoder.inverse_transform([i])[0]] = 0
        present[encoder.inverse_transform([i])[0]] = False

    vs = VideoStream(src=0).start()

    while True:
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray_frame, 0)

        for face in faces:
            print("INFO : inside for loop")
            (x, y, w, h) = face_utils.rect_to_bb(face)
            face_aligned = fa.align(frame, gray_frame, face)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            # predict() presumably returns ([-1], prob) for unrecognised
            # faces — inferred from the comparison below; confirm upstream.
            (pred, prob) = predict(face_aligned, svc)

            if pred != [-1]:
                person_name = encoder.inverse_transform(np.ravel([pred]))[0]
                pred = person_name
                if count[pred] == 0:
                    # First hit of a new run: remember when it started.
                    start[pred] = time.time()
                    count[pred] = count.get(pred, 0) + 1
                if count[pred] == 4 and (time.time() - start[pred]) > 1.5:
                    # Four hits spread over more than 1.5 s look spurious:
                    # reset the run instead of marking presence.
                    count[pred] = 0
                else:
                    present[pred] = True
                    log_time[pred] = datetime.datetime.now()
                    count[pred] = count.get(pred, 0) + 1
                    print(pred, present[pred], count[pred])
                cv2.putText(frame, str(person_name) + str(prob), (x + 6, y + h - 6),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            else:
                person_name = "unknown"
                cv2.putText(frame, str(person_name), (x + 6, y + h - 6),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        cv2.imshow("Mark Attendance- Out - Press q to exit", frame)
        # waitKey also services the GUI event loop; 'q' exits.
        key = cv2.waitKey(50) & 0xFF
        if key == ord("q"):
            break

    # Stop the video stream and tear down all OpenCV windows.
    vs.stop()
    cv2.destroyAllWindows()
    update_attendance_in_db_out(present)
    return redirect('home')
| 24.97619 | 167 | 0.722196 |
bec698afd2c5801e7a05fe6be1339638668af844 | 856 | py | Python | 2018/05.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2018/05.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2018/05.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | import re
import string
DATA = '05.txt'
code1()
code2()
| 21.948718 | 108 | 0.53972 |
bec6becd26fa525cff31dffaad9d3ab5e8f46f15 | 11,873 | py | Python | lib/fbuild/builders/__init__.py | felix-lang/fbuild | 9595fbfd6d3ceece31fda2f96c35d4a241f0129b | [
"PSF-2.0",
"BSD-2-Clause"
] | 40 | 2015-02-07T00:44:12.000Z | 2021-04-02T13:41:08.000Z | lib/fbuild/builders/__init__.py | felix-lang/fbuild | 9595fbfd6d3ceece31fda2f96c35d4a241f0129b | [
"PSF-2.0",
"BSD-2-Clause"
] | 30 | 2015-02-06T17:45:15.000Z | 2019-01-10T16:34:29.000Z | lib/fbuild/builders/__init__.py | felix-lang/fbuild | 9595fbfd6d3ceece31fda2f96c35d4a241f0129b | [
"PSF-2.0",
"BSD-2-Clause"
] | 3 | 2015-09-03T06:38:02.000Z | 2019-10-24T14:26:57.000Z | import abc
import contextlib
import os
import sys
from functools import partial
from itertools import chain
import fbuild
import fbuild.db
import fbuild.path
import fbuild.temp
from . import platform
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def check_version(ctx, builder, version_function, *,
requires_version=None,
requires_at_least_version=None,
requires_at_most_version=None):
"""Helper function to simplify checking the version of a builder."""
if any(v is not None for v in (
requires_version,
requires_at_least_version,
requires_at_most_version)):
ctx.logger.check('checking %s version' % builder)
version_str = version_function()
# Convert the version into a tuple
version = []
for i in version_str.split('.'):
try:
version.append(int(i))
except ValueError:
# The subversion isn't a number, so just convert it to a
# string.
version.append(i)
version = tuple(version)
if requires_version is not None and requires_version != version:
msg = 'version %s required; found %s' % (
'.'.join(str(i) for i in requires_version), version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
if requires_at_least_version is not None and \
requires_at_least_version > version:
msg = 'at least version %s required; found %s' % (
'.'.join(str(i) for i in requires_at_least_version),
version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
if requires_at_most_version is not None and \
requires_at_most_version < version:
msg = 'at most version %s required; found %s' % (
'.'.join(str(i) for i in requires_at_most_version),
version_str)
ctx.logger.failed(msg)
raise fbuild.ConfigFailed(msg)
ctx.logger.passed(version_str)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def check_link_lib(self, code, msg, *args, **kwargs):
self.ctx.logger.check(msg)
if self.try_link_lib(code, *args, **kwargs):
self.ctx.logger.passed()
return True
else:
self.ctx.logger.failed()
return False
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 35.762048 | 80 | 0.553609 |
bec79a34dea2c5eb7b1cdd952dbf003070a952c4 | 1,746 | py | Python | WebServer.py | i3uex/CompareML | 3d53d58117507db11ad08ca0b1c883ec0997840e | [
"MIT"
] | null | null | null | WebServer.py | i3uex/CompareML | 3d53d58117507db11ad08ca0b1c883ec0997840e | [
"MIT"
] | null | null | null | WebServer.py | i3uex/CompareML | 3d53d58117507db11ad08ca0b1c883ec0997840e | [
"MIT"
] | null | null | null | import json
import cherrypy
import engine
| 30.103448 | 120 | 0.611684 |
bec81857d7e4af0801337540f4b978497c5536f9 | 1,897 | py | Python | tuprolog/solve/exception/error/existence/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-08-07T06:29:28.000Z | 2021-08-07T06:29:28.000Z | tuprolog/solve/exception/error/existence/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 14 | 2021-09-16T13:25:12.000Z | 2022-01-03T10:12:22.000Z | tuprolog/solve/exception/error/existence/__init__.py | DavideEva/2ppy | 55609415102f8116165a42c8e33e029c4906e160 | [
"Apache-2.0"
] | 1 | 2021-12-22T00:25:32.000Z | 2021-12-22T00:25:32.000Z | from typing import Union
from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.exception.error as errors
from tuprolog.core import Term, Atom
from tuprolog.solve import ExecutionContext, Signature
ExistenceError = errors.ExistenceError
ObjectType = ExistenceError.ObjectType
OBJECT_PROCEDURE = ObjectType.PROCEDURE
OBJECT_SOURCE_SINK = ObjectType.SOURCE_SINK
OBJECT_RESOURCE = ObjectType.RESOURCE
OBJECT_STREAM = ObjectType.STREAM
OBJECT_OOP_ALIAS = ObjectType.OOP_ALIAS
OBJECT_OOP_METHOD = ObjectType.OOP_METHOD
OBJECT_OOP_CONSTRUCTOR = ObjectType.OOP_CONSTRUCTOR
OBJECT_OOP_PROPERTY = ObjectType.OOP_PROPERTY
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.exception.error.ExistenceError.*")
| 24.960526 | 96 | 0.765946 |
bec831a08a3c7355e5ebc6786562ec7da94bccbd | 2,421 | py | Python | cptk/core/fetcher.py | RealA10N/cptk | e500d948e91bb70661adc3c2539b149704c734a1 | [
"Apache-2.0"
] | 5 | 2021-12-25T01:49:45.000Z | 2022-03-27T10:30:14.000Z | cptk/core/fetcher.py | RealA10N/cptk | e500d948e91bb70661adc3c2539b149704c734a1 | [
"Apache-2.0"
] | 39 | 2021-12-24T16:35:07.000Z | 2022-03-18T23:15:14.000Z | cptk/core/fetcher.py | RealA10N/cptk | e500d948e91bb70661adc3c2539b149704c734a1 | [
"Apache-2.0"
] | 2 | 2022-01-12T19:13:20.000Z | 2022-01-12T19:32:05.000Z | from __future__ import annotations
from typing import TYPE_CHECKING
import pkg_resources
from bs4 import BeautifulSoup
from requests import session
from cptk.scrape import PageInfo
from cptk.scrape import Website
from cptk.utils import cptkException
if TYPE_CHECKING:
from cptk.scrape import Problem
| 31.441558 | 79 | 0.646014 |
bec8b40804691cfab7d99feee2707b808f11aaed | 15,006 | py | Python | machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py | Hinson-A/guyueclass | e59129526729542dccefa6c7232378a00dc0175a | [
"Apache-2.0"
] | 227 | 2021-01-20T05:34:32.000Z | 2022-03-29T12:43:05.000Z | machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py | passYYYY/guyueclass | 2054ccec2f5e6c002727a5561b494a1046484504 | [
"Apache-2.0"
] | 1 | 2021-04-22T05:56:00.000Z | 2021-05-26T06:00:17.000Z | machine_learning/deep_reinforcement_learning_grasping/drlgrasp/drlgrasp/pybullet_envs/kuka_reach_with_visual.py | passYYYY/guyueclass | 2054ccec2f5e6c002727a5561b494a1046484504 | [
"Apache-2.0"
] | 239 | 2021-01-28T02:59:53.000Z | 2022-03-29T08:02:17.000Z | import pybullet as p
import pybullet_data
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import sqrt
import random
import time
import math
import cv2
import torch
import os
def random_crop(imgs, out):
    """Randomly crop each image in a batch to a square of side `out`.

    Args:
        imgs: array of shape (B, C, H, W).
        out: output spatial size (e.g. 84); must satisfy out <= H and out <= W.

    Returns:
        Array of shape (B, C, out, out), same dtype as `imgs`, where each
        image is cropped at an independently sampled offset.
    """
    n, c, h, w = imgs.shape
    # Fix: the original derived BOTH offset ranges from the height
    # (crop_max = h - out + 1), which biased or broke crops of non-square
    # images. Sample width and height offsets from their own ranges.
    # np.random.randint's upper bound is exclusive, hence the +1.
    h_max = h - out + 1
    w_max = w - out + 1
    w1 = np.random.randint(0, w_max, n)
    h1 = np.random.randint(0, h_max, n)
    cropped = np.empty((n, c, out, out), dtype=imgs.dtype)
    for i, (img, x0, y0) in enumerate(zip(imgs, w1, h1)):
        cropped[i] = img[:, y0:y0 + out, x0:x0 + out]
    return cropped
if __name__ == '__main__':
# baseline
import matplotlib.pyplot as plt
env = KukaReachVisualEnv(is_render=False)
env = CustomSkipFrame(env)
print(env.observation_space.shape)
print(env.action_space.shape)
print(env.action_space.n)
# for _ in range(20):
# action=env.action_space.sample()
# print(action)
# env.step(action)
#
# state = env.reset()
# print(state.shape)
# img = state[0][0]
# plt.imshow(img, cmap='gray')
# plt.show()
| 36.159036 | 121 | 0.53212 |
bec8bbb28dfc0d99421b26ef588fd15b586c1fe9 | 509 | py | Python | bucket_4C/python-Pillow/patches/patch-setup.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 17 | 2017-04-22T21:53:52.000Z | 2021-01-21T16:57:55.000Z | bucket_4C/python-Pillow/patches/patch-setup.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 186 | 2017-09-12T20:46:52.000Z | 2021-11-27T18:15:14.000Z | bucket_4C/python-Pillow/patches/patch-setup.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 74 | 2017-09-06T14:48:01.000Z | 2021-08-28T02:48:27.000Z | --- setup.py.orig 2019-07-02 19:13:39 UTC
+++ setup.py
elif (
- sys.platform.startswith("linux")
- or sys.platform.startswith("gnu")
- or sys.platform.startswith("freebsd")
+ sys.platform.startswith("nothing")
):
for dirname in _find_library_dirs_ldconfig():
_add_directory(library_dirs, dirname)
| 36.357143 | 65 | 0.579568 |
bec8c0835477d8b4651705098efe6f5b0368b832 | 6,581 | py | Python | tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py | GeekHee/mindspore | 896b8e5165dd0a900ed5a39e0fb23525524bf8b0 | [
"Apache-2.0"
] | null | null | null | tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py | GeekHee/mindspore | 896b8e5165dd0a900ed5a39e0fb23525524bf8b0 | [
"Apache-2.0"
] | null | null | null | tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py | GeekHee/mindspore | 896b8e5165dd0a900ed5a39e0fb23525524bf8b0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import _constants as Constants
# pylint: disable=unused-variable
tuple_getitem = Primitive(Constants.kTupleGetItem)
add = P.Add()
allreduce = P.AllReduce()
allreduce.add_prim_attr('fusion', 1)
make_tuple = Primitive("make_tuple")
conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0, stride=1, dilation=1, group=1)
bn = P.FusedBatchNorm()
relu = P.ReLU()
conv_bn1 = Primitive('ConvBN1')
bn2_add_relu = Primitive('BN2AddRelu')
bn2_relu = Primitive('BN2Relu')
fused_bn1 = Primitive('FusedBN1')
fused_bn2 = Primitive('FusedBN2')
fused_bn3 = Primitive('FusedBN3')
bn_grad = G.FusedBatchNormGrad()
bn_grad1 = Primitive('BNGrad1')
bn_grad2 = Primitive('BNGrad2')
bn_grad3 = Primitive('BNGrad3')
def test_bn_split(tag):
""" test_split_bn_fusion """
fns = FnDict()
return fns[tag]
def test_bn_grad_split(tag):
""" test_bn_grad_split """
fns = FnDict()
return fns[tag]
def test_all_reduce_fusion_all(tag):
""" test_all_reduce_fusion_all """
fns = FnDict()
return fns[tag]
def test_all_reduce_fusion_group(tag):
""" test_all_reduce_fusion_group """
fns = FnDict()
return fns[tag]
| 33.576531 | 110 | 0.647166 |
bec9227899c9767af55354a2d39773951766ff07 | 486 | py | Python | tdx/abc.py | TrainerDex/DiscordBot | 7e7bb20c5ac76bed236a7458c31017b8ddd8b8be | [
"Apache-2.0"
] | 2 | 2020-09-18T12:43:48.000Z | 2020-11-10T00:34:15.000Z | tdx/abc.py | TrainerDex/DiscordBot | 7e7bb20c5ac76bed236a7458c31017b8ddd8b8be | [
"Apache-2.0"
] | 59 | 2020-07-24T00:04:53.000Z | 2022-03-29T11:15:48.000Z | tdx/abc.py | TrainerDex/DiscordBot | 7e7bb20c5ac76bed236a7458c31017b8ddd8b8be | [
"Apache-2.0"
] | 1 | 2022-01-12T12:33:15.000Z | 2022-01-12T12:33:15.000Z | from abc import ABC
from typing import Dict
from redbot.core import Config
from redbot.core.bot import Red
from trainerdex.client import Client
| 23.142857 | 89 | 0.699588 |
fe1f4c025bf53ebda91717d8cd83c5c619dbfc64 | 7,044 | py | Python | app.py | PolinaRomanchenko/Victorious_Secret_DSCI_532 | e83bc19169a1736618ac55f2ade40741583089fd | [
"MIT"
] | null | null | null | app.py | PolinaRomanchenko/Victorious_Secret_DSCI_532 | e83bc19169a1736618ac55f2ade40741583089fd | [
"MIT"
] | null | null | null | app.py | PolinaRomanchenko/Victorious_Secret_DSCI_532 | e83bc19169a1736618ac55f2ade40741583089fd | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import altair as alt
import vega_datasets
alt.data_transformers.enable('default')
alt.data_transformers.disable_max_rows()
app = dash.Dash(__name__, assets_folder='assets', external_stylesheets=[dbc.themes.BOOTSTRAP])
# Boostrap CSS.
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'}) # noqa: E501
server = app.server
app.title = 'Dash app with pure Altair HTML'
df = pd.read_csv('data/Police_Department_Incidents_-_Previous_Year__2016_.csv')
# df = pd.read_csv("https://raw.github.ubc.ca/MDS-2019-20/DSCI_531_lab4_anas017/master/data/Police_Department_Incidents_-_Previous_Year__2016_.csv?token=AAAHQ0dLxUd74i7Zhzh1SJ_UuOaFVI3_ks5d5dT3wA%3D%3D")
df['datetime'] = pd.to_datetime(df[["Date","Time"]].apply(lambda x: x[0].split()[0] +" "+x[1], axis=1), format="%m/%d/%Y %H:%M")
df['hour'] = df['datetime'].dt.hour
df.dropna(inplace=True)
top_4_crimes = df['Category'].value_counts()[:6].index.to_list()
top_4_crimes
top_4_crimes.remove("NON-CRIMINAL")
top_4_crimes.remove("OTHER OFFENSES")
# top 4 crimes df subset
df_t4 = df[df["Category"].isin(top_4_crimes)].copy()
body = dbc.Container(
[
dbc.Row(
[
dbc.Col(
[
html.H2("San Francisco Crime"),
html.P(
"""\
When looking for a place to live or visit, one important factor that people will consider
is the safety of the neighborhood. Searching that information district
by district could be time consuming and exhausting. It is even more difficult to
compare specific crime statistics across districts such as the crime rate
at a certain time of day. It would be useful if people can look up crime
related information across district on one application. Our app
aims to help people make decisions when considering their next trip or move to San Francisco, California
via visually exploring a dataset of crime statistics. The app provides an overview of the crime rate across
neighborhoods and allows users to focus on more specific information through
filtering of geological location, crime rate, crime type or time of the
crime.
Use the box below to choose crimes of interest.
"""
),
dcc.Dropdown(
id = 'drop_selection_crime',
options=[{'label': i, 'value': i} for i in df_t4['Category'].unique()
],
style={'height': '20px',
'width': '400px'},
value=df_t4['Category'].unique(),
multi=True)
],
md=5,
),
dbc.Col(
[
dbc.Row(
[
html.Iframe(
sandbox = "allow-scripts",
id = "plot_top",
height = "500",
width = "650",
style = {"border-width": "0px"},
srcDoc = make_plot_top().to_html()
)
]
)
]
),
]
),
dbc.Row(
html.Iframe(
sandbox='allow-scripts',
id='plot_bot',
height='500',
width='1200',
style={'border-width': '0px'},
srcDoc= make_plot_bot().to_html()
)
)
],
className="mt-4",
)
app.layout = html.Div(body)
if __name__ == '__main__':
app.run_server(debug=False) | 38.917127 | 203 | 0.534923 |
fe2001122588d2b0248d76d502b21c18d29d729d | 40 | py | Python | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src:/opt/ros/kinetic/lib/python2.7/dist-packages:/home/bala/duckietown/catkin_ws/src:/home/bala/duckietown/catkin_ws/src/lib/python2.7/site-packages/geometry/subspaces/__init__.py | johnson880319/Software | 045894227f359e0a3a3ec5b7a53f8d1ebc06acdd | [
"CC-BY-2.0"
] | null | null | null | # coding=utf-8
from .subspaces import *
| 13.333333 | 24 | 0.725 |
fe2070ac8557cbd4275cc5e584c79388af700674 | 2,510 | py | Python | detection/contor.py | chika626/chainer_rep | a1d4fd32a8cfcab753269455d08c1918f273388d | [
"MIT"
] | null | null | null | detection/contor.py | chika626/chainer_rep | a1d4fd32a8cfcab753269455d08c1918f273388d | [
"MIT"
] | 7 | 2020-03-13T08:29:46.000Z | 2020-05-27T17:34:14.000Z | detection/contor.py | chika626/chainer_rep | a1d4fd32a8cfcab753269455d08c1918f273388d | [
"MIT"
] | null | null | null | import json
import math
from PIL import Image,ImageDraw
import pandas as pd
import glob
import argparse
import copy
import numpy as np
import matplotlib.pyplot as plt
import pickle
import cv2
from PIL import ImageEnhance
import chainer
from chainer.datasets import ConcatenatedDataset
from chainer.datasets import TransformDataset
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.utils import read_image
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation
import queue
if __name__ == '__main__':
main() | 26.989247 | 72 | 0.640239 |
fe2074c1f1219a5f9d1c7d8eeb8c9be145ffb2ea | 5,982 | py | Python | train.py | hjl-yul154/autodeeplab | 1bd8399ac830fcafd506a4207b75e05682d1e260 | [
"MIT"
] | 1 | 2020-07-27T07:08:47.000Z | 2020-07-27T07:08:47.000Z | train.py | hjl-yul154/autodeeplab | 1bd8399ac830fcafd506a4207b75e05682d1e260 | [
"MIT"
] | null | null | null | train.py | hjl-yul154/autodeeplab | 1bd8399ac830fcafd506a4207b75e05682d1e260 | [
"MIT"
] | null | null | null | import os
import pdb
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
import torch.optim as optim
import dataloaders
from utils.utils import AverageMeter
from utils.loss import build_criterion
from utils.metrics import Evaluator
from utils.step_lr_scheduler import Iter_LR_Scheduler
from retrain_model.build_autodeeplab import Retrain_Autodeeplab
from config_utils.re_train_autodeeplab import obtain_retrain_autodeeplab_args
if __name__ == "__main__":
main()
| 38.844156 | 127 | 0.596122 |
fe21c2ef055f99448891893a1c18824fdde9d61e | 1,883 | py | Python | test.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null | test.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null | test.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null |
sol = Solution()
A = [1]
B = [-1]
C = [0]
D = [1]
result = sol.fourSumCount(A, B, C, D)
print("Test 1: {0}".format(result))
A = [1, 2]
B = [-2, -1]
C = [-1, 2]
D = [0, 2]
result = sol.fourSumCount(A, B, C, D)
print("Test 2: {0}".format(result))
| 31.383333 | 79 | 0.463622 |
fe21f2c89737b3c4d120cba724974597cb079bc4 | 1,675 | py | Python | src/boot.py | johngtrs/krux | 7b6c6d410e29c16ab5d3c05a5aafab618f13a86f | [
"MIT"
] | null | null | null | src/boot.py | johngtrs/krux | 7b6c6d410e29c16ab5d3c05a5aafab618f13a86f | [
"MIT"
] | null | null | null | src/boot.py | johngtrs/krux | 7b6c6d410e29c16ab5d3c05a5aafab618f13a86f | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import machine
from pmu import axp192
from context import Context
from login import Login
from home import Home
import settings

# NOTE(review): original source arrived with all indentation stripped;
# structure reconstructed below.

pmu = axp192()
# Enable power management so that if power button is held down 6 secs,
# it shuts off as expected.
pmu.enablePMICSleepMode(True)

ctx = Context()
# NOTE(review): the bare parentheses around the string literals below look
# like the residue of a stripped wrapper call (e.g. an i18n helper) —
# confirm against the upstream project before relying on them.
ctx.display.flash_text(settings.load('splash', ( 'Krux' ), strip=False))

# Main UI loop: run the login screen, then the home screen; either one
# returning a falsy value exits the loop and shuts the device down.
while True:
    if not Login(ctx).run():
        break
    if not Home(ctx).run():
        break

ctx.display.flash_text(( 'Shutting down..' ))
ctx.clear()
pmu.setEnterSleepMode()
machine.reset()
| 32.211538 | 79 | 0.755224 |
fe224e1ffb01067a1145784abb7281fb2243b190 | 1,788 | py | Python | smartfields/processors/video.py | suhaibroomy/django-smartfields | e9331dc74f72d0254608526f8816aa4bb8f1fca4 | [
"MIT"
] | null | null | null | smartfields/processors/video.py | suhaibroomy/django-smartfields | e9331dc74f72d0254608526f8816aa4bb8f1fca4 | [
"MIT"
] | null | null | null | smartfields/processors/video.py | suhaibroomy/django-smartfields | e9331dc74f72d0254608526f8816aa4bb8f1fca4 | [
"MIT"
] | null | null | null | import re
import six
from smartfields.processors.base import ExternalFileProcessor
from smartfields.utils import ProcessingError
__all__ = [
'FFMPEGProcessor'
]
| 39.733333 | 91 | 0.599553 |
fe22b8aac4f7560fc1450a1ab43865faaf7aecdc | 2,192 | py | Python | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | null | null | null | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | null | null | null | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | ramtingh/vmtk | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | [
"Apache-2.0"
] | 1 | 2019-06-18T23:41:11.000Z | 2019-06-18T23:41:11.000Z | ## Program: VMTK
## Language: Python
## Date: January 12, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfaceconnectivity as connectivity
import os
| 35.354839 | 117 | 0.764599 |
fe23546882c9babc55f9bce0abdfba0776ff09c5 | 653 | py | Python | sssoon/forms.py | Kingpin-Apps/django-sssoon | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 | [
"BSD-3-Clause"
] | 2 | 2018-04-20T08:28:10.000Z | 2018-05-04T15:32:30.000Z | sssoon/forms.py | KINGH242/django-sssoon | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 | [
"BSD-3-Clause"
] | 2 | 2018-05-16T13:45:14.000Z | 2020-07-29T22:01:37.000Z | sssoon/forms.py | Kingpin-Apps/django-sssoon | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from nocaptcha_recaptcha.fields import NoReCaptchaField
| 40.8125 | 70 | 0.444104 |
fe242c827a7e391a419864c9504b7e2daf4968d1 | 1,054 | py | Python | simple_run_menu.py | william01110111/simple_run_menu | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea | [
"MIT"
] | null | null | null | simple_run_menu.py | william01110111/simple_run_menu | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea | [
"MIT"
] | null | null | null | simple_run_menu.py | william01110111/simple_run_menu | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea | [
"MIT"
] | null | null | null | #! /bin/python3
# simple run menu
import os
import stat
if __name__ == "__main__":
print([command_to_name(i) for i in get_files_in_dir('') if is_file_executable(i)])
| 22.425532 | 83 | 0.685009 |
fe2476b1a28089e744d395040c690305385ddcb6 | 1,792 | py | Python | mne/io/cnt/tests/test_cnt.py | stevemats/mne-python | 47051833f21bb372d60afc3adbf4305648ac7f69 | [
"BSD-3-Clause"
] | 1,953 | 2015-01-17T20:33:46.000Z | 2022-03-30T04:36:34.000Z | mne/io/cnt/tests/test_cnt.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | 8,490 | 2015-01-01T13:04:18.000Z | 2022-03-31T23:02:08.000Z | mne/io/cnt/tests/test_cnt.py | LiFeng-SECUC/mne-python | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | [
"BSD-3-Clause"
] | 1,130 | 2015-01-08T22:39:27.000Z | 2022-03-30T21:44:26.000Z |
# Author: Jaakko Leppakangas <jaeilepp@student.jyu.fi>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations
data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')
| 32 | 74 | 0.65346 |
fe24a27fb5e1b1af1324c59e811661bad02c4101 | 792 | py | Python | parliament_proposal_fetcher.py | Track-your-parliament/track-your-parliament-data | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd | [
"MIT"
] | null | null | null | parliament_proposal_fetcher.py | Track-your-parliament/track-your-parliament-data | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd | [
"MIT"
] | null | null | null | parliament_proposal_fetcher.py | Track-your-parliament/track-your-parliament-data | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd | [
"MIT"
] | null | null | null | import urllib.request, json
import pandas as pd
baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'
page = 0
df = ''
while True:
print(f'Fetching page number {page}')
with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
data = json.loads(url.read().decode())
if page == 0:
columns = data['columnNames']
df = pd.DataFrame(columns=columns)
dataRows = data['rowData']
df = df.append(pd.DataFrame(dataRows, columns=data['columnNames']), ignore_index=True)
if data['hasMore'] == False:
break
page = page + 1
df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8') | 29.333333 | 94 | 0.641414 |
fe2717913fd1b6cb1c949e299c54e281bc41335e | 2,899 | py | Python | examples/Catboost_regression-scorer_usage.py | emaldonadocruz/UTuning | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 | [
"BSD-3-Clause"
] | null | null | null | examples/Catboost_regression-scorer_usage.py | emaldonadocruz/UTuning | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 | [
"BSD-3-Clause"
] | null | null | null | examples/Catboost_regression-scorer_usage.py | emaldonadocruz/UTuning | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021
@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for gradient boosting
on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots
#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
import random
import matplotlib.pyplot as plt
# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)
# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)
# %% Define the virtual ensemble
# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Samples = 10
ens_preds=virt_ensemble(X_train,y_train, num_samples=Samples)
Pred_array = ens_preds[:,:,0]
Knowledge_u=np.sqrt(np.var(Pred_array,axis=1)) #Knowledge uncertainty
Data_u=np.sqrt(np.mean(ens_preds[:,:,1],axis=1)) #Data uncertainty
Sigma=Knowledge_u+Data_u
# %%
'''
We use UTuning to return the Indicator Function and plot the
accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)
IF_array = scorer.IndicatorFunction()
avgIF = np.mean(IF_array,axis=0)
# % Second plot test
plots.error_accuracy_plot(perc,IF_array,Pred_array,y_test,Sigma)
# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
| 26.354545 | 102 | 0.703001 |
fe27a69a39058bf33d488a199887b8c07ffdf22c | 1,683 | py | Python | sujson/_logger.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 2 | 2019-07-01T12:45:25.000Z | 2020-06-23T11:48:08.000Z | sujson/_logger.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 17 | 2019-04-25T10:46:40.000Z | 2020-11-10T09:28:55.000Z | sujson/_logger.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 3 | 2019-06-22T19:51:08.000Z | 2021-02-08T09:17:55.000Z | import logging
from platform import system
from tqdm import tqdm
from multiprocessing import Lock
loggers = {}
# https://stackoverflow.com/questions/38543506/
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
loggers.update(dict(name=logger))
return logger
| 29.017241 | 110 | 0.655971 |
fe27abc65b6073ec58be633f81761077a129a312 | 1,243 | py | Python | face-detect.py | Gicehajunior/face-recognition-detection-OpenCv-Python | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 | [
"Unlicense"
] | null | null | null | face-detect.py | Gicehajunior/face-recognition-detection-OpenCv-Python | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 | [
"Unlicense"
] | null | null | null | face-detect.py | Gicehajunior/face-recognition-detection-OpenCv-Python | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 | [
"Unlicense"
] | null | null | null | import cv2
import sys
import playsound
face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
# capture video using cv2
video_capture = cv2.VideoCapture(0)
while True:
# capture frame by frame, i.e, one by one
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# for each face on the projected on the frame
faces = face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 5,
# minSize(35, 35)
)
# loop through the video faces for detection
for (x, y, w, h) in faces:
point1 = x+w
point2 = y+h
frame_color = (50, 50, 200)
rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)
cv2.imshow('video', frame)
if faces.any():
playsound.playsound('openDoorAlert.mp3', True)
if len(faces) > 1:
print("There are " + str(len(faces)) + " peoples at the gate")
else:
print("There is " + str(len(faces)) + " person at the gate")
else:
pass
if cv2.waitKey(1) & 0xFF == ord('q'):
sys.exit()
| 28.25 | 85 | 0.563154 |
fe27fecf1f48f5d4699cad091ca66149a513fe9b | 7,938 | py | Python | sis/enrollments.py | ryanlovett/sis-cli | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca | [
"Apache-2.0"
] | null | null | null | sis/enrollments.py | ryanlovett/sis-cli | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca | [
"Apache-2.0"
] | null | null | null | sis/enrollments.py | ryanlovett/sis-cli | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca | [
"Apache-2.0"
] | null | null | null | # vim:set et sw=4 ts=4:
import logging
import sys
import jmespath
from . import sis, classes
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"
# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']
def section_id(section):
'''Return a section's course ID, e.g. "15807".'''
return section['id']
def section_subject_area(section):
'''Return a section's subject area, e.g. "STAT".'''
return jmespath.search('class.course.subjectArea.code', section)
def section_catalog_number(section):
'''Return a section's formatted catalog number, e.g. "215B".'''
return jmespath.search('class.course.catalogNumber.formatted', section)
def section_display_name(section):
'''Return a section's displayName, e.g. "STAT 215B".'''
return jmespath.search('class.course.displayName', section)
def section_is_primary(section):
'''Return a section's primary status.'''
return jmespath.search('association.primary', section)
def enrollment_campus_uid(enrollment):
'''Return an enrollent's campus UID.'''
expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
return jmespath.search(expr, enrollment)
def enrollment_campus_email(enrollment):
'''Return an enrollment's campus email if found, otherwise
return any other email.'''
expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
email = jmespath.search(expr, enrollment)
if email: return email
expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
return jmespath.search(expr, enrollment)
def get_enrollment_uids(enrollments):
'''Given an SIS enrollment, return the student's campus UID.'''
return list(map(lambda x: enrollment_campus_uid(x), enrollments))
def get_enrollment_emails(enrollments):
'''Given an SIS enrollment, return the student's campus email.'''
return list(map(lambda x: enrollment_campus_email(x), enrollments))
def enrollment_status(enrollment):
'''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
return jmespath.search('enrollmentStatus.status.code', enrollment)
def filter_enrollment_status(enrollments, status):
return list(filter(lambda x: enrollment_status(x) == status, enrollments))
def status_code(constituents):
return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]
def filter_lectures(sections, relevant_codes=section_codes):
'''
Given a list of SIS sections:
[{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
return only the section codes which are lectures.
'''
codes = []
for section in sections:
if 'description' not in section: continue
desc_words = set(section['description'].split())
if len(set(desc_words) & set(relevant_codes)) > 0:
codes.append(section['code'])
return codes
| 37.620853 | 105 | 0.68317 |
fe2900b93b3b942d3363b1695eb5a7b3920a90d6 | 1,913 | py | Python | app.py | Nishanth-Gobi/Da-Vinci-Code | b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a | [
"MIT"
] | null | null | null | app.py | Nishanth-Gobi/Da-Vinci-Code | b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a | [
"MIT"
] | null | null | null | app.py | Nishanth-Gobi/Da-Vinci-Code | b44a2d0c553e4f9cf9e2bb3283ebb5f6eaecea4a | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for
from os.path import join
from stego import Steganography
app = Flask(__name__)
UPLOAD_FOLDER = 'static/files/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
if __name__ == '__main__':
app.run(debug=True)
| 31.360656 | 97 | 0.67747 |
fe292b4982f3dd8af18a6b88ccaadbbba6d158ef | 8,012 | py | Python | imitation_learning/generate_demonstrations/gen_envs.py | HaiDangDang/2020-flatland | abbf2f7f62fabf6da0937f80c2181f1c457ce24a | [
"MIT"
] | 1 | 2021-02-21T02:54:35.000Z | 2021-02-21T02:54:35.000Z | imitation_learning/generate_demonstrations/gen_envs.py | HaiDangDang/2020-flatland | abbf2f7f62fabf6da0937f80c2181f1c457ce24a | [
"MIT"
] | null | null | null | imitation_learning/generate_demonstrations/gen_envs.py | HaiDangDang/2020-flatland | abbf2f7f62fabf6da0937f80c2181f1c457ce24a | [
"MIT"
] | null | null | null | from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
import random
import sys
import os
import time
import msgpack
import json
from PIL import Image
import argparse as ap
#env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0")
if __name__=="__main__":
main2()
| 29.240876 | 96 | 0.623689 |
fe2b48a6665b98787ac1bd205fe634201bd2120e | 1,480 | py | Python | job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py | Sruthi-Ganesh/postgres-django-queue | 4ea8412c073ff8ceb0efbac48afc29456ae11346 | [
"Apache-2.0"
] | null | null | null | job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py | Sruthi-Ganesh/postgres-django-queue | 4ea8412c073ff8ceb0efbac48afc29456ae11346 | [
"Apache-2.0"
] | null | null | null | job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py | Sruthi-Ganesh/postgres-django-queue | 4ea8412c073ff8ceb0efbac48afc29456ae11346 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-21 19:53
# this file is auto-generated so don't do flake8 on it
# flake8: noqa
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django.utils.timezone
| 30.204082 | 83 | 0.664189 |
fe2bf5d430a026df243c522eca3e9b1d054d0492 | 45 | py | Python | remediar/modules/http/__init__.py | fabaff/remediar | 014d7733b00cd40a45881c2729c04df5584476e7 | [
"Apache-2.0"
] | null | null | null | remediar/modules/http/__init__.py | fabaff/remediar | 014d7733b00cd40a45881c2729c04df5584476e7 | [
"Apache-2.0"
] | null | null | null | remediar/modules/http/__init__.py | fabaff/remediar | 014d7733b00cd40a45881c2729c04df5584476e7 | [
"Apache-2.0"
] | null | null | null | """Support for HTTP or web server issues."""
| 22.5 | 44 | 0.688889 |
fe2e74a698807b4b6d0cf881031198f5da548dd4 | 1,891 | py | Python | Image Recognition/utils/BayesianModels/Bayesian3Conv3FC.py | AlanMorningLight/PyTorch-BayesianCNN | 5de7133f09dd10135bf605efbdd26c18f2a4df13 | [
"MIT"
] | 1 | 2020-02-10T12:58:25.000Z | 2020-02-10T12:58:25.000Z | utils/BayesianModels/Bayesian3Conv3FC.py | SulemanKhurram/ThesisExperiments | 4fdf7b6558c87a096dcdc374c35085ac946d3a58 | [
"MIT"
] | null | null | null | utils/BayesianModels/Bayesian3Conv3FC.py | SulemanKhurram/ThesisExperiments | 4fdf7b6558c87a096dcdc374c35085ac946d3a58 | [
"MIT"
] | null | null | null | import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer | 35.679245 | 89 | 0.599683 |
fe2fc61a568a0e2538b7b1f99349a5186a485475 | 8,657 | py | Python | custom_scripts/load_animals.py | nphilou/influence-release | bcf3603705b6ff172bcb62123aef0248afa77a05 | [
"MIT"
] | null | null | null | custom_scripts/load_animals.py | nphilou/influence-release | bcf3603705b6ff172bcb62123aef0248afa77a05 | [
"MIT"
] | null | null | null | custom_scripts/load_animals.py | nphilou/influence-release | bcf3603705b6ff172bcb62123aef0248afa77a05 | [
"MIT"
] | null | null | null | import os
from tensorflow.contrib.learn.python.learn.datasets import base
import numpy as np
import IPython
from subprocess import call
from keras.preprocessing import image
from influence.dataset import DataSet
from influence.inception_v3 import preprocess_input
BASE_DIR = 'data' # TODO: change
| 35.479508 | 167 | 0.611644 |
fe2fd1a403e44db33fca9bd236a441a4df247ba1 | 13,000 | py | Python | src/qiskit_aws_braket_provider/awsbackend.py | carstenblank/qiskit-aws-braket-provider | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 | [
"Apache-2.0"
] | 7 | 2020-09-25T17:16:54.000Z | 2021-05-20T10:42:52.000Z | src/qiskit_aws_braket_provider/awsbackend.py | carstenblank/qiskit-aws-braket-provider | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 | [
"Apache-2.0"
] | 4 | 2020-09-21T19:33:39.000Z | 2020-09-22T12:21:11.000Z | src/qiskit_aws_braket_provider/awsbackend.py | carstenblank/qiskit-aws-braket-provider | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 | [
"Apache-2.0"
] | 1 | 2020-09-21T19:32:16.000Z | 2020-09-21T19:32:16.000Z | # Copyright 2020 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from datetime import datetime, timedelta
from braket.device_schema.device_service_properties_v1 import DeviceCost
from typing import List, Dict, Optional, Any, Union, Tuple
from botocore.response import StreamingBody
from braket.aws import AwsDevice, AwsQuantumTask, AwsSession
from braket.circuits import Circuit
from braket.device_schema import DeviceCapabilities
from braket.device_schema.ionq import IonqDeviceCapabilities
from braket.device_schema.rigetti import RigettiDeviceCapabilities
from braket.device_schema.simulators import GateModelSimulatorDeviceCapabilities
from qiskit.providers import BaseBackend, JobStatus
from qiskit.providers.models import QasmBackendConfiguration, BackendProperties, BackendStatus
from qiskit.qobj import QasmQobj
from . import awsjob
from . import awsprovider
from .conversions_configuration import aws_device_2_configuration
from .conversions_properties import aws_ionq_to_properties, aws_rigetti_to_properties, aws_simulator_to_properties
from .transpilation import convert_qasm_qobj
logger = logging.getLogger(__name__)
| 45.138889 | 144 | 0.666923 |
fe2ffb0cf28c08ae4282aa561c6f775796ff339b | 14,564 | py | Python | test/unit/Algorithms/GenericLinearTransportTest.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | null | null | null | test/unit/Algorithms/GenericLinearTransportTest.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | null | null | null | test/unit/Algorithms/GenericLinearTransportTest.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | 1 | 2020-07-02T02:21:10.000Z | 2020-07-02T02:21:10.000Z | import OpenPNM
import numpy as np
import OpenPNM.Physics.models as pm
| 51.101754 | 78 | 0.49025 |
fe3002f8ab77d8668df51f08f7789bc9628e8c1f | 2,370 | py | Python | EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py | spartantri/aws-security-automation | a3904931220111022d12e71a3d79e4a85fc82173 | [
"Apache-2.0"
] | null | null | null | EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py | spartantri/aws-security-automation | a3904931220111022d12e71a3d79e4a85fc82173 | [
"Apache-2.0"
] | null | null | null | EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py | spartantri/aws-security-automation | a3904931220111022d12e71a3d79e4a85fc82173 | [
"Apache-2.0"
] | null | null | null | # MIT No Attribution
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import boto3
import os
| 33.857143 | 110 | 0.670042 |
fe30812932f608889eaceef38afb76f593b3db27 | 3,830 | py | Python | gpu_bdb/queries/q26/gpu_bdb_query_26.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 62 | 2020-05-14T13:33:02.000Z | 2020-10-29T13:28:26.000Z | gpu_bdb/queries/q26/gpu_bdb_query_26.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 104 | 2020-07-01T21:07:42.000Z | 2020-11-13T16:36:04.000Z | gpu_bdb/queries/q26/gpu_bdb_query_26.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 21 | 2020-05-14T14:44:40.000Z | 2020-11-07T12:08:28.000Z | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
N_CLUSTERS,
CLUSTER_ITERATIONS,
N_ITER,
read_tables
)
import numpy as np
from dask import delayed
def agg_count_distinct(df, group_key, counted_key):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
return (
df.drop_duplicates([group_key, counted_key])
.groupby(group_key)[counted_key]
.count()
)
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 31.138211 | 115 | 0.703655 |
fe317187c1c12b8c77ea5e51802f388e760744e4 | 1,324 | py | Python | tests/test_intbounds.py | alex/optimizer-model | 0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98 | [
"BSD-3-Clause"
] | 4 | 2015-04-29T22:49:25.000Z | 2018-02-16T09:06:08.000Z | tests/test_intbounds.py | alex/optimizer-model | 0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98 | [
"BSD-3-Clause"
] | null | null | null | tests/test_intbounds.py | alex/optimizer-model | 0e40a0763082f5fe0bd596e8e77ebccbcd7f4a98 | [
"BSD-3-Clause"
] | null | null | null | from optimizer.utils.intbounds import IntBounds
| 23.22807 | 67 | 0.5929 |
fe3188f73830a0839c72948677e1605c9ae2ae83 | 1,586 | py | Python | tdclient/test/database_model_test.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | 2 | 2019-02-22T11:56:17.000Z | 2019-02-25T10:09:46.000Z | tdclient/test/database_model_test.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | null | null | null | tdclient/test/database_model_test.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
from tdclient import models
from tdclient.test.test_helper import *
| 40.666667 | 202 | 0.713745 |
fe31f26debb52795b22561b36355ce06ff7905d8 | 558 | py | Python | setup.py | ballcap231/fireTS | 74cc89a14d67edabf31139d1552025d54791f2a9 | [
"MIT"
] | null | null | null | setup.py | ballcap231/fireTS | 74cc89a14d67edabf31139d1552025d54791f2a9 | [
"MIT"
] | null | null | null | setup.py | ballcap231/fireTS | 74cc89a14d67edabf31139d1552025d54791f2a9 | [
"MIT"
] | null | null | null | from setuptools import setup
dependencies = [
'numpy',
'scipy',
'scikit-learn',
]
setup(
name='fireTS',
version='0.0.7',
description='A python package for multi-variate time series prediction',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/jxx123/fireTS.git',
author='Jinyu Xie',
author_email='xjygr08@gmail.com',
license='MIT',
packages=['fireTS'],
install_requires=dependencies,
include_package_data=True,
zip_safe=False)
| 24.26087 | 76 | 0.677419 |
fe3273d41978521818a7243089a132072ef92c5a | 883 | py | Python | euler/py/project_019.py | heyihan/scodes | 342518b548a723916c9273d8ebc1b345a0467e76 | [
"BSD-3-Clause"
] | null | null | null | euler/py/project_019.py | heyihan/scodes | 342518b548a723916c9273d8ebc1b345a0467e76 | [
"BSD-3-Clause"
] | null | null | null | euler/py/project_019.py | heyihan/scodes | 342518b548a723916c9273d8ebc1b345a0467e76 | [
"BSD-3-Clause"
] | null | null | null | # https://projecteuler.net/problem=19
day_19000101 = 1
days_1900 = year_days(1900)
day_next_day1 = (day_19000101 + days_1900)%7
print(day_19000101, days_1900, day_next_day1)
sum = 0
for i in range(1901, 2001):
for j in range(1, 13):
if day_next_day1 == 0:
print(i, j)
sum = sum + 1
days = month_days(j, i)
day_next_day1 = (day_next_day1 + days)%7
#print(i, j, days, day_next_day1)
print(sum)
| 20.534884 | 61 | 0.582106 |
fe32cc9e555895354fe2279db255494d9b4433fb | 1,652 | py | Python | address_book/address_book.py | wowsuchnamaste/address_book | 4877d16d795c54b750e151fa93e69c080717ae72 | [
"MIT"
] | null | null | null | address_book/address_book.py | wowsuchnamaste/address_book | 4877d16d795c54b750e151fa93e69c080717ae72 | [
"MIT"
] | null | null | null | address_book/address_book.py | wowsuchnamaste/address_book | 4877d16d795c54b750e151fa93e69c080717ae72 | [
"MIT"
] | null | null | null | """A simple address book."""
from ._tools import generate_uuid
| 25.415385 | 80 | 0.565981 |
fe3415df5ab13d93fe351122344f2bd2d2fe4c5f | 3,839 | py | Python | inference.py | zzhang87/ChestXray | eaafe2f7f5e91bb30fbed02dec1f77ff314434b5 | [
"MIT"
] | null | null | null | inference.py | zzhang87/ChestXray | eaafe2f7f5e91bb30fbed02dec1f77ff314434b5 | [
"MIT"
] | 11 | 2020-01-28T21:44:26.000Z | 2022-03-11T23:19:37.000Z | inference.py | zzhang87/ChestXray | eaafe2f7f5e91bb30fbed02dec1f77ff314434b5 | [
"MIT"
] | null | null | null | import keras
import numpy as np
import pandas as pd
import cv2
import os
import json
import pdb
import argparse
import math
import copy
from vis.visualization import visualize_cam, overlay, visualize_activation
from vis.utils.utils import apply_modifications
from shutil import rmtree
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from sklearn import metrics
import keras.backend as K
from keras import activations
from keras.applications.inception_v3 import preprocess_input as inception_pre
from keras.applications.mobilenet import preprocess_input as mobilenet_pre
from keras.applications.resnet50 import preprocess_input as resnet_pre
from keras.applications.densenet import preprocess_input as densenet_pre
from datagenerator import ImageDataGenerator
from utils import load_model
if __name__ == "__main__":
main() | 27.035211 | 107 | 0.716593 |
fe34376d96d5593399f4f9364cf5da83ea7d813b | 530 | py | Python | test/DQueueTest.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | [
"MIT"
] | null | null | null | test/DQueueTest.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | [
"MIT"
] | null | null | null | test/DQueueTest.py | MistSun-Chen/py_verifier | 7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555 | [
"MIT"
] | null | null | null | from libTask import Queue
from common import configParams
from common import common
if __name__ == '__main__':
main() | 35.333333 | 124 | 0.718868 |
fe3599447ec843cd5c9296bccc205dff470707c7 | 1,417 | py | Python | src/Knn-Tensor.py | python-itb/knn-from-scratch | dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8 | [
"MIT"
] | null | null | null | src/Knn-Tensor.py | python-itb/knn-from-scratch | dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8 | [
"MIT"
] | 2 | 2018-03-20T06:47:32.000Z | 2018-10-25T10:54:08.000Z | src/Knn-Tensor.py | python-itb/knn-from-scratch | dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8 | [
"MIT"
] | 4 | 2018-03-20T06:43:11.000Z | 2019-04-15T16:34:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 18:52:28 2018
@author: amajidsinar
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-white')
iris = datasets.load_iris()
dataset = iris.data
# only take 0th and 1th column for X
data_known = iris.data[:,:2]
# y
label_known = iris.target
# the hard part
# so matplotlib does not readily support labeling based on class
# but we know that one of the feature of plt is that a plt call would give those set of number
# the same color
category = np.unique(label_known)
for i in category:
plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i)
# Unknown class of a data
data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]])
plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?')
plt.legend()
#-------------
# Euclidean Distance
diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1])
distance = (diff**2).sum(2)
#return sorted index of distance
dist_index = np.argsort(distance)
label = label_known[dist_index]
#for k in [1,2,3,4,5,6,7,8,9,10]:
#keep the rank
k = 10
label = label[:,:k]
label_predict = []
for i in range(data_unknown.shape[0]):
values,counts = np.unique(label[i], return_counts=True)
ind = np.argmax(counts)
label_predict.append(values[ind])
| 21.149254 | 94 | 0.687368 |
fe35a3606e5ec595f8753af44fd793743da1ae33 | 2,135 | py | Python | de_test_tron2.py | volpepe/detectron2-ResNeSt | 1481d50880baa615b873b7a18156c06a5606a85c | [
"Apache-2.0"
] | null | null | null | de_test_tron2.py | volpepe/detectron2-ResNeSt | 1481d50880baa615b873b7a18156c06a5606a85c | [
"Apache-2.0"
] | null | null | null | de_test_tron2.py | volpepe/detectron2-ResNeSt | 1481d50880baa615b873b7a18156c06a5606a85c | [
"Apache-2.0"
] | null | null | null | import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import argparse, time
if __name__ == "__main__":
args = parse_args()
start_segment(args) | 40.283019 | 164 | 0.710539 |
fe35e371f2d0a2c205ae69e2ee6c811fd9ed1de5 | 8,916 | py | Python | pika/data.py | Pankrat/pika | 9f62cbe032e9b4fa0fe1842587ce0702c3926a3d | [
"BSD-3-Clause"
] | null | null | null | pika/data.py | Pankrat/pika | 9f62cbe032e9b4fa0fe1842587ce0702c3926a3d | [
"BSD-3-Clause"
] | null | null | null | pika/data.py | Pankrat/pika | 9f62cbe032e9b4fa0fe1842587ce0702c3926a3d | [
"BSD-3-Clause"
] | null | null | null | """AMQP Table Encoding/Decoding"""
import struct
import decimal
import calendar
from datetime import datetime
from pika import exceptions
from pika.compat import unicode_type, PY2, long, as_bytes
def encode_short_string(pieces, value):
"""Encode a string value as short string and append it to pieces list
returning the size of the encoded value.
:param list pieces: Already encoded values
:param value: String value to encode
:type value: str or unicode
:rtype: int
"""
encoded_value = as_bytes(value)
length = len(encoded_value)
# 4.2.5.3
# Short strings, stored as an 8-bit unsigned integer length followed by zero
# or more octets of data. Short strings can carry up to 255 octets of UTF-8
# data, but may not contain binary zero octets.
# ...
# 4.2.5.5
# The server SHOULD validate field names and upon receiving an invalid field
# name, it SHOULD signal a connection exception with reply code 503 (syntax
# error).
# -> validate length (avoid truncated utf-8 / corrupted data), but skip null
# byte check.
if length > 255:
raise exceptions.ShortStringTooLong(encoded_value)
pieces.append(struct.pack('B', length))
pieces.append(encoded_value)
return 1 + length
if PY2:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
# Purely for compatibility with original python2 code. No idea what
# and why this does.
value = encoded[offset:offset + length]
try:
value = bytes(value)
except UnicodeEncodeError:
pass
offset += length
return value, offset
else:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
value = encoded[offset:offset + length].decode('utf8')
offset += length
return value, offset
def encode_table(pieces, table):
"""Encode a dict as an AMQP table appending the encded table to the
pieces list passed in.
:param list pieces: Already encoded frame pieces
:param dict table: The dict to encode
:rtype: int
"""
table = table or {}
length_index = len(pieces)
pieces.append(None) # placeholder
tablesize = 0
for (key, value) in table.items():
tablesize += encode_short_string(pieces, key)
tablesize += encode_value(pieces, value)
pieces[length_index] = struct.pack('>I', tablesize)
return tablesize + 4
def encode_value(pieces, value):
"""Encode the value passed in and append it to the pieces list returning
the the size of the encoded value.
:param list pieces: Already encoded values
:param any value: The value to encode
:rtype: int
"""
if PY2:
if isinstance(value, basestring):
if isinstance(value, unicode_type):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
else:
# support only str on Python 3
if isinstance(value, str):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
if isinstance(value, bool):
pieces.append(struct.pack('>cB', b't', int(value)))
return 2
if isinstance(value, long):
pieces.append(struct.pack('>cq', b'l', value))
return 9
elif isinstance(value, int):
pieces.append(struct.pack('>ci', b'I', value))
return 5
elif isinstance(value, decimal.Decimal):
value = value.normalize()
if value.as_tuple().exponent < 0:
decimals = -value.as_tuple().exponent
raw = int(value * (decimal.Decimal(10) ** decimals))
pieces.append(struct.pack('>cBi', b'D', decimals, raw))
else:
# per spec, the "decimals" octet is unsigned (!)
pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
return 6
elif isinstance(value, datetime):
pieces.append(struct.pack('>cQ', b'T',
calendar.timegm(value.utctimetuple())))
return 9
elif isinstance(value, dict):
pieces.append(struct.pack('>c', b'F'))
return 1 + encode_table(pieces, value)
elif isinstance(value, list):
p = []
for v in value:
encode_value(p, v)
piece = b''.join(p)
pieces.append(struct.pack('>cI', b'A', len(piece)))
pieces.append(piece)
return 5 + len(piece)
elif value is None:
pieces.append(struct.pack('>c', b'V'))
return 1
else:
raise exceptions.UnsupportedAMQPFieldException(pieces, value)
def decode_table(encoded, offset):
"""Decode the AMQP table passed in from the encoded value returning the
decoded result and the number of bytes read plus the offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
"""
result = {}
tablesize = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
limit = offset + tablesize
while offset < limit:
key, offset = decode_short_string(encoded, offset)
value, offset = decode_value(encoded, offset)
result[key] = value
return result, offset
def decode_value(encoded, offset):
    """Decode a single AMQP field value.

    Reads the one-byte type marker at ``offset`` and decodes the value that
    follows it, returning the decoded value and the offset of the first byte
    after the value.

    :param str encoded: The binary encoded data to decode
    :param int offset: The starting byte offset
    :rtype: tuple
    :raises: pika.exceptions.InvalidFieldTypeException

    """
    # slice to get bytes in Python 3 and str in Python 2
    kind = encoded[offset:offset + 1]
    offset += 1

    # Bool
    if kind == b't':
        value = struct.unpack_from('>B', encoded, offset)[0]
        value = bool(value)
        offset += 1

    # Short-Short Int: a *signed* octet per AMQP 0-9-1, so use '>b'
    # (previously this branch and b'B' had their format codes swapped)
    elif kind == b'b':
        value = struct.unpack_from('>b', encoded, offset)[0]
        offset += 1

    # Short-Short Unsigned Int: an *unsigned* octet, so use '>B'
    elif kind == b'B':
        value = struct.unpack_from('>B', encoded, offset)[0]
        offset += 1

    # Short Int
    elif kind == b'U':
        value = struct.unpack_from('>h', encoded, offset)[0]
        offset += 2

    # Short Unsigned Int
    elif kind == b'u':
        value = struct.unpack_from('>H', encoded, offset)[0]
        offset += 2

    # Long Int
    elif kind == b'I':
        value = struct.unpack_from('>i', encoded, offset)[0]
        offset += 4

    # Long Unsigned Int
    elif kind == b'i':
        value = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4

    # Long-Long Int (long() is the module's py2/py3 compat alias for int)
    elif kind == b'L':
        value = long(struct.unpack_from('>q', encoded, offset)[0])
        offset += 8

    # Long-Long Unsigned Int
    elif kind == b'l':
        value = long(struct.unpack_from('>Q', encoded, offset)[0])
        offset += 8

    # Float: return the unpacked float as-is; wrapping it in long() (as the
    # long-long branches do) would silently truncate the fractional part
    elif kind == b'f':
        value = struct.unpack_from('>f', encoded, offset)[0]
        offset += 4

    # Double
    elif kind == b'd':
        value = struct.unpack_from('>d', encoded, offset)[0]
        offset += 8

    # Decimal: an unsigned "decimals" octet followed by a signed raw value
    elif kind == b'D':
        decimals = struct.unpack_from('B', encoded, offset)[0]
        offset += 1
        raw = struct.unpack_from('>i', encoded, offset)[0]
        offset += 4
        value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals)

    # Short String
    elif kind == b's':
        value, offset = decode_short_string(encoded, offset)

    # Long String: length-prefixed UTF-8 payload
    elif kind == b'S':
        length = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4
        value = encoded[offset:offset + length].decode('utf8')
        offset += length

    # Field Array: byte-length-prefixed sequence of typed values
    elif kind == b'A':
        length = struct.unpack_from('>I', encoded, offset)[0]
        offset += 4
        offset_end = offset + length
        value = []
        while offset < offset_end:
            v, offset = decode_value(encoded, offset)
            value.append(v)

    # Timestamp: POSIX seconds, decoded to a naive UTC datetime
    elif kind == b'T':
        value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded,
                                                             offset)[0])
        offset += 8

    # Field Table
    elif kind == b'F':
        (value, offset) = decode_table(encoded, offset)

    # Null / Void
    elif kind == b'V':
        value = None
    else:
        raise exceptions.InvalidFieldTypeException(kind)

    return value, offset
| 30.534247 | 80 | 0.596456 |
fe372dac70d64a37ad3e688bb47fa5b1bd4ad42e | 528 | py | Python | tests/fixtures/data_sets/service/dummy/dummy_configurable.py | Agi-dev/pylaas_core | c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2 | [
"MIT"
] | null | null | null | tests/fixtures/data_sets/service/dummy/dummy_configurable.py | Agi-dev/pylaas_core | c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2 | [
"MIT"
] | 2 | 2021-03-25T21:30:41.000Z | 2021-06-01T21:25:37.000Z | tests/fixtures/data_sets/service/dummy/dummy_configurable.py | Agi-dev/pylaas_core | c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2 | [
"MIT"
] | null | null | null | from pylaas_core.abstract.abstract_service import AbstractService
import time
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
| 31.058824 | 118 | 0.765152 |
fe3845f60103709c0d0030d388891565874650ad | 1,076 | py | Python | blogtech/src/blog/views.py | IVAN-URBACZKA/django-blog | 7ef6050c0de2938791843c3ec93e6e6a1e683baa | [
"MIT"
] | null | null | null | blogtech/src/blog/views.py | IVAN-URBACZKA/django-blog | 7ef6050c0de2938791843c3ec93e6e6a1e683baa | [
"MIT"
] | null | null | null | blogtech/src/blog/views.py | IVAN-URBACZKA/django-blog | 7ef6050c0de2938791843c3ec93e6e6a1e683baa | [
"MIT"
] | null | null | null | from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import BlogPost
from django.contrib.auth.decorators import login_required
| 31.647059 | 89 | 0.760223 |
fe393898f4084fe1c0d82dbb19e8e9bf170a60ea | 4,514 | py | Python | apc_deep_vision/python/generate_data.py | Juxi/apb-baseline | fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce | [
"BSD-3-Clause"
] | 9 | 2017-02-06T10:24:56.000Z | 2022-02-27T20:59:52.000Z | apc_deep_vision/python/generate_data.py | Juxi/apb-baseline | fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce | [
"BSD-3-Clause"
] | null | null | null | apc_deep_vision/python/generate_data.py | Juxi/apb-baseline | fd47a5fd78cdfd75c68601a40ca4726d7d20c9ce | [
"BSD-3-Clause"
] | 2 | 2017-10-15T08:33:37.000Z | 2019-03-05T07:29:38.000Z | #! /usr/bin/env python
# ********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, University of Colorado, Boulder
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the University of Colorado Boulder
# nor the names of its contributors may be
# used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ********************************************************************/
import cv2
import os
import numpy as np
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("proposal_path", type=str,
help="relative path from python script to proposals, no slash")
parser.add_argument("--view", default=None,
help="true/1 shows each masked image")
args = parser.parse_args()
# args.proposal_path = "../test_proposals"
# args.proposal_path = args.proposal_path
included_extenstions = ['txt']
image_names = [fn[0:len(fn)-4] for fn in os.listdir(args.proposal_path)
if any(fn.endswith(ext) for ext in included_extenstions)]
for image_name in image_names:
load_path = args.proposal_path + '/' + image_name
image = cv2.imread(load_path + ".jpeg")
data = np.loadtxt(load_path + ".txt", str)
# If there is only one line, force data to be a list of lists anyway
# Note, only works for our data as first list item is a string
if isinstance(data[0], basestring):
data = [data]
# If any line does not conform to classification tl_x tl_y br_x br_y
# then forget about it
skip = False
for line in data:
if len(line) < 5:
skip = True
if skip:
continue
for i, proposal in zip(range(0,len(data)),data):
mask = cv2.imread(load_path + '_mask{0:04d}.jpeg'.format(i))
mask = np.invert(mask)
maskGray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
ret, maskGray = cv2.threshold(maskGray,128,255,cv2.THRESH_BINARY)
print load_path + '_mask{0:04d}.jpeg'.format(i)
cropped = image[float(proposal[2]):float(proposal[4]), float(proposal[1]):float(proposal[3])]
masked = cv2.bitwise_and(cropped, cropped, mask = maskGray)
if args.view:
cv2.imshow("original", masked)
cv2.waitKey(0)
mask_directory = args.proposal_path + '/masked/' + proposal[0];
crop_directory = args.proposal_path + '/cropped/' + proposal[0];
if not os.path.exists(mask_directory):
os.makedirs(mask_directory)
if not os.path.exists(crop_directory):
os.makedirs(crop_directory)
cv2.imwrite(mask_directory + '/{}_{}.jpeg'.format(image_name,i), masked)
cv2.imwrite(crop_directory + '/{}_{}.jpeg'.format(image_name,i), cropped)
# item = data[]
# cropped = image[70:170, 440:540]
# startY:endY, startX:endX
# startX:startY, endX:endY
#
| 37.932773 | 105 | 0.634914 |
fe39cd977754d7baa5900e133ad7f76b583b9786 | 3,509 | py | Python | stats.py | shirshanka/fact-ory | 9e6bae63ca7f8f534b811058efb8942004d6a37b | [
"Apache-2.0"
] | null | null | null | stats.py | shirshanka/fact-ory | 9e6bae63ca7f8f534b811058efb8942004d6a37b | [
"Apache-2.0"
] | null | null | null | stats.py | shirshanka/fact-ory | 9e6bae63ca7f8f534b811058efb8942004d6a37b | [
"Apache-2.0"
] | null | null | null | import numpy as np;
import sys
import matplotlib.pyplot as plt;
from matplotlib import cm;
from termcolor import colored;
if __name__=="__main__":
print "hello world"
| 34.742574 | 154 | 0.68937 |
fe3be5e4c8643dd88fcaa6473267f6ae2cf76961 | 1,706 | py | Python | examples/peptidecutter/advanced.py | zjuchenyuan/EasyLogin | acc67187d902f20ec64d2d6b9eeb953e2a0ac77d | [
"MIT"
] | 33 | 2016-12-01T01:33:31.000Z | 2021-05-12T03:32:27.000Z | examples/peptidecutter/advanced.py | zjuchenyuan/EasyLogin | acc67187d902f20ec64d2d6b9eeb953e2a0ac77d | [
"MIT"
] | 2 | 2018-04-26T06:58:29.000Z | 2020-01-11T15:18:14.000Z | examples/peptidecutter/advanced.py | zjuchenyuan/EasyLogin | acc67187d902f20ec64d2d6b9eeb953e2a0ac77d | [
"MIT"
] | 4 | 2017-02-24T11:08:45.000Z | 2021-01-13T16:00:33.000Z | from EasyLogin import EasyLogin
from pprint import pprint
if __name__ == "__main__":
#pprint(peptidecutter("SERVELAT"))
import sys
pprint(peptidecutter_more(sys.argv[1]))
| 30.464286 | 200 | 0.579132 |
fe3c13d3bb5c59b28cc1e5b5a35923b1537df9f4 | 1,333 | py | Python | pgn2fixture/tests/test_utils.py | pointerish/pgn2fixture | 02039680acc37cbca22fb332738e34cd113831a4 | [
"MIT"
] | 3 | 2021-03-18T19:08:59.000Z | 2021-10-10T03:52:49.000Z | pgn2fixture/tests/test_utils.py | pointerish/pgn2fixture | 02039680acc37cbca22fb332738e34cd113831a4 | [
"MIT"
] | null | null | null | pgn2fixture/tests/test_utils.py | pointerish/pgn2fixture | 02039680acc37cbca22fb332738e34cd113831a4 | [
"MIT"
] | null | null | null | import unittest
from .. import utils
| 43 | 165 | 0.549887 |
fe3c354a94b9bc97c332f504c7fb8dc959b31224 | 7,019 | py | Python | manila/tests/share/test_snapshot_access.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | 3 | 2016-06-06T13:05:00.000Z | 2021-05-05T04:29:24.000Z | manila/tests/share/test_snapshot_access.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | manila/tests/share/test_snapshot_access.py | gouthampacha/manila | 4b7ba9b99d272663f519b495668715fbf979ffbc | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright (c) 2016 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import snapshot_access
from manila import test
from manila.tests import db_utils
from manila import utils
| 41.532544 | 78 | 0.648383 |
fe3d447e3c8eb707e5a1d8550493f94e70efafc2 | 269 | py | Python | packages/pyright-internal/src/tests/samples/unnecessaryCast1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 4,391 | 2019-05-07T01:18:57.000Z | 2022-03-31T20:45:44.000Z | packages/pyright-internal/src/tests/samples/unnecessaryCast1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 2,740 | 2019-05-07T03:29:30.000Z | 2022-03-31T12:57:46.000Z | packages/pyright-internal/src/tests/samples/unnecessaryCast1.py | sasano8/pyright | e804f324ee5dbd25fd37a258791b3fd944addecd | [
"MIT"
] | 455 | 2019-05-07T12:55:14.000Z | 2022-03-31T17:09:15.000Z | # This sample tests the type checker's reportUnnecessaryCast feature.
from typing import cast, Union
c: Union[int, str] = "hello"
d = cast(int, c)
| 19.214286 | 69 | 0.687732 |
fe3dd2d72750bce0851326699b900d4e0689f605 | 690 | py | Python | Python/1238.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null | Python/1238.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null | Python/1238.py | ArikBartzadok/beecrowd-challenges | ddb0453d1caa75c87c4b3ed6a40309ab99da77f2 | [
"MIT"
] | null | null | null |
combinador() | 18.157895 | 60 | 0.510145 |
fe3e731bfc56815773233eb7a914918e37d052e2 | 974 | py | Python | metadata_service/api/popular_tables.py | worldwise001/amundsenmetadatalibrary | 9914c8b51d38b8bd76d3249eb4f7fcce3e198d09 | [
"Apache-2.0"
] | null | null | null | metadata_service/api/popular_tables.py | worldwise001/amundsenmetadatalibrary | 9914c8b51d38b8bd76d3249eb4f7fcce3e198d09 | [
"Apache-2.0"
] | 1 | 2019-09-21T23:59:46.000Z | 2019-09-21T23:59:46.000Z | metadata_service/api/popular_tables.py | worldwise001/amundsenmetadatalibrary | 9914c8b51d38b8bd76d3249eb4f7fcce3e198d09 | [
"Apache-2.0"
] | 1 | 2019-09-21T23:56:40.000Z | 2019-09-21T23:56:40.000Z | from http import HTTPStatus
from typing import Iterable, Union, Mapping
from flask import request
from flask_restful import Resource, fields, marshal
from metadata_service.proxy import get_proxy_client
popular_table_fields = {
'database': fields.String,
'cluster': fields.String,
'schema': fields.String,
'table_name': fields.String(attribute='name'),
'table_description': fields.String(attribute='description'), # Optional
}
popular_tables_fields = {
'popular_tables': fields.List(fields.Nested(popular_table_fields))
}
| 29.515152 | 96 | 0.722793 |
fe3e90a0352653677e5f89aa3d6275c22d3a1048 | 470 | py | Python | tests/test1.py | SaijC/manhwaDownloader | f6e97cfe25355598e42633a3796d84b666d5302f | [
"MIT"
] | null | null | null | tests/test1.py | SaijC/manhwaDownloader | f6e97cfe25355598e42633a3796d84b666d5302f | [
"MIT"
] | null | null | null | tests/test1.py | SaijC/manhwaDownloader | f6e97cfe25355598e42633a3796d84b666d5302f | [
"MIT"
] | null | null | null | import requests
import logging
import cfscrape
import os
from manhwaDownloader.constants import CONSTANTS as CONST
logging.basicConfig(level=logging.DEBUG)
folderPath = os.path.join(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')
logging.info(len([file for file in os.walk(folderPath)]))
walkList = [file for file in os.walk(folderPath)]
chapterDicts = dict()
for folder, _, files in walkList[1:]:
chapterDicts.update({folder: files})
print(chapterDicts) | 24.736842 | 79 | 0.778723 |
fe3ee793457d0725edb13bd4a978ffe58340aff1 | 11,708 | py | Python | others/Keras_custom_error.py | rahasayantan/Work-For-Reference | e052da538df84034ec5a0fe3b19c4287de307286 | [
"MIT"
] | null | null | null | others/Keras_custom_error.py | rahasayantan/Work-For-Reference | e052da538df84034ec5a0fe3b19c4287de307286 | [
"MIT"
] | null | null | null | others/Keras_custom_error.py | rahasayantan/Work-For-Reference | e052da538df84034ec5a0fe3b19c4287de307286 | [
"MIT"
] | null | null | null | # define custom R2 metrics for Keras backend
from keras import backend as K
# base model architecture definition
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
# Sscaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)
#########################
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
train = df_all[:num_train]
test = df_all[num_train:]
#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(),
SVR(kernel='rbf', C=1.0, epsilon=0.05)]))
# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(n_components=125),
ElasticNet(alpha=0.001, l1_ratio=0.1)]))
#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
objective='reg:linear', n_estimators=1300, base_score=y_mean)
xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
xgb_model]))
# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
min_samples_leaf=25, max_depth=3)
# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Now the training and stacking part. In previous version i just tried to train each model and
# find the best combination, that lead to a horrible score (Overfit?). Code below does out-of-fold
# training/predictions and then we combine the final results.
#
# Read here for more explanation (This code was borrowed/adapted) :
#
stack = Ensemble(n_splits=5,
#stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))
y_test = stack.fit_predict(train, y_train, test)
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)
#############################
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| 31.643243 | 112 | 0.656389 |
fe4036ba021d5a543848f0719df15257dc0be8cd | 7,239 | py | Python | tests/ut/python/parallel/test_manual_gatherv2.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/ut/python/parallel/test_manual_gatherv2.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/ut/python/parallel/test_manual_gatherv2.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
| 37.703125 | 117 | 0.654234 |
fe4088c9d39d6abd819f54e637798544df93b9db | 3,396 | py | Python | ClemBot.Bot/bot/api/tag_route.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 121 | 2020-04-25T06:20:28.000Z | 2021-06-07T03:08:46.000Z | ClemBot.Bot/bot/api/tag_route.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 180 | 2020-04-25T04:49:51.000Z | 2021-06-22T15:21:30.000Z | ClemBot.Bot/bot/api/tag_route.py | makayla-moster/ClemBot | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | [
"MIT"
] | 72 | 2020-04-25T03:28:49.000Z | 2021-06-20T20:17:00.000Z | from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
| 33.294118 | 114 | 0.564193 |
fe40ab7f78d9978c2d19631879cf3439c2112560 | 2,967 | py | Python | formfactor_AL.py | kirichoi/PolymerConnectome | 064df932cfca57a97e62dfa9a32d1fa976500906 | [
"MIT"
] | null | null | null | formfactor_AL.py | kirichoi/PolymerConnectome | 064df932cfca57a97e62dfa9a32d1fa976500906 | [
"MIT"
] | null | null | null | formfactor_AL.py | kirichoi/PolymerConnectome | 064df932cfca57a97e62dfa9a32d1fa976500906 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| 33.337079 | 110 | 0.625211 |
fe427f872414bfa986cd9b2c48b6113399437840 | 1,039 | py | Python | utils/tests.py | nanodude/cairocffi | 9d6a9a420a91da80f7901ace9945fd864f5d04dc | [
"BSD-3-Clause"
] | null | null | null | utils/tests.py | nanodude/cairocffi | 9d6a9a420a91da80f7901ace9945fd864f5d04dc | [
"BSD-3-Clause"
] | null | null | null | utils/tests.py | nanodude/cairocffi | 9d6a9a420a91da80f7901ace9945fd864f5d04dc | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
if __name__ == '__main__':
test()
| 34.633333 | 79 | 0.73821 |
fe433c22e1af644dfc7ebbadd44ff0872fa4438b | 487 | py | Python | riddle.py | robertlit/monty-hall-problem | 746cab513dacdc1f47ce7269db35167df3520865 | [
"MIT"
] | null | null | null | riddle.py | robertlit/monty-hall-problem | 746cab513dacdc1f47ce7269db35167df3520865 | [
"MIT"
] | null | null | null | riddle.py | robertlit/monty-hall-problem | 746cab513dacdc1f47ce7269db35167df3520865 | [
"MIT"
] | null | null | null | import random
goat1 = random.randint(1, 3)
goat2 = random.randint(1, 3)
while goat1 == goat2:
goat2 = random.randint(1, 3)
success = 0
tries = 1_000_000
for _ in range(tries):
options = [1, 2, 3]
choice = random.randint(1, 3)
options.remove(choice)
if choice == goat1:
options.remove(goat2)
else:
options.remove(goat1)
choice = options[0]
if choice != goat1 and choice != goat2:
success = success + 1
print(success / tries)
| 18.037037 | 43 | 0.61807 |