hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ac5ca3d9a1b5567a5c378d005b2800a24b5822f4 | 1,148 | py | Python | python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_python_bf16_numpy_datatype.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle_bfloat import bfloat16
import unittest
# Run this module's unittest test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
| 31.888889 | 74 | 0.718641 |
ac5d5c3626cc5c773bf91a5f517bfdbe0b549607 | 687 | py | Python | tests/api/v2/test_datasources.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | [
"MIT"
] | 2 | 2020-09-08T20:42:05.000Z | 2020-09-09T14:27:55.000Z | tests/api/v2/test_datasources.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | [
"MIT"
] | null | null | null | tests/api/v2/test_datasources.py | droessmj/python-sdk | 42ea2366d08ef5e4d1fa45029480b800352ab765 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Test suite for the community-developed Python SDK for interacting with Lacework APIs.
"""
import pytest
from laceworksdk.api.v2.datasources import DatasourcesAPI
from tests.api.test_base_endpoint import BaseEndpoint
# Tests
| 22.9 | 85 | 0.737991 |
ac5ea208004616e2bfb96c0a007f009fdaeed064 | 2,793 | py | Python | src/expand_mnist.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | [
"Unlicense"
] | null | null | null | src/expand_mnist.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | [
"Unlicense"
] | null | null | null | src/expand_mnist.py | whalsey/misc | 8649cb070017a2a6c3c1cdd7fd1e37f45b251ef1 | [
"Unlicense"
] | null | null | null | """expand_mnist.py
~~~~~~~~~~~~~~~~~~
Take the 50,000 MNIST training images, and create an expanded set of
250,000 images, by displacing each training image up, down, left and
right, by one pixel. Save the resulting file to
../data/mnist_expanded.pkl.gz.
Note that this program is memory intensive, and may not run on small
systems.
"""
from __future__ import print_function
#### Libraries
# Standard library
import cPickle
import gzip
import os.path
import random
# Third-party libraries
import numpy as np
import scipy.ndimage.interpolation
import matplotlib.pyplot as plt
print("Expanding the MNIST training set")
if os.path.exists("../data/mnist_expanded.pkl.gz"):
    print("The expanded training set already exists.  Exiting.")
else:
    # Load the pickled (training, validation, test) MNIST splits.
    f = gzip.open("../data/mnist.pkl.gz", 'rb')
    training_data, validation_data, test_data = cPickle.load(f)
    f.close()
    expanded_training_pairs = []
    j = 0 # counter
    # for each image in the training data
    for x, y in zip(training_data[0], training_data[1]):
        # Keep the original image alongside its augmented variants.
        expanded_training_pairs.append((x, y))
        # Reshape the flat 784-vector back into a 28x28 image.
        image = np.reshape(x, (-1, 28))
        j += 1
        if j % 1000 == 0: print("Expanding image number ", j)
        # create four new images with shifts and rotations
        for _ in range(4):
            # calculate x shift
            shift_x = random.randint(-3, 3)
            # calculate y shift
            shift_y = random.randint(-3, 3)
            # np.roll wraps shifted pixels around to the opposite edge
            # (they are not zero-padded; see the commented-out code below).
            new_img = np.roll(image, shift_x, 0)
            new_img = np.roll(new_img, shift_y, 1)
            # pad the shifted area with 0's
            # todo - will add this later *(though it does not seem necessary)
            # NOTE(review): the commented-out padding below references an
            # undefined `sign` function -- it would need np.sign if revived.
            # if sign(shift_x) == 1:
            #     new_img[:shift_x][:] = np.zeros((shift_x, 28))
            # else:
            #     new_img[28-shift_x:][:] = np.zeros((shift_x, 28))
            #
            # if sign(shift_y) == 1:
            #     new_img[:][:shift_y] = np.zeros((28, shift_y))
            # else:
            #     new_img[:][28-shift_y:] = np.zeros((28, shift_y))
            # calculate degree of rotation
            # (random.random() - 0.5) * 90 yields an angle in [-45, 45).
            degree = (random.random() - 0.5) * 90
            new_img = scipy.ndimage.interpolation.rotate(new_img, degree, reshape=False)
            # plt.imshow(new_img)
            #
            # plt.pause(0.01)
            # plt.clf()
            # Flatten back to a 784-vector, paired with the original label.
            expanded_training_pairs.append((np.reshape(new_img, 784), y))
    # Shuffle so originals and augmented variants are interleaved.
    random.shuffle(expanded_training_pairs)
    # Transpose list of (image, label) pairs into ([images], [labels]).
    expanded_training_data = [list(d) for d in zip(*expanded_training_pairs)]
    print("Saving expanded data. This may take a few minutes.")
    f = gzip.open("../data/mnist_expanded.pkl.gz", "w")
    cPickle.dump((expanded_training_data, validation_data, test_data), f)
    f.close()
| 29.09375 | 88 | 0.6058 |
ac6056041eeb8497e63663fa127d721d28fac540 | 17,489 | py | Python | randconv/coordinator_factory.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | 1 | 2016-08-01T08:09:28.000Z | 2016-08-01T08:09:28.000Z | randconv/coordinator_factory.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | null | null | null | randconv/coordinator_factory.py | jm-begon/randconv | cb7438f5876c18192e8caaf3cafd88e839c26048 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
A set of factory functions that help create the usual coordinator configurations
"""
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__date__ = "20 January 2015"
import math
from .image import *
from .util import (OddUniformGenerator, NumberGenerator,
CustomDiscreteNumberGenerator, GaussianNumberGenerator)
from .feature_extractor import ImageLinearizationExtractor, DepthCompressorILE
from .coordinator import (RandConvCoordinator, PyxitCoordinator)
def pyxit_factory(
        nb_subwindows=10,
        sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
        sw_target_width=16, sw_target_height=16,
        fixed_size=False,
        sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
        n_jobs=-1, verbosity=10, temp_folder=None,
        random=True):
    """Build a :class:`PyxitCoordinator` from the given configuration.

    Parameters
    ----------
    nb_subwindows : int >= 0 (default : 10)
        Number of subwindows extracted per image.
    sw_min_size_ratio : float > 0 (default : 0.5)
        Smallest subwindow size, as a ratio of the original image size.
    sw_max_size_ratio : float (default : 1.)
        Largest subwindow size, as a ratio of the original image size
        (sw_min_size_ratio <= sw_max_size_ratio <= 1).
    sw_target_width : int > 0 (default : 16)
        Width of each subwindow after reinterpolation.
    sw_target_height : int > 0 (default : 16)
        Height of each subwindow after reinterpolation.
    fixed_size : boolean (default : False)
        If True, subwindows keep the target size and only their position
        is drawn randomly; if False, size and position are both random.
    sw_interpolation : int (default :
        SubWindowExtractor.INTERPOLATION_BILINEAR)
        Reinterpolation algorithm (see :class:`SubWindowExtractor`).
    n_jobs : int > 0 or -1 (default : -1)
        Number of worker processes; -1 selects the maximum available
        (see :mod:`Joblib`).
    verbosity : int >= 0 (default : 10)
        Verbosity level.
    temp_folder : string (directory path) or None (default : None)
        Folder used for memmapping (see :class:`ParallelCoordinator`).
    random : bool (default : True)
        If False, a fixed seed is used so runs are reproducible.

    Return
    ------
    coordinator : :class:`Coordinator`
        The (possibly parallelized) PyxitCoordinator for these parameters.

    Notes
    -----
    Subwindow geometry is drawn from a plain :class:`NumberGenerator`
    (uniform reals) and features are raw pixels via a base
    :class:`ImageLinearizationExtractor` instance.
    """
    # Deterministic runs pin the generator seed to 0; otherwise leave it free.
    seed = None if random else 0
    geometry_generator = NumberGenerator(seed=seed)

    # Choose between fixed-size and fully random subwindow extraction.
    if fixed_size:
        window_extractor = FixTargetSWExtractor(sw_target_width,
                                                sw_target_height,
                                                sw_interpolation,
                                                geometry_generator)
    else:
        window_extractor = SubWindowExtractor(sw_min_size_ratio,
                                              sw_max_size_ratio,
                                              sw_target_width,
                                              sw_target_height,
                                              sw_interpolation,
                                              geometry_generator)
    multi_window_extractor = MultiSWExtractor(window_extractor,
                                              nb_subwindows, True)

    # Features are the linearized pixels of each subwindow.
    linearizer = ImageLinearizationExtractor()

    # High verbosity levels flush the log eagerly.
    progress_logger = ProgressLogger(
        StandardLogger(autoFlush=(verbosity >= 45), verbosity=verbosity))

    coordinator = PyxitCoordinator(multi_window_extractor, linearizer,
                                   progress_logger, verbosity)
    if n_jobs != 1:
        coordinator.parallelize(n_jobs, temp_folder)
    return coordinator
#TODO : include in randconv : (Const.FEATEXT_ALL, {}), (Const.FEATEXT_SPASUB, {"nbCol":2})
def randconv_factory(
        nb_filters=5,
        filter_policy=None,
        poolings=None,
        extractor=None,
        nb_subwindows=10,
        sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
        sw_target_width=16, sw_target_height=16,
        sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
        include_original_img=False,
        n_jobs=-1, verbosity=10, temp_folder=None,
        random=True):
    """
    Factory method to create :class:`RandConvCoordinator` tuned for RGB images

    Parameters
    ----------
    nb_filters : int >= 0 (default : 5)
        The number of filters
    filter_policy : pair (policyType, parameters) or None (default : None)
        policyType : one of Const.FGEN_*
            The type of filter generation policy to use
        parameters : dict
            The parameter dictionnary to forward to :func:`get_filter_generator`
        None selects the default zero-perturbation policy:
        (Const.FGEN_ZEROPERT, {"min_size": 2, "max_size": 32, "min_val": -1,
        "max_val": 1, "value_generator": Const.RND_RU,
        "normalization": FilterGenerator.NORMALISATION_MEANVAR})
    poolings : iterable of triple (height, width, policy) or None
        (default : None, meaning [(3, 3, Const.POOLING_AGGREG_AVG)])
        A list of parameters to instanciate the according :class:`Pooler`
        height : int > 0
            the height of the neighborhood window
        width : int > 0
            the width of the neighborhood window
        policy : int in {Const.POOLING_NONE, Const.POOLING_AGGREG_MIN,
        Const.POOLING_AGGREG_AVG, Const.POOLING_AGGREG_MAX,
        Const.POOLING_CONV_MIN, Const.POOLING_CONV_AVG, Const.POOLING_CONV_MAX}
    extractor : pair (extractorType, parameters) or None
        (default : None, meaning (Const.FEATEXT_ALL, {}))
        Parameters forwarded to :func:`get_feature_extractor`
    nb_subwindows : int >= 0 (default : 10)
        The number of subwindow to extract
    sw_min_size_ratio : float > 0 (default : 0.5)
        The minimum size of a subwindow expressed as the ratio of the size
        of the original image
    sw_max_size_ratio : float : sw_min_size_ratio
        <= sw_max_size_ratio <= 1 (default : 1.)
        The maximim size of a subwindow expressed as the ratio of the size
        of the original image
    sw_target_width : int > 0 (default : 16)
        The width of the subwindows after reinterpolation
    sw_target_height : int > 0 (default : 16)
        The height of the subwindows after reinterpolation
    sw_interpolation : int (default :
        SubWindowExtractor.INTERPOLATION_BILINEAR)
        The subwindow reinterpolation algorithm. For more information, see
        :class:`SubWindowExtractor`
    include_original_img : boolean (default : False)
        Whether or not to include the original image in the subwindow
        extraction process
    n_jobs : int >0 or -1 (default : -1)
        The number of process to spawn for parallelizing the computation.
        If -1, the maximum number is selected. See also :mod:`Joblib`.
    verbosity : int >= 0 (default : 10)
        The verbosity level
    temp_folder : string (directory path) (default : None)
        The temporary folder used for memmap. If none, some default folder
        will be use (see the :class:`ParallelCoordinator`)
    random : bool (default : True)
        Whether to use randomness or use a predefined seed

    Return
    ------
    coordinator : :class:`Coordinator`
        The RandConvCoordinator corresponding to the
        set of parameters

    Notes
    -----
    - The `filter_policy`, `poolings` and `extractor` defaults are resolved
      from None inside the function so no mutable default object is shared
      between calls.
    - Filter generator
        Base instance of :class:`Finite3SameFilter` with a base instance of
        :class:`NumberGenerator` for the values and
        :class:`OddUniformGenerator` for the sizes
    - Filter size
        The filter are square (same width as height)
    - Convolver
        Base instance of :class:`RGBConvolver`
    - Subwindow random generator
        The subwindow random generator is a :class:`NumberGenerator` base
        instance (generate real nubers uniformely).
    """
    # Resolve defaults here: keeping None in the signature avoids sharing a
    # single mutable dict/list between every call of this factory.
    if filter_policy is None:
        filter_policy = (
            Const.FGEN_ZEROPERT,
            {"min_size": 2, "max_size": 32, "min_val": -1, "max_val": 1,
             "value_generator": Const.RND_RU,
             "normalization": FilterGenerator.NORMALISATION_MEANVAR})
    if poolings is None:
        poolings = [(3, 3, Const.POOLING_AGGREG_AVG)]
    if extractor is None:
        extractor = (Const.FEATEXT_ALL, {})
    #RANDOMNESS
    swngSeed = None
    if random is False:
        swngSeed = 0
    #CONVOLUTIONAL EXTRACTOR
    #Filter generator: type/policy parameters, #filters, randomness
    filter_policyType, filter_policyParam = filter_policy
    filter_generator = get_filter_generator(filter_policyType,
                                            filter_policyParam,
                                            nb_filters, random)
    #Convolver
    convolver = RGBConvolver()
    #Aggregator
    multi_pooler = get_multi_poolers(poolings, sw_target_height,
                                     sw_target_width)
    #SubWindowExtractor
    swNumGenerator = NumberGenerator(seed=swngSeed)
    sw_extractor = SubWindowExtractor(sw_min_size_ratio,
                                      sw_max_size_ratio,
                                      sw_target_width,
                                      sw_target_height,
                                      sw_interpolation, swNumGenerator)
    multi_sw_extractor = MultiSWExtractor(sw_extractor, nb_subwindows, False)
    #ConvolutionalExtractor
    convolutional_extractor = ConvolutionalExtractor(filter_generator,
                                                     convolver,
                                                     multi_sw_extractor,
                                                     multi_pooler,
                                                     include_original_img)
    #FEATURE EXTRACTOR
    feature_extractor = get_feature_extractor(extractor[0], **extractor[1])
    #COORDINATOR
    coordinator = RandConvCoordinator(convolutional_extractor, feature_extractor)
    if n_jobs != 1:
        coordinator.parallelize(n_jobs, temp_folder)
    return coordinator
| 40.204598 | 95 | 0.623535 |
ac6074b9d7933f990f474a4b8d34085357c16a13 | 2,944 | py | Python | projections.py | barrulik/3d-projections | 291770c466383c917dd68eb0ad4121195598a29f | [
"Apache-2.0"
] | 1 | 2022-01-20T20:01:24.000Z | 2022-01-20T20:01:24.000Z | projections.py | barrulik/3d-projections | 291770c466383c917dd68eb0ad4121195598a29f | [
"Apache-2.0"
] | 1 | 2022-01-20T20:01:30.000Z | 2022-01-21T14:23:11.000Z | projections.py | barrulik/3d-projections | 291770c466383c917dd68eb0ad4121195598a29f | [
"Apache-2.0"
] | null | null | null | import pygame
import numpy as np
from math import *
import json
# Basic RGB color constants. NOTE(review): RAINBOW is currently the same
# value as BLACK -- presumably a placeholder; confirm intended color.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RAINBOW = (0, 0, 0)
rainbow = True
# Window dimensions; the larger alternative is kept commented out.
WIDTH, HEIGHT = 800, 600
#WIDTH, HEIGHT = 1600, 900
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
# Rotation angle applied to the rendered objects, advanced each frame.
angle = 0
# Main loop: cap the frame rate, handle quit events, advance the rotation,
# clear the frame, draw the objects, then flip the display buffer.
while True:
    # so spin rate is not super fast/constant
    clock.tick(60)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    angle += 0.01
    screen.fill(WHITE)
    # type ur code here
    # NOTE(review): renderObject is not defined in this file as shown --
    # presumably provided by another module in the project; verify.
    renderObject("objects/2squares.json", [0, 0, 0], [angle, angle, angle], 100, screen)
    renderObject("objects/square.json", [0, 0, 1], [angle, angle, angle], 100, screen)
    pygame.display.update()
| 27.009174 | 224 | 0.571332 |
ac615575fca01649282cfa5b82c564f0a14d7c09 | 708 | py | Python | insert_data.py | Amantechcse/cricket-fantasy-game | dd256adef88de7fd4132dea55d52bfba493efa30 | [
"MIT"
] | null | null | null | insert_data.py | Amantechcse/cricket-fantasy-game | dd256adef88de7fd4132dea55d52bfba493efa30 | [
"MIT"
] | null | null | null | insert_data.py | Amantechcse/cricket-fantasy-game | dd256adef88de7fd4132dea55d52bfba493efa30 | [
"MIT"
] | null | null | null | import sqlite3
# Open (or create) the bookstore database and get a cursor for SQL calls.
book=sqlite3.connect("bookstore.db")
curbook=book.cursor()
# One-time table creation, kept commented out after the first run.
#curbook.execute('''create table books (book_id integer primary key autoincrement , book_name text(20), author text(20), price integer);''')
# Interactive entry loop: keep inserting rows until the user answers
# anything other than 'yes'.
while True:
    x=input("want to enter data yes/no: ")
    if x=='yes':
        book_id=int(input("Enter book id: "))
        book_name=input("Enter book name: ")
        author=input("Enter author name: ")
        # NOTE(review): price is kept as the raw input string (unlike
        # book_id, which is cast to int); SQLite's type affinity usually
        # coerces numeric strings for an integer column -- confirm intended.
        price=input("Enter price of book: ")
        # Parameterized insert: values are bound, not string-formatted.
        curbook.execute("insert into books (book_id,book_name,author, price) values(?,?,?,?);",(book_id,book_name,author, price))
        # Commit after each row so data survives an abrupt exit.
        book.commit()
        print("data add successfully")
    else:
        break
# The connection is deliberately left open; closing is commented out.
#book.close()
| 28.32 | 141 | 0.638418 |
ac61daa3c54495624b8682899688bd4fd36deaca | 13,110 | py | Python | api/config/h5Template/tanmuContent.py | jimbunny/wedding-invitation | a3648454e1105d9362f95d9f6e69055a7522e15b | [
"MIT"
] | null | null | null | api/config/h5Template/tanmuContent.py | jimbunny/wedding-invitation | a3648454e1105d9362f95d9f6e69055a7522e15b | [
"MIT"
] | null | null | null | api/config/h5Template/tanmuContent.py | jimbunny/wedding-invitation | a3648454e1105d9362f95d9f6e69055a7522e15b | [
"MIT"
] | null | null | null | tanmuContent = '''
<style>
.barrage-input-tip {
z-index: 1999;
position: absolute;
left: 10px;
width: 179.883px;
height: 35.7422px;
line-height: 35.7422px;
border-radius: 35.7422px;
box-sizing: border-box;
color: rgb(255, 255, 255);
margin-left: 45.7031px;
background-color: {{ data.tanmuBtnColor }};
opacity: 0.65;
pointer-events: initial;
padding: 0px 16.9922px;
font-size: 14.0625px;
display: block;
}
.data-box{display:none}
.barrage_box_top{width:100%;height:160px;margin:0px auto;}
.barrage_box_top .barrage-row{margin-bottom:20px;}
.barrage_box_top .barrage-item{
background-color: {{ data.tanmuColor }};margin-bottom:10px; white-space:nowrap;color:{{ data.fontColor }}; font-size: 12px; transform: scale(1); opacity: 1; transition: all 0.65s ease-in 0s;padding: 6px 8px 0px 8px; height: 32px;display: inline-block;border-radius: 25px;
}
</style>
<div class="maka-barrage-dom" style="top: 0px; left: 0px; background-color: transparent; z-index: 1000;">
<div class="barrage-content" style="position: fixed; box-sizing: border-box; padding: 11.7188px; right: 0px; bottom: 0px; z-index: 1000; width: 100%; pointer-events: none; background: linear-gradient(rgba(0, 0, 0, 0) 0%, rgba(0, 0, 0, 0.2) 100%);">
<div class="barrage-words row" style="margin-top: 11.7188px; height: 212.695px;"><div class="barrage-word" style="min-height: 32.2266px; line-height: 32.2266px; font-size: 12.8906px; padding: 4.10156px; border-radius: 22.8516px; bottom: 94.3359px; max-width: 310.547px; background-color: rgba(47, 50, 52, 0.6); transform: scale(1); opacity: 0; transition: bottom 2s ease-out 0s, opacity 0.75s linear 0.75s;">
</div>
</div>
<div class="barrage-bottom row" id="barrageBtn" style="padding-bottom: env(safe-area-inset-bottom); margin-top: 14.0625px; position: fixed; left: 11.7188px; bottom: 47px; pointer-events: initial;">
<div class="barrage-input-tip" data-toggle="modal" data-target="#myModal" style="background:{{ data.tanmuColor }}; width: 179.883px; height: 35.7422px; line-height: 35.7422px; border-radius: 35.7422px; box-sizing: border-box; color: rgb(255, 255, 255); margin-left: 45.7031px; background-color: rgb(47, 50, 52); opacity: 0.65; pointer-events: initial; padding: 0px 16.9922px; font-size: 14.0625px;">...</div>
</div>
<div class="backdrop" style="position: fixed; width: 100%; height: 100%; background-color: rgba(0, 0, 0, 0); z-index: 999; display: none; top: 0px; left: 0px; pointer-events: initial;"></div>
<div class="barrage-btn tanBtn" style="padding-bottom: env(safe-area-inset-bottom); margin-top: 14.0625px; position: fixed; left: 11.7188px; bottom: 11.7188px; pointer-events: initial;">
<div class="correct-icon" id="tanmuOpen" style="background: url("https://i.ibb.co/1QmGHWV/danmu-open1.png") 0% 0% / contain no-repeat; border-radius: 100%; width: 35.7422px; height: 35.7422px;"></div>
<div class="close-icon" id="tanmuClose" style="background: url("https://i.ibb.co/QNwcxLx/danmu-close1.png") 0% 0% / contain no-repeat; border-radius: 100%; width: 35.7422px; height: 35.7422px; display: none;">
<b style="position: absolute; color: rgb(255, 255, 255); top: 2.92969px; left: 19.9219px; font-weight: 600; font-size: 8.78906px; transform: scale(0.8);">{{ data.greetings | length }}</b>
</div>
</div>
<div id="j-barrage-top" class="barrage_box barrage_box_top" style="position: fixed; box-sizing: border-box; padding: 0px; right: 0px; bottom: 0px; z-index: 1000; width: 100%; pointer-events: none;"></div>
</div>
<div class="barrage-input-wrap" id="modalShow" style="display: none; position: fixed; left: 0px; bottom: 0px;height: 0px; width: 100%; background-color:transparent; padding: 9.375px 11.7188px; box-sizing: border-box; z-index: 2000; pointer-events: initial;">
<!-- Modal -->
<div class="modal fade" id="myModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
<div style="width:100%;" class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" style="cursor: pointer;" data-dismiss="modal" aria-hidden="true"></button>
<h4 class="modal-title" id="myModalLabel"></h4>
</div>
<div class="modal-body">
<form action="" id="form" class="form-horizontal">
<div class="form-group">
<div class="col-md-24" style="padding-left:10px;padding-right: 10px;">
<input type="text" class="form-control" style="width:100% !important;" name="name" placeholder="-" />
</div>
</div>
<div class="form-group">
<div class="col-md-24" style="padding-left:10px;padding-right: 10px;">
<input type="text" class="form-control" style="width:100% !important;" name="greetings" placeholder="" />
</div>
</div>
<div class="form-group">
<div class="col-md-24 col-md-offset-2" style="padding-left:10px;padding-right: 10px;">
<button id="subBtn" type="submit" class="btn btn-primary" style="width:100%;"></button>
</div>
</div>
</form>
</div>
</div><!-- /.modal-content -->
</div><!-- /.modal-dialog -->
</div>
<!-- /.modal -->
</div>
</div>
<div class="alert alert-danger hide"></div>
<div class="alert alert-success hide"></div>
<script src="/static/js/bootstrap.min.js"></script>
<script src="/static/js/bootstrapValidator.min.js"></script>
<script type="text/javascript" src="/static/js/index.js"></script>
<style type="text/css">
*{
padding:0;
margin:0;
}
a{
text-decoration: none;
}
.form-control{
display: inline-block;
width: auto;
padding: 6px 12px;
font-size: 14px;
line-height: 1.42857143;
color: #555;
background-color: #fff;
background-image: none;
border: 1px solid #ccc;
border-radius: 4px;
-webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
-webkit-transition: border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;
-o-transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;
transition: border-color ease-in-out .15s,box-shadow ease-in-out .15s;
}
.btn{
display: inline-block;
padding: 6px 12px;
margin-bottom: 0;
font-size: 14px;
font-weight: 400;
line-height: 1.42857143;
text-align: center;
white-space: nowrap;
vertical-align: middle;
-ms-touch-action: manipulation;
touch-action: manipulation;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
background-image: none;
border: 1px solid transparent;
border-radius: 4px;
}
.btn-primary {
color: #fff;
background-color: #337ab7;
border-color: #2e6da4;
}
/**/
.overflow-text{
display: block;
white-space:nowrap;
overflow:hidden;
text-overflow:ellipsis;
opacity:0;
clear: both;
padding:0 10px;
border-radius: 10px;
box-sizing: border-box;
max-width: 100%;
color:#fff;
animation:colorchange 3s infinite alternate;
-webkit-animation:colorchange 3s infinite alternate; /*Safari and Chrome*/
}
@keyframes colorchange{
0%{
color:red;
}
50%{
color:green;
}
100%{
color:#6993f9;
}
}
/**/
.alert{
position: fixed;
width: 50%;
margin-left: 20%;
z-index: 2000;
}
</style>
<script type="text/javascript">
var Obj;
$.ajax({
//
type: "GET",//
dataType: "json",//
url: "/api/v1/h5/greetings/"+{{ data.id }},//url
success: function (result) {
console.log(result);//()
if (result.code == 0) {
//
Obj = $('#j-barrage-top').barrage({
data : result.data, //
row : 1, //
time : 2500, //
gap : 100, //
position : 'fixed', //
direction : 'bottom left', //
ismoseoverclose : true, //
height : 30, //div
})
Obj.start();
} else {
alert("tanmu Error");
};
},
error : function() {
alert("tanmu Error");
}
});
</script>
<script>
$("#barrageBtn").click(function() {
var modalShowDiv = document.getElementById('modalShow');
modalShowDiv.style.display = 'block';
})
var kg = true; // if else
$(".tanBtn").click(function() { //button
if (kg) { //
var tanmuOpenDiv= document.getElementById('tanmuOpen');
tanmuOpenDiv.style.display = 'block';
var tanmuCloseDiv= document.getElementById('tanmuClose');
tanmuCloseDiv.style.display='none';
Obj.start();
var barrageBtnDiv= document.getElementById('barrageBtn');
barrageBtnDiv.style.display = 'block';
} else {
var tanmuOpenDiv= document.getElementById('tanmuOpen');
tanmuOpenDiv.style.display = 'none';
var tanmuCloseDiv= document.getElementById('tanmuClose');
tanmuCloseDiv.style.display='block';
Obj.close();
var barrageBtnDiv= document.getElementById('barrageBtn');
barrageBtnDiv.style.display = 'none';
}
kg = !kg; //
})
$('#myModal').on('hidden.bs.modal', function (e) {
//
// Reset a form
document.getElementById("form").reset();
$('#form').bootstrapValidator("resetForm",true);
})
$('form').bootstrapValidator({
//
message: 'This value is not valid',
// icon
feedbackIcons: {
valid: 'glyphicon glyphicon-ok',
invalid: 'glyphicon glyphicon-remove',
validating: 'glyphicon glyphicon-refresh'
},
excluded: [':disabled'],
submitHandler: function (validator, form, submitButton) {
//
// validator:
// form jq
// submitButton jq
},
fields: {
name: {
message: ', 20 ',
validators: {
notEmpty: { //
message: ''
},
stringLength: {
max: 20,
message: ' 20 '
},
}
},
greetings: {
message: ', 40 ',
validators: {
notEmpty: {
message: ''
},
stringLength: {
max: 40,
message: ' 40 '
},
}
},
}
});
var that = this
$("#subBtn").click(function () { //submitsubmit
$("form").bootstrapValidator('validate'); //
if ($("form").data('bootstrapValidator').isValid()) { //
$.ajax({
//
type: "POST",//
dataType: "json",//
url: "/api/v1/h5/greetings/"+{{ data.id }},//url
data: $('#form').serialize(),
success: function (result) {
console.log(result);//()
if (result.code == 0) {
$("#myModal").modal('hide');
//
//dataa.js
var addVal = {
text : result.data
}
//
Obj.data.unshift(addVal);
$(".alert-success").addClass("show");
window.setTimeout(function(){
$(".alert-success").removeClass("show");
},1000);//
} else {
$(".alert-danger").addClass("show");
window.setTimeout(function(){
$(".alert-danger").removeClass("show");
},1000);//
};
},
error : function() {
{#alert("Error");#}
$(".alert-danger").addClass("show");
window.setTimeout(function(){
$(".alert-danger").removeClass("show");
},1000);//
}
});
}
});
</script>
''' | 39.017857 | 427 | 0.564607 |
ac622bca39310127b42776aefdbd9c65467abc04 | 871 | py | Python | example/example2.py | xrloong/xrSolver | 4f36660b78456840f65215ffce0481cdc280f980 | [
"Apache-2.0"
] | null | null | null | example/example2.py | xrloong/xrSolver | 4f36660b78456840f65215ffce0481cdc280f980 | [
"Apache-2.0"
] | null | null | null | example/example2.py | xrloong/xrSolver | 4f36660b78456840f65215ffce0481cdc280f980 | [
"Apache-2.0"
] | null | null | null | from xrsolver import Problem
import solver
# This example is the second case from https://www.youtube.com/watch?v=WJEZh7GWHnw
s = solver.Solver()
p = Problem()
x1 = p.generateVariable("x1", lb=0, ub=3)
x2 = p.generateVariable("x2", lb=0, ub=3)
x3 = p.generateVariable("x3", lb=0, ub=3)
x4 = p.generateVariable("x4", lb=0, ub=3)
x5 = p.generateVariable("x5", lb=0, ub=3)
p.addVariable(x1)
p.addVariable(x2)
p.addVariable(x3)
p.addVariable(x4)
p.addVariable(x5)
p.appendConstraint(x1 + x2 <= 5)
p.appendConstraint(x2 <= 0.5 * (x1 + x2))
p.appendConstraint(x5 >= 0.4 * (x3 + x4))
p.appendConstraint(x1 + x2 + x3 + x4 +x5 == 10)
p.appendObjective(8.1 * x1 + 10.5 * x2 + 6.4 * x3 + 7.5 * x4 + 5.0 * x5)
s.solveProblem(p)
print("x1 =", x1.getValue())
print("x2 =", x2.getValue())
print("x3 =", x3.getValue())
print("x4 =", x4.getValue())
print("x5 =", x5.getValue())
| 23.540541 | 82 | 0.64868 |
ac635fff94dc7903f590b0e63087a7ab13c8a9ab | 11,669 | py | Python | src/ipyradiant/visualization/explore/interactive_exploration.py | lnijhawan/ipyradiant | d804e9031ef39c1ea75fedd52d110302c065ad84 | [
"BSD-3-Clause"
] | null | null | null | src/ipyradiant/visualization/explore/interactive_exploration.py | lnijhawan/ipyradiant | d804e9031ef39c1ea75fedd52d110302c065ad84 | [
"BSD-3-Clause"
] | null | null | null | src/ipyradiant/visualization/explore/interactive_exploration.py | lnijhawan/ipyradiant | d804e9031ef39c1ea75fedd52d110302c065ad84 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
from typing import Union
import ipycytoscape as cyto
import ipywidgets as W
import rdflib
import traitlets as trt
from ipyradiant.query.api import SPARQLQueryFramer
from ipyradiant.rdf2nx.uri_converter import URItoID
# Default cytoscape stylesheet for the interactive exploration widget.
# Selectors cover the base node/edge appearance plus the "clicked"
# (currently selected) and "temp" (newly expanded, provisional) states.
# NOTE(review): some entries use the "css" key while others use "style" --
# both appear in this list; confirm both are accepted by ipycytoscape.
DEFAULT_CYTO_STYLE = [
    # Base node: centered, wrapped label over a grey background.
    {
        "selector": "node",
        "css": {
            "label": "data(_label)",
            "text-wrap": "wrap",
            "text-max-width": "150px",
            "text-valign": "center",
            "text-halign": "center",
            "font-size": "10",
            "font-family": '"Gill Sans", sans-serif',
            "color": "black",
            "background-color": "grey",
        },
    },
    # Edges whose data tags them with the 'temp-edge' class.
    {
        "selector": "edge[classes='temp-edge']",
        "css": {
            "label": "data(_label)",
            "line-color": "#a8eae5",
        },
    },
    # A node the user has clicked (current selection).
    {
        "selector": "node.clicked",
        "css": {
            "background-color": "grey",
            "line-color": "black",
            "target-arrow-color": "black",
            "source-arrow-color": "black",
        },
    },
    # Provisional nodes added by an expansion (pink background).
    {
        "selector": "node.temp",
        "css": {
            "background-color": "#FFB6C1",
            "line-color": "black",
            "target-arrow-color": "black",
            "source-arrow-color": "black",
        },
    },
    # Directed edges: curved, with a triangular arrowhead.
    {
        "selector": "edge.directed",
        "style": {
            "curve-style": "bezier",
            "target-arrow-shape": "triangle",
            "line-color": "grey",
            # "label": "data(iri)",
            "font-size": "5",
        },
    },
    # Provisional edges added by an expansion.
    {
        "selector": "edge.temp",
        "style": {
            "curve-style": "bezier",
            "line-color": "#a8eae5",
            # "label": "data(iri)",
            "font-size": "5",
        },
    },
    # Parallel edges between the same node pair are curved apart.
    {"selector": "edge.multiple_edges", "style": {"curve-style": "bezier"}},
]
def add_cyto_class(element: "Union[cyto.Node, cyto.Edge]", class_addition: str) -> str:
    """Return the element's class string with ``class_addition`` included.

    Fixes over the naive split/set approach: empty tokens (e.g. from an
    empty ``classes`` string) are dropped so no stray spaces appear, and
    the original token order is preserved so the result is deterministic.
    TODO support multiple class additions

    :param element: the cytoscape Node/Edge to update classes for
    :param class_addition: the class string to add
    :return: the updated space-separated class string
    """
    try:
        # Drop empty tokens: "".split(" ") would otherwise yield [''].
        classes = [c for c in element.classes.split(" ") if c]
    except AttributeError:
        # Element has no classes attribute yet.
        classes = []
    if class_addition not in classes:
        classes.append(class_addition)
    return " ".join(classes)
def remove_cyto_class(element: "Union[cyto.Node, cyto.Edge]", class_removal: str) -> str:
    """Return the element's class string with ``class_removal`` removed.

    Fixes over the naive split/set approach: empty tokens are dropped so
    no stray spaces appear, and the original token order is preserved so
    the result is deterministic. An element without a ``classes``
    attribute yields an empty string.
    TODO support multiple class removals

    :param element: the cytoscape Node/Edge to update classes for
    :param class_removal: the class string to remove
    :return: the updated space-separated class string
    """
    try:
        tokens = element.classes.split(" ")
    except AttributeError:
        # No classes attribute at all -> nothing to keep.
        return ""
    return " ".join(c for c in tokens if c and c != class_removal)
# Throughout, we re-apply the layout stored in self.cytoscape_widget_layout to
# the widget each time nodes are added or removed. This makes the graph
# recompute its layout after every change, which keeps the view readable.
def log_node_clicks(self, node: dict):
    """
    This function works with registering a click on a node.
    This will mark the node as selected and change the color of the selected node.

    :param node: raw click payload from the cytoscape widget; resolved to a
        cytoscape Node object via ``self.get_node``.
    """
    cyto_node = self.get_node(node)
    if self.selected_node == cyto_node:
        # Second click on the already-selected node: promote it from the
        # temporary style to the permanent "clicked" style.
        # NOTE(review): indentation was reconstructed for this copy --
        # confirm the extent of this guarded span against the original file.
        cyto_node.classes = remove_cyto_class(cyto_node, "temp")
        cyto_node.classes = add_cyto_class(cyto_node, "clicked")
        # NOTE: changes won't propagate to frontend until graph is updated
        self.update_cytoscape_frontend()
    # Remember the clicked node so e.g. the expand button knows what to act on.
    self.selected_node = cyto_node
def expand_button_clicked(self, button):
    """
    This function expands a node by loading in its predicates and subjects when
    a node is selected and the expand button is clicked.

    :param button: the widget Button instance that fired the callback
        (unused; required by the callback signature).
    """
    # A fresh expansion can always be undone, so re-enable the undo button.
    self.undo_button.disabled = False
    if self.selected_node is None:
        # Nothing selected -- nothing to expand.
        return None
    # Query the RDF graph for outgoing (predicate, object) pairs of the
    # selected node. NOTE(review): new_data appears to be a DataFrame-like
    # object with "o", "p" and "label" columns -- confirm against
    # GetOutgoingPredicateObjects.run_query.
    new_data = GetOutgoingPredicateObjects.run_query(
        graph=self.rdf_graph, s=self.selected_node.data["iri"]
    )
    objs = new_data["o"].tolist()
    preds = new_data["p"].tolist()
    labels = new_data["label"].tolist()
    # Ids already present in the widget, used below to avoid duplicate nodes.
    self.existing_node_ids = [
        node.data["id"] for node in self.cytoscape_widget.graph.nodes
    ]
    # Temp-styled nodes for objects not yet in the graph, keyed by their row
    # index so they line up with the edges built below. Stored on self so
    # undo_expansion can remove exactly this batch.
    self.new_nodes = {
        idx: cyto.Node(
            data={
                "id": str(iri),
                "iri": iri,
                # Fall back to the IRI string when no label was returned.
                "_label": labels[idx] or str(iri),
            },
            classes="temp",
        )
        for idx, iri in enumerate(objs)
        if str(iri) not in self.existing_node_ids
    }
    # An edge is created for every (predicate, object) pair, even when the
    # target node already existed in the graph.
    self.new_edges = {
        idx: cyto.Edge(
            data={
                "source": self.selected_node.data["id"],
                "target": str(iri),
                "iri": URItoID(preds[idx]),
            },
            classes="temp",
        )
        for idx, iri in enumerate(objs)
    }
    self.cytoscape_widget.graph.add_nodes(self.new_nodes.values())
    self.cytoscape_widget.graph.add_edges(self.new_edges.values())
    # Re-applying the layout makes the frontend refresh the node positions.
    self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
def undo_expansion(self, button):
    """
    Undo the most recent node expansion.

    Only the latest expansion can be reverted; the undo button is disabled
    again until a new expansion is made.

    :param button: the widget Button instance that fired the callback (unused).
    """
    self.undo_button.disabled = True
    graph = self.cytoscape_widget.graph
    # Remove every node that the last expansion added.
    for added_node in self.new_nodes.values():
        graph.remove_node_by_id(added_node.data["id"])
    # Remove the edges from that expansion; some may already be gone
    # because removing a node also drops its incident edges.
    for added_edge in self.new_edges.values():
        try:
            graph.remove_edge(added_edge)
        except ValueError:
            pass
    self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
def remove_temp_nodes(self, button):
    """Remove all nodes that carry the 'temp' style class.

    Surviving edges lose their temporary styling and are restyled as
    directed edges; the undo button is disabled because the batch of
    temp elements is gone.

    :param button: the widget Button instance that fired the callback (unused).
    """
    graph = self.cytoscape_widget.graph
    # Collect first, then remove: never mutate ``graph.nodes`` while
    # iterating over it.
    temp_nodes = [node for node in graph.nodes if "temp" in node.classes]
    for temp_node in temp_nodes:
        graph.remove_node(temp_node)
    # Restyle the remaining edges from temporary to permanent/directed.
    for edge in graph.edges:
        edge.classes = remove_cyto_class(edge, "temp")
        edge.classes = add_cyto_class(edge, "directed")
    # NOTE: changes won't propagate to frontend until graph is updated
    self.update_cytoscape_frontend()
    self.cytoscape_widget.set_layout(name=self.cytoscape_widget_layout)
    self.undo_button.disabled = True
def update_cytoscape_frontend(self):
    """A temporary workaround to trigger a frontend refresh.

    Adding and immediately removing a throwaway node forces the widget
    to re-render, which propagates any pending style changes.
    """
    graph = self.cytoscape_widget.graph
    graph.add_node(cyto.Node(data={"id": "random node"}))
    graph.remove_node_by_id("random node")
| 32.504178 | 88 | 0.589768 |
ac64882142c0ccfa449492e6be034f7737d14e85 | 2,165 | py | Python | src/main/serialization/codec/object/collectionCodec.py | typingtanuki/pyserialization | f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151 | [
"Apache-2.0"
] | null | null | null | src/main/serialization/codec/object/collectionCodec.py | typingtanuki/pyserialization | f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151 | [
"Apache-2.0"
] | null | null | null | src/main/serialization/codec/object/collectionCodec.py | typingtanuki/pyserialization | f4a0d9cff08b3a6ce8f83f3a258c4dce1367d151 | [
"Apache-2.0"
] | null | null | null | from typing import List, TypeVar
from src.main.serialization.codec.codec import Codec
from src.main.serialization.codec.codecCache import CodecCache
from src.main.serialization.codec.object.noneCodec import NoneCodec
from src.main.serialization.codec.utils.byteIo import ByteIo
from src.main.serialization.codec.utils.bytes import *
T = TypeVar('T')
| 29.657534 | 69 | 0.611085 |
ac64c1d7463ad68a50c3cf1fa6beb3067354a863 | 3,049 | py | Python | pyatv/__init__.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | null | null | null | pyatv/__init__.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | 128 | 2020-04-24T06:42:29.000Z | 2021-02-19T11:34:20.000Z | pyatv/__init__.py | acheronfail/pyatv | 9cb96ffcc49938c4b43c92b7b40ddcecae37e732 | [
"MIT"
] | null | null | null | """Main routines for interacting with an Apple TV."""
import asyncio
import datetime # noqa
from ipaddress import IPv4Address
from typing import List
import aiohttp
from pyatv import conf, exceptions, interface
from pyatv.airplay import AirPlayStreamAPI
from pyatv.const import Protocol
from pyatv.dmap import DmapAppleTV
from pyatv.dmap.pairing import DmapPairingHandler
from pyatv.mrp import MrpAppleTV
from pyatv.mrp.pairing import MrpPairingHandler
from pyatv.airplay.pairing import AirPlayPairingHandler
from pyatv.support import net
from pyatv.support.scan import BaseScanner, UnicastMdnsScanner, MulticastMdnsScanner
| 29.317308 | 84 | 0.711053 |
ac65e8637b6048418cfca104d86483ce0041387d | 1,323 | py | Python | demessifyme/file_read_write.py | lilianluong/demessifyme | 7b90611316a4fc723fe38af8fe6e1ee4209e8fd2 | [
"MIT"
] | null | null | null | demessifyme/file_read_write.py | lilianluong/demessifyme | 7b90611316a4fc723fe38af8fe6e1ee4209e8fd2 | [
"MIT"
] | null | null | null | demessifyme/file_read_write.py | lilianluong/demessifyme | 7b90611316a4fc723fe38af8fe6e1ee4209e8fd2 | [
"MIT"
] | 1 | 2020-10-10T11:13:37.000Z | 2020-10-10T11:13:37.000Z | import glob
import os
from doc2vec import read_file, embed_document
| 33.075 | 84 | 0.615268 |
ac665b07df9871fa56973cfc475e0b3e944d2fc8 | 8,902 | py | Python | util/third_party/tensorflow_extra/tool/tflite/tflite.py | bojanpotocnik/gecko_sdk | 9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite.py | bojanpotocnik/gecko_sdk | 9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903 | [
"Zlib"
] | 2 | 2017-02-13T10:07:17.000Z | 2017-03-22T21:28:26.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite.py | bojanpotocnik/gecko_sdk | 9e70b13fc4701459c5f8a8f5e8918ec3f5ea8903 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | #!/usr/bin/env python3
import sys
import os
import argparse
from string import Template
import re
# Patch site-packages to find numpy
import jinja2
if sys.platform.startswith("win"):
site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../ext-site-packages"))
else:
site_packages_path = os.path.abspath(os.path.join(os.path.dirname(jinja2.__file__), "../../../../ext-site-packages"))
if os.path.exists(site_packages_path):
if site_packages_path not in sys.path:
sys.path.insert(0, site_packages_path)
"""
Generation of parameter files requires the tflite_model, tflite_model_parameters
and tensorflow_lite_support packages. Because these packages are not installed
in the uc-generation environment where this python script will be run, these
packages are supplied as source. tflite_model and tflite_model_parameters were
fetched from internal repos, while the tensorflow_lite_support was fetched from
https://github.com/tensorflow/tflite-support.
"""
import tflite.Model
from tflite_model import TfliteModel
from tflite_model_parameters import TfliteModelParameters
template_model_h = """// Auto-generated serialization of TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_H
#define SL_TFLITE_MICRO_MODEL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
${data}
extern const uint8_t *default_model_array;
extern const uint32_t default_model_len;
#ifdef __cplusplus
}
#endif
#endif // SL_TFLITE_MICRO_MODEL_H
"""
template_model_h_single ="""
extern const uint8_t ${model_name}_array[];
extern const uint32_t ${model_name}_len;
"""
template_model_c = """// Auto-generated serialization of TFLite flatbuffers in config directory
#include "em_device.h"
#include "sl_tflite_micro_model.h"
${data}
const uint8_t *default_model_array = ${model_name}_array;
const uint32_t default_model_len = ${data_len}UL;
"""
template_model_c_single = """
const uint8_t ${model_name}_array[] __ALIGNED(4) = {
${data}
};
const uint32_t ${model_name}_len = ${data_len}UL;
"""
template_opcode_resolver_h = """// Auto-generated macro to instanciate and initialize opcode resolver based on TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER_H
#define SL_TFLITE_MICRO_OPCODE_RESOLVER(opcode_resolver, error_reporter) \\
static tflite::MicroMutableOpResolver<${data_len}> opcode_resolver(error_reporter); \\
${data}
#endif // SL_TFLITE_MICRO_OPCODE_RESOLVER_H
"""
template_model_parameter_single = """#define SL_${model_name}_${config_key} ${config_val}
"""
template_model_default_parameter_single = """#define SL_DEFAULT_MODEL_${config_key} SL_${model_name}_${config_key}
"""
template_model_parameters_h = """// Auto-generated parameters from TFLite flatbuffers in config directory
#ifndef SL_TFLITE_MICRO_MODEL_PARAMETERS_H
#define SL_TFLITE_MICRO_MODEL_PARAMETERS_H
${data}
#endif // SL_TFLITE_MICRO_MODEL_PARAMETERS_H
"""
"""
The following dictionary has been created using the BuiltinOperator enum defining operator values, see schema_generated.h, and
function names defined in the MicroMutableOpResolver object, see micro_mutable_op_resolver.h.
"""
# Maps TFLite BuiltinOperator enum values (schema_generated.h) to the
# corresponding MicroMutableOpResolver "Add*" method names
# (micro_mutable_op_resolver.h).
opcode_dict = {
    101: 'AddAbs',
    0: 'AddAdd',
    106: 'AddAddN',
    56: 'AddArgMax',
    79: 'AddArgMin',
    1: 'AddAveragePool2D',
    104: 'AddCeil',
    2: 'AddConcatenation',
    3: 'AddConv2D',
    108: 'AddCos',
    4: 'AddDepthwiseConv2D',
    6: 'AddDequantize',
    71: 'AddEqual',
    8: 'AddFloor',
    9: 'AddFullyConnected',
    61: 'AddGreater',
    62: 'AddGreaterEqual',
    117: 'AddHardSwish',
    11: 'AddL2Normalization',
    58: 'AddLess',
    63: 'AddLessEqual',
    73: 'AddLog',
    86: 'AddLogicalAnd',
    87: 'AddLogicalNot',
    84: 'AddLogicalOr',
    14: 'AddLogistic',
    55: 'AddMaximum',
    17: 'AddMaxPool2D',
    40: 'AddMean',
    57: 'AddMinimum',
    18: 'AddMul',
    59: 'AddNeg',
    # BUG FIX: NOT_EQUAL is 72 in the BuiltinOperator enum. The previous
    # value (73) duplicated the AddLog key and silently overwrote it.
    72: 'AddNotEqual',
    83: 'AddPack',
    34: 'AddPad',
    60: 'AddPadV2',
    54: 'AddPrelu',
    114: 'AddQuantize',
    82: 'AddReduceMax',
    19: 'AddRelu',
    21: 'AddRelu6',
    22: 'AddReshape',
    97: 'AddResizeNearestNeighbor',
    116: 'AddRound',
    76: 'AddRsqrt',
    77: 'AddShape',
    66: 'AddSin',
    25: 'AddSoftmax',
    47: 'AddSplit',
    102: 'AddSplitV',
    75: 'AddSqrt',
    92: 'AddSquare',
    45: 'AddStridedSlice',
    41: 'AddSub',
    27: 'AddSvdf',
    28: 'AddTanh',
    67: 'AddTransposeConv',
    88: 'AddUnpack'
}
entry() | 30.909722 | 149 | 0.702089 |
ac66fd2ca8108c50a826f92ddb5befe5db26ac80 | 3,290 | py | Python | braintree/transparent_redirect_gateway.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | 3 | 2015-11-05T08:57:12.000Z | 2016-07-17T18:10:55.000Z | braintree/transparent_redirect_gateway.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | null | null | null | braintree/transparent_redirect_gateway.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | null | null | null | import cgi
from datetime import datetime
import urllib
import braintree
from braintree.util.crypto import Crypto
from braintree.error_result import ErrorResult
from braintree.exceptions.forged_query_string_error import ForgedQueryStringError
from braintree.util.http import Http
from braintree.successful_result import SuccessfulResult
from braintree.transparent_redirect import TransparentRedirect
| 40.121951 | 143 | 0.687234 |
ac67224e0a480ab178264f670f037b9c677d4fdc | 358 | py | Python | paint/migrations/0007_auto_20200405_1748.py | atulk17/Paint-App | 4b56455596d140cee4a9b19c71fe82364c3f3b7c | [
"BSD-2-Clause"
] | null | null | null | paint/migrations/0007_auto_20200405_1748.py | atulk17/Paint-App | 4b56455596d140cee4a9b19c71fe82364c3f3b7c | [
"BSD-2-Clause"
] | null | null | null | paint/migrations/0007_auto_20200405_1748.py | atulk17/Paint-App | 4b56455596d140cee4a9b19c71fe82364c3f3b7c | [
"BSD-2-Clause"
] | 1 | 2020-05-31T11:37:48.000Z | 2020-05-31T11:37:48.000Z | # Generated by Django 3.0.4 on 2020-04-05 12:18
from django.db import migrations
| 19.888889 | 48 | 0.578212 |
ac681a8ffae94d52efd701e9160788c29c5b6e8c | 72 | py | Python | app.py | ManarAbdelkarim/-hr-managmentSystem | 22d2ea340824c9533576c3e7c96296f443d7bf50 | [
"MIT"
] | null | null | null | app.py | ManarAbdelkarim/-hr-managmentSystem | 22d2ea340824c9533576c3e7c96296f443d7bf50 | [
"MIT"
] | null | null | null | app.py | ManarAbdelkarim/-hr-managmentSystem | 22d2ea340824c9533576c3e7c96296f443d7bf50 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__,static_folder='../static') | 36 | 47 | 0.763889 |
ac68c7b7a0f74f060897b04d3c95265a4c3918fb | 4,412 | py | Python | esprit/models.py | Ken125pig/esprit | 6fc6b24450627077f1d38145e14765f776feba8b | [
"Apache-2.0"
] | null | null | null | esprit/models.py | Ken125pig/esprit | 6fc6b24450627077f1d38145e14765f776feba8b | [
"Apache-2.0"
] | 6 | 2016-02-04T12:08:40.000Z | 2017-12-08T15:48:35.000Z | esprit/models.py | Ken125pig/esprit | 6fc6b24450627077f1d38145e14765f776feba8b | [
"Apache-2.0"
] | 4 | 2016-09-09T11:01:27.000Z | 2021-11-21T05:49:14.000Z | from copy import deepcopy
import string
from esprit import versions
unicode_punctuation_map = dict((ord(char), None) for char in string.punctuation)
| 34.46875 | 120 | 0.529465 |
ac6bcb9638b71ebbb75437e8984e9150ca824759 | 2,074 | py | Python | src/focus/api.py | RogerRueegg/lvw-young-talents | baf8490230230fffb232a13eb641b55ede29a710 | [
"MIT"
] | 1 | 2018-02-13T08:09:02.000Z | 2018-02-13T08:09:02.000Z | src/focus/api.py | RogerRueegg/lvw-young-talents | baf8490230230fffb232a13eb641b55ede29a710 | [
"MIT"
] | null | null | null | src/focus/api.py | RogerRueegg/lvw-young-talents | baf8490230230fffb232a13eb641b55ede29a710 | [
"MIT"
] | null | null | null | from . import models
from . import serializers
from rest_framework import viewsets, permissions
| 29.628571 | 61 | 0.771456 |
ac6bff0ef28a6f43b86268b434e114b49de8f3f4 | 1,273 | py | Python | algorithms/array/majority_element.py | kevinshenyang07/Data-Structure-and-Algo | 36b02feea04b892f1256de090c4fcf7b6aa98873 | [
"MIT"
] | null | null | null | algorithms/array/majority_element.py | kevinshenyang07/Data-Structure-and-Algo | 36b02feea04b892f1256de090c4fcf7b6aa98873 | [
"MIT"
] | null | null | null | algorithms/array/majority_element.py | kevinshenyang07/Data-Structure-and-Algo | 36b02feea04b892f1256de090c4fcf7b6aa98873 | [
"MIT"
] | null | null | null | # Moore Voting
# Majority Element
# Given an array of size n, find the majority element.
# The majority element is the element that appears more than n/2 times.
# assume at leat one element
# Majority Element II
| 25.979592 | 75 | 0.495679 |
ac6d9ecff3da360afd64ff653a18f8313213ea89 | 13,844 | py | Python | evmosproto/evmos/incentives/v1/incentives_pb2.py | hanchon-live/evmosproto | 141f336cf027a88c5bf227ab49069dd1cf2e4853 | [
"MIT"
] | null | null | null | evmosproto/evmos/incentives/v1/incentives_pb2.py | hanchon-live/evmosproto | 141f336cf027a88c5bf227ab49069dd1cf2e4853 | [
"MIT"
] | null | null | null | evmosproto/evmos/incentives/v1/incentives_pb2.py | hanchon-live/evmosproto | 141f336cf027a88c5bf227ab49069dd1cf2e4853 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: evmos/incentives/v1/incentives.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from evmosproto.gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from evmosproto.cosmos.base.v1beta1 import coin_pb2 as cosmos_dot_base_dot_v1beta1_dot_coin__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='evmos/incentives/v1/incentives.proto',
package='evmos.incentives.v1',
syntax='proto3',
serialized_options=b'Z+github.com/tharsis/evmos/x/incentives/types',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$evmos/incentives/v1/incentives.proto\x12\x13\x65vmos.incentives.v1\x1a\x14gogoproto/gogo.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1e\x63osmos/base/v1beta1/coin.proto\"\xe2\x01\n\tIncentive\x12\x10\n\x08\x63ontract\x18\x01 \x01(\t\x12\x66\n\x0b\x61llocations\x18\x02 \x03(\x0b\x32\x1c.cosmos.base.v1beta1.DecCoinB3\xc8\xde\x1f\x00\xaa\xdf\x1f+github.com/cosmos/cosmos-sdk/types.DecCoins\x12\x0e\n\x06\x65pochs\x18\x03 \x01(\r\x12\x38\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x08\x90\xdf\x1f\x01\xc8\xde\x1f\x00\x12\x11\n\ttotal_gas\x18\x05 \x01(\x04\"I\n\x08GasMeter\x12\x10\n\x08\x63ontract\x18\x01 \x01(\t\x12\x13\n\x0bparticipant\x18\x02 \x01(\t\x12\x16\n\x0e\x63umulative_gas\x18\x03 \x01(\x04\"\xcf\x01\n\x19RegisterIncentiveProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x10\n\x08\x63ontract\x18\x03 \x01(\t\x12\x66\n\x0b\x61llocations\x18\x04 \x03(\x0b\x32\x1c.cosmos.base.v1beta1.DecCoinB3\xc8\xde\x1f\x00\xaa\xdf\x1f+github.com/cosmos/cosmos-sdk/types.DecCoins\x12\x0e\n\x06\x65pochs\x18\x05 \x01(\r:\x04\xe8\xa0\x1f\x00\"U\n\x17\x43\x61ncelIncentiveProposal\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x10\n\x08\x63ontract\x18\x03 \x01(\t:\x04\xe8\xa0\x1f\x00\x42-Z+github.com/tharsis/evmos/x/incentives/typesb\x06proto3'
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,cosmos_dot_base_dot_v1beta1_dot_coin__pb2.DESCRIPTOR,])
_INCENTIVE = _descriptor.Descriptor(
name='Incentive',
full_name='evmos.incentives.v1.Incentive',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.Incentive.contract', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allocations', full_name='evmos.incentives.v1.Incentive.allocations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000\252\337\037+github.com/cosmos/cosmos-sdk/types.DecCoins', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='epochs', full_name='evmos.incentives.v1.Incentive.epochs', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='evmos.incentives.v1.Incentive.start_time', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\220\337\037\001\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_gas', full_name='evmos.incentives.v1.Incentive.total_gas', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=375,
)
_GASMETER = _descriptor.Descriptor(
name='GasMeter',
full_name='evmos.incentives.v1.GasMeter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.GasMeter.contract', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='participant', full_name='evmos.incentives.v1.GasMeter.participant', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cumulative_gas', full_name='evmos.incentives.v1.GasMeter.cumulative_gas', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=377,
serialized_end=450,
)
_REGISTERINCENTIVEPROPOSAL = _descriptor.Descriptor(
name='RegisterIncentiveProposal',
full_name='evmos.incentives.v1.RegisterIncentiveProposal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='evmos.incentives.v1.RegisterIncentiveProposal.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='evmos.incentives.v1.RegisterIncentiveProposal.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.RegisterIncentiveProposal.contract', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allocations', full_name='evmos.incentives.v1.RegisterIncentiveProposal.allocations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000\252\337\037+github.com/cosmos/cosmos-sdk/types.DecCoins', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='epochs', full_name='evmos.incentives.v1.RegisterIncentiveProposal.epochs', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\350\240\037\000',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=660,
)
_CANCELINCENTIVEPROPOSAL = _descriptor.Descriptor(
name='CancelIncentiveProposal',
full_name='evmos.incentives.v1.CancelIncentiveProposal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='evmos.incentives.v1.CancelIncentiveProposal.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='evmos.incentives.v1.CancelIncentiveProposal.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contract', full_name='evmos.incentives.v1.CancelIncentiveProposal.contract', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\350\240\037\000',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=662,
serialized_end=747,
)
_INCENTIVE.fields_by_name['allocations'].message_type = cosmos_dot_base_dot_v1beta1_dot_coin__pb2._DECCOIN
_INCENTIVE.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_REGISTERINCENTIVEPROPOSAL.fields_by_name['allocations'].message_type = cosmos_dot_base_dot_v1beta1_dot_coin__pb2._DECCOIN
DESCRIPTOR.message_types_by_name['Incentive'] = _INCENTIVE
DESCRIPTOR.message_types_by_name['GasMeter'] = _GASMETER
DESCRIPTOR.message_types_by_name['RegisterIncentiveProposal'] = _REGISTERINCENTIVEPROPOSAL
DESCRIPTOR.message_types_by_name['CancelIncentiveProposal'] = _CANCELINCENTIVEPROPOSAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Incentive = _reflection.GeneratedProtocolMessageType('Incentive', (_message.Message,), {
'DESCRIPTOR' : _INCENTIVE,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.Incentive)
})
_sym_db.RegisterMessage(Incentive)
GasMeter = _reflection.GeneratedProtocolMessageType('GasMeter', (_message.Message,), {
'DESCRIPTOR' : _GASMETER,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.GasMeter)
})
_sym_db.RegisterMessage(GasMeter)
RegisterIncentiveProposal = _reflection.GeneratedProtocolMessageType('RegisterIncentiveProposal', (_message.Message,), {
'DESCRIPTOR' : _REGISTERINCENTIVEPROPOSAL,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.RegisterIncentiveProposal)
})
_sym_db.RegisterMessage(RegisterIncentiveProposal)
CancelIncentiveProposal = _reflection.GeneratedProtocolMessageType('CancelIncentiveProposal', (_message.Message,), {
'DESCRIPTOR' : _CANCELINCENTIVEPROPOSAL,
'__module__' : 'evmos.incentives.v1.incentives_pb2'
# @@protoc_insertion_point(class_scope:evmos.incentives.v1.CancelIncentiveProposal)
})
_sym_db.RegisterMessage(CancelIncentiveProposal)
DESCRIPTOR._options = None
_INCENTIVE.fields_by_name['allocations']._options = None
_INCENTIVE.fields_by_name['start_time']._options = None
_REGISTERINCENTIVEPROPOSAL.fields_by_name['allocations']._options = None
_REGISTERINCENTIVEPROPOSAL._options = None
_CANCELINCENTIVEPROPOSAL._options = None
# @@protoc_insertion_point(module_scope)
| 48.069444 | 1,372 | 0.769142 |
ac6da0ba7a668648271540c11b0b554223adac1a | 15,332 | py | Python | modules/dashboard/question_editor.py | danieldanciu/schoggi | 0e18f0cca58cf2318525d57691c2e674b131206d | [
"Apache-2.0"
] | null | null | null | modules/dashboard/question_editor.py | danieldanciu/schoggi | 0e18f0cca58cf2318525d57691c2e674b131206d | [
"Apache-2.0"
] | null | null | null | modules/dashboard/question_editor.py | danieldanciu/schoggi | 0e18f0cca58cf2318525d57691c2e674b131206d | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and editing of questions."""
__author__ = 'John Orr (jorr@google.com)'
import copy
import messages
from common import schema_fields
from models import transforms
from models.models import QuestionDAO
from models.models import QuestionDTO
from models.models import SaQuestionConstants
from modules.dashboard import dto_editor
from modules.dashboard import utils as dashboard_utils
| 40.560847 | 79 | 0.621706 |
ac6e43614244577418d650dcbc06148d7a2b0c7c | 5,598 | py | Python | clayful/__init__.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | clayful/__init__.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | 3 | 2020-04-17T05:24:06.000Z | 2022-02-10T09:00:22.000Z | clayful/__init__.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | import re
import urllib
import numbers
from clayful.models import register_models
from clayful.requester import request
from clayful.exception import ClayfulException
# Register models
register_models(Clayful) | 19.992857 | 128 | 0.66363 |
ac6eca8aa04c226da4e5ecb684240f7192f29f63 | 1,478 | py | Python | racecar_gym/envs/scenarios.py | luigiberducci/racecar_gym | fd2ff7fb14e9319530786ef54a4a6864bf1f1c26 | [
"MIT"
] | 16 | 2020-11-27T02:55:24.000Z | 2022-03-24T01:27:29.000Z | racecar_gym/envs/scenarios.py | luigiberducci/racecar_gym | fd2ff7fb14e9319530786ef54a4a6864bf1f1c26 | [
"MIT"
] | 5 | 2020-08-24T15:59:39.000Z | 2020-10-20T19:45:46.000Z | racecar_gym/envs/scenarios.py | luigiberducci/racecar_gym | fd2ff7fb14e9319530786ef54a4a6864bf1f1c26 | [
"MIT"
] | 4 | 2020-10-08T16:14:19.000Z | 2021-12-26T18:19:53.000Z | from dataclasses import dataclass
from typing import Dict
from racecar_gym.bullet import load_world, load_vehicle
from racecar_gym.tasks import Task, get_task
from racecar_gym.core import World, Agent
from .specs import ScenarioSpec, TaskSpec
| 30.791667 | 119 | 0.688092 |
ac6eda3ee83ebc645d67258023269c71e7def1cb | 26,972 | py | Python | vkts/real.py | smurphik/vkts | 21e16f37eebf80cd41fd02d7401e523e772e98f9 | [
"MIT"
] | null | null | null | vkts/real.py | smurphik/vkts | 21e16f37eebf80cd41fd02d7401e523e772e98f9 | [
"MIT"
] | null | null | null | vkts/real.py | smurphik/vkts | 21e16f37eebf80cd41fd02d7401e523e772e98f9 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""Implementation of the main functions of the application: editing user data,
thematic search, other console commands"""
import sys
import re
from collections import Counter
from .report import Report
from . import vklib as vk
from .vklib import apply_vk_method
from .usrdata import UsrData
from .utils import exception_handler
####################################################################
## Account commands ##
####################################################################
def add_account():
"""Interactive function of account adding"""
try:
# Read data about account
# Account type
in_str = input('What type of account do you want to create?\n'
+ 'Input please one letter - [e]mail, [v]k.ru\n'
+ 'or [t]elegram: ').lower()
ac_type = {'e': 'email', 'v': 'vk', 't': 'telegram'}[in_str]
# Account name
ac_name = input('\nInput name of new account: ').lower()
# Privacy
print('\nAre your account fake (password will be stored\n'
+ 'unencrypted on your computer) or private (you\n'
+ 'will be forced to enter the password every run)?')
in_str = input('Input please [f]ake or [p]rivate: ').lower()
ac_privacy = {'f': 'fake', 'p': 'private'}[in_str]
# User name
if ac_type == 'email' or ac_type == 'vk':
ac_user_name = input('\nInput email address: ')
elif ac_type == 'telegram':
ac_user_name = input('\nInput nickname: ').lstrip('@')
else:
raise
# Password (it's fake account, so we don't use smth. like getpass)
if ac_privacy == 'fake':
ac_password = input('\nInput password: ')
else:
ac_password = None
# Save account data
acc_obj = {'uname': ac_user_name,
'password': ac_password,
'token': None,
'is_activated': False}
UsrData().set(acc_obj, 'acc', ac_type, ac_name, correct_is_act=True)
except Exception as e:
exception_handler(e, 'Failed to enter account data')
def delete_account(ac_type, ac_name):
"""Deletion account ac_name of type ac_type from registry"""
UsrData().del_('acc', ac_type, ac_name, correct_is_act=True)
def activate_account(ac_type, ac_name):
"""Choose active account"""
u = UsrData()
if ac_name in u.get('acc', ac_type):
u.drop_activations('acc', ac_type)
u.set(True, 'acc', ac_type, ac_name, 'is_activated')
def display_accounts():
"""Display all accounts and active marks"""
accs = UsrData().get('acc')
for ac_type in accs:
# check existance
if not accs[ac_type]:
continue
# print accounts of type ac_type
print(ac_type)
for ac_name in accs[ac_type]:
acc_obj = accs[ac_type][ac_name]
print(' {}: \t[{}|{}]{}'.format(ac_name,
acc_obj['uname'],
acc_obj['password'],
(' \t<- activated'
if acc_obj['is_activated']
else '')))
print('')
####################################################################
## University commands ##
####################################################################
# Search hot university ids by un_groups
def add_university():
"""Interactive function of adding data abaot university"""
# Read data about university
try:
# Name of university
un_name = input(
'Input please simple name of university (better in short latin;\n'
+ 'for example: mipt, msu, mgimo): \n> ')
# Titles of university
print('Input please all titles of university (for example:\n'
+ ', , MIPT). After every title press [ENTER].\n'
+ 'Finish input by [ENTER].')
un_titles = []
while True:
title = input('> ')
if not title:
break
un_titles.append(title)
assert un_titles
# Looking for a university in the vk database
e = vk.Executor()
for title in un_titles:
e.add_request('database.getUniversities', q=title)
e.emit_requests()
un_items = []
for r in e.responses:
un_items.extend(r['items'])
# Clarify the results by checking exact words match
tmp = un_items
un_items = []
unique_ids = set()
for item in tmp:
# ensuring uniqueness of search results
if item['id'] in unique_ids:
continue
unique_ids.add(item['id'])
# check title existence
for title in un_titles:
for word in title.split():
if not re.search(r'\b' + word.lower() + r'\b',
item['title'].lower()):
break
else:
un_items.append(item)
break
del tmp, unique_ids
assert un_items
# Ask the user to clarify the results
print(
'Database search results may contain extra items.\n'
+ 'Check it please. You will see chanks of 10 items in turn.\n'
+ 'For every chank enter the numbers corresponding to the wrong\n'
+ 'elements (for example, 239 if corr. three items are wrong).\n'
+ 'Then press [ENTER].')
tmp = un_items
un_items = []
for i in range(0, len(tmp), 10):
chunk = tmp[i:i+10]
for j, item in enumerate(chunk):
print('{}> {}'.format(j, item['title']))
in_str = input('Wrong numbers: ')
extra_indexes_list = [int(c) for c in in_str if c.isdigit()]
extra_indexes_list.sort(reverse=True)
for j in extra_indexes_list:
del chunk[j]
un_items.extend(chunk)
del tmp
assert un_items
# Read very big groups which consists many students
print('Input please id or domains of very very big groups\n'
+ 'which consists many students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_big_groups = []
while True:
gr_id = input('> ')
if not gr_id:
break
un_big_groups.append(gr_id)
un_big_groups = vk.resolve_group_ids(un_big_groups)
un_big_groups = [{'id': x[0], 'domain': x[1]} for x in un_big_groups]
assert un_big_groups
# Read groups which consists almost exclusively of students
print('Input please id or domains of groups which consists\n'
+ 'almost exclusively of students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_groups = []
while True:
gr_id = input('> ')
if not gr_id:
break
un_groups.append(gr_id)
un_groups = vk.resolve_group_ids(un_groups)
un_groups = [{'id': x[0], 'domain': x[1]} for x in un_groups]
# Read users whose almost all friends are students
print('Input please id or domains of users whose almost all\n'
+ 'friends are students of added university.\n'
+ 'After every id press [ENTER]. Finish input by [ENTER].')
un_users = []
while True:
us_id = input('> ')
if not us_id:
break
us_id, us_scr = vk.resolve_user_ids(us_id)
un_users.append({'id': us_id, 'domain': us_scr})
# Search hot university ids by un_big_groups (unfortunately,
# vk search method is not stable, so we use this approach)
print('Start analysis of the prevalence of university identifiers.\n'
+ '(It could take several minutes)')
un_ids = list(map(str, [x['id'] for x in un_items]))
hot_ids = search_hot_university_ids(un_big_groups, un_ids)
# Save university data
univ_obj = {'titles': un_titles,
'big_groups': un_big_groups,
'crystal_groups': un_groups,
'crystal_users': un_users,
'all_ids': [x['id'] for x in un_items],
'hot_ids': hot_ids,
'is_activated': False}
UsrData().set(univ_obj, 'univ', un_name, correct_is_act=True)
except Exception as e:
exception_handler(e, 'Failed to enter or add university data')
def delete_university(un_name):
"""Deletion un_name from registry"""
UsrData().del_('univ', un_name, correct_is_act=True)
def activate_university(un_name):
"""Choose active university for analysis"""
u = UsrData()
if un_name in u.get('univ'):
u.drop_activations('univ')
u.set(True, 'univ', un_name, 'is_activated')
def display_universities():
"""Display data for all universities"""
# Read universities registry
univs = UsrData().get('univ')
# Print
for un_name in univs:
univ_obj = univs[un_name]
print(un_name + '{}'.format('\t\t\t\t<- ACTIVATED'
if univ_obj['is_activated']
else ''))
print('Title: {}'.format(', '.join(univ_obj['titles'])))
s = ', '.join([x['domain'] for x in univ_obj['big_groups']])
print('Big vk groups: {}'.format(s))
s = ', '.join([x['domain'] for x in univ_obj['crystal_groups']])
print('Crystal vk groups: {}'.format(s))
s = ', '.join([x['domain'] for x in univ_obj['crystal_users']])
print('Crystal vk users: {}'.format(s))
# Identifier list with carry and indents
print('VK ids: ', end='')
ids = [str(x) for x in univ_obj['all_ids']]
ids2 = []
for i in range(0, len(ids), 5):
ids2.append(', '.join(ids[i:i+5]))
s = ',\n '.join(x for x in ids2)
print(s)
# Hot identifiers with temperature in parentheses
sum_cnt = sum([x['temp'] for x in univ_obj['hot_ids']])
s = ', '.join(['{} ({:.1f} %)'.format(x['id'], 100*x['temp']/sum_cnt)
for x in univ_obj['hot_ids']])
print('Hot VK ids: {}'.format(s))
print('')
####################################################################
## VK API commands ##
####################################################################
# Any user-defined vk API method
####################################################################
## Other commands ##
####################################################################
# Search new crystal students among friends of given set of crystal students
# Load friends of given users list with counters of repetitions
# Return: generate dict {id0:num_of_repeat(id0), id1:..., ...}
# Load ids of friends of users and write them to data/users_packs/pack_name
# Wrapper for method users.search
# Load list of phystechs using method users.search
# WARNING: Don't use it too often. Otherwise account will be banned.
# TODO: (, , , , ),
# . data id
# Read from groups of set members of university (field 'univer_members')
#
# Create report about thematic student by list with
# information (id, their groups packs) about them
| 38.752874 | 84 | 0.58075 |
ac6f2dbc609bab1cd3af2ace2bafd614f0610168 | 10,226 | py | Python | sample_facemesh.py | swipswaps/mediapipe-python | 00700129ced41dcdab174cd46454f5e7e3d9e25b | [
"Apache-2.0"
] | 92 | 2021-03-09T08:27:17.000Z | 2022-03-09T08:20:48.000Z | sample_facemesh.py | swipswaps/mediapipe-python | 00700129ced41dcdab174cd46454f5e7e3d9e25b | [
"Apache-2.0"
] | 1 | 2021-12-23T05:15:26.000Z | 2022-02-21T20:35:21.000Z | sample_facemesh.py | swipswaps/mediapipe-python | 00700129ced41dcdab174cd46454f5e7e3d9e25b | [
"Apache-2.0"
] | 46 | 2021-03-08T10:24:54.000Z | 2021-12-20T07:12:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
if __name__ == '__main__':
main()
| 38.588679 | 88 | 0.552415 |
ac6f3083292de976db6a89e3601228fd50986b48 | 1,413 | py | Python | andromeda/modules/loans/views/inventory_loans.py | sango09/andromeda_api_rest | b4a3267146f4f9a985fb3f512e652d4ff354bba2 | [
"MIT"
] | 1 | 2021-09-08T18:58:16.000Z | 2021-09-08T18:58:16.000Z | andromeda/modules/loans/views/inventory_loans.py | sango09/andromeda_api_rest | b4a3267146f4f9a985fb3f512e652d4ff354bba2 | [
"MIT"
] | null | null | null | andromeda/modules/loans/views/inventory_loans.py | sango09/andromeda_api_rest | b4a3267146f4f9a985fb3f512e652d4ff354bba2 | [
"MIT"
] | null | null | null | """Vista del inventario del modulo de prestamos tecnologicos."""
# Django REST Framework
from rest_framework import viewsets, mixins
# Permisos
from rest_framework.permissions import IsAuthenticated
from andromeda.modules.inventory.permissions import IsAdmin, IsStaff
# Modelos
from andromeda.modules.loans.models import InventoryLoans
# Serializers
from andromeda.modules.loans.serializers import InventoryLoansSerializer, CreateInventoryLoansSerializer
| 36.230769 | 104 | 0.685775 |
ac6fc1b210632046a04f35464d2d89383a795143 | 876 | py | Python | N-Gram/PlotUtils.py | FindTheTruth/Natural-Language-Processing | a52c777e505dd5ccd9f892fbf98ba50d4c29b31b | [
"Apache-2.0"
] | 1 | 2022-03-23T09:26:59.000Z | 2022-03-23T09:26:59.000Z | N-Gram/PlotUtils.py | FindTheTruth/Natural-Language-Processing | a52c777e505dd5ccd9f892fbf98ba50d4c29b31b | [
"Apache-2.0"
] | null | null | null | N-Gram/PlotUtils.py | FindTheTruth/Natural-Language-Processing | a52c777e505dd5ccd9f892fbf98ba50d4c29b31b | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
x = ["N=1", "N=2", "N=3", "N=4", "N=5","N=6"]
y = [0.9365, 0.9865, 0.9895, 0.9950,0.9880,0.9615]
rects = plt.barh(x, y, color=["red", "blue", "purple", "violet", "green", "black"])
for rect in rects: # rects
width = rect.get_width()
print(width)
plt.text(width, rect.get_y() + rect.get_height() / 2, str(width), size=10)
plt.xlim(0.0,1.3)
# plt.legend()
plt.show()
x = ["k=1e-5","k=1e-4", "k=1e-3", "k=1e-2", "k=1e-1", "k=1.0"]
y = [0.9895, 0.9900, 0.9950, 0.9885,0.9740,0.831]
# y = [0.9365, 0.9865, 0.9895, 0.9950,0.9880,0.9615]
rects = plt.barh(x, y, color=["red", "blue", "purple", "violet", "green", "black"])
for rect in rects: # rects
width = rect.get_width()
print(width)
plt.text(width, rect.get_y() + rect.get_height() / 2, str(width), size=10)
plt.xlim(0.0,1.3)
# plt.legend()
plt.show() | 36.5 | 83 | 0.589041 |
ac70a851d5c96469acf3749c8c769ab79086f7dc | 226 | py | Python | tests/test_logger.py | sp-95/python-template | 3d4fab175314fe2b200e77c7c71c464e897749b9 | [
"MIT"
] | null | null | null | tests/test_logger.py | sp-95/python-template | 3d4fab175314fe2b200e77c7c71c464e897749b9 | [
"MIT"
] | null | null | null | tests/test_logger.py | sp-95/python-template | 3d4fab175314fe2b200e77c7c71c464e897749b9 | [
"MIT"
] | null | null | null | from _pytest.logging import LogCaptureFixture
from loguru import logger
| 22.6 | 62 | 0.774336 |
ac714ac75d2b71ae4b1604d302d1232df43b166e | 142 | py | Python | Semana 09/fase.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | Semana 09/fase.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | Semana 09/fase.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | n, k, v = int(input()), int(input()), []
for i in range(n): v.append(int(input()))
v = sorted(v, reverse=True)
print(k + v[k:].count(v[k-1]))
| 28.4 | 41 | 0.570423 |
ac715495b2bb97e43a63daf0a85ca3c192ec09c8 | 1,160 | py | Python | 02_crowsnest/ragz_crowsnest.py | zrucker/tiny_python_projects | 7760c9db2e89640c6485e01891a0022927a46c3b | [
"MIT"
] | null | null | null | 02_crowsnest/ragz_crowsnest.py | zrucker/tiny_python_projects | 7760c9db2e89640c6485e01891a0022927a46c3b | [
"MIT"
] | null | null | null | 02_crowsnest/ragz_crowsnest.py | zrucker/tiny_python_projects | 7760c9db2e89640c6485e01891a0022927a46c3b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Date : 2021-09-06
Purpose: learning to work with strings
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Crow\'s Nest -- choose the correct article',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('word',
metavar='word',
help='A word')
return parser.parse_args()
# --------------------------------------------------
def get_article(user_input):
"""Determine which article to use"""
# vowels = ['a', 'e', 'i', 'o', 'u']
if user_input[0] in 'aeiouAEIOU':
solution = "an"
else:
solution = "a"
return solution
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
word = args.word
article = get_article(word)
print("Ahoy, Captain, {} {} off the larboard bow!".format(article, word))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 21.481481 | 77 | 0.483621 |
ac71f74611bb270f9befb24fa9ac317b196927ef | 9,397 | py | Python | lvsfunc/render.py | End-of-Eternity/lvsfunc | 1c9ff7f1d9731378536fb428f077075285e25843 | [
"MIT"
] | null | null | null | lvsfunc/render.py | End-of-Eternity/lvsfunc | 1c9ff7f1d9731378536fb428f077075285e25843 | [
"MIT"
] | null | null | null | lvsfunc/render.py | End-of-Eternity/lvsfunc | 1c9ff7f1d9731378536fb428f077075285e25843 | [
"MIT"
] | null | null | null | """
Clip rendering helpers.
"""
import vapoursynth as vs
from enum import Enum
from threading import Condition
from typing import BinaryIO, Callable, Dict, List, Optional, TextIO, Union
from concurrent.futures import Future
from functools import partial
from .progress import Progress, BarColumn, FPSColumn, TextColumn, TimeRemainingColumn
from .util import get_prop
core = vs.core
RenderCallback = Callable[[int, vs.VideoFrame], None]
def finish_frame(outfile: Optional[BinaryIO], timecodes: Optional[TextIO], ctx: RenderContext) -> None:
"""
Output a frame.
:param outfile: Output IO handle for Y4MPEG
:param timecodes: Output IO handle for timecodesv2
:param ctx: Rendering context
"""
if timecodes:
timecodes.write(f"{round(ctx.timecodes[ctx.frames_rendered]*1000):d}\n")
if outfile is None:
return
f: vs.VideoFrame = ctx.frames[ctx.frames_rendered]
outfile.write("FRAME\n".encode("utf-8"))
for i, p in enumerate(f.planes()):
if f.get_stride(i) != p.width * f.format.bytes_per_sample:
outfile.write(bytes(p)) # type: ignore
else:
outfile.write(p) # type: ignore
def clip_async_render(clip: vs.VideoNode,
outfile: Optional[BinaryIO] = None,
timecodes: Optional[TextIO] = None,
progress: Optional[str] = "Rendering clip...",
callback: Union[RenderCallback, List[RenderCallback], None] = None) -> List[float]:
"""
Render a clip by requesting frames asynchronously using clip.get_frame_async,
providing for callback with frame number and frame object.
This is mostly a re-implementation of VideoNode.output, but a little bit slower since it's pure python.
You only really need this when you want to render a clip while operating on each frame in order
or you want timecodes without using vspipe.
:param clip: Clip to render.
:param outfile: Y4MPEG render output BinaryIO handle. If None, no Y4M output is performed.
Use ``sys.stdout.buffer`` for stdout. (Default: None)
:param timecodes: Timecode v2 file TextIO handle. If None, timecodes will not be written.
:param progress: String to use for render progress display.
If empty or ``None``, no progress display.
:param callback: Single or list of callbacks to be preformed. The callbacks are called
when each sequential frame is output, not when each frame is done.
Must have signature ``Callable[[int, vs.VideoNode], None]``
See :py:func:`lvsfunc.comparison.diff` for a use case (Default: None).
:return: List of timecodes from rendered clip.
"""
cbl = [] if callback is None else callback if isinstance(callback, list) else [callback]
if progress:
p = get_render_progress()
task = p.add_task(progress, total=clip.num_frames)
cbl.append(_progress_cb)
ctx = RenderContext(clip, core.num_threads)
bad_timecodes: bool = False
if outfile:
if clip.format is None:
raise ValueError("clip_async_render: 'Cannot render a variable format clip to y4m!'")
if clip.format.color_family not in (vs.YUV, vs.GRAY):
raise ValueError("clip_async_render: 'Can only render YUV and GRAY clips to y4m!'")
if clip.format.color_family == vs.GRAY:
y4mformat = "mono"
else:
ss = (clip.format.subsampling_w, clip.format.subsampling_h)
if ss == (1, 1):
y4mformat = "420"
elif ss == (1, 0):
y4mformat = "422"
elif ss == (0, 0):
y4mformat = "444"
elif ss == (2, 2):
y4mformat = "410"
elif ss == (2, 0):
y4mformat = "411"
elif ss == (0, 1):
y4mformat = "440"
else:
raise ValueError("clip_async_render: 'What have you done'")
y4mformat = f"{y4mformat}p{clip.format.bits_per_sample}" if clip.format.bits_per_sample > 8 else y4mformat
header = f"YUV4MPEG2 C{y4mformat} W{clip.width} H{clip.height} F{clip.fps_num}:{clip.fps_den} Ip A0:0\n"
outfile.write(header.encode("utf-8"))
if timecodes:
timecodes.write("# timestamp format v2\n")
ctx.condition.acquire()
# seed threads
if progress:
p.start()
try:
for n in range(min(clip.num_frames, core.num_threads)):
cbp = partial(cb, n=n) # lambda won't bind the int immediately
clip.get_frame_async(n).add_done_callback(cbp) # type: ignore
while ctx.frames_rendered != clip.num_frames:
ctx.condition.wait()
finally:
if progress:
p.stop()
return ctx.timecodes # might as well
def find_scene_changes(clip: vs.VideoNode, mode: SceneChangeMode = SceneChangeMode.WWXD) -> List[int]:
"""
Generate a list of scene changes (keyframes).
Dependencies:
* vapoursynth-wwxd
* vapoursynth-scxvid (Optional: scxvid mode)
:param clip: Clip to search for scene changes. Will be rendered in its entirety.
:param mode: Scene change detection mode:
* WWXD: Use wwxd
* SCXVID: Use scxvid
* WWXD_SCXVID_UNION: Union of wwxd and sxcvid (must be detected by at least one)
* WWXD_SCXVID_INTERSECTION: Intersection of wwxd and scxvid (must be detected by both)
:return: List of scene changes.
"""
frames = []
clip = clip.resize.Bilinear(640, 360, format=vs.YUV420P8)
if mode in (SceneChangeMode.WWXD, SceneChangeMode.WWXD_SCXVID_UNION, SceneChangeMode.WWXD_SCXVID_INTERSECTION):
clip = clip.wwxd.WWXD()
if mode in (SceneChangeMode.SCXVID, SceneChangeMode.WWXD_SCXVID_UNION, SceneChangeMode.WWXD_SCXVID_INTERSECTION):
clip = clip.scxvid.Scxvid()
clip_async_render(clip, progress="Detecting scene changes...", callback=_cb)
return sorted(frames)
| 36.85098 | 117 | 0.612536 |
ac72f9700e343b6945d908397ce48596d4d77b7e | 1,689 | py | Python | geekjobs/forms.py | paconte/geekjobs | 4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e | [
"MIT"
] | null | null | null | geekjobs/forms.py | paconte/geekjobs | 4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e | [
"MIT"
] | null | null | null | geekjobs/forms.py | paconte/geekjobs | 4f5f72c9a08dd4e7bf58dc68364dedce9c248c3e | [
"MIT"
] | 1 | 2019-09-14T21:59:18.000Z | 2019-09-14T21:59:18.000Z | from django import forms
from django.utils.translation import ugettext_lazy as _
from geekjobs.models import Job
"""
class JobForm(forms.Form):
title = forms.CharField(label='Job title', max_length=380)
city = forms.CharField(label='City', max_length=100, required=False)
state = forms.ChoiceField(label='State', choices=DE_STATE_CHOICES)
remote = forms.BooleanField(label='Remote', required=False)
salary = forms.CharField(label='Salary', max_length=100, required=False)
description = forms.CharField(label='Job Description', max_length=10000)
description.widget = forms.HiddenInput()
instructions = forms.CharField(label='How do people apply for this job?', max_length=380)
instructions.widget = forms.Textarea(attrs={'rows': 3})
name = forms.CharField(label='Company Name', max_length=100)
url = forms.CharField(label='Job URL', max_length=150)
email = forms.EmailField(label='Email')
"""
| 40.214286 | 118 | 0.616341 |
ac7444482c933dd6646be8739b9a9cde05c17711 | 1,133 | py | Python | solutions/day3/solution.py | JavierLuna/advent-of-code-2020 | 57429a7973446472fffb07dc770f260160407f0c | [
"MIT"
] | 1 | 2020-12-03T08:57:20.000Z | 2020-12-03T08:57:20.000Z | solutions/day3/solution.py | JavierLuna/advent-of-code-2020 | 57429a7973446472fffb07dc770f260160407f0c | [
"MIT"
] | null | null | null | solutions/day3/solution.py | JavierLuna/advent-of-code-2020 | 57429a7973446472fffb07dc770f260160407f0c | [
"MIT"
] | null | null | null | import math
from typing import Tuple, Set
from solutions.runner.base_solution import BaseSolution
from solutions.runner.readers.base_reader import BaseReader
TREE = "#"
| 24.106383 | 85 | 0.579876 |
ac74f61ecc9c4ba33c87adf303e2474d5cc2e06f | 10,003 | py | Python | lib/core_tools/tools.py | rocketcapital-ai/competition_submission | bb9663dfe17733cc7de841f48e9e2770d911c599 | [
"MIT"
] | 1 | 2022-02-12T08:38:42.000Z | 2022-02-12T08:38:42.000Z | lib/core_tools/tools.py | rocketcapital-ai/competition_submission | bb9663dfe17733cc7de841f48e9e2770d911c599 | [
"MIT"
] | null | null | null | lib/core_tools/tools.py | rocketcapital-ai/competition_submission | bb9663dfe17733cc7de841f48e9e2770d911c599 | [
"MIT"
] | null | null | null | import base58
import datetime
import json
import os
import pandas as pd
import requests
import shutil
import time
import web3
import yaml
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from decimal import Decimal
from typing import Any, Callable
from web3 import types
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
CFG_DIR = os.path.abspath('{}//..//..//cfg_files'.format(CURRENT_DIR))
with open("{}//cfg.yml".format(CFG_DIR), "r") as config_file:
CFG = yaml.safe_load(config_file)
TOKEN_ADDRESS = CFG['LIVE']['TOKEN']
COMPETITION_ADDRESS = CFG['LIVE']['COMPETITION']
SUBMISSION_DIRECTORY = os.path.abspath('{}//..//..//{}'.format(CURRENT_DIR, CFG['SUBMISSION_FOLDER_NAME']))
ENCRYPTED_SUBMISSIONS_DIRECTORY = os.path.abspath('{}//..//..//{}'.format(CURRENT_DIR, CFG['ENCRYPTED_SUBMISSIONS']))
| 41.853556 | 148 | 0.674298 |
ac75e12c17c4b689ad5e95e21f9b92a7a82c808e | 2,108 | py | Python | my_loc/__init__.py | PIYUSH-GEEK/my_loc | 777eeefec3bc29f03c1be956037c10bf8457dfc9 | [
"MIT"
] | 1 | 2019-08-18T07:06:36.000Z | 2019-08-18T07:06:36.000Z | my_loc/__init__.py | PIYUSH-GEEK/my_loc | 777eeefec3bc29f03c1be956037c10bf8457dfc9 | [
"MIT"
] | null | null | null | my_loc/__init__.py | PIYUSH-GEEK/my_loc | 777eeefec3bc29f03c1be956037c10bf8457dfc9 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
| 22.913043 | 80 | 0.624763 |
ac7655238bec5c95a9a31d91cc90421f9c35aee8 | 1,045 | py | Python | Python3/03_Longest_Substring_Without_Repeating_Characters.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | 1 | 2018-04-28T09:07:11.000Z | 2018-04-28T09:07:11.000Z | Python3/03_Longest_Substring_Without_Repeating_Characters.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | 1 | 2018-02-24T16:26:30.000Z | 2018-02-24T16:26:44.000Z | Python3/03_Longest_Substring_Without_Repeating_Characters.py | yangjiahao106/LeetCode | c30ba0ef06f444951f7ab8eee495ac43613d7f4f | [
"RSA-MD"
] | null | null | null | #! python3
# __author__ = "YangJiaHao"
# date: 2018/1/26
if __name__ == '__main__':
solution = Solution()
theMax = solution.lengthOfLongestSubstring("aaaabc")
print(theMax)
| 23.222222 | 56 | 0.453589 |
ac78bb16539f516e7b674abcc8a2e5d7d2d059dd | 2,636 | py | Python | youwol/backends/cdn/resources_initialization.py | youwol/py-youwol | 85a8877e302c9da1aea168bf1d964d19036c1134 | [
"MIT"
] | null | null | null | youwol/backends/cdn/resources_initialization.py | youwol/py-youwol | 85a8877e302c9da1aea168bf1d964d19036c1134 | [
"MIT"
] | 1 | 2022-03-14T09:40:15.000Z | 2022-03-14T09:40:15.000Z | youwol/backends/cdn/resources_initialization.py | youwol/py-youwol | 85a8877e302c9da1aea168bf1d964d19036c1134 | [
"MIT"
] | null | null | null | import asyncio
import os
from youwol_utils import WhereClause, QueryBody, Query, Path, flatten
from .configurations import Configuration
from .utils import format_download_form, post_storage_by_chunk, md5_from_folder
from .utils_indexing import format_doc_db_record, post_indexes, get_version_number_str
| 42.516129 | 115 | 0.685129 |
ac79385f0f5c532c4496afb0999dcc78d78a4e70 | 174 | py | Python | Python32/hackeandoface3.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | null | null | null | Python32/hackeandoface3.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | null | null | null | Python32/hackeandoface3.py | andersonsilvade/python_C | ffc00184883089f1c2d9b8a6c32503b2c8b8d035 | [
"MIT"
] | 1 | 2020-11-04T08:36:28.000Z | 2020-11-04T08:36:28.000Z | import urllib.request
import json
url = 'http://graph.facebook.com/fmasanori'
resp = urllib.request.urlopen(url).read()
data = json.loads(resp.decode('utf-8'))
print(data)
| 19.333333 | 43 | 0.735632 |
ac79cb182a550452c6328b4a381ff03422af574d | 1,769 | py | Python | oasislmf/cli/admin.py | fl-ndaq/OasisLMF | 921718bfad2eb12844960df7f7330284d4e0bedc | [
"BSD-3-Clause"
] | 88 | 2018-03-24T11:57:10.000Z | 2022-03-21T13:04:41.000Z | oasislmf/cli/admin.py | fl-ndaq/OasisLMF | 921718bfad2eb12844960df7f7330284d4e0bedc | [
"BSD-3-Clause"
] | 558 | 2018-03-14T14:16:30.000Z | 2022-03-29T12:48:14.000Z | oasislmf/cli/admin.py | fl-ndaq/OasisLMF | 921718bfad2eb12844960df7f7330284d4e0bedc | [
"BSD-3-Clause"
] | 41 | 2018-04-09T11:13:12.000Z | 2021-10-05T14:43:11.000Z | __all__ = [
'AdminCmd',
'CreateComplexModelCmd',
'CreateSimpleModelCmd',
'EnableBashCompleteCmd',
]
from argparse import RawDescriptionHelpFormatter
from .command import OasisBaseCommand, OasisComputationCommand
| 30.5 | 76 | 0.70944 |
ac7ad18388f99ac970094896775d9050ae74ed49 | 1,802 | py | Python | examples/spend_non_std_tx.py | kanzure/python-bitcoin-utils | a75b470676edb70bd71cb6a57e7e86f78ccc63ce | [
"MIT"
] | null | null | null | examples/spend_non_std_tx.py | kanzure/python-bitcoin-utils | a75b470676edb70bd71cb6a57e7e86f78ccc63ce | [
"MIT"
] | null | null | null | examples/spend_non_std_tx.py | kanzure/python-bitcoin-utils | a75b470676edb70bd71cb6a57e7e86f78ccc63ce | [
"MIT"
] | null | null | null | # Copyright (C) 2018 The python-bitcoin-utils developers
#
# This file is part of python-bitcoin-utils
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoin-utils, including this file, may be copied,
# modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from bitcoinutils.setup import setup
from bitcoinutils.transactions import Transaction, TxInput, TxOutput
from bitcoinutils.keys import P2pkhAddress
from bitcoinutils.script import Script
#
# Note that a non-standard transaction can only be included in a block if a
# miner agrees with it. For this to work one needs to use a node setup up
# for regtest so that you can mine your own blocks; unless you mine your own
# testnet/mainnet blocks.
# Node's config file requires:
# regtest=1
# acceptnonstdtxn=1
#
if __name__ == "__main__":
main()
| 34.653846 | 89 | 0.743618 |
ac7e3f648dd1532e3a366df0f18eb0ba06867a86 | 801 | py | Python | src/nutman_field_names.py | hudsonburgess/nutcracker | 2533d7659873d1ec75beb251f941e8a90bdebb89 | [
"MIT"
] | null | null | null | src/nutman_field_names.py | hudsonburgess/nutcracker | 2533d7659873d1ec75beb251f941e8a90bdebb89 | [
"MIT"
] | null | null | null | src/nutman_field_names.py | hudsonburgess/nutcracker | 2533d7659873d1ec75beb251f941e8a90bdebb89 | [
"MIT"
] | null | null | null |
f = open('../data/8-20-17.nm2')
out = open('../data/8-20-17-field-names.txt', 'w')
for line in f:
if line.startswith('FieldName'):
field_name = get_field_name_from_line(line)
field_name = remove_description(field_name)
field_name_list = split_multiple_field_names(field_name)
for name in field_name_list:
out.writelines([name, '\n'])
f.close()
out.close() | 27.62069 | 64 | 0.627965 |
ac7e4bb5b2639e3ce5fc5958996d880251838bdd | 317 | py | Python | setup.py | algerbrex/plex | 0d7096634d13ee4d695b580892894910eba6a4eb | [
"MIT"
] | 2 | 2018-02-15T16:26:54.000Z | 2021-11-08T12:26:12.000Z | setup.py | algerbrex/plex | 0d7096634d13ee4d695b580892894910eba6a4eb | [
"MIT"
] | null | null | null | setup.py | algerbrex/plex | 0d7096634d13ee4d695b580892894910eba6a4eb | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='plex',
version='0.1.0',
author='Christian Dean',
author_email='c1dea2n@gmail.com',
packages=['plex'],
license='MIT',
platforms='any',
description='Generic, lighweight regex based lexer.',
long_description=open('README.md').read(),
)
| 21.133333 | 57 | 0.649842 |
ac8080a2e2bb7b553d1d31e52508d8e6de00b522 | 595 | py | Python | kwikposts/admin.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | kwikposts/admin.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | kwikposts/admin.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | from django.contrib import admin
from .models import KwikPost, Comment, Like
# Register your models here.
| 25.869565 | 80 | 0.710924 |
ac82ebe59874721741e7d60b6d0389e4f4666104 | 3,113 | py | Python | solvebio/resource/solveobject.py | PolinaBevad/solvebio-python | f6c736baa01b5a868a385cb0baf8f9dc2007cec3 | [
"MIT"
] | 14 | 2015-01-07T15:31:00.000Z | 2021-11-02T10:03:28.000Z | solvebio/resource/solveobject.py | PolinaBevad/solvebio-python | f6c736baa01b5a868a385cb0baf8f9dc2007cec3 | [
"MIT"
] | 200 | 2015-01-26T17:12:21.000Z | 2022-01-14T08:59:30.000Z | solvebio/resource/solveobject.py | PolinaBevad/solvebio-python | f6c736baa01b5a868a385cb0baf8f9dc2007cec3 | [
"MIT"
] | 9 | 2015-02-18T22:49:28.000Z | 2020-09-01T17:48:35.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
import sys
from ..client import client
from .util import json
| 28.3 | 73 | 0.603919 |
ac844765c4ce5c32dfa9aa2da4c0094d7fbd95aa | 154 | py | Python | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | [
"Apache-2.0"
] | null | null | null | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | [
"Apache-2.0"
] | null | null | null | convert.py | Resist4263/span-aste-1 | 13815a262638592506b445315d03df8db965947b | [
"Apache-2.0"
] | null | null | null | from gensim.scripts.glove2word2vec import glove2word2vec
(count, dimensions) = glove2word2vec("dataset/glove.42B.300d.txt", "dataset/cropus/42B_w2v.txt") | 51.333333 | 96 | 0.811688 |
ac854af38b11787c1689126c56bfc2405c99c2c4 | 4,970 | py | Python | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | [
"CC0-1.0"
] | null | null | null | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | [
"CC0-1.0"
] | 1 | 2016-04-16T08:16:36.000Z | 2016-04-16T08:16:36.000Z | src/pcbLibraryManager/libraries/librarySwitches.py | NiceCircuits/pcbLibraryManager | df83f24b5a558e0f7f72629d9ef7c82fcb9e6718 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 22:17:33 2015
@author: piotr at nicecircuits.com
"""
from libraryManager.library import libraryClass
from footprints.footprintSmdDualRow import footprintSmdDualRow
from libraryManager.part import part
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.footprintPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.generateLibraries import generateLibraries
if __name__ == "__main__":
generateLibraries([librarySwitches()]) | 38.230769 | 88 | 0.597988 |
ac85b889248a7fe66df90411b1896a2b3cc25961 | 131 | py | Python | Codeforces/problems/0799/A/799A.py | object-oriented-human/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | 2 | 2021-07-27T10:46:47.000Z | 2021-07-27T10:47:57.000Z | Codeforces/problems/0799/A/799A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | Codeforces/problems/0799/A/799A.py | foooop/competitive | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | [
"MIT"
] | null | null | null | import math
n, t, k, d = map(int, input().split())
x = math.ceil(n/k) * t
if (d + t) < x:
print("YES")
else:
print("NO") | 13.1 | 38 | 0.503817 |
ac8615af3d9f334ab252cb1f100f3ad3f649766c | 3,159 | py | Python | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | [
"MIT"
] | null | null | null | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | [
"MIT"
] | null | null | null | model.py | ganeshpc/flower-classification | aa836389f04b40a4368d4c1bf2c13cc44ee51af1 | [
"MIT"
] | null | null | null | import keras
from keras import layers
from keras.layers import Dropout, Dense
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import tensorflow_hub as hub
import cv2
import pandas as p
IMAGE_SHAPE = (224, 224) #(HEIGHT, WIDTH)
TRAINING_DATA_DIRECTORY = '/content/drive/My Drive/Colab Notebooks/FlowerClassification/data/TrainingData'
datagen_kwargs = dict(rescale=1./255, validation_split=.2)
if __name__ == '__main__':
model = get_mobile_net_model()
model = train_model(model)
evaluate_model(model)
| 24.679688 | 106 | 0.709085 |
ac86900c935920ddf1e7d5f2d9ab4b10680baaec | 12,421 | py | Python | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | [
"MIT"
] | 1 | 2022-03-17T10:37:17.000Z | 2022-03-17T10:37:17.000Z | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | [
"MIT"
] | null | null | null | psg_utils/io/header/header_standardizers.py | perslev/sleep-utils | 9f9edf67f4cac6d8361243b4153bfe6351314844 | [
"MIT"
] | null | null | null | """
A set of functions for extracting header information from PSG objects
Typically only used internally in from unet.io.header.header_extractors
Each function takes some PSG or header-like object and returns a dictionary with at least
the following keys:
{
'n_channels': int,
'channel_names': list of strings,
'sample_rate': int
'date': datetime or None
'length': int
}
Note: length gives the number of samples, divide by sample_rate to get length_sec
"""
import logging
import warnings
import numpy as np
import h5py
from datetime import datetime
from psg_utils.errors import (MissingHeaderFieldError, HeaderFieldTypeError,
LengthZeroSignalError, H5VariableAttributesError,
VariableSampleRateError, FloatSampleRateWarning)
logger = logging.getLogger(__name__)
def _assert_header(header):
    """
    Validates a standardized header dict.

    Ensures that:
      1) all required field names are present
      2) every value has one of the expected types (exact type match, not isinstance)
      3) the 'length' value is greater than 0
    Also emits a warning if the channel list contains duplicate names.

    Args:
        header: dict

    Returns: dict (the validated header, unchanged)
    """
    required_fields = (
        ("n_channels", [int]),
        ("channel_names", [list]),
        ("sample_rate", [int]),
        ("date", [datetime, type(None)]),
        ("length", [int])
    )
    for name, allowed_types in required_fields:
        if name not in header:
            raise MissingHeaderFieldError(f"Missing value '{name}' from header '{header}'. "
                                          "This could be an error in the code implementation. "
                                          "Please raise this issue on GitHub.")
        actual_type = type(header[name])
        if actual_type not in allowed_types:
            raise HeaderFieldTypeError(f"Field {name} of type {actual_type} was not expected, expected one of {allowed_types}")
    if header['length'] <= 0:
        raise LengthZeroSignalError(f"Expected key 'length' to be a non-zero integer, "
                                    f"but header {header} has value {header['length']}")
    # Warn on duplicate channels
    from psg_utils.io.channels.utils import check_duplicate_channels
    check_duplicate_channels(header['channel_names'], raise_or_warn="warn")
    return header
def _sample_rate_as_int(sample_rate, raise_or_warn='warn'):
"""
Returns the sample rate rounded to the nearest whole integer.
If the integer sample rate is not exactly (as determined by np.isclose) equal to the original,
possibly floating, value an warning is issued if raise_or_warn="warn" or an FloatSampleRateError
is raised if raise_or_warn="raise".
Raises ValueError if raise_or_warn not in ('raise', 'warn', 'warning').
Args:
sample_rate: int, float sample rate
Returns:
sample_rate, int
"""
new_sample_rate = int(np.round(sample_rate))
if not np.isclose(new_sample_rate, sample_rate):
s = f"The loaded file has a float sample rate of value {sample_rate} which is not exactly equal to the " \
f"rounded integer value of {new_sample_rate}. Please note: Integer value {new_sample_rate} will be used."
if raise_or_warn.lower() == "raise":
raise FloatSampleRateWarning(s)
elif raise_or_warn.lower() in ("warn", "warning"):
warnings.warn(s, FloatSampleRateWarning)
else:
raise ValueError("raise_or_warn argument must be one of 'raise' or 'warn'.")
return new_sample_rate
def _standardized_edf_header(raw_edf, channel_names_overwrite=None):
    """
    Header extraction function for RawEDF and Raw objects.
    Reads the number of channels, channel names and sample rate properties
    If existing, reads the date information as well.

    channel_names_overwrite allows passing a list of channel names to use instead of
    those loaded by MNE per default. This is useful e.g. to set the raw EDF names in the
    header instead of the truncated / renamed (on duplicates) used by MNE.

    Returns:
        Header information as dict
    """
    # Each tuple below follows the format:
    # 1) output name, 2) edf_obj name, 3) function to apply to the read
    # value, 4) whether a missing value should raise an error.
    header_map = [("n_channels", "nchan", int, True),
                  ("channel_names", "ch_names", list, True),
                  ("sample_rate", "sfreq", _sample_rate_as_int, True),
                  ("date", "meas_date", datetime.utcfromtimestamp, False)]
    if isinstance(raw_edf.info["meas_date"], (tuple, list)):
        # presumably (timestamp, microseconds) from older MNE versions — only
        # the timestamp part is kept; TODO confirm against supported MNE versions
        assert raw_edf.info["meas_date"][1] == 0
        raw_edf.info["meas_date"] = raw_edf.info["meas_date"][0]
    header = {}
    for renamed, org, transform, raise_err in header_map:
        value = raw_edf.info.get(org)
        try:
            value = transform(value)
        except Exception as e:
            if raise_err:
                raise HeaderFieldTypeError("Missing or invalid value in EDF file for key {} "
                                           "- got {}".format(org, value)) from e
        header[renamed] = value
    header["length"] = len(raw_edf)
    # BUGFIX: the previous 'list(channel_names_overwrite) or ...' raised
    # TypeError ('NoneType' object is not iterable) whenever no overwrite was
    # passed, because list(None) is evaluated before the 'or'. Only replace
    # the names when an overwrite sequence is actually provided.
    if channel_names_overwrite:
        header["channel_names"] = list(channel_names_overwrite)
    return _assert_header(header)
def _standardized_wfdb_header(wfdb_record):
    """
    Header extraction function for WFDB Record objects.
    Reads the number of channels, channel names and sample rate properties
    If existing, reads the date information as well.

    Returns:
        Header information as dict
    """
    # Each spec is: (standardized name, attribute on the record,
    #                converter to apply, whether a failure should raise).
    attribute_specs = (("n_channels", "n_sig", int, True),
                       ("channel_names", "sig_name", list, True),
                       ("sample_rate", "fs", _sample_rate_as_int, True),
                       ("date", "base_date", datetime.utcfromtimestamp, False),
                       ("length", "sig_len", int, True))
    header = {}
    for std_name, attr_name, convert, required in attribute_specs:
        raw_value = getattr(wfdb_record, attr_name, None)
        try:
            raw_value = convert(raw_value)
        except Exception as e:
            if required:
                raise HeaderFieldTypeError("Missing or invalid value in WFDB file for key {} "
                                           "- got {}".format(attr_name, raw_value)) from e
        header[std_name] = raw_value
    return _assert_header(header)
def _get_unique_value(items):
"""
Takes a list of items, checks that all are equal (in value, ==) and returns the unique value.
Returns None if the list is empty.
Raises ValueError if not all items are not equal.
Args:
items: List
Returns:
The unique item in list
"""
if len(items) == 0:
return None
for item in items[1:]:
if item != items[0]:
raise H5VariableAttributesError(f"The input list '{items}' contains more than 1 unique value")
return items[0]
def _standardized_h5_header(h5_file, channel_group_name="channels"):
    """
    Header extraction function for h5py.File objects.
    The object must:
    - Have an attribute 'sample_rate'
    - Have a group named {channel_group_name} which stores the data for all channels as
    Dataset entries under the group (can be nested in deeper groups too)
    Can have:
    - An attribute 'date' which gives a date string or unix timestamp integer
    Currently raises an error if any attribute in ('date', 'sample_rate', 'length') are not equal among all
    datasets in the archive.
    All attributes may be set at any node, and will affect any non-attributed node deeper in the tree.
    E.g. setting the 'sample_rate' attribute on the root note will have it affect all datasets, unless
    the attribute is set on deeper nodes too in which case the later will overwrite the root attribute for
    all its nested, un-attributed children.
    Returns:
        Header information as dict
    """
    # Traverse the h5 archive for datasets and assigned attributes
    # (maps dataset path -> inherited/overridden attribute dict)
    h5_content = _traverse_h5_file(h5_file[channel_group_name], attributes=h5_file.attrs)
    header = {
        "channel_names": [],
        "channel_paths": {},  # will store channel_name: channel path entries
        "sample_rate": [],
        "date": [],
        "length": []
    }
    for channel_path, attributes in h5_content.items():
        # The channel name is the last component of the dataset path
        channel_name = channel_path.split("/")[-1]
        header["channel_paths"][channel_name] = channel_path
        header["channel_names"].append(channel_name)
        header["sample_rate"].append(attributes.get("sample_rate"))
        header["date"].append(attributes.get("date"))
        header["length"].append(attributes.get("length"))
    header["n_channels"] = len(h5_content)
    # Ensure all dates, lengths and sample rate attributes are equal
    # TODO: Remove this restriction at least for sample rates; requires handling at PSG loading time
    try:
        header["date"] = _get_unique_value(header["date"])
        header["sample_rate"] = _sample_rate_as_int(_get_unique_value(header["sample_rate"]))
        header["length"] = int(_get_unique_value(header["length"]))
    except H5VariableAttributesError as e:
        raise H5VariableAttributesError("Datasets stored in the specified H5 archive differ with respect to one or "
                                        "multiple of the following attributes: 'date', 'sampling_rate', 'length'. "
                                        "All datasets must currently match with respect to those attributes.") from e
    # Get datetime date or set to None
    # (integer/np-integer dates are treated as unix timestamps; strings and
    # anything else that is not already a datetime are discarded)
    date = header["date"]
    if not isinstance(date, str) and (isinstance(date, int) or np.issubdtype(date, np.integer)):
        date = datetime.utcfromtimestamp(date)
    elif not isinstance(date, datetime):
        date = None
    header["date"] = date
    return _assert_header(header)
def _standardized_bin_header(raw_header):
    """
    Header extraction function for custom dict type headers for data in .bin files.
    Raw header has structure:

    {"CHX": [list of channel inds], "NAME": [list of channel names],
    "TYPE": [list of channel types], "FS": [list of channel sample rates]}

    All values stored in the header are strings and should be cast to ints. etc as appropriate
    for header standardization.

    Currently raises an error if all attribute in header["FS"] are not equal
    (i.e., same sample rate is required for all channels).

    Returns:
        Header information as dict
    """
    # Assert upper case keys
    raw_header = {key.upper(): values for key, values in raw_header.items()}
    # Order header entries according to CHX column.
    # BUGFIX: use the builtin 'int' as dtype — the deprecated 'np.int' alias
    # was removed in NumPy 1.24 and raised AttributeError here.
    order = np.argsort(np.array(raw_header['CHX'], dtype=int))
    raw_header = {key: ([entry[i] for i in order]
                        if isinstance(entry, (list, tuple, np.ndarray))
                        else entry)
                  for key, entry in raw_header.items()}
    # Assert that all samples rates are equal
    sample_rates = np.array(raw_header["FS"], dtype=np.int32)
    if not (sample_rates[0] == sample_rates).all():
        raise VariableSampleRateError(f"Sample rates in header {raw_header} are not "
                                      f"all equal with rates: {sample_rates}. "
                                      f"The data loaders for .bin formatted files currently "
                                      f"support only files with all channels sampled at equal rates.")
    # Build standardized header
    header = {
        "n_channels": len(raw_header["NAME"]),
        "channel_names": list(raw_header["NAME"]),
        "sample_rate": _sample_rate_as_int(sample_rates[0]),
        "date": None,
        "length": int(raw_header["LENGTH"]),
        "channel_types": [type_.upper() for type_ in raw_header.get("TYPE", [])]
    }
    return _assert_header(header)
| 41.821549 | 120 | 0.646164 |
ac8b1415e78b57c9f8a18413e55459164d6e3fa4 | 2,620 | py | Python | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | [
"MIT"
] | 16 | 2019-12-19T09:23:05.000Z | 2022-01-25T18:34:36.000Z | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | [
"MIT"
] | 4 | 2020-06-07T08:13:29.000Z | 2021-09-04T09:34:37.000Z | py/hover/cfg.py | HomeSmartMesh/raspi | 43ecd07c130144c72d7c7da677eab1980d555cc2 | [
"MIT"
] | 3 | 2020-08-28T13:00:30.000Z | 2020-12-17T09:10:19.000Z | import sys,os
import json
import logging as log
import socket
from collections import OrderedDict
import datetime
from platform import system as system_name # Returns the system/OS name
from subprocess import call as system_call # Execute a shell command
def ping(host):
    """
    Returns True if host (str) responds to a ping request.
    Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
    """
    # Windows ping takes '-n <count>', POSIX ping takes '-c <count>'
    count_flag = '-c'
    if system_name().lower() == 'windows':
        count_flag = '-n'
    # e.g. ['ping', '-c', '1', 'google.com']; exit status 0 means a reply arrived
    return system_call(['ping', count_flag, '1', host]) == 0
# -------------------- config --------------------
def get_local_json():
    """fetches the config.json file in the local directory
    if config_hostname.json is found it is used over the default one

    Returns:
        The parsed configuration, or None when no config file exists.
    """
    dirname = os.path.dirname(sys.argv[0])
    if len(dirname) == 0:
        dirname = "."
    # Host-specific file (config_<hostname>.json) takes precedence over config.json
    candidates = [
        dirname + '/' + "config_" + socket.gethostname() + ".json",
        dirname + '/' + "config.json",
    ]
    for config_file in candidates:
        if os.path.isfile(config_file):
            # BUGFIX: the fallback branch printed a literal '%s' because it
            # mixed %-formatting with print(); both branches now print the same way.
            print("loading: ", config_file)
            # Use a context manager so the file handle is closed deterministically
            # (the old json.load(open(...)) leaked the handle).
            with open(config_file) as f:
                return json.load(f)
    print("Fatal error 'config.json' not found")
    return None
# -------------------- config --------------------
| 34.473684 | 127 | 0.594656 |
ac8b3ba743761efaa0df8e7bef2daa017b63a13e | 27,156 | py | Python | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | guillotina/schema/tests/test__bootstrapfields.py | diefenbach/guillotina | a8c7247fca8294752901f643b35c5ed1c5dee76d | [
"BSD-2-Clause"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2012 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# flake8: noqa
import unittest
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ValidatedPropertyTests),
unittest.makeSuite(DefaultPropertyTests),
unittest.makeSuite(FieldTests),
unittest.makeSuite(ContainerTests),
unittest.makeSuite(IterableTests),
unittest.makeSuite(OrderableTests),
unittest.makeSuite(MinMaxLenTests),
unittest.makeSuite(TextTests),
unittest.makeSuite(TextLineTests),
unittest.makeSuite(PasswordTests),
unittest.makeSuite(BoolTests),
unittest.makeSuite(IntTests),
))
| 33.902622 | 79 | 0.647739 |
ac8b7c1d0b88747e30dd4b04da5a9e3ae9540f10 | 2,410 | py | Python | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | [
"MIT"
] | null | null | null | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | [
"MIT"
] | null | null | null | project.py | pedroalvesfilho/rest_flask_render0 | 81b7e1e865d083718a60e5ec38a0ca8f889e14f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from flask import Flask
from flask import render_template, request
app = Flask(__name__)
"""
# https://www.pluralsight.com/guides/manipulating-lists-dictionaries-python
# A list is a mutable, ordered sequence of items.
# list = ['a', 'b', 'c']
# list[0]
# A dictionary is a mutable, unordered set of key-value pairs where each key must be unique.
# dictionary = {}
# In Python, a dictionary is an unordered collection of items. For example:
# dictionary = {'key' : 'value', 'key_2': 'value_2'}
# dictionary['key']
"""
# Fake restaurants
restaurants = [ # Start a list `[...]` with dictionaries `{...}` inside
{'name': 'The CRUDdy Crab'}, # 'index': '0'
{'name': 'Blue Burger'}, # 'index': '1'
{'name': 'Taco Hut'} # 'index': '3'
]
# >>> print(restaurants)
# [{'name': 'The CRUDdy Crab'}, {'name': 'Blue Burger'}, {'name': 'Taco Hut'}]
# >>>
# >>> print(restaurants[0])
# {'name': 'The CRUDdy Crab'}
# >>>
# >>> print(restaurants[0]['name'])
# The CRUDdy Crab
# >>>
# Fake Menu Items
items = [
{'name': 'Cheese Pizza', 'description': 'made with fresh cheese',
'price': '$5.59', 'course': 'Entree'}, # 'index': '0'
{'name': 'Cheese Pizza2', 'description': 'made with fresh cheese2',
'price': '$6.59', 'course': 'Entree2'}, # 'index': '1'
{'name': 'Cheese Pizza3', 'description': 'made with fresh cheese3',
'price': '$7.59', 'course': 'Entree3'}, # 'index': '2'
{'name': 'Cheese Pizza4', 'description': 'made with fresh cheese4',
'price': '$8.59', 'course': 'Entree4'}, # 'index': '3'
]
if __name__ == '__main__':
    # NOTE(review): a hard-coded secret key and debug=True are acceptable only
    # for local development — do not deploy with these settings.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='localhost', port=5000)
| 33.472222 | 92 | 0.607469 |
ac8b7fc77b1b29feaa2f6078b42fbbccbd054d3d | 1,971 | py | Python | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_1894_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_1894
# title: Circular dependencies between computed fields crashs the engine
# decription:
# Checked on LI-T4.0.0.419 after commit 19.10.2016 18:26
# https://github.com/FirebirdSQL/firebird/commit/6a00b3aee6ba17b2f80a5b00def728023e347707
# -- all OK.
#
# tracker_id: CORE-1894
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.2
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table t (
n integer,
n1 computed by (n),
n2 computed by (n1)
);
recreate table t2 (
n integer,
c1 computed by (1),
c2 computed by (c1)
);
alter table t alter n1 computed by (n2);
commit;
set autoddl off;
alter table t2 drop c1;
alter table t2 add c1 computed by (c2);
commit;
select * from t;
select * from t2; -- THIS LEAD SERVER CRASH (checked on WI-T4.0.0.399)
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stderr_1 = """
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-Cannot have circular dependencies with computed fields
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-cannot delete
-COLUMN T2.C1
-there are 1 dependencies
Statement failed, SQLSTATE = 42000
Cannot have circular dependencies with computed fields
Statement failed, SQLSTATE = 42000
unsuccessful metadata update
-cannot delete
-COLUMN T2.C1
-there are 1 dependencies
"""
| 24.036585 | 106 | 0.650431 |
3ba2a6cd1ef7931310fdf480d5b0bc1594f43a55 | 1,296 | py | Python | lib/pykdlib/modules.py | bin601/pkyd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | [
"MIT"
] | 1 | 2020-05-31T20:11:03.000Z | 2020-05-31T20:11:03.000Z | lib/pykdlib/modules.py | bin601/pykd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | [
"MIT"
] | null | null | null | lib/pykdlib/modules.py | bin601/pykd | 2b60c0ebd81e9e3e29f88ea60691d5c403df2160 | [
"MIT"
] | 1 | 2020-11-13T11:12:47.000Z | 2020-11-13T11:12:47.000Z |
#
# Modules Info
#
import pykd
moduleList = []
reloadModules()
| 17.28 | 133 | 0.549383 |
3ba3da7e08c14ac9df758e9e45e7cb972fc56eb2 | 139 | py | Python | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 4 | 2017-04-08T12:55:11.000Z | 2020-12-05T21:09:31.000Z | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 7 | 2018-04-23T01:30:33.000Z | 2020-10-30T23:56:14.000Z | compiled/construct/repeat_eos_u4.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 6 | 2017-04-08T11:41:14.000Z | 2020-10-30T22:47:31.000Z | from construct import *
from construct.lib import *
repeat_eos_u4 = Struct(
'numbers' / GreedyRange(Int32ul),
)
_schema = repeat_eos_u4
| 15.444444 | 34 | 0.755396 |
3ba72f829fd19f1024caa6c030fe4b09746f7a00 | 2,981 | py | Python | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | [
"MIT"
] | null | null | null | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | [
"MIT"
] | null | null | null | Python/simpleencrypt/aes256.py | shreyasnayak/SimpleEncrypt | 32223ee7a52baf186c53ac065cb61e9a32e4d20f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# aes256.py
# This file is part of SimpleEncrypt project (https://github.com/shreyasnayak/SimpleEncrypt)
#
# Copyright Shreyas Nayak (c) 2021-2022 <shreyasnayak21@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = "Shreyas Nayak"
__email__ = "shreyasnayak21@gmail.com"
__copyright__ = "Copyright 2021-2022 Shreyas Nayak"
__license__ = "MIT"
from ctypes import *
# Pre-load OpenSSL so the SimpleEncrypt shared library can resolve its crypto
# symbols; RTLD_GLOBAL makes those symbols visible to subsequently loaded libs.
CDLL("libcrypto.so.1.1", mode = RTLD_GLOBAL)
CDLL("libssl.so.1.1", mode = RTLD_GLOBAL)
lib = cdll.LoadLibrary('/usr/local/lib/libSimpleEncrypt.so')
# Declare C signatures so ctypes marshals arguments/returns correctly.
# freeme(void*) releases buffers allocated by the native library.
lib.freeme.argtypes = c_void_p,
lib.freeme.restype = None
# encryptMessage/decryptMessage(char*, char*, char*) -> void* (heap C string)
lib.encryptMessage.argtypes = [c_char_p,c_char_p,c_char_p]
lib.encryptMessage.restype = c_void_p
lib.decryptMessage.argtypes = [c_char_p,c_char_p,c_char_p]
lib.decryptMessage.restype = c_void_p
def encrypt(plainText, key, iv):
    """
    Encrypt text with the initiation vector and key

    @param plainText: string Text to encrypt
    @param key: string key
    @param iv: string initiation vector
    @type plainText: string
    @type key: string
    @type iv: string
    @rtype: string
    """
    # The native call returns a pointer to a heap-allocated C string.
    native_ptr = lib.encryptMessage(c_char_p(plainText), c_char_p(key), c_char_p(iv))
    # Copy the bytes into a Python object before releasing the native buffer.
    cipher_text = cast(native_ptr, c_char_p).value
    lib.freeme(native_ptr)
    return cipher_text
def decrypt(ciphertext, key, iv):
    """
    Encrypt text with the initiation vector and key

    @param ciphertext: string ciphertext to decrypt
    @param key: string key
    @param iv: string initiation vector
    @type ciphertext: string
    @type key: string
    @type iv: string
    @rtype: string
    """
    # The native call returns a pointer to a heap-allocated C string.
    native_ptr = lib.decryptMessage(c_char_p(ciphertext), c_char_p(key), c_char_p(iv))
    # Copy the bytes into a Python object before releasing the native buffer.
    plain_text = cast(native_ptr, c_char_p).value
    lib.freeme(native_ptr)
    return plain_text
if __name__ == '__main__':
    # Smoke test: decrypt(encrypt(x)) should round-trip and print the original text.
    iv = b'171A065A7675A09AECEC118DBC008A822A041FC2EBF2B3E4CF7A4C966E5D5897'
    key = b'2B5442AD8739992F'
    plainText = b'TEXT'
    print(decrypt(encrypt(plainText,key, iv),key,iv))
| 36.353659 | 92 | 0.739349 |
3ba741e941c63d039ed8b54e0f39f036cca0c01c | 1,735 | py | Python | tests/widgets/test_error_dialog.py | sisoe24/NukeServerSocket | fbb95a609fcaf462aeb349597fae23dda67bf49b | [
"MIT"
] | 12 | 2021-08-01T09:41:24.000Z | 2021-12-03T02:53:10.000Z | tests/widgets/test_error_dialog.py | sisoe24/NukeServerSocket | fbb95a609fcaf462aeb349597fae23dda67bf49b | [
"MIT"
] | 5 | 2021-09-11T16:51:01.000Z | 2022-02-18T16:20:29.000Z | tests/widgets/test_error_dialog.py | sisoe24/NukeServerSocket | fbb95a609fcaf462aeb349597fae23dda67bf49b | [
"MIT"
] | 2 | 2021-08-03T16:02:27.000Z | 2021-08-06T07:51:54.000Z | """Test module for the Error dialog widget."""
import os
import logging
import pytest
from PySide2.QtGui import QClipboard
from src.widgets import error_dialog
from src.about import about_to_string
def test_report_return_value(report):
    """Check if prepare report return is a tuple."""
    # 'report' is a pytest fixture (presumably defined in conftest) holding
    # the prepared error report — TODO confirm fixture location
    assert isinstance(report, tuple)
def test_prepare_report_link(report):
    """Check if error dialog returns the issues link when clicking Report."""
    # the prepared report exposes the GitHub issue tracker URL under .link
    assert report.link == 'https://github.com/sisoe24/NukeServerSocket/issues'
def test_prepare_report_clipboard(report):
    """Check if report gets copied into clipboard."""
    # preparing the report copies the about-info text into the system clipboard
    assert 'NukeServerSocket' in QClipboard().text()
def test_prepare_report_file(report, error_log_path):
    """Check if the report file has the about to string information."""
    # 'error_log_path' fixture points at the log file written when the report
    # was prepared; it must contain the about_to_string() dump
    with open(error_log_path) as file:
        assert about_to_string() in file.read()
def test_get_critical_logger():
    """Check if method returns the critical logger file handler."""
    logger = error_dialog._get_critical_logger()
    # the handler is registered under the name 'Critical' and writes to a file
    assert logger.name == 'Critical'
    assert isinstance(logger, logging.FileHandler)
| 27.539683 | 78 | 0.725648 |
3ba791283dc54edfe51e5015658bbc050291ab63 | 3,854 | py | Python | DataStructure/Table/HashTable.py | zhangsifan/ClassicAlgorighthms | b769d46727279cf6d8532819076a3fef496d05c7 | [
"Apache-2.0"
] | 27 | 2021-04-21T08:17:25.000Z | 2021-06-30T07:04:49.000Z | DataStructure/Table/HashTable.py | zhangsifan/ClassicAlgorighthms | b769d46727279cf6d8532819076a3fef496d05c7 | [
"Apache-2.0"
] | null | null | null | DataStructure/Table/HashTable.py | zhangsifan/ClassicAlgorighthms | b769d46727279cf6d8532819076a3fef496d05c7 | [
"Apache-2.0"
] | 1 | 2021-04-21T11:26:01.000Z | 2021-04-21T11:26:01.000Z | # -*- coding: utf-8 -*-#
'''
@Project : ClassicAlgorighthms
@File : HashTable.py
@USER : ZZZZZ
@TIME : 2021/4/25 18:25
'''
if __name__ == "__main__":
ht = HashTable()
#
ht.init([2, 3, 5, 8, 9, 10, 2, 9, 1, 5, 2, 1, 7, 9, 11])
print(":\n{}".format(ht))
#
ht.insert(9)
print(":\n{}".format(ht))
#
ht.delete(11)
print(":\n{}".format(ht))
#
res = ht.search(8)
print("8: {}".format(res))
| 23.644172 | 69 | 0.493254 |
3ba7a6554c459e41f8d769b9e6caea0153ae3a05 | 7,908 | py | Python | ossdbtoolsservice/query/batch.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 33 | 2019-05-27T13:04:35.000Z | 2022-03-17T13:33:05.000Z | ossdbtoolsservice/query/batch.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 31 | 2019-06-10T01:55:47.000Z | 2022-03-09T07:27:49.000Z | ossdbtoolsservice/query/batch.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 25 | 2019-05-13T18:39:24.000Z | 2021-11-16T03:07:33.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
from typing import List # noqa
from datetime import datetime
import uuid
import sqlparse
from ossdbtoolsservice.driver import ServerConnection
from ossdbtoolsservice.utils.time import get_time_str, get_elapsed_time_str
from ossdbtoolsservice.query.contracts import BatchSummary, SaveResultsRequestParams, SelectionData
from ossdbtoolsservice.query.result_set import ResultSet # noqa
from ossdbtoolsservice.query.file_storage_result_set import FileStorageResultSet
from ossdbtoolsservice.query.in_memory_result_set import InMemoryResultSet
from ossdbtoolsservice.query.data_storage import FileStreamFactory
from ossdbtoolsservice.utils.constants import PG_PROVIDER_NAME
def execute(self, conn: ServerConnection) -> None:
    """
    Execute the batch using a cursor retrieved from the given connection
    :raises DatabaseError: if an error is encountered while running the batch's query
    """
    self._execution_start_time = datetime.now()
    # Notify listeners that execution is about to begin
    if self._batch_events and self._batch_events._on_execution_started:
        self._batch_events._on_execution_started(self)
    cursor = self.get_cursor(conn)
    try:
        cursor.execute(self.batch_text)
        # Commit the transaction if autocommit is True
        if conn.autocommit:
            conn.commit()
        self.after_execute(cursor)
    except conn.database_error as error:
        # Record the failure but re-raise so callers can report the error
        self._has_error = True
        raise error
    finally:
        # We are doing this because when the execute fails for named cursors
        # cursor is not activated on the server which results in failure on close
        # Hence we are checking if the cursor was really executed for us to close it
        if cursor and cursor.rowcount != -1 and cursor.rowcount is not None:
            cursor.close()
        self._has_executed = True
        self._execution_end_time = datetime.now()
        # TODO: PyMySQL doesn't support notices from a connection
        if conn._provider_name == PG_PROVIDER_NAME:
            # Capture server notices (PostgreSQL only) and clear them on the connection
            self._notices = cursor.connection.notices
            cursor.connection.notices = []
        # Completion callback fires even on failure, since this is in 'finally'
        if self._batch_events and self._batch_events._on_execution_completed:
            self._batch_events._on_execution_completed(self)
def after_execute(self, cursor) -> None:
    """Post-execute hook: build a result set when the cursor produced one.

    ``cursor.description`` is None for statements that return no rows
    (e.g. DDL/DML), in which case there is nothing to collect.
    """
    if cursor.description is None:
        return
    self.create_result_set(cursor)
class SelectBatch(Batch):
def create_result_set(storage_type: ResultSetStorageType, result_set_id: int, batch_id: int) -> ResultSet:
    """Factory: build a result set backed by the requested storage type."""
    # File-backed storage spills rows to disk; anything else stays in memory.
    result_set_cls = (FileStorageResultSet
                      if storage_type is ResultSetStorageType.FILE_STORAGE
                      else InMemoryResultSet)
    return result_set_cls(result_set_id, batch_id)
def create_batch(batch_text: str, ordinal: int, selection: SelectionData, batch_events: BatchEvents, storage_type: ResultSetStorageType) -> Batch:
    """Build the appropriate Batch for the given SQL text.

    Plain SELECT statements get a SelectBatch (which can use a named
    cursor); SELECT INTO and CTE statements cannot run on a named
    cursor, so they — like every non-SELECT statement — fall back to a
    generic Batch.
    """
    statement = sqlparse.parse(batch_text)[0]
    if statement.get_type().lower() == 'select':
        uses_into = any(token.normalized == 'INTO'
                        for token in statement.tokens)
        uses_cte = any(token.ttype == sqlparse.tokens.Keyword.CTE
                       for token in statement.tokens)
        if not uses_into and not uses_cte:
            return SelectBatch(batch_text, ordinal, selection, batch_events, storage_type)
    return Batch(batch_text, ordinal, selection, batch_events, storage_type)
| 38.38835 | 157 | 0.69221 |
3ba806cb9a29badf3e7a080781be0d67fc995823 | 1,017 | py | Python | seqlib.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | seqlib.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | seqlib.py | rvenkatesh99/sequence_alignment | 107c262ef25ddbf025e054339bdd29efd728033a | [
"MIT"
] | null | null | null | import gzip
| 17.237288 | 43 | 0.573255 |
3ba896ffed28499b8d1d9a50c6e51c5241f414aa | 2,413 | py | Python | train.py | adamian98/LabelNoiseFlatMinimizers | 2c7a60ea0b72f8ac3a0ce3f059526440385b4f60 | [
"MIT"
] | 7 | 2021-08-21T23:45:28.000Z | 2021-12-13T13:39:38.000Z | train.py | adamian98/LabelNoiseFlatMinimizers | 2c7a60ea0b72f8ac3a0ce3f059526440385b4f60 | [
"MIT"
] | null | null | null | train.py | adamian98/LabelNoiseFlatMinimizers | 2c7a60ea0b72f8ac3a0ce3f059526440385b4f60 | [
"MIT"
] | null | null | null | import torch
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import LearningRateMonitor
from data import CIFAR10Data
from module import CIFAR10Module
from callbacks import *
from pathlib import Path
import wandb
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("name", None, "name used for wandb logger")
flags.DEFINE_string("init", None, "initial weights to use")
flags.DEFINE_integer("max_epochs", 1000, "number of epochs to run for")
flags.DEFINE_integer("precision", 32, "precision to use")
flags.DEFINE_integer("seed", 0, "random seed")
flags.DEFINE_integer("num_workers", 4, "number of workers to use for data loading")
flags.DEFINE_string("save", None, "output file to save model weights")
flags.DEFINE_bool("callbacks", True, "whether to compute gradient callbacks")
flags.DEFINE_bool(
"fullbatch", False, "whether to aggregate batches to emulate full batch training"
)
if __name__ == "__main__":
app.run(main)
| 30.544304 | 85 | 0.703274 |
3ba9c357940e99f10b2151b5ccc410817c1d8e70 | 10,559 | py | Python | test/lda/createGraphFeatures.py | bekou/graph-topic-model | 7bd99aede6c22675f738166e690174ae0917b9eb | [
"MIT"
] | 6 | 2020-01-17T13:23:35.000Z | 2022-01-15T22:49:34.000Z | learn/lda/createGraphFeatures.py | bekou/graph-topic-model | 7bd99aede6c22675f738166e690174ae0917b9eb | [
"MIT"
] | null | null | null | learn/lda/createGraphFeatures.py | bekou/graph-topic-model | 7bd99aede6c22675f738166e690174ae0917b9eb | [
"MIT"
] | 1 | 2019-05-26T15:57:35.000Z | 2019-05-26T15:57:35.000Z | import networkx as nx
import string
import numpy as np
import math
| 43.632231 | 254 | 0.58604 |
3bab2d583dbdb20d52e61488899227c07f7c4954 | 30,867 | py | Python | blocks/bricks/sequence_generators.py | KIKOcaoyue/blocks | dfbeb400cfacfc1abe75e377cc03c1bf61b9c2fa | [
"BSD-3-Clause"
] | 1,067 | 2015-05-16T23:39:15.000Z | 2019-02-10T13:33:00.000Z | blocks/bricks/sequence_generators.py | loveisbasa/blocks | 7f380deec8f810b390880e6a4de836115e6e478d | [
"BSD-3-Clause"
] | 577 | 2015-05-16T18:52:53.000Z | 2018-11-27T15:31:09.000Z | blocks/bricks/sequence_generators.py | loveisbasa/blocks | 7f380deec8f810b390880e6a4de836115e6e478d | [
"BSD-3-Clause"
] | 379 | 2015-05-21T03:24:04.000Z | 2019-01-29T02:55:00.000Z | """Sequence generation framework.
Recurrent networks are often used to generate/model sequences.
Examples include language modelling, machine translation, handwriting
synthesis, etc.. A typical pattern in this context is that
sequence elements are generated one often another, and every generated
element is fed back into the recurrent network state. Sometimes
also an attention mechanism is used to condition sequence generation
on some structured input like another sequence or an image.
This module provides :class:`SequenceGenerator` that builds a sequence
generating network from three main components:
* a core recurrent transition, e.g. :class:`~blocks.bricks.recurrent.LSTM`
or :class:`~blocks.bricks.recurrent.GatedRecurrent`
* a readout component that can produce sequence elements using
the network state and the information from the attention mechanism
* an attention mechanism (see :mod:`~blocks.bricks.attention` for
more information)
Implementation-wise :class:`SequenceGenerator` fully relies on
:class:`BaseSequenceGenerator`. At the level of the latter an
attention is mandatory, moreover it must be a part of the recurrent
transition (see :class:`~blocks.bricks.attention.AttentionRecurrent`).
To simulate optional attention, :class:`SequenceGenerator` wraps the
pure recurrent network in :class:`FakeAttentionRecurrent`.
"""
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from theano import tensor
from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax
from blocks.bricks.base import application, Brick, lazy
from blocks.bricks.parallel import Fork, Merge
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import recurrent
from blocks.bricks.attention import (
AbstractAttentionRecurrent, AttentionRecurrent)
from blocks.roles import add_role, COST
from blocks.utils import dict_union, dict_subset
def get_dim(self, name):
    """Route a dimension query to the sub-brick that owns the name."""
    transition_names = (self._state_names + self._context_names +
                        self._glimpse_names)
    if name in transition_names:
        return self.transition.get_dim(name)
    if name == 'outputs':
        return self.readout.get_dim(name)
    return super(BaseSequenceGenerator, self).get_dim(name)
class Readout(AbstractReadout):
r"""Readout brick with separated emitter and feedback parts.
:class:`Readout` combines a few bits and pieces into an object
that can be used as the readout component in
:class:`BaseSequenceGenerator`. This includes an emitter brick,
to which :meth:`emit`, :meth:`cost` and :meth:`initial_outputs`
calls are delegated, a feedback brick to which :meth:`feedback`
functionality is delegated, and a pipeline to actually compute
readouts from all the sources (see the `source_names` attribute
of :class:`AbstractReadout`).
The readout computation pipeline is constructed from `merge` and
`post_merge` brick, whose responsibilites are described in the
respective docstrings.
Parameters
----------
emitter : an instance of :class:`AbstractEmitter`
The emitter component.
feedback_brick : an instance of :class:`AbstractFeedback`
The feedback component.
merge : :class:`~.bricks.Brick`, optional
A brick that takes the sources given in `source_names` as an input
and combines them into a single output. If given, `merge_prototype`
cannot be given.
merge_prototype : :class:`.FeedForward`, optional
If `merge` isn't given, the transformation given by
`merge_prototype` is applied to each input before being summed. By
default a :class:`.Linear` transformation without biases is used.
If given, `merge` cannot be given.
post_merge : :class:`.Feedforward`, optional
This transformation is applied to the merged inputs. By default
:class:`.Bias` is used.
merged_dim : int, optional
The input dimension of `post_merge` i.e. the output dimension of
`merge` (or `merge_prototype`). If not give, it is assumed to be
the same as `readout_dim` (i.e. `post_merge` is assumed to not
change dimensions).
\*\*kwargs : dict
Passed to the parent's constructor.
See Also
--------
:class:`BaseSequenceGenerator` : see how exactly a readout is used
:class:`AbstractEmitter`, :class:`AbstractFeedback`
"""
def get_dim(self, name):
if name == 'outputs':
return self.emitter.get_dim(name)
elif name == 'feedback':
return self.feedback_brick.get_dim(name)
elif name == 'readouts':
return self.readout_dim
return super(Readout, self).get_dim(name)
class TrivialEmitter(AbstractEmitter):
"""An emitter for the trivial case when readouts are outputs.
Parameters
----------
readout_dim : int
The dimension of the readout.
Notes
-----
By default :meth:`cost` always returns zero tensor.
"""
def get_dim(self, name):
if name == 'outputs':
return self.readout_dim
return super(TrivialEmitter, self).get_dim(name)
class SoftmaxEmitter(AbstractEmitter, Initializable, Random):
"""A softmax emitter for the case of integer outputs.
Interprets readout elements as energies corresponding to their indices.
Parameters
----------
initial_output : int or a scalar :class:`~theano.Variable`
The initial output.
"""
def get_dim(self, name):
if name == 'outputs':
return 0
return super(SoftmaxEmitter, self).get_dim(name)
class TrivialFeedback(AbstractFeedback):
"""A feedback brick for the case when readout are outputs."""
def get_dim(self, name):
if name == 'feedback':
return self.output_dim
return super(TrivialFeedback, self).get_dim(name)
class LookupFeedback(AbstractFeedback, Initializable):
"""A feedback brick for the case when readout are integers.
Stores and retrieves distributed representations of integers.
"""
class FakeAttentionRecurrent(AbstractAttentionRecurrent, Initializable):
"""Adds fake attention interface to a transition.
:class:`BaseSequenceGenerator` requires its transition brick to support
:class:`~blocks.bricks.attention.AbstractAttentionRecurrent` interface,
that is to have an embedded attention mechanism. For the cases when no
attention is required (e.g. language modeling or encoder-decoder
models), :class:`FakeAttentionRecurrent` is used to wrap a usual
recurrent brick. The resulting brick has no glimpses and simply
passes all states and contexts to the wrapped one.
.. todo::
Get rid of this brick and support attention-less transitions
in :class:`BaseSequenceGenerator`.
"""
def get_dim(self, name):
return self.transition.get_dim(name)
class SequenceGenerator(BaseSequenceGenerator):
r"""A more user-friendly interface for :class:`BaseSequenceGenerator`.
Parameters
----------
readout : instance of :class:`AbstractReadout`
The readout component for the sequence generator.
transition : instance of :class:`.BaseRecurrent`
The recurrent transition to be used in the sequence generator.
Will be combined with `attention`, if that one is given.
attention : object, optional
The attention mechanism to be added to ``transition``,
an instance of
:class:`~blocks.bricks.attention.AbstractAttention`.
add_contexts : bool
If ``True``, the
:class:`.AttentionRecurrent` wrapping the
`transition` will add additional contexts for the attended and its
mask.
\*\*kwargs : dict
All keywords arguments are passed to the base class. If `fork`
keyword argument is not provided, :class:`.Fork` is created
that forks all transition sequential inputs without a "mask"
substring in them.
"""
| 35.357388 | 78 | 0.655846 |
3bad4e23a3beb6bf9c00280bce54c9d434b3e821 | 1,752 | py | Python | src/plotComponents2D.py | ElsevierSoftwareX/SOFTX-D-21-00108 | 3ae995e7ef2fa2eb706a2f55390d8b53424082af | [
"MIT"
] | null | null | null | src/plotComponents2D.py | ElsevierSoftwareX/SOFTX-D-21-00108 | 3ae995e7ef2fa2eb706a2f55390d8b53424082af | [
"MIT"
] | null | null | null | src/plotComponents2D.py | ElsevierSoftwareX/SOFTX-D-21-00108 | 3ae995e7ef2fa2eb706a2f55390d8b53424082af | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np | 36.5 | 114 | 0.4629 |
3bafd74c087e7fcccb34aa701eb50c300c1ce2a1 | 5,541 | py | Python | bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py | hRun/SA-haveibeenpwned | 2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb | [
"Apache-2.0"
] | 2 | 2020-08-17T07:52:48.000Z | 2020-12-18T16:39:32.000Z | bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py | hRun/SA-haveibeenpwned | 2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb | [
"Apache-2.0"
] | 5 | 2020-12-15T23:40:14.000Z | 2022-02-23T15:43:18.000Z | bin/sa_haveibeenpwned/aob_py3/cloudconnectlib/splunktacollectorlib/data_collection/ta_data_collector.py | hRun/SA-haveibeenpwned | 2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb | [
"Apache-2.0"
] | 4 | 2019-05-16T09:57:33.000Z | 2021-07-14T12:31:21.000Z | #!/usr/bin/python
from __future__ import absolute_import
from builtins import object
import threading
import time
from collections import namedtuple
from . import ta_consts as c
from ..common import log as stulog
from ...splunktalib.common import util as scu
evt_fmt = ("<stream><event><host>{0}</host>"
"<source><![CDATA[{1}]]></source>"
"<sourcetype><![CDATA[{2}]]></sourcetype>"
"<time>{3}</time>"
"<index>{4}</index><data>"
"<![CDATA[{5}]]></data></event></stream>")
unbroken_evt_fmt = ("<stream>"
"<event unbroken=\"1\">"
"<host>{0}</host>"
"<source><![CDATA[{1}]]></source>"
"<sourcetype><![CDATA[{2}]]></sourcetype>"
"<time>{3}</time>"
"<index>{4}</index>"
"<data><![CDATA[{5}]]></data>"
"{6}"
"</event>"
"</stream>")
event_tuple = namedtuple('Event',
['host', 'source', 'sourcetype', 'time', 'index',
'raw_data', 'is_unbroken', 'is_done'])
| 37.693878 | 80 | 0.497203 |
3bb2017aa37b51490a6ff0089cf9d21b31b7addf | 220 | py | Python | basic_assignment/32.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null | basic_assignment/32.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null | basic_assignment/32.py | 1212091/python-learning | 30fad66460daf73fd3961cf667ee25b91dee923d | [
"MIT"
] | null | null | null |
circle = Circle(2)
print("Area of circuit: " + str(circle.compute_area()))
| 20 | 55 | 0.631818 |
3bb3f6ef40737ea90a9f45f3effca880da0d4227 | 154 | py | Python | lambdata-mkhalil/my_script.py | mkhalil7625/lambdata-mkhalil | 87f74166a3ae4f4cc92733cb5fc0c15e3b32f565 | [
"MIT"
] | null | null | null | lambdata-mkhalil/my_script.py | mkhalil7625/lambdata-mkhalil | 87f74166a3ae4f4cc92733cb5fc0c15e3b32f565 | [
"MIT"
] | null | null | null | lambdata-mkhalil/my_script.py | mkhalil7625/lambdata-mkhalil | 87f74166a3ae4f4cc92733cb5fc0c15e3b32f565 | [
"MIT"
] | null | null | null | import pandas as pd
from my_mod import enlarge
print("Hello!")
df = pd.DataFrame({"a":[1,2,3], "b":[4,5,6]})
print(df.head())
x = 11
print(enlarge(x)) | 14 | 45 | 0.62987 |
3bb55e28e639f9add169bf9d74da89f4b5663f84 | 885 | py | Python | exploit_mutillidae.py | binarioGH/minihacktools | 664e72ccc54089baa3b4d2ddc28bdcddbfdd1833 | [
"MIT"
] | null | null | null | exploit_mutillidae.py | binarioGH/minihacktools | 664e72ccc54089baa3b4d2ddc28bdcddbfdd1833 | [
"MIT"
] | null | null | null | exploit_mutillidae.py | binarioGH/minihacktools | 664e72ccc54089baa3b4d2ddc28bdcddbfdd1833 | [
"MIT"
] | null | null | null | #-*-coding: utf-8-*-
from requests import session
from bs4 import BeautifulSoup
#Aqui pones la ip de tu maquina.
host = "192.168.1.167"
#Aqui pones la ruta de el dns-lookup.php
route = "/mutillidae/index.php?page=dns-lookup.php"
with session() as s:
cmd = ''
while cmd != 'exit':
cmd = input(">>")
payload = "|| {}".format(cmd)
#Mandar el payload al host por medio de un post request.
response = s.post("http://{}{}".format(host, route), data={"target_host": payload})
#Parsear la respuesta con beautiful soup
soup = BeautifulSoup(response.text, "html.parser")
#El output del comando se encuentra en un <pre> con la clase 'report-header'
#Asi que le decimos a beautiful soup que lo encuentre, y que nos de el texto.
command_output = soup.find_all("pre", attrs={"class": "report-header"})[0].get_text()
#Imprimir output del comando
print(command_output) | 32.777778 | 87 | 0.692655 |
3bb5cf6df03cde1b36d438f6ec362fdce3a55254 | 101 | py | Python | submissions/abc085/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/abc085/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/abc085/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | # sys.stdin.readline()
import sys
input = sys.stdin.readline
print(input().replace('2017', '2018'))
| 16.833333 | 38 | 0.70297 |
3bb6a612c41a6fc405f13a76e25386f09fd4787a | 4,069 | py | Python | rsmtool/utils/files.py | MarcoGorelli/rsmtool | 8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0 | [
"Apache-2.0"
] | null | null | null | rsmtool/utils/files.py | MarcoGorelli/rsmtool | 8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0 | [
"Apache-2.0"
] | null | null | null | rsmtool/utils/files.py | MarcoGorelli/rsmtool | 8759f5bec09a8ba0dd2ca16f6af8ce100d5ea6a0 | [
"Apache-2.0"
] | null | null | null | """
Utility classes and functions for RSMTool file management.
:author: Jeremy Biggs (jbiggs@ets.org)
:author: Anastassia Loukina (aloukina@ets.org)
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import json
import re
from glob import glob
from pathlib import Path
from os.path import join
from .constants import POSSIBLE_EXTENSIONS
def parse_json_with_comments(pathlike):
    """
    Parse a JSON file after removing any comments.

    Comments can use either ``//`` for single-line
    comments or ``/* ... */`` for multi-line comments.
    The input filepath can be a string or ``pathlib.Path``.

    Parameters
    ----------
    pathlike : str or os.PathLike
        Path to the input JSON file either as a string
        or as a ``pathlib.Path`` object.

    Returns
    -------
    obj : dict
        JSON object representing the input file.

    Note
    ----
    String literals are left untouched, so values such as
    ``"http://example.com"`` are not mistaken for ``//`` comments
    (the previous regex-only approach corrupted such files).
    """
    # Match a double-quoted JSON string (captured in group 1) OR a
    # comment.  Because alternation tries the string branch first,
    # comment markers that appear *inside* string values are preserved.
    token_re = re.compile(
        r'("(?:\\.|[^"\\])*")'   # group 1: string literal (kept)
        r'|/\*.*?\*/'            # block comment (dropped; DOTALL spans lines)
        r'|//[^\n]*',            # line comment (dropped)
        re.DOTALL)

    # Path() accepts both str and Path, so no explicit conversion needed.
    content = Path(pathlike).read_text()

    # Drop comments, keep string literals verbatim.
    stripped = token_re.sub(lambda match: match.group(1) or '', content)

    # Return JSON object
    return json.loads(stripped)
def has_files_with_extension(directory, ext):
    """
    Check whether the directory contains at least one file with the
    given extension.

    Parameters
    ----------
    directory : str
        The path to the directory where output is located.
    ext : str
        The extension to look for, without the leading dot.

    Returns
    -------
    bool
        True if the directory contains files with the given extension,
        else False.
    """
    pattern = join(directory, '*.{}'.format(ext))
    return bool(glob(pattern))
def get_output_directory_extension(directory, experiment_id):
    """
    Determine which single file extension the output directory uses.

    Only extensions in ``POSSIBLE_EXTENSIONS`` (csv, tsv, xlsx) are
    considered; files with other extensions are ignored.  If no known
    extension is found, 'csv' is returned as the default.

    Parameters
    ----------
    directory : str
        The path to the directory where output is located.
    experiment_id : str
        The ID of the experiment (used only in the error message).

    Returns
    -------
    extension : {'csv', 'tsv', 'xlsx'}
        The extension that output files in this directory end with.

    Raises
    ------
    ValueError
        If files with more than one known extension are present.
    """
    matching = {ext for ext in POSSIBLE_EXTENSIONS
                if has_files_with_extension(directory, ext)}
    if len(matching) > 1:
        raise ValueError('Some of the files in the experiment output directory (`{}`) '
                         'for `{}` have different extensions. All files in this directory '
                         'must have the same extension. The following extensions were '
                         'identified : {}'.format(directory,
                                                  experiment_id,
                                                  ', '.join(matching)))
    if len(matching) == 1:
        return matching.pop()
    return 'csv'
| 29.70073 | 111 | 0.616368 |
3bbaa7105c8bdb5d9e446a53505849bc8d258fd0 | 2,479 | py | Python | src/aiotube/playlist.py | jnsougata/AioTube | 719bc52e442d06f922ada65da7650cfb92a0f237 | [
"MIT"
] | 4 | 2021-10-02T07:01:22.000Z | 2021-12-30T08:27:36.000Z | src/aiotube/playlist.py | jnsougata/AioTube | 719bc52e442d06f922ada65da7650cfb92a0f237 | [
"MIT"
] | 2 | 2021-11-18T20:21:39.000Z | 2021-12-27T17:12:17.000Z | src/aiotube/playlist.py | jnsougata/AioTube | 719bc52e442d06f922ada65da7650cfb92a0f237 | [
"MIT"
] | 3 | 2021-10-01T03:21:33.000Z | 2021-12-21T20:49:30.000Z | from ._threads import _Thread
from .utils import filter
from .videobulk import _VideoBulk
from ._http import _get_playlist_data
from ._rgxs import _PlaylistPatterns as rgx
from typing import List, Optional, Dict, Any
| 27.853933 | 90 | 0.592981 |
3bbaf9656983dcec4f85d013784da058e74250a8 | 160 | py | Python | actions/sleep.py | bhaveshAn/Lucy | 9ea97184c725a10a041af64cad0ef4b533be42ad | [
"MIT"
] | 1 | 2018-04-13T08:26:27.000Z | 2018-04-13T08:26:27.000Z | actions/sleep.py | bhaveshAn/Lucy | 9ea97184c725a10a041af64cad0ef4b533be42ad | [
"MIT"
] | null | null | null | actions/sleep.py | bhaveshAn/Lucy | 9ea97184c725a10a041af64cad0ef4b533be42ad | [
"MIT"
] | null | null | null | import random
| 20 | 73 | 0.65 |
3bbc9e86460d1c4a98d44711f0b944fd26bd0864 | 441 | py | Python | django_sso_app/core/api/utils.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | 1 | 2021-11-16T15:16:08.000Z | 2021-11-16T15:16:08.000Z | django_sso_app/core/api/utils.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | django_sso_app/core/api/utils.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | import logging
from django.contrib.messages import get_messages
from django.utils.encoding import force_str
logger = logging.getLogger('django_sso_app')
def get_request_messages_string(request):
    """
    Serialize all Django messages attached to the request into one
    comma-separated string.

    :param request: the current HTTP request
    :return: messages joined by ', '
    """
    return ', '.join(force_str(message) for message in get_messages(request))
| 20.045455 | 48 | 0.709751 |
3bbcc2fb4051ff428b3a3b66b12c0e4e0235c79c | 1,092 | py | Python | download_stock_data.py | dabideee13/Price-Pattern-Prediction | 632d961fc08777adab8eeb7ecbf16ac7cc71a3a7 | [
"MIT"
] | null | null | null | download_stock_data.py | dabideee13/Price-Pattern-Prediction | 632d961fc08777adab8eeb7ecbf16ac7cc71a3a7 | [
"MIT"
] | null | null | null | download_stock_data.py | dabideee13/Price-Pattern-Prediction | 632d961fc08777adab8eeb7ecbf16ac7cc71a3a7 | [
"MIT"
] | null | null | null | #!/opt/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
Get Stock Data
"""
import time
import pandas as pd
import yfinance as yf
if __name__ == '__main__':
# Path to file
# TODO: make directory if directory doesn't exist
f_file = "/Users/d.e.magno/Datasets/raw_stocks_new.csv"
# TODO: need to check which is already downloaded
stock_file = pd.read_csv('/Users/d.e.magno/Datasets/tickers/generic.csv')
stock_list = stock_file.Ticker
start_timeA = time.time()
for stock in stock_list:
try:
start_timeB = time.time()
print("Downloading {}...".format(stock))
yf.Ticker(stock).history(period="max").to_csv(
f_file.format(stock))
time.sleep(10)
end_timeB = time.time()
print("Time elapsed:", end_timeB - start_timeB)
print()
except Exception as ex:
pass
except KeyboardInterrupt as ex:
break
print("Finished.")
end_timeA = time.time()
print("Total time elapsed:", end_timeA - start_timeA)
| 24.266667 | 77 | 0.598901 |
3bbd5cc24a379d3da78746ccf10468524d2749f7 | 2,421 | py | Python | _int_tools.py | CaptainSora/Python-Project-Euler | 056400f434eec837ece5ef06653b310ebfcc3d4e | [
"MIT"
] | null | null | null | _int_tools.py | CaptainSora/Python-Project-Euler | 056400f434eec837ece5ef06653b310ebfcc3d4e | [
"MIT"
] | null | null | null | _int_tools.py | CaptainSora/Python-Project-Euler | 056400f434eec837ece5ef06653b310ebfcc3d4e | [
"MIT"
] | null | null | null | """
This module contains functions related to integer formatting and math.
"""
from functools import reduce
from itertools import count
from math import gcd, prod
# ================ ARRAY FORMATTING FUNCTIONS ================
def int_to_int_array(num):
    """
    Return the decimal digits of a non-negative integer as a list of ints.

    Deprecated, use int_to_digit_array(num)

    Note: assumes ``num`` is a non-negative int — a minus sign would
    raise ValueError (same as the original index-based version).
    """
    # Iterate the digit characters directly instead of rebuilding
    # str(num) and indexing into it once per digit.
    return [int(digit) for digit in str(num)]
# ================ CALCULATION FUNCTIONS ================
def product(numlist):
    """
    Return the product of all numbers in ``numlist`` (1 for an empty list).

    Deprecated since Python 3.8, use math.prod instead
    (this wrapper now simply delegates to it, matching the old
    ``reduce``-based behaviour including the empty-list result of 1).
    """
    return prod(numlist)
def phi(n):
    """
    Return Euler's totient of ``n``: the count of integers in [1, n)
    that are coprime to ``n``.

    Note: by this range-based definition phi(1) == 0 (the range is
    empty), matching the original implementation.
    """
    return sum(1 for k in range(1, n) if gcd(n, k) == 1)
# ================ COUNTING FUNCTIONS ================
def counting_summations(values, target):
"""
Returns the number of ways to write target as the sum of numbers in values.
"""
csums = [[0 for _ in values]]
while len(csums) <= target:
tempsum = [0 for _ in values]
for a in range(len(values)):
if values[a] > len(csums):
break
elif values[a] == len(csums):
tempsum[a] = 1
else:
tempsum[a] += sum(csums[len(csums) - values[a]][:a+1])
csums.append(tempsum)
return sum(csums[target])
| 23.278846 | 79 | 0.534077 |
3bbf01d5cb0102d02a8a8a3dba1f25da4c1520b3 | 2,260 | py | Python | source/stats/lstm_model_builder.py | dangtunguyen/nids | a92b56a5ac29cc1482ae29374eef02bb7654785f | [
"MIT"
] | 2 | 2019-11-22T19:56:50.000Z | 2020-12-15T02:43:52.000Z | source/stats/lstm_model_builder.py | dangtunguyen/nids | a92b56a5ac29cc1482ae29374eef02bb7654785f | [
"MIT"
] | null | null | null | source/stats/lstm_model_builder.py | dangtunguyen/nids | a92b56a5ac29cc1482ae29374eef02bb7654785f | [
"MIT"
] | 2 | 2020-12-15T02:43:54.000Z | 2021-11-05T03:19:59.000Z | #!/usr/bin/env python
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, TimeDistributed, Flatten
from keras.layers import LSTM
'''
Reference: https://keras.io/getting-started/sequential-model-guide/
A stateful recurrent model is one for which the internal states (memories)
obtained after processing a batch of samples are reused as initial states
for the samples of the next batch. This allows to process longer sequences
while keeping computational complexity manageable.
'''
| 48.085106 | 161 | 0.712389 |
3bc0be85bb851d619749be911d22c015dc81cc08 | 26,696 | py | Python | pyinstagram/base.py | alessandrocucci/PyInstagram | cd8f30b8c470a8cdcd8da801af897e4d14f7a677 | [
"MIT"
] | 1 | 2019-05-03T17:46:02.000Z | 2019-05-03T17:46:02.000Z | pyinstagram/base.py | alessandrocucci/PyInstagram | cd8f30b8c470a8cdcd8da801af897e4d14f7a677 | [
"MIT"
] | 1 | 2021-06-01T21:51:23.000Z | 2021-06-01T21:51:23.000Z | pyinstagram/base.py | alessandrocucci/PyInstagram | cd8f30b8c470a8cdcd8da801af897e4d14f7a677 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
from datetime import datetime
from operator import itemgetter
import requests
import time
from pyinstagram.model import Media
from .exceptions import OAuthException, PyInstagramException
from .oauth import OAuth
from .constants import API_URL
from .utils import DESAdapter
| 44.271973 | 661 | 0.523599 |
3bc1971c02e3a51d7591b4d8543cd3bec3e278e6 | 1,483 | py | Python | csv_readers/stay_points_csv_reader.py | s0lver/stm-creator | b058185ca028abd1902edbb35a52d3565b06f8b0 | [
"Apache-2.0"
] | null | null | null | csv_readers/stay_points_csv_reader.py | s0lver/stm-creator | b058185ca028abd1902edbb35a52d3565b06f8b0 | [
"Apache-2.0"
] | null | null | null | csv_readers/stay_points_csv_reader.py | s0lver/stm-creator | b058185ca028abd1902edbb35a52d3565b06f8b0 | [
"Apache-2.0"
] | null | null | null | import csv
from typing import List, Iterator, Dict
from entities.StayPoint import StayPoint
def read(file_path: str) -> List[StayPoint]:
    """
    Returns a list of StayPoint read from the specified file path

    :param file_path: The path of file to read
    :return: A list of StayPoint
    """
    # 'with' guarantees the handle is closed even if parsing raises —
    # the original opened the file and never closed it.
    with open(file_path, 'r', newline='', encoding='utf-8') as file:
        reader = csv.DictReader(file, delimiter=',')
        return [build_stay_point_from_line(line) for line in reader]
def build_stay_point_from_line(line: Dict) -> StayPoint:
    """
    Build a StayPoint from one parsed CSV row.

    :param line: Mapping of column name to raw string value
    :return: A StayPoint object
    """
    return StayPoint(
        int(line["_id"]),
        float(line["latitude"]),
        float(line["longitude"]),
        int(line["visitCount"]),
    )
| 29.66 | 75 | 0.681052 |
3bc47ccda883fce926fb879e2f171e425ac7191d | 1,959 | py | Python | login/models.py | zcw576020095/netsysyconfig_platform | d47be2c5b3418d59a226cb9e135972160e51df00 | [
"Unlicense"
] | 1 | 2022-03-25T07:49:10.000Z | 2022-03-25T07:49:10.000Z | login/models.py | zcw576020095/netsysyconfig_platform | d47be2c5b3418d59a226cb9e135972160e51df00 | [
"Unlicense"
] | null | null | null | login/models.py | zcw576020095/netsysyconfig_platform | d47be2c5b3418d59a226cb9e135972160e51df00 | [
"Unlicense"
] | null | null | null | from django.db import models
# Create your models here.
##
## | 27.591549 | 78 | 0.669219 |
3bc52fc59dc21473a03e193fd04c98996f1d2a1e | 2,083 | py | Python | DataAnalysis.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | DataAnalysis.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | DataAnalysis.py | andairka/Simple-default-Backpropagation-ANN | 995de1471e2b132af721b2babbec034f29228640 | [
"MIT"
] | null | null | null | import ImportTitanicData
import DataPreparation
# analiza danych przed preparacj danych
# analiza danych po preparacji danych
# dataAnaysis = DataAnaliysisBefore()
dataAnaysis = DataAnaliysisAfter()
# print('czesio')
# print('Analiza danych przed wypenieniem NaN')
# print('Tabela Train\n', dataAnaysis.showTrain())
# print('\n\nshape Train\n', dataAnaysis.shapeTrain())
# print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
print('Analiza danych po wypenieniem NaN i preparacji danych')
print('Tabela Train\n', dataAnaysis.showTrain())
print('\n\nshape Train\n', dataAnaysis.shapeTrain())
print('\n\ndtypes Train\n', dataAnaysis.dtypesTrain())
# dataPreparation = DataPreparation.DataPreparation()
# print(dataPreparation.prepareTrainData().to_string())
| 26.367089 | 63 | 0.68987 |
3bc5e3ab47f6373dad23233f3b3391f39ba91b96 | 10,341 | py | Python | tests/api/test_predict.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 2 | 2021-07-12T13:51:21.000Z | 2021-07-19T08:40:02.000Z | tests/api/test_predict.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 41 | 2021-06-28T11:05:20.000Z | 2022-03-13T13:48:50.000Z | tests/api/test_predict.py | mldock/mldock | 314b733e4f0102321727f8b145fc276486ecad85 | [
"Apache-2.0"
] | 1 | 2021-07-17T19:07:06.000Z | 2021-07-17T19:07:06.000Z | """Test Predict API calls"""
import io
from PIL import Image
from dataclasses import dataclass
import tempfile
from pathlib import Path
import pytest
from mock import patch
from mldock.api.predict import send_image_jpeg, send_csv, send_json, handle_prediction
import responses
import requests
class TestPredictAPI:
"""
TEST ERROR STATUS_CODE!=200 SCENERIO
"""
"""
TEST SUCCESS STATUS_CODE=200 SCENERIO
"""
"""
TEST WRITING RESPONSE TO FILE SCENERIO
"""
"""
TEST ADDING ADDTIONAL HEADERS
"""
| 35.782007 | 86 | 0.579634 |
3bc608810561bbe247f5ed3cfef52e4be93e7faa | 2,081 | py | Python | neutron/tests/tempest/api/test_qos_negative.py | mail2nsrajesh/neutron | 352afb37afcf4952f03436b25618d0066c51f3f1 | [
"Apache-2.0"
] | null | null | null | neutron/tests/tempest/api/test_qos_negative.py | mail2nsrajesh/neutron | 352afb37afcf4952f03436b25618d0066c51f3f1 | [
"Apache-2.0"
] | null | null | null | neutron/tests/tempest/api/test_qos_negative.py | mail2nsrajesh/neutron | 352afb37afcf4952f03436b25618d0066c51f3f1 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import constants as db_const
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
from neutron.tests.tempest.api import base
LONG_NAME_NG = 'z' * (db_const.NAME_FIELD_SIZE + 1)
LONG_DESCRIPTION_NG = 'z' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1)
LONG_TENANT_ID_NG = 'z' * (db_const.PROJECT_ID_FIELD_SIZE + 1)
| 41.62 | 78 | 0.695819 |
3bc6222a69419d7c3721ce7c39a656221c86ab89 | 1,105 | py | Python | src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null | src/main/python/storytext/lib/storytext/javageftoolkit/__init__.py | emilybache/texttest-runner | 2d5c42b8d37699a2cbcb8f19af7c271d6ad1024a | [
"MIT"
] | null | null | null |
""" Don't load any Eclipse stuff at global scope, needs to be importable previous to Eclipse starting """
from storytext import javarcptoolkit
import sys
| 38.103448 | 105 | 0.761086 |
3bc648d7577a48d53c343d95dc1ac69b209de7c4 | 11,380 | py | Python | subscribe/models.py | jonge-democraten/dyonisos | bebc5b28761bd5e036e4e6e219b5474d901026c3 | [
"MIT"
] | null | null | null | subscribe/models.py | jonge-democraten/dyonisos | bebc5b28761bd5e036e4e6e219b5474d901026c3 | [
"MIT"
] | 10 | 2016-10-31T21:14:06.000Z | 2021-01-07T22:34:42.000Z | subscribe/models.py | jonge-democraten/dyonisos | bebc5b28761bd5e036e4e6e219b5474d901026c3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2011,2014 Floor Terra <floort@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import datetime
import logging
import traceback
from django.core.mail import EmailMessage
from django.db import models
from django.template import Context, Template
logger = logging.getLogger(__name__)
AFDELINGEN = (
("AMS", "Amsterdam"),
("AN", "Arnhem-Nijmegen"),
("BB", "Brabant"),
("FR", "Friesland"),
("GR", "Groningen"),
("LH", "Leiden-Haaglanden"),
("MS", "Limburg"),
("RD", "Rotterdam"),
("TW", "Overijssel"),
("UT", "Utrecht"),
("WN", "Wageningen"),
("INT", "Internationaal"),
)
QUESTION_TYPES = (
("INT", "Integer"),
("TXT", "Text Input"),
("AFD", "Afdeling"),
("BOOL", "Ja/Nee"),
("CHOICE", "Multiple Choice"),
("TEXT", "HTML Text"),
)
| 37.682119 | 228 | 0.644991 |
3bc7d40a1ff3f95ca6bbd675bada6d5806be3718 | 7,661 | py | Python | vumi/worker.py | hnec-vr/vumi | b9c1100176a46774b502d5a0db225930a2d298c7 | [
"BSD-3-Clause"
] | 1 | 2016-07-27T17:13:32.000Z | 2016-07-27T17:13:32.000Z | vumi/worker.py | TouK/vumi | 6d250c7039fa1d82b01c5b68722aa8a6a94580b2 | [
"BSD-3-Clause"
] | null | null | null | vumi/worker.py | TouK/vumi | 6d250c7039fa1d82b01c5b68722aa8a6a94580b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- test-case-name: vumi.tests.test_worker -*-
"""Basic tools for workers that handle TransportMessages."""
import time
import os
import socket
from twisted.internet.defer import (
inlineCallbacks, succeed, maybeDeferred, gatherResults)
from twisted.python import log
from vumi.service import Worker
from vumi.middleware import setup_middlewares_from_config
from vumi.connectors import ReceiveInboundConnector, ReceiveOutboundConnector
from vumi.config import Config, ConfigInt
from vumi.errors import DuplicateConnectorError
from vumi.utils import generate_worker_id
from vumi.blinkenlights.heartbeat import (HeartBeatPublisher,
HeartBeatMessage)
| 35.967136 | 79 | 0.645477 |
3bc950ff5c221db7b60343065a9a70cf058a7e1a | 1,204 | py | Python | tt/ksl/setup.py | aiboyko/ttpy | 8fda9d29e27e4f9f68ffba5cc1e16b3020eb131f | [
"MIT"
] | 2 | 2021-04-02T17:42:03.000Z | 2021-11-17T11:30:42.000Z | tt/ksl/setup.py | qbit-/ttpy | 596b1c9fe6ce5f0ba66d801ac88a1147204cec2f | [
"MIT"
] | null | null | null | tt/ksl/setup.py | qbit-/ttpy | 596b1c9fe6ce5f0ba66d801ac88a1147204cec2f | [
"MIT"
] | 1 | 2021-01-10T07:02:09.000Z | 2021-01-10T07:02:09.000Z | # setup.py
# This script will build the main subpackages
# See LICENSE for details
from __future__ import print_function, absolute_import
from numpy.distutils.misc_util import Configuration
from os.path import join
TTFORT_DIR = '../tt-fort'
EXPM_DIR = '../tt-fort/expm'
EXPOKIT_SRC = [
'explib.f90',
'normest.f90',
'expokit.f',
'dlacn1.f',
'dlapst.f',
'dlarpc.f',
'zlacn1.f',
]
TTKSL_SRC = [
'ttals.f90',
'tt_ksl.f90',
'tt_diag_ksl.f90'
]
if __name__ == '__main__':
print('This is the wrong setup.py to run')
| 19.737705 | 59 | 0.58887 |
3bcd4cf0614ab6f7c88bcffd7170ce176a5a3489 | 305 | py | Python | tests/text_processors/test_json_text_processor.py | lyteloli/NekoGram | f077471000b40a74e0eb4e98dfb570b5e34d23ab | [
"MIT"
] | 8 | 2020-08-21T07:43:52.000Z | 2022-01-27T06:48:01.000Z | tests/text_processors/test_json_text_processor.py | lyteloli/NekoGram | f077471000b40a74e0eb4e98dfb570b5e34d23ab | [
"MIT"
] | null | null | null | tests/text_processors/test_json_text_processor.py | lyteloli/NekoGram | f077471000b40a74e0eb4e98dfb570b5e34d23ab | [
"MIT"
] | 1 | 2022-01-27T06:48:02.000Z | 2022-01-27T06:48:02.000Z | from NekoGram import Neko, Bot
import json
| 30.5 | 86 | 0.688525 |
3bcdd1ca315307c12c5399ab4a8df2ed64ad6708 | 7,960 | py | Python | itdagene/app/meetings/migrations/0001_initial.py | itdagene-ntnu/itdagene | b972cd3d803debccebbc33641397a39834b8d69a | [
"MIT"
] | 9 | 2018-10-17T20:58:09.000Z | 2021-12-16T16:16:45.000Z | itdagene/app/meetings/migrations/0001_initial.py | itdagene-ntnu/itdagene | b972cd3d803debccebbc33641397a39834b8d69a | [
"MIT"
] | 177 | 2018-10-27T18:15:56.000Z | 2022-03-28T04:29:06.000Z | itdagene/app/meetings/migrations/0001_initial.py | itdagene-ntnu/itdagene | b972cd3d803debccebbc33641397a39834b8d69a | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
| 35.695067 | 87 | 0.358417 |
3bcdfa047a911d02d5d42da304bd32569f2f1c95 | 2,247 | py | Python | models/mail.py | Huy-Ngo/temp-mail | 6269f1f405cd7447ea0d45799ee1c4a0623d23a6 | [
"MIT"
] | 3 | 2022-01-18T17:15:17.000Z | 2022-01-22T09:52:19.000Z | models/mail.py | Huy-Ngo/temp-mail | 6269f1f405cd7447ea0d45799ee1c4a0623d23a6 | [
"MIT"
] | 28 | 2020-06-18T08:53:32.000Z | 2020-08-07T02:33:47.000Z | models/mail.py | Huy-Ngo/temp-mail | 6269f1f405cd7447ea0d45799ee1c4a0623d23a6 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Ng Ngc c Huy
from db import db
def set_read(self):
"""Set the email as read."""
self.is_read = True
db.session.add(self)
db.session.commit()
| 28.807692 | 68 | 0.549622 |
3bce9c159c555e02a3e9d2befee3b2b0dfb1fa84 | 1,681 | py | Python | jesse/indicators/bollinger_bands.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | 5 | 2021-05-21T07:39:16.000Z | 2021-11-17T11:08:41.000Z | jesse/indicators/bollinger_bands.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | null | null | null | jesse/indicators/bollinger_bands.py | leaiannotti/jesse | 564c54845774891ff3b5a8d3c02cc7cea890ac54 | [
"MIT"
] | 2 | 2021-05-21T10:14:53.000Z | 2021-05-27T04:39:51.000Z | from collections import namedtuple
import numpy as np
import talib
from jesse.indicators.ma import ma
from jesse.indicators.mean_ad import mean_ad
from jesse.indicators.median_ad import median_ad
from jesse.helpers import get_candle_source, slice_candles
BollingerBands = namedtuple('BollingerBands', ['upperband', 'middleband', 'lowerband'])
def bollinger_bands(candles: np.ndarray, period: int = 20, devup: float = 2, devdn: float = 2, matype: int = 0, devtype: int = 0,
source_type: str = "close",
sequential: bool = False) -> BollingerBands:
"""
BBANDS - Bollinger Bands
:param candles: np.ndarray
:param period: int - default: 20
:param devup: float - default: 2
:param devdn: float - default: 2
:param matype: int - default: 0
:param devtype: int - default: 0
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: BollingerBands(upperband, middleband, lowerband)
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
if devtype == 0:
dev = talib.STDDEV(source, period)
elif devtype == 1:
dev = mean_ad(source, period, sequential=True)
elif devtype == 2:
dev = median_ad(source, period, sequential=True)
middlebands = ma(source, period=period, matype=matype, sequential=True)
upperbands = middlebands + devup * dev
lowerbands = middlebands - devdn * dev
if sequential:
return BollingerBands(upperbands, middlebands, lowerbands)
else:
return BollingerBands(upperbands[-1], middlebands[-1], lowerbands[-1])
| 32.960784 | 129 | 0.684117 |
3bcf9e27b60f3e382de5df8d52f14b8d023fe9df | 3,050 | py | Python | coveralls_check.py | jayvdb/coveralls-check | ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4 | [
"MIT"
] | null | null | null | coveralls_check.py | jayvdb/coveralls-check | ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4 | [
"MIT"
] | 2 | 2018-07-11T07:09:25.000Z | 2022-03-10T12:18:18.000Z | coveralls_check.py | jayvdb/coveralls-check | ca3b0428b90fe3c6d22cad3a122dedc2c46d12e4 | [
"MIT"
] | 1 | 2020-01-10T05:27:46.000Z | 2020-01-10T05:27:46.000Z | from __future__ import print_function
import logging
from argparse import ArgumentParser
import backoff
import requests
import sys
POLL_URL = 'https://coveralls.io/builds/{}.json'
DONE_URL = 'https://coveralls.io/webhook'
| 30.5 | 80 | 0.585902 |
3bd21db03fbff68669d9cf01fd41194e607124e2 | 1,399 | py | Python | ScienceCruiseDataManagement/data_storage_management/utils.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 6 | 2017-10-06T09:18:04.000Z | 2022-02-10T08:54:56.000Z | ScienceCruiseDataManagement/data_storage_management/utils.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 12 | 2020-02-27T09:24:50.000Z | 2021-09-22T17:39:55.000Z | ScienceCruiseDataManagement/data_storage_management/utils.py | Swiss-Polar-Institute/science-cruise-data-management | 67721a0f4a1255b8ac43e530ed95a8c324239c7c | [
"MIT"
] | 1 | 2017-10-16T13:49:33.000Z | 2017-10-16T13:49:33.000Z | import subprocess
import glob
import os
import datetime
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
| 31.088889 | 83 | 0.68549 |
3bd3be55e7aed0ec74f1031095e8ca063b4aa8fd | 515 | py | Python | Applied Project/Web/app/config.py | rebeccabernie/CurrencyAnalyser | 1f57e5b5fee854912c205cb98f57c980027f0a03 | [
"MIT"
] | 27 | 2018-06-22T18:49:52.000Z | 2022-02-18T07:58:48.000Z | Applied Project/Web/app/config.py | taraokelly/CurrencyAnalyser | 1f57e5b5fee854912c205cb98f57c980027f0a03 | [
"MIT"
] | 10 | 2020-01-28T22:24:22.000Z | 2022-02-10T13:11:32.000Z | Applied Project/Web/app/config.py | taraokelly/CurrencyAnalyser | 1f57e5b5fee854912c205cb98f57c980027f0a03 | [
"MIT"
] | 6 | 2018-05-02T16:43:45.000Z | 2020-11-17T18:00:36.000Z | """ Global Flask Application Settings """
import os
from app import app
# Set FLASK_CONFIG env to 'Production' or 'Development' to set Config
flask_config = os.environ.get('FLASK_CONFIG', 'Development')
app.config.from_object('app.config.{}'.format(flask_config))
| 19.074074 | 69 | 0.700971 |
3bd3e56f8e3f7640af1c0c1de7776e8679289263 | 103 | py | Python | Exercise-1/Q4_reverse.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | Exercise-1/Q4_reverse.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | Exercise-1/Q4_reverse.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | word = input('Enter a word')
len = len(word)
for i in range(len-1, -1, -1):
print(word[i], end='')
| 20.6 | 30 | 0.572816 |
3bd475c386fa8a4a56cad45921819b936313ea64 | 162 | py | Python | exercise/6.py | zhaoshengshi/practicepython-exercise | 3e123eb602aaf1c9638c7a2199607146e860b96c | [
"Apache-2.0"
] | null | null | null | exercise/6.py | zhaoshengshi/practicepython-exercise | 3e123eb602aaf1c9638c7a2199607146e860b96c | [
"Apache-2.0"
] | null | null | null | exercise/6.py | zhaoshengshi/practicepython-exercise | 3e123eb602aaf1c9638c7a2199607146e860b96c | [
"Apache-2.0"
] | null | null | null | ss = input('Please give me a string: ')
if ss == ss[::-1]:
print("Yes, %s is a palindrome." % ss)
else:
print('Nevermind, %s isn\'t a palindrome.' % ss)
| 23.142857 | 52 | 0.574074 |
3bd4ef311d3ceb65a757cf2dcd1641a9fa9f94c6 | 5,420 | py | Python | app/v2/resources/users.py | fabischolasi/fast-food-fast-v1 | 492f0bdaaeadf12089a200a9b64bdfc22cd03d0c | [
"MIT"
] | 1 | 2019-10-16T07:56:31.000Z | 2019-10-16T07:56:31.000Z | app/v2/resources/users.py | fabzer0/FastFoodAPI | 492f0bdaaeadf12089a200a9b64bdfc22cd03d0c | [
"MIT"
] | null | null | null | app/v2/resources/users.py | fabzer0/FastFoodAPI | 492f0bdaaeadf12089a200a9b64bdfc22cd03d0c | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify, make_response
from flask_restful import Resource, Api, reqparse, inputs
from ..models.decorators import admin_required
from ..models.models import UserModel
import os
users_api = Blueprint('resources.users', __name__)
api = Api(users_api)
api.add_resource(SignUp, '/auth/signup', endpoint='signup')
api.add_resource(AllUsers, '/users')
api.add_resource(PromoteUser, '/users/<int:user_id>')
api.add_resource(Login, '/auth/login', endpoint='login')
| 38.992806 | 132 | 0.587823 |
3bd6298a19903f15f7e907c194b8869777800558 | 21,160 | py | Python | model_blocks/tests.py | aptivate/django-model-blocks | 5057ed57887683d777f04c95d67d268d21a18c02 | [
"BSD-3-Clause"
] | 6 | 2015-01-20T08:43:44.000Z | 2020-08-13T01:57:10.000Z | model_blocks/tests.py | techdragon/django-model-blocks | 8175d7353d792cb720b4ac356f4538888bf7747c | [
"BSD-3-Clause"
] | 1 | 2016-10-16T17:35:07.000Z | 2016-10-16T17:35:07.000Z | model_blocks/tests.py | techdragon/django-model-blocks | 8175d7353d792cb720b4ac356f4538888bf7747c | [
"BSD-3-Clause"
] | null | null | null | """
Test the model blocks
"""
import datetime
from django.test import TestCase
from mock import Mock
from django.db.models import Model, IntegerField, DateTimeField, CharField
from django.template import Context, Template, TemplateSyntaxError
from example_project.pepulator_factory.models import Pepulator, Distributor
from model_blocks.templatetags import model_filters
from model_blocks.templatetags import model_nodes
| 44.453782 | 304 | 0.578544 |
3bd65ecba92bc72c9d3f44c46609ce82742a80af | 1,794 | py | Python | lit_nlp/examples/toxicity_demo.py | ghostian/lit | 891673ef120391f4682be4478881fdb408241f82 | [
"Apache-2.0"
] | null | null | null | lit_nlp/examples/toxicity_demo.py | ghostian/lit | 891673ef120391f4682be4478881fdb408241f82 | [
"Apache-2.0"
] | null | null | null | lit_nlp/examples/toxicity_demo.py | ghostian/lit | 891673ef120391f4682be4478881fdb408241f82 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
r"""LIT Demo for a Toxicity model.
To run locally:
python -m lit_nlp.examples.toxicity_demo --port=5432
Once you see the ASCII-art LIT logo, navigate to localhost:5432 to access the
demo UI.
"""
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.examples.datasets import classification
from lit_nlp.examples.models import glue_models
TOXICITY_MODEL_PATH = "https://storage.googleapis.com/what-if-tool-resources/lit-models/toxicity.tar.gz" # pylint: disable=line-too-long
import transformers
TOXICITY_MODEL_PATH = transformers.file_utils.cached_path(TOXICITY_MODEL_PATH,
extract_compressed_file=True)
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
FLAGS.set_default("development_demo", True)
flags.DEFINE_string("model_path", TOXICITY_MODEL_PATH,
"Path to save trained model.")
flags.DEFINE_integer(
"max_examples", 1000, "Maximum number of examples to load into LIT. ")
if __name__ == "__main__":
app.run(main)
| 30.931034 | 137 | 0.755295 |
3bd7deac97fd990b363d6a3492c5b97386f321f3 | 2,669 | py | Python | portifolio_analysis.py | lucasHashi/app-calculate-stock-portifolio-division | 1e2d852215db29f9768bf509d2f52bdec5988ad4 | [
"MIT"
] | null | null | null | portifolio_analysis.py | lucasHashi/app-calculate-stock-portifolio-division | 1e2d852215db29f9768bf509d2f52bdec5988ad4 | [
"MIT"
] | null | null | null | portifolio_analysis.py | lucasHashi/app-calculate-stock-portifolio-division | 1e2d852215db29f9768bf509d2f52bdec5988ad4 | [
"MIT"
] | null | null | null | import pandas as pd
import streamlit as st
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "plotly_white"
| 32.54878 | 160 | 0.671787 |
3bd99b382f3bb66011f0a2e220815e6d5fe49246 | 864 | py | Python | clicrud/clicrud/helpers.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 9 | 2015-12-07T23:00:24.000Z | 2021-06-23T21:31:47.000Z | clicrud/clicrud/helpers.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 8 | 2016-04-05T12:36:54.000Z | 2017-05-15T16:00:08.000Z | clicrud/clicrud/helpers.py | DavidJohnGee/clicrud | f1f178ac44649efe7b7681d37e97d2632b8971b2 | [
"Apache-2.0"
] | 7 | 2016-06-02T23:39:05.000Z | 2021-03-25T20:52:46.000Z | """
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
| 24.685714 | 72 | 0.689815 |