hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
82e49f1ad3ed1e40ba856944b5ae80363654a869 | 133 | py | Python | ska_skeleton/__init__.py | Vinod-Sathe-Company-Limited/ska-skeleton | e93d131fc4d33d5b2f0cd715553fd5907955eccd | [
"BSD-3-Clause"
] | null | null | null | ska_skeleton/__init__.py | Vinod-Sathe-Company-Limited/ska-skeleton | e93d131fc4d33d5b2f0cd715553fd5907955eccd | [
"BSD-3-Clause"
] | null | null | null | ska_skeleton/__init__.py | Vinod-Sathe-Company-Limited/ska-skeleton | e93d131fc4d33d5b2f0cd715553fd5907955eccd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Module init code."""
__version__ = '0.0.0'
__author__ = 'Your Name'
__email__ = 'your.email@mail.com'
| 13.3 | 33 | 0.609023 |
82e67a1fd499cdf5d94a3a3ff757c622620968ef | 2,669 | py | Python | src/users.py | dtekcth/tvmannen | 47d9441ee4000dc3600ae1a28580ba95a5b46a2a | [
"MIT"
] | null | null | null | src/users.py | dtekcth/tvmannen | 47d9441ee4000dc3600ae1a28580ba95a5b46a2a | [
"MIT"
] | null | null | null | src/users.py | dtekcth/tvmannen | 47d9441ee4000dc3600ae1a28580ba95a5b46a2a | [
"MIT"
] | 1 | 2019-12-25T21:49:16.000Z | 2019-12-25T21:49:16.000Z | # Blueprint for user management in /admin/users and /admin/users/delete
from tv import login_manager, db
from flask_login import LoginManager, current_user, login_user, logout_user, login_required
from flask import Blueprint, flash, redirect, render_template, request
from data import User
from forms import RegistrationForm, ModifyUserForm
users_page = Blueprint("users", __name__)
# Page for listing, creating and deleting users
# Deletes a user on request for admin accounts
# Takes user_id "id" as argument
# User modification page, takes user id "id" as an argument
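# A minimal sketch of the delete endpoint described above; the route, the
# flask-sqlalchemy query API and the redirect target are assumptions, not
# the original implementation.
@users_page.route("/admin/users/delete")
@login_required
def delete_user():
    user = User.query.get(request.args.get("id"))
    if user is not None:
        db.session.delete(user)
        db.session.commit()
        flash("User deleted")
    return redirect("/admin/users")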
| 32.156627 | 91 | 0.683402 |
7d5335d6ee6e5dd4d8013184f474bc8d3185581f | 337 | py | Python | mxfield/models.py | krescruz/django-mxfield | 98855412d4414e239a74370380aed5d28b52eeb1 | [
"MIT"
] | null | null | null | mxfield/models.py | krescruz/django-mxfield | 98855412d4414e239a74370380aed5d28b52eeb1 | [
"MIT"
] | null | null | null | mxfield/models.py | krescruz/django-mxfield | 98855412d4414e239a74370380aed5d28b52eeb1 | [
"MIT"
] | null | null | null | from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
import validators
| 25.923077 | 55 | 0.759644 |
7d53f22522d63caa5e1b6eeef4ed280bfe59205b | 5,646 | py | Python | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_crypt.py | oba11/salt | ddc0286d57c5ce864b60bf43e5bc3007bf7c2549 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# python libs
from __future__ import absolute_import
import os
# salt testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.mock import patch, call, mock_open, NO_MOCK, NO_MOCK_REASON, MagicMock
# salt libs
import salt.utils
import salt.utils.files
from salt import crypt
# third-party libs
try:
from Cryptodome.PublicKey import RSA # pylint: disable=unused-import
HAS_PYCRYPTO_RSA = True
except ImportError:
HAS_PYCRYPTO_RSA = False
if not HAS_PYCRYPTO_RSA:
try:
from Crypto.PublicKey import RSA
HAS_PYCRYPTO_RSA = True
except ImportError:
HAS_PYCRYPTO_RSA = False
PRIVKEY_DATA = (
'-----BEGIN RSA PRIVATE KEY-----\n'
'MIIEpAIBAAKCAQEA75GR6ZTv5JOv90Vq8tKhKC7YQnhDIo2hM0HVziTEk5R4UQBW\n'
'a0CKytFMbTONY2msEDwX9iA0x7F5Lgj0X8eD4ZMsYqLzqjWMekLC8bjhxc+EuPo9\n'
'Dygu3mJ2VgRC7XhlFpmdo5NN8J2E7B/CNB3R4hOcMMZNZdi0xLtFoTfwU61UPfFX\n'
'14mV2laqLbvDEfQLJhUTDeFFV8EN5Z4H1ttLP3sMXJvc3EvM0JiDVj4l1TWFUHHz\n'
'eFgCA1Im0lv8i7PFrgW7nyMfK9uDSsUmIp7k6ai4tVzwkTmV5PsriP1ju88Lo3MB\n'
'4/sUmDv/JmlZ9YyzTO3Po8Uz3Aeq9HJWyBWHAQIDAQABAoIBAGOzBzBYZUWRGOgl\n'
'IY8QjTT12dY/ymC05GM6gMobjxuD7FZ5d32HDLu/QrknfS3kKlFPUQGDAbQhbbb0\n'
'zw6VL5NO9mfOPO2W/3FaG1sRgBQcerWonoSSSn8OJwVBHMFLG3a+U1Zh1UvPoiPK\n'
'S734swIM+zFpNYivGPvOm/muF/waFf8tF/47t1cwt/JGXYQnkG/P7z0vp47Irpsb\n'
'Yjw7vPe4BnbY6SppSxscW3KoV7GtJLFKIxAXbxsuJMF/rYe3O3w2VKJ1Sug1VDJl\n'
'/GytwAkSUer84WwP2b07Wn4c5pCnmLslMgXCLkENgi1NnJMhYVOnckxGDZk54hqP\n'
'9RbLnkkCgYEA/yKuWEvgdzYRYkqpzB0l9ka7Y00CV4Dha9Of6GjQi9i4VCJ/UFVr\n'
'UlhTo5y0ZzpcDAPcoZf5CFZsD90a/BpQ3YTtdln2MMCL/Kr3QFmetkmDrt+3wYnX\n'
'sKESfsa2nZdOATRpl1antpwyD4RzsAeOPwBiACj4fkq5iZJBSI0bxrMCgYEA8GFi\n'
'qAjgKh81/Uai6KWTOW2kX02LEMVRrnZLQ9VPPLGid4KZDDk1/dEfxjjkcyOxX1Ux\n'
'Klu4W8ZEdZyzPcJrfk7PdopfGOfrhWzkREK9C40H7ou/1jUecq/STPfSOmxh3Y+D\n'
'ifMNO6z4sQAHx8VaHaxVsJ7SGR/spr0pkZL+NXsCgYEA84rIgBKWB1W+TGRXJzdf\n'
'yHIGaCjXpm2pQMN3LmP3RrcuZWm0vBt94dHcrR5l+u/zc6iwEDTAjJvqdU4rdyEr\n'
'tfkwr7v6TNlQB3WvpWanIPyVzfVSNFX/ZWSsAgZvxYjr9ixw6vzWBXOeOb/Gqu7b\n'
'cvpLkjmJ0wxDhbXtyXKhZA8CgYBZyvcQb+hUs732M4mtQBSD0kohc5TsGdlOQ1AQ\n'
'McFcmbpnzDghkclyW8jzwdLMk9uxEeDAwuxWE/UEvhlSi6qdzxC+Zifp5NBc0fVe\n'
'7lMx2mfJGxj5CnSqQLVdHQHB4zSXkAGB6XHbBd0MOUeuvzDPfs2voVQ4IG3FR0oc\n'
'3/znuwKBgQChZGH3McQcxmLA28aUwOVbWssfXKdDCsiJO+PEXXlL0maO3SbnFn+Q\n'
'Tyf8oHI5cdP7AbwDSx9bUfRPjg9dKKmATBFr2bn216pjGxK0OjYOCntFTVr0psRB\n'
'CrKg52Qrq71/2l4V2NLQZU40Dr1bN9V+Ftd9L0pvpCAEAWpIbLXGDw==\n'
'-----END RSA PRIVATE KEY-----')
PUBKEY_DATA = (
'-----BEGIN PUBLIC KEY-----\n'
'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA75GR6ZTv5JOv90Vq8tKh\n'
'KC7YQnhDIo2hM0HVziTEk5R4UQBWa0CKytFMbTONY2msEDwX9iA0x7F5Lgj0X8eD\n'
'4ZMsYqLzqjWMekLC8bjhxc+EuPo9Dygu3mJ2VgRC7XhlFpmdo5NN8J2E7B/CNB3R\n'
'4hOcMMZNZdi0xLtFoTfwU61UPfFX14mV2laqLbvDEfQLJhUTDeFFV8EN5Z4H1ttL\n'
'P3sMXJvc3EvM0JiDVj4l1TWFUHHzeFgCA1Im0lv8i7PFrgW7nyMfK9uDSsUmIp7k\n'
'6ai4tVzwkTmV5PsriP1ju88Lo3MB4/sUmDv/JmlZ9YyzTO3Po8Uz3Aeq9HJWyBWH\n'
'AQIDAQAB\n'
'-----END PUBLIC KEY-----')
MSG = b'It\'s me, Mario'
SIG = (
b'\x07\xf3\xb1\xe7\xdb\x06\xf4_\xe2\xdc\xcb!F\xfb\xbex{W\x1d\xe4E'
b'\xd3\r\xc5\x90\xca(\x05\x1d\x99\x8b\x1aug\x9f\x95>\x94\x7f\xe3+'
b'\x12\xfa\x9c\xd4\xb8\x02]\x0e\xa5\xa3LL\xc3\xa2\x8f+\x83Z\x1b\x17'
b'\xbfT\xd3\xc7\xfd\x0b\xf4\xd7J\xfe^\x86q"I\xa3x\xbc\xd3$\xe9M<\xe1'
b'\x07\xad\xf2_\x9f\xfa\xf7g(~\xd8\xf5\xe7\xda-\xa3Ko\xfc.\x99\xcf'
b'\x9b\xb9\xc1U\x97\x82\'\xcb\xc6\x08\xaa\xa0\xe4\xd0\xc1+\xfc\x86'
b'\r\xe4y\xb1#\xd3\x1dS\x96D28\xc4\xd5\r\xd4\x98\x1a44"\xd7\xc2\xb4'
b']\xa7\x0f\xa7Db\x85G\x8c\xd6\x94!\x8af1O\xf6g\xd7\x03\xfd\xb3\xbc'
b'\xce\x9f\xe7\x015\xb8\x1d]AHK\xa0\x14m\xda=O\xa7\xde\xf2\xff\x9b'
b'\x8e\x83\xc8j\x11\x1a\x98\x85\xde\xc5\x91\x07\x84!\x12^4\xcb\xa8'
b'\x98\x8a\x8a&#\xb9(#?\x80\x15\x9eW\xb5\x12\xd1\x95S\xf2<G\xeb\xf1'
b'\x14H\xb2\xc4>\xc3A\xed\x86x~\xcfU\xd5Q\xfe~\x10\xd2\x9b')
| 49.526316 | 108 | 0.732554 |
7d54215d7a89cdc6dee240942d655951555aa1e4 | 628 | py | Python | gubbins/tests/utils_tests.py | doismellburning/django-gubbins | d94e91082adfe2ae7462209a5793b479429d40d9 | [
"BSD-2-Clause"
] | null | null | null | gubbins/tests/utils_tests.py | doismellburning/django-gubbins | d94e91082adfe2ae7462209a5793b479429d40d9 | [
"BSD-2-Clause"
] | 4 | 2018-12-20T13:02:40.000Z | 2018-12-21T16:09:20.000Z | gubbins/tests/utils_tests.py | doismellburning/django-gubbins | d94e91082adfe2ae7462209a5793b479429d40d9 | [
"BSD-2-Clause"
] | 2 | 2015-01-05T10:13:42.000Z | 2020-05-29T08:17:58.000Z | import unittest
from gubbins.utils import append_params
| 31.4 | 65 | 0.603503 |
7d553204536b771ce8440161d9597d5690c1a810 | 2,804 | py | Python | tests/components/test_power_output.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | tests/components/test_power_output.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | tests/components/test_power_output.py | Shivam60/j5 | 18069737644c8f1c95944386773c7643d5df5aeb | [
"MIT"
] | null | null | null | """Tests for the power output classes."""
from typing import List, Optional, Type
from j5.backends import Backend
from j5.boards import Board
from j5.components.power_output import PowerOutput, PowerOutputInterface
def test_power_output_interface_implementation():
"""Test that we can implement the PowerOutputInterface."""
MockPowerOutputDriver()
def test_power_output_instantiation():
"""Test that we can instantiate a PowerOutput."""
PowerOutput(0, MockPowerOutputBoard(), MockPowerOutputDriver())
def test_power_output_interface():
"""Test that the class returns the correct interface."""
assert PowerOutput.interface_class() is PowerOutputInterface
def test_power_output_enabled():
"""Test the is_enabled property of a PowerOutput."""
power_output = PowerOutput(0, MockPowerOutputBoard(), MockPowerOutputDriver())
assert power_output.is_enabled is False
power_output.is_enabled = True
assert power_output.is_enabled is True
def test_power_output_current():
"""Test the current property of a PowerOutput."""
power_output = PowerOutput(0, MockPowerOutputBoard(), MockPowerOutputDriver())
assert type(power_output.current) is float
assert power_output.current == 8.1
| 30.813187 | 82 | 0.690442 |
7d55cd544a02e7f8eda686f396f1e614dce7adb0 | 11,660 | py | Python | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 17 | 2020-03-13T00:10:28.000Z | 2021-09-06T17:13:17.000Z | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 1 | 2020-08-24T03:28:49.000Z | 2020-08-24T03:28:49.000Z | msg/tools/genmsg/test/test_genmsg_msgs.py | sikuner/Firmware_Marine | 80411dc4eb5aa9dc8eb3ca8ff6d59d1cf081a010 | [
"BSD-3-Clause"
] | 2 | 2020-03-13T09:05:32.000Z | 2021-08-13T08:28:14.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import random
PKG = 'genmsg'
| 38.996656 | 180 | 0.620583 |
7d565d78426b6ee97241efc8582c656e0fcdebc5 | 4,118 | py | Python | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/stadtreinigung_hamburg.py | UBS-P/hacs_waste_collection_schedule | 9ce0fd55010bbab3948f1ee0aa5edb4b65b7d866 | [
"MIT"
] | 142 | 2020-04-13T18:56:12.000Z | 2022-03-30T19:44:08.000Z | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/stadtreinigung_hamburg.py | UBS-P/hacs_waste_collection_schedule | 9ce0fd55010bbab3948f1ee0aa5edb4b65b7d866 | [
"MIT"
] | 138 | 2020-04-30T18:11:30.000Z | 2022-03-30T20:56:33.000Z | custom_components/waste_collection_schedule/waste_collection_schedule/wizard/stadtreinigung_hamburg.py | UBS-P/hacs_waste_collection_schedule | 9ce0fd55010bbab3948f1ee0aa5edb4b65b7d866 | [
"MIT"
] | 89 | 2020-06-16T05:13:08.000Z | 2022-03-28T09:28:25.000Z | #!/usr/bin/env python3
from html.parser import HTMLParser
import inquirer
import requests
# Parser for HTML input
# Parser for HTML option list
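# The two parser classes below are minimal sketches (assumptions): their
# original definitions were lost, but the call sites show that InputParser
# extracts the value of a named <input> element and OptionParser collects
# the entries of a named <select> into (label, value) choices.
class InputParser(HTMLParser):
    def __init__(self, input_name):
        super().__init__()
        self._input_name = input_name
        self.value = None

    def handle_starttag(self, tag, attrs):
        if tag == "input":
            attrs = dict(attrs)
            if attrs.get("name") == self._input_name:
                self.value = attrs.get("value")


class OptionParser(HTMLParser):
    def __init__(self, select_name):
        super().__init__()
        self._select_name = select_name
        self._in_select = False
        self._value = None
        self.choices = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == "select" and attrs.get("name") == self._select_name:
            self._in_select = True
        elif tag == "option" and self._in_select:
            self._value = attrs.get("value")

    def handle_endtag(self, tag):
        if tag == "select":
            self._in_select = False
        elif tag == "option":
            self._value = None

    def handle_data(self, data):
        if self._value is not None and data.strip():
            self.choices.append((data.strip(), self._value))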
def main():
# search for street
questions = [
inquirer.Text("strasse", message="Enter search string for street"),
# inquirer.Text("hausnummer", message="Enter search string for house number"),
]
answers = inquirer.prompt(questions)
answers["hausnummer"] = ""
answers["bestaetigung"] = "true"
answers["mode"] = "search"
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# search for street
input_parser = InputParser(input_name="asId")
input_parser.feed(r.text)
if input_parser.value is not None:
answers["asId"] = input_parser.value
else:
# query returned a list of streets
parser = OptionParser(select_name="asId")
parser.feed(r.text)
questions = [
inquirer.List("asId", choices=parser.choices, message="Select street")
]
answers.update(inquirer.prompt(questions))
# search for building number
r = requests.post(
"https://www.stadtreinigung.hamburg/privatkunden/abfuhrkalender/index.html",
data=answers,
)
# parser HTML option list
parser = OptionParser(select_name="hnId")
parser.feed(r.text)
if len(parser.choices) == 0:
answers["hnId"] = ""
else:
questions = [
inquirer.List("hnId", choices=parser.choices, message="Select house number")
]
answers.update(inquirer.prompt(questions))
print("Copy the following statements into your configuration.yaml:\n")
print("# waste_collection_schedule source configuration")
print("waste_collection_schedule:")
print(" sources:")
print(" - name: stadtreinigung_hamburg")
print(" args:")
print(f" asId: {answers['asId']}")
print(f" hnId: {answers['hnId']}")
if __name__ == "__main__":
main()
| 29.205674 | 93 | 0.573579 |
7d56702dbd9fe5b8f3529654e0855fa2b7b8f074 | 1,480 | py | Python | pythonstudy/convert.py | flyonskycn/pythonstudy | c2eabe40ed369046c80ba9882b2212feb34cdad6 | [
"Apache-2.0"
] | null | null | null | pythonstudy/convert.py | flyonskycn/pythonstudy | c2eabe40ed369046c80ba9882b2212feb34cdad6 | [
"Apache-2.0"
] | null | null | null | pythonstudy/convert.py | flyonskycn/pythonstudy | c2eabe40ed369046c80ba9882b2212feb34cdad6 | [
"Apache-2.0"
] | null | null | null | import chardet
import sys
import codecs
import os
def getAllFile(path, suffix='.'):
"recursive is enable"
f = os.walk(path)
fpath = []
for root, dir, fname in f:
for name in fname:
if name.endswith(suffix):
fpath.append(os.path.join(root, name))
return fpath
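# convertAll is called below but its definition was lost; this is a minimal
# sketch of the assumed behaviour (detect each file's encoding with chardet
# and rewrite non-UTF-8 files as UTF-8). The '.log' suffix default is a guess.
def convertAll(path, suffix='.log'):
    for fname in getAllFile(path, suffix):
        with open(fname, 'rb') as fin:
            raw = fin.read()
        enc = chardet.detect(raw).get('encoding')
        if enc is None or enc.lower() in ('utf-8', 'ascii'):
            continue
        text = raw.decode(enc, errors='replace')
        with codecs.open(fname, 'w', 'utf-8') as fout:
            fout.write(text)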
if __name__ == "__main__":
path = 'E:\\logs'
if len(sys.argv) == 1:
path = os.getcwd()
elif len(sys.argv) == 2:
path = sys.argv[1]
else:
print("error parameter")
exit()
convertAll(path) | 23.492063 | 84 | 0.531081 |
7d56e588d7a6fdb0c64b6925b9b5823ebec11f36 | 4,547 | py | Python | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | tests/tests.py | arck1/aio-counter | ffff58bf14ca2f155be5a54c9385481fce5ee58c | [
"MIT"
] | null | null | null | import unittest
from asyncio import sleep
from async_unittest import TestCase
from aio_counter import AioCounter
from aio_counter.exceptions import AioCounterException
class TestAioCounter(TestCase):
    # The original test-case class wrapper was lost; this is a minimal
    # reconstruction (the tests clearly expect a TestCase exposing an
    # AioCounter instance as self.counter; the default constructor is an
    # assumption).
    def setUp(self):
        self.counter = AioCounter()

    def test_dec_nowait(self):
        assert self.counter.empty()
        try:
            self.counter.dec_nowait()
        except AioCounterException as e:
            assert e
        else:
            assert False

        count = self.counter.inc_nowait()
        assert count == 1
        assert self.counter.count == 1

        count = self.counter.dec_nowait()
        assert count == 0
        assert self.counter.count == 0

    def test_inc_nowait(self):
        assert self.counter.empty()
        count = self.counter.inc_nowait()
        assert count == 1
        assert self.counter.count == 1

        # fill counter
        self.counter._count = self.counter.max_count
        try:
            self.counter.inc_nowait()
        except AioCounterException as e:
            assert e
        else:
            assert False
if __name__ == '__main__':
unittest.main()
| 25.544944 | 96 | 0.61667 |
7d57683f060246ecdbe9fa25924715de937635d2 | 67 | py | Python | dexp/processing/remove_beads/__init__.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 16 | 2021-04-21T14:09:19.000Z | 2022-03-22T02:30:59.000Z | dexp/processing/remove_beads/__init__.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 28 | 2021-04-15T17:43:08.000Z | 2022-03-29T16:08:35.000Z | dexp/processing/remove_beads/__init__.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 3 | 2022-02-08T17:41:30.000Z | 2022-03-18T15:32:27.000Z | from dexp.processing.remove_beads.beadsremover import BeadsRemover
| 33.5 | 66 | 0.895522 |
7d57cb53958a854e64b6d878a9826f34dbca7a63 | 96 | py | Python | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pip/_internal/operations/install/editable_legacy.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/6e/30/4e/6df13ab33dd498623bcb8f860a029ad969938275a514553b6fe8b4b10b | 96 | 96 | 0.895833 |
7d58040a8760df0e7d462d968892a9628d5e39f3 | 8,960 | py | Python | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | 2 | 2021-06-30T11:12:31.000Z | 2021-09-24T08:50:03.000Z | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | null | null | null | corrector_module/opmon_corrector/corrector_worker.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | 2 | 2021-07-02T12:31:37.000Z | 2021-11-09T08:44:09.000Z |
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import queue
from . import database_manager
| 48.172043 | 136 | 0.60904 |
7d5889cacaec1535d87725d19f570fd238dc7beb | 724 | py | Python | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
] | 102 | 2020-05-18T04:52:26.000Z | 2022-03-29T06:53:10.000Z | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
] | 14 | 2020-06-04T11:12:33.000Z | 2022-03-14T20:55:00.000Z | autosa_tests/large/mm_int16/unroll.py | mfkiwl/AutoSA-SystolicArray | 699742eefda66bd3fd6cac608f7c96f5bf60a2a0 | [
"MIT"
] | 26 | 2020-05-20T02:47:04.000Z | 2022-03-16T15:09:41.000Z | import math
# Modify the parameters here
UNROLL_FACTOR = 32
DATA_T = 'unsigned short'
# Generate the code
data_type = DATA_T
level = int(math.log2(UNROLL_FACTOR))
for layer in range(level - 1, -1, -1):
pair = int(math.pow(2, layer))
for i in range(pair):
# data_t tmp_[layer]_[pair] = tmp_[layer+1]_[pair*2]_[pair*2+1]
if layer == level - 1:
print(f'{data_type} mul_{layer}_{i}_0 = local_A[0][{i*2}] * local_B[0][{i*2}];')
print(f'{data_type} add_{layer}_{i} = mul_{layer}_{i}_0 + local_A[0][{i*2+1}] * local_B[0][{i*2+1}];')
else:
print(f'{data_type} add_{layer}_{i} = add_{layer+1}_{i*2} + add_{layer+1}_{i*2+1};')
print('local_C[c7][c6] += add_0_0;')
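# For reference, with UNROLL_FACTOR = 4 (level = 2) the script prints:
#   unsigned short mul_1_0_0 = local_A[0][0] * local_B[0][0];
#   unsigned short add_1_0 = mul_1_0_0 + local_A[0][1] * local_B[0][1];
#   unsigned short mul_1_1_0 = local_A[0][2] * local_B[0][2];
#   unsigned short add_1_1 = mul_1_1_0 + local_A[0][3] * local_B[0][3];
#   unsigned short add_0_0 = add_1_0 + add_1_1;
#   local_C[c7][c6] += add_0_0;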
| 36.2 | 114 | 0.592541 |
7d589dd1f59c435f5b8daa7514686b5a0b85423d | 4,451 | py | Python | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
] | 1 | 2018-02-11T03:32:22.000Z | 2018-02-11T03:32:22.000Z | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
] | null | null | null | battlecode-manager/player_plain.py | gruzzlymug/ddg-2018 | 76f598f7548ad51b126ec9efb7da0fd0d4a306c2 | [
"MIT"
] | null | null | null | import os
import psutil
import subprocess
import threading
import sys
from threading import Timer
import select
from player_abstract import AbstractPlayer
def reap(process, timeout=3):
"Tries hard to terminate and ultimately kill all the children of this process."
try:
procs = process.children(recursive=True)
# send SIGTERM
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=timeout, callback=on_terminate)
if alive:
# send SIGKILL
for p in alive:
p.kill()
gone, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
if alive:
# give up
for p in alive:
print("process {} survived SIGKILL; giving up" % p.pid)
process.kill()
except:
print("Killing failed; assuming process exited early.")
| 32.253623 | 119 | 0.599191 |
7d58f75c60cd92e49b8842d06b9c5d9c9a1f2ca8 | 91 | py | Python | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
] | 147 | 2019-05-10T20:46:42.000Z | 2022-03-25T17:23:19.000Z | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
] | 306 | 2019-04-26T08:56:05.000Z | 2022-03-30T11:12:48.000Z | skfda/exploratory/__init__.py | jiduque/scikit-fda | 5ea71e78854801b259aa3a01eb6b154aa63bf54b | [
"BSD-3-Clause"
] | 38 | 2019-09-03T17:24:04.000Z | 2022-01-06T05:09:18.000Z | from . import depth
from . import outliers
from . import stats
from . import visualization
| 18.2 | 27 | 0.78022 |
7d5919e7ea877027b781af2973db1c3cf8b3e549 | 4,726 | py | Python | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
] | null | null | null | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
] | null | null | null | jassen/django/project/blog/views.py | cabilangan112/intern-drf-blog | b2d6c7a4af1316b2c7ce38547bd9df99b4f3e8b9 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework import viewsets, status
from .models import Post,Comment,Category,Tag
from .serializers import PostSerializer,CommentSerializer,CategorySerializer,TagSerializer
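# The viewset bodies were lost; these are minimal ModelViewSet sketches
# (an assumption: plain CRUD viewsets wired to the imported models and
# serializers — the original may have customized methods).
class PostViewSet(viewsets.ModelViewSet):
    queryset = Post.objects.all()
    serializer_class = PostSerializer

class CommentViewSet(viewsets.ModelViewSet):
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer

class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer

class TagViewSet(viewsets.ModelViewSet):
    queryset = Tag.objects.all()
    serializer_class = TagSerializer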
| 34.75 | 90 | 0.658697 |
7d5a512e475a15e2cba00eeed5fa7df50d174682 | 15,479 | py | Python | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
] | null | null | null | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
] | null | null | null | loopchain/rest_server/rest_server_rs.py | ahastudio/loopchain | 88b76956c069fedc1a0a2d239f47c3866493ad0f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for restful API server of Radio station"""
import _ssl
import base64
import json
import logging
import pickle
import ssl
from concurrent import futures
from typing import List
import grpc
from sanic import Sanic, response
from sanic.views import HTTPMethodView
from loopchain import configure as conf, utils
from loopchain.baseservice import PeerManager, PeerStatus
from loopchain.baseservice import StubManager
from loopchain.baseservice.ca_service import CAService
from loopchain.components import SingletonMetaClass
from loopchain.protos import loopchain_pb2, loopchain_pb2_grpc, message_code
from loopchain.utils import loggers
class Peer(HTTPMethodView):
__REQUEST_TYPE = {
'PEER_LIST': 'list',
'LEADER_PEER': 'leader',
'PEER_STATUS': 'status',
'PEER_STATUS_LIST': 'status-list'
}
class Configuration(HTTPMethodView):
    pass  # request handlers omitted
| 38.600998 | 122 | 0.640481 |
7d5ba93142fb8ff5765303ca6b3001d2cd9dccdf | 10,178 | py | Python | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/storage/test_impl_sqlalchemy.py | aristanetworks/ceilometer | 8776b137f82f71eef1241bcb1600de10c1f77394 | [
"Apache-2.0"
] | null | null | null | #
# Author: John Tran <jhtran@att.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/storage/impl_sqlalchemy.py
.. note::
In order to run the tests against real SQL server set the environment
variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
the tests.
"""
import datetime
import repr
import mock
from ceilometer.alarm.storage import impl_sqlalchemy as impl_sqla_alarm
from ceilometer.openstack.common import timeutils
from ceilometer.storage import impl_sqlalchemy
from ceilometer.storage import models
from ceilometer.storage.sqlalchemy import models as sql_models
from ceilometer.tests import base as test_base
from ceilometer.tests import db as tests_db
from ceilometer.tests.storage import test_storage_scenarios as scenarios
class MyException(Exception):
pass
class CapabilitiesTest(test_base.BaseTestCase):
    # Check the returned capabilities list, which is specific to each DB
    # driver
    pass
| 40.070866 | 78 | 0.610434 |
7d5e7f22dbe1241e9828565a5008c4bed0402c69 | 17,906 | py | Python | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
] | null | null | null | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
] | null | null | null | ProjectManager.py | kojingharang/ManagerKit | 6efb9b2290b62e0bd3fe88eb3dc814d066f72f02 | [
"MIT"
] | null | null | null | from collections import namedtuple
import datetime
import pprint
import sys
import copy
import json
def expandStatusValue(v):
"""
v : string | (string, datetime.date | None)
string (string, None) .
"""
if isinstance(v, str):
v = (v, None)
return v
"""
title:
url:
owner:
status:
"" :
"o" :
"v" :
startDate:
"" | "yyyy-mm-dd"
endDate
blocking:
doc:
milestones:
(finishDate : datetime.date | None, title : string)[]
"""
colorDone = "#DDFADE"
colorDoing = "#E0F0FF"
def hsv2rgb(hsv):
"""
hsv: [h, s, v]
h in [0, 360]
s in [0, 1]
v in [0, 1]
return [r, g, b]
r, g, b in [0, 1]
"""
h = hsv[0]
s = hsv[1]
v = hsv[2]
hd = h/60; # in [0, 6]
r = v
g = v
b = v
if s > 0:
hdi = max(0, min(5, int(hd)));
f = hd - hdi
if hdi==0:
g *= 1 - s * (1-f)
b *= 1 - s
elif hdi==1:
r *= 1 - s * f
b *= 1 - s
elif hdi==2:
r *= 1 - s
b *= 1 - s * (1-f)
elif hdi==3:
r *= 1 - s
g *= 1 - s * f
elif hdi==4:
r *= 1 - s * (1-f)
g *= 1 - s
elif hdi==5:
g *= 1 - s
b *= 1 - s * f
return [r, g, b]
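# The small helpers below are referenced later in this file but their
# definitions were lost; each is a minimal sketch of the behaviour implied
# by its call sites, not the original code.
def formatDate(d):
	return "{0:%Y-%m-%d}".format(d)

def rgb2hex(rgb):
	# [r, g, b] in [0, 1] -> "#RRGGBB"
	return "#%02X%02X%02X" % tuple(int(round(v * 255)) for v in rgb)

def sortFun(p):
	# Sort key for the project list (priority first is an assumption).
	return (p.priority, p.title)

def statusCell(status, name, label):
	# One status cell; assumes status maps a status name to a value accepted
	# by expandStatusValue ("" = not started, "o" = in progress, "v" = done).
	value, date = expandStatusValue(status.get(name, ""))
	color = {"v": colorDone, "o": colorDoing}.get(value, "white")
	text = formatDate(date) if date else ""
	return """<td style="background-color: {color}; font-size: 0.7em;">{text}</td>""".format(**vars())

def getFreePerson(freeDates):
	# The person who becomes free the earliest.
	return min(freeDates, key=freeDates.get)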
def genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels):
"""
getLabels: index:int, project -> label[]
"""
### Generate milestone list
# milestones: (datetime.date, label)[]
milestones = sum([ p.getMilestones(status_master) for p in projects], []) + additional_milestones
milestones = sorted(milestones)
s = []
for d, l in milestones:
color = "black" if datetime.date.today() <= d else "#c0c0c0"
tentative = " ()" if datetime.date.today() <= d else ""
s.append("<li style='color:"+color+"'>"+formatDate(d)+tentative+" "+l+"</li><br>")
s = "\n".join(s)
html = """
<ul>
<li>Milestones</li>
<ul>
{s}
</ul>
</ul>
<div id="filters"> (AND): </div>
""".format(**vars())
### Generate project list
projects = sorted(projects, key=sortFun)
statusTitles = "".join([ """<td style="width: 5%;">{label}</td>""".format(**vars()) for name, label in status_master])
html += """
<html><body><table class="projects">
<tr class="title">
<td style="width: 5%;"></td>
<td style="width: 5%;"></td>
<td></td>
{statusTitles}
<td style="width: 5%;"></td>
<td style="width: 10%;"></td>
<td style="width: 10%;">()</td>
</tr>
""".format(**vars())
labels = {}
for i, p in enumerate(projects):
if p.startDate:
startS = "{0:%Y-%m-%d}".format(p.startDate)
endS = "{0:%Y-%m-%d}".format(p.endDate)
schedule = "{startS}<br>{endS}".format(**vars())
if p.isDone():
schedule = ""
title = p.title
if p.url:
title = """<a href="{p.url}">{title}</a>""".format(**vars())
# status = StatusDetail(p.status)
statusTitles = "".join([ statusCell(p.status, name, label) for name, label in status_master])
trCol = "white" if i%2==0 else "#f0f0f0"
schedule_bg = "background-color: "+colorDoing+";" if p.doing() else ""
index = i+1
owner_note = ""
doc_note = ""
if p.orig_owner=="":
owner_note = "()"
doc_note = "(TODO )"
tasks = ""
if p.epic:
link = ticketLinkFun(p.epic)
style = """background-color: darkgreen; color: white; text-decoration: none; font-size: 0.8em; padding: 4px; border-radius: 10px;"""
tasks = """<a href="{link}" target="_blank" style="{style}">Tasks</a>""".format(**vars())
odd = "odd" if i%2==0 else ""
id = "project%04d" % i
labels[id] = getLabels(i, p)
html += """
<tr style="background-color: {trCol}" id="{id}">
<td>{index}</td>
<td>{p.priority}</td>
<td>
<a name="{p.codeName}"></a>
<span style="font-size: 0.8em; font-weight: bold; color: #5050c0;">
<a style="text-decoration: none;" href="#{p.codeName}">{p.codeName}</a>
</span>
{tasks}<br>
{title}
</td>
{statusTitles}
<td>{p.owner}{owner_note}</td>
<td>{p.doc}{doc_note}<span style="color: red;">{p.blocking}</span></td>
<td style="font-size: 0.5em;{schedule_bg}">{schedule}</td>
</tr>
""".format(**vars())
html += """
</table></body></html>
"""
return html, labels
def Xsect(p, pp):
	# Overlap test between two scheduled projects, reconstructed from the
	# interval version that used to live here: not (e1 < s0 or e0 < s1).
	return not (pp.endDate < p.startDate or p.endDate < pp.startDate)
def dupCheck(p, projects):
"""
True .
"""
if p.isDone():
return True
if not p.fixed():
return True
for pp in projects:
if pp.fixed() and not pp.isDone() and p.owner==pp.owner and p.title != pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return False
return True
def isClone(name):
"""
.
.
"""
return any([str(i) in name for i in range(10)])
def assign(projects, people):
"""
return
Dict
person -> project[]
"""
# PJ( x PJ)
# TODO startDate
# ->
freeDates = dict([(p, datetime.date.min) for p, _ in people])
# owner -> {startDate, project}[]
schedule = {}
"""
startDateFixed
canStart
blocking
"""
for phase in ["startDateFixed", "canStart", "blocking"]:
print("\nPhase", phase, "\n")
if phase=="canStart":
for k in freeDates:
freeDates[k] = max(freeDates[k], datetime.date.today())
for i, p in enumerate(sorted(projects, key=lambda v: (v.priority, v.title))):
if phase!="blocking" and p.blocking:
continue
if phase=="startDateFixed" and p.startDate is None:
continue
if p.isDone():
continue
if p.put:
continue
print("Try to put", p.title)
person = p.owner
if person=="":
person = getFreePerson(freeDates)
# print(person)
origStartDate = p.startDate
origEndDate = p.endDate
if p.blocking:
# Later
p.startDate = datetime.date.today() + datetime.timedelta(365*3+i*30)
p.endDate = p.startDate + datetime.timedelta(30)
if p.startDate is None:
p.startDate = freeDates[person]
if p.endDate is None:
p.endDate = p.startDate + datetime.timedelta(90)
if not dupCheck(p, projects):
p.startDate = origStartDate
p.endDate = origEndDate
# continue
sys.exit(0)
schedule.setdefault(person, [])
p.owner = person
print("Put", p.title, p.startDate, p.endDate, person)
schedule[person].append(p)
p.put = True
freeDates[person] = max(freeDates[person], p.endDate + datetime.timedelta(1))
#pprint.pprint(freeDates)
# pprint.pprint(schedule)
# for p in projects:
# print("[]", p.title, p.startDate, p.endDate)
for p in projects:
if not p.isDone():
for pp in projects:
if not pp.isDone() and p.title != pp.title and p.owner==pp.owner and p.title < pp.title:
if Xsect(p, pp):
print("[CONFLICT]", p.title, p.startDate, p.endDate, p.owner, "AND", pp.title, pp.startDate, pp.endDate, pp.owner)
return schedule
def genScheduleHtml(projects, schedule, people, ticketLinkFun):
"""
schedule
Dict
person -> project[]
"""
# date x
allDates = [ d for ps in schedule.values() for p in ps for d in [p.startDate, p.endDate]]
minDate = min(allDates)
maxDate = max(allDates)
colors = [ rgb2hex(hsv2rgb([i/len(projects)*360, 0.1, 1])) for i in range(len(projects)) ]
startDateIndex = minDate.toordinal()
endDateIndex = maxDate.toordinal()
N = endDateIndex - startDateIndex + 1
# print(N)
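	# createRow and tableToHtml were lost; minimal sketches of the assumed
	# behaviour, inferred from how the table cells are written below.
	def createRow():
		# One [text, style] cell for the date column plus one per person.
		return [["", ""] for _ in range(len(schedule) + 1)]
	def tableToHtml(table):
		html = '<table class="schedule">'
		for row in table:
			html += "<tr>" + "".join('<td style="%s">%s</td>' % (style, text) for text, style in row) + "</tr>"
		html += "</table>"
		return html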
table = {0: createRow()}
	# Make sure a row exists for the 1st, 15th and 30th of each month
for i in range(10000):
d = minDate + datetime.timedelta(i)
if maxDate < d:
break
if d.day in [1, 15, 30]:
table.setdefault(d.toordinal(), createRow())
wp = 95/len(people)
	# Put each project's title cell at its start date
for i, (person, ps) in enumerate(sorted(schedule.items())):
if person not in [p for p, _ in people]:
continue
for p in ps:
# print(p.startDate, p.endDate)
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in [si, ei]:
table.setdefault(d, createRow())
if d==si:
title = p.title
if p.url:
title = """
<a href="{p.url}">{title}</a>
""".format(**vars())
title += "<br>"
doc = p.doc.replace("\n", "<br>")
title += """
<span style="font-size: 0.8em;">{doc}</span>""".format(**vars())
title += """<br><span style="color: red;">{p.blocking}</span>""".format(**vars())
table[d][i+1][0] = title
table[d][i+1][1] = "font-size: 1em;"
	# Color every row inside each project's start..end range
for i, (person, ps) in enumerate(sorted(schedule.items())):
for p in ps:
si = p.startDate.toordinal()
ei = p.endDate.toordinal()
for d in sorted(table.keys()):
if si <= d and d <= ei:
col = colors[p.index]
table[d][i+1][1] += "width: {wp}%; background-color: {col};".format(**vars())
	# Fill the date column; highlight the current month
today = datetime.date.today()
for d in table:
if d==0:
continue
da = datetime.date.fromordinal(d)
s = "{0:%Y-%m-%d}".format(da)
col = "white" if da.month % 2==0 else "#e0e0e0"
if da.year==today.year and da.month==today.month:
col = "#c0ffff"
style = "vertical-align: top; width: 5%; font-size: 3px; background-color: "+col+";"
table[d][0] = [s, style]
table = [ table[k] for k in sorted(table.keys()) ]
# pprint.pprint(table)
def createHeader():
"""
"""
row = [["", ""]]
for i, (person, ps) in enumerate(sorted(schedule.items())):
			row.append([person, "width: {0}%; background-color: #e0e0e0".format(wp)])
return row
for i in range(0, len(table), 10):
table.insert(i, createHeader())
return tableToHtml(table)
######################
######################
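def createTasksHtml(titleAndEpics, members, ticketLinkFun):
	# Sketch of the lost implementation (an assumption): write a simple
	# tasks.html that links each project's epic in priority order.
	# members is accepted for API compatibility; per-member views were part
	# of the lost original.
	html = "<html><body><table>"
	for title, epic in titleAndEpics:
		link = ticketLinkFun(epic)
		html += """<tr><td><a href="{link}">{title}</a></td></tr>""".format(**vars())
	html += "</table></body></html>"
	with open("tasks.html", "w") as f:
		print(html, file=f)
	print("[ProjectManager.run] OK. Wrote tasks.html")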
def run(projects, people, status_master, ticketLinkFun, css="", project_list_header="", schedule_header="",
statusFilename="status.html",
tasksFilename="tasks.html",
additional_milestones=[],
getLabels=lambda i, p: []):
"""
people:
(Name, NameInTicketSystem)[]
ticketLinkFun:
epic : string, assignee : string, label : string -> url : string
milestones:
(datetime.date, label)[]
"""
codeNames = {}
for p in projects:
codeNames.setdefault(p.codeName, 0)
codeNames[p.codeName] += 1
bad = False
for k, v in codeNames.items():
if 1 < v:
print("[ERROR] Duplicate code name:", k, "(", v, "projects)")
bad = True
if bad:
print()
return
for i, p in enumerate(projects):
p.index = i
names = [ name for name, _ in people ]
if p.owner and p.owner not in names:
people.append((p.owner, ""))
people = list(set(people))
schedule = assign(projects, people)
projectsHtml, labels = genProjectListHtml(projects, status_master, ticketLinkFun, additional_milestones, getLabels)
scheduleHtml = genScheduleHtml(projects, schedule, people, ticketLinkFun)
css = """
body {
margin: 0;
}
h1 {
font-size: 1.2em;
background-color: darkgreen;
color: white;
padding: 10px;
}
table {
border-spacing: 1;
margin-left: 20px;
}
table.projects tr.title td {
color: white;
padding: 5px;
}
table.projects tr.title {
background-color: darkgreen;
}
table.example tr td {
margin: 20px;
font-size: 0.9em;
}
table.schedule {
border-spacing: 0;
}
table.schedule tr td {
padding: 0;
}
#filters {
padding: 20px;
}
span.filter {
cursor: pointer;
padding: 20px;
border-radius: 40px;
margin: 10px;
}
""" + css
example = """
<table class="example"><tr>
<td style="background-color: white;"></td>
<td style="background-color: {colorDoing};"></td>
<td style="background-color: {colorDone};"></td>
</tr></table>
""".format(**globals())
projectLabels = json.dumps(labels)
labelsMaster = getLabels(0, None)
filters = json.dumps([ name for name, label in labelsMaster ])
filterLabels = json.dumps([ label for name, label in labelsMaster ])
vs = """
// Master data
var filters = {filters};
var filterLabels = {filterLabels};
var projectLabels = {projectLabels};
""".format(**vars())
ready = vs + """
// Currently enabled filters: name -> bool
var filterEnabled = {};
// Show a project only when its labels contain every enabled filter
function applyFilters() {
Object.keys(projectLabels).forEach(function(eid) {
var labels = projectLabels[eid];
// console.log(eid, labels);
var show = true;
// Check all enabled filters are in labels
for(var fi=0;fi<filters.length;fi++) {
if(filterEnabled[filters[fi]]) {
var lok = 0;
for(var li=0;li<labels.length;li++) {
if(labels[li] == filters[fi]) lok=1;
}
if(!lok) show=false;
}
}
// console.log(show);
$("#"+eid).toggle(show);
});
for(var i=0;i<filters.length;i++) {
$(".filter#"+filters[i]).css({"background-color": filterEnabled[filters[i]] ? "#aaffaa" : "#eeeeee"});
}
// console.log(filterEnabled);
}
$(document).ready(function(){
	// Build one clickable span per filter
var html = "";
for(var i=0;i<filters.length;i++) {
var name = filters[i];
html += '<span class="filter" id="'+name+'">'+filterLabels[i]+'</span>';
}
$("#filters").html($("#filters").html() + html);
	// Toggle a filter on click and re-apply
$(".filter").on("click", function(event) {
var name = $(event.target).attr("id");
filterEnabled[name] = !filterEnabled[name];
applyFilters();
});
applyFilters();
});
"""
html = """
<html>
<head>
<meta charset="utf-8" />
<script type="text/javascript" src="jquery-3.2.1.min.js"></script>
<style>
{css}
</style>
<script>
{ready}
</script>
</head>
<body>
{project_list_header}
<br><br>
{example}
<br><br>
{projectsHtml}
<br><br>
{schedule_header}
{scheduleHtml}
<hr>
<a href="https://github.com/kojingharang/ManagerKit/blob/master/ProjectManager.py">Source</a>
</body>
</html>
""".format(**vars())
with open(statusFilename, "w") as f:
print(html, file=f)
print("[ProjectManager.run] OK. Wrote", statusFilename)
titleAndEpics = [(p.title, p.epic) for p in sorted(projects, key=lambda p: p.priority) if p.epic and not p.isDone()]
members = [ name for _, name in people if name]
createTasksHtml(titleAndEpics, members, ticketLinkFun)
| 24.629986 | 135 | 0.627667 |
7d60c0b18a3d86b57134273bbd22d9fd56431efb | 18,643 | py | Python | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
] | 2 | 2016-09-29T09:24:22.000Z | 2021-01-15T06:11:04.000Z | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
] | null | null | null | asteroids/whatsobservable.py | mcnowinski/various-and-sundry | ec0038d52f43435a45bf4fd1975315ad08fce560 | [
"MIT"
] | null | null | null | import datetime
import ephem
import os.path
import os
import numpy as np
import pdb
from pandas import DataFrame
__version__ = '0.1.2'
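# The module's small helpers were lost in extraction; the sketches below are
# reconstructions inferred from the call sites (assumptions, not originals).
class Error(Exception):
    pass


def _convert_datetime_to_pyephem_date_string(in_datetime):
    # PyEphem accepts 'YYYY/MM/DD HH:MM:SS' date strings.
    return in_datetime.strftime('%Y/%m/%d %H:%M:%S')


def _find_cached_file(filename):
    # Look next to this module and in the working directory; the sentinel
    # string matches the check in minorplanets().
    for d in (os.path.dirname(os.path.abspath(__file__)), os.getcwd()):
        candidate = os.path.join(d, filename)
        if os.path.isfile(candidate):
            return candidate
    return 'File Not Found'


def get_latlon_from_observatory_code(code):
    # Minimal sketch: look the code up in a cached copy of
    # http://www.minorplanetcenter.net/iau/lists/ObsCodes.html whose rows are
    # 'Code Long. cos sin Name'; latitude is recovered from the parallax
    # constants with a geocentric approximation.
    code = str(code)
    with open(_find_cached_file('ObsCodes.html')) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 4 and parts[0] == code:
                lon = float(parts[1])
                rho_cos, rho_sin = float(parts[2]), float(parts[3])
                return {'longitude': lon,
                        'latitude': np.degrees(np.arctan2(rho_sin, rho_cos))}
    raise Error('Observatory code not found: ' + code)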
def pack_mpc_date(in_datetime):
"""
Convert a datetime.date or datetime.datetime object into the MPC packed date format, as described at:
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
if in_datetime.year >= 1800 and in_datetime.year < 1900:
century = 'I'
elif in_datetime.year >= 1900 and in_datetime.year < 2000:
century = 'J'
elif in_datetime.year >= 2000 and in_datetime.year < 2100:
century = 'K'
else:
raise Error("Year is not within 1800-2099: " + in_datetime.isoformat())
year = in_datetime.strftime('%y')
translate = {}
for i in range(10):
translate[i] = str(i)
for i in range(10,32):
translate[i] = chr(ord('A') + i - 10)
month = translate[in_datetime.month]
day = translate[in_datetime.day]
try:
decimaldays = ('%7.5f' % ((in_datetime.hour + (in_datetime.minute / 60.) + (in_datetime.second / 3600.)) / 24.))[2:]
except:
decimaldays = ''
return century + year + month + day + decimaldays
def unpack_mpc_date(in_packed):
"""
Convert a MPC packed date format (as described below) to a datetime.date or datetime.datetime object
http://www.minorplanetcenter.net/iau/info/PackedDates.html
Copy of the packing definition from the above web page:
Packed Dates
Dates of the form YYYYMMDD may be packed into five characters to conserve space.
The first two digits of the year are packed into a single character in column 1 (I = 18, J = 19, K = 20). Columns 2-3 contain the last two digits of the year. Column 4 contains the month and column 5 contains the day, coded as detailed below:
Month Day Character Day Character
in Col 4 or 5 in Col 4 or 5
Jan. 1 1 17 H
Feb. 2 2 18 I
Mar. 3 3 19 J
Apr. 4 4 20 K
May 5 5 21 L
June 6 6 22 M
July 7 7 23 N
Aug. 8 8 24 O
Sept. 9 9 25 P
Oct. 10 A 26 Q
Nov. 11 B 27 R
Dec. 12 C 28 S
13 D 29 T
14 E 30 U
15 F 31 V
16 G
Examples:
1996 Jan. 1 = J9611
1996 Jan. 10 = J961A
1996 Sept.30 = J969U
1996 Oct. 1 = J96A1
2001 Oct. 22 = K01AM
This system can be extended to dates with non-integral days. The decimal fraction of the day is simply appended to the five characters defined above.
Examples:
1998 Jan. 18.73 = J981I73
2001 Oct. 22.138303 = K01AM138303
"""
translate = {}
for i in range(10):
translate[str(i)] = i
for i in range(10,32):
translate[chr(ord('A') + i - 10)] = i
if in_packed[0] == 'I':
year = 1800
elif in_packed[0] == 'J':
year = 1900
elif in_packed[0] == 'K':
year = 2000
else:
raise Error('Unrecognized century code at start of: ' + in_packed)
year += int(in_packed[1:3])
month = translate[in_packed[3]]
day = translate[in_packed[4]]
if len(in_packed) == 5:
return datetime.date(year, month, day)
else:
decimaldays = float('0.' + in_packed[5:])
hour = int(decimaldays * 24.)
minute = int((decimaldays * 24. - hour) * 60.)
second = int(round(decimaldays * 24. * 60. * 60. - (hour * 3600.) - (minute * 60.)))
return datetime.datetime(year, month, day, hour, minute, second)
#TODO: clean up the following comments and incorporate into the code
# can get all numbered asteroids (and other junk) from minor planet center in MPCORB.DAT file:
# [MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
# [Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
# 944 Hidalgo line as of 2013-07-26 is:
#Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
#00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
# But, I want in xephem format, [described here](http://www.clearskyinstitute.com/xephem/help/xephem.html#mozTocId468501)
# and minor planet provides a subset in xephem format [here](http://www.minorplanetcenter.net/iau/Ephemerides/Bright/2013/Soft03Bright.txt):
# though to ensure I was comparing same exact orbit solutions, used 944 Hidalgo from
# http://www.minorplanetcenter.net/iau/Ephemerides/Distant/Soft03Distant.txt
# From MPO263352
#944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
# So, for my purposes, the xephem format, separated by commas is:
# NUMBER NAME - easy enough....
# e - for ecliptic elliptical orbit
# i = inclination, degrees (directly from MPCORB.DAT)
# O = longitude of ascending node, degrees (directly from MPCORB.DAT)
# o = argument of perihelion, degrees (directly from MPCORB.DAT)
# a = mean distance (aka semi-major axis), AU (directly from MPCORB.DAT)
# n = mean daily motion, degrees per day (computed from a**3/2 if omitted) (directly from MPCORB.DAT)
# e = eccentricity, must be < 1 (directly from MPCORB.DAT)
# M = mean anomaly, i.e., degrees from perihelion (directly from MPCORB.DAT)
# E = epoch date, i.e., time of M MM/DD.D/YYYY
# in MPCORB.DAT epoch date is packed according to rules:
# http://www.minorplanetcenter.net/iau/info/PackedDates.html
# Subfield 10A First date these elements are valid, optional
# SubField 10B Last date these elements are valid, optional
# D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT, so 2000
# First component of magnitude model, either g from (g,k) or H from (H,G). Specify which by preceding the number with a "g" or an "H". In absence of either specifier the default is (H,G) model. See Magnitude models.
# corresponds to H in MPCORB.DAT, just need to preface with an 'H'
# Second component of magnitude model, either k or G (directly from MPCORB.DAT)
# s = angular size at 1 AU, arc seconds, optional - I don't care, so skip....
def convert_mpcorb_to_xephem(input):
"""
convert from, e.g.:
[MPCORB.DAT](http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT)
[Format is described in more detail](http://www.minorplanetcenter.org/iau/info/MPOrbitFormat.html)
Des'n H G Epoch M Peri. Node Incl. e n a Reference #Obs #Opp Arc rms Perts Computer
# 944 Hidalgo line as of 2013-07-26 is:
00944 10.77 0.15 K134I 215.40344 56.65077 21.56494 42.54312 0.6617811 0.07172582 5.7370114 0 MPO263352 582 21 1920-2010 0.77 M-v 38h MPCLINUX 0000 (944) Hidalgo 20100222
to
# From MPO263352
944 Hidalgo,e,42.5431,21.5649,56.6508,5.737011,0.0717258,0.66178105,215.4034,04/18.0/2013,2000,H10.77,0.15
input is a single line of text, output will include a newline character within it (but no newline at end)
"""
output = '# From ' + input[107:116] + '\n'
output += input[166:194].strip().replace('(','').replace(')','') + ','
output += 'e,'
output += input[59:68].strip() + ',' # i = inclination, degrees
output += input[48:57].strip() + ',' # O = longitude of ascending node, degrees
output += input[37:46].strip() + ',' # o = argument of perihelion, degrees
output += input[92:103].strip() + ',' # a = mean distance (aka semi-major axis), AU
output += input[80:91].strip() + ',' # n = mean daily motion, degrees per day (computed from a**3/2 if omitted)
output += input[70:79].strip() + ',' # e = eccentricity, must be < 1
output += input[26:35].strip() + ',' # M = mean anomaly, i.e., degrees from perihelion
output += unpack_mpc_date(input[20:25].strip()).strftime('%m/%d/%Y') + ',' # E = epoch date, i.e., time of M
output += '2000,' # D = the equinox year, i.e., time of i, O and o (always J2000.0 in MPCORB.DAT
output += 'H' + input[8:13].strip() + ',' # First component of magnitude model
output += input[14:19].strip() # Second component of magnitude model
return output
def minorplanets(in_datetime, observatory_code,
max_objects=None,
max_magnitude=None, require_magnitude=True,
max_zenithdistance_deg=90.0,
min_heliocentric_distance_AU=None, max_heliocentric_distance_AU=None,
min_topocentric_distance_AU=None, max_topocentric_distance_AU=None):
"""
in_datetime - datetime.datetime(), e.g. datetime.datetime.utcnow()
observatory_code - the Code of the observatory in
http://www.minorplanetcenter.net/iau/lists/ObsCodes.html
can be either string or integer.
max_objects - default is None, otherwise limits the return to this number
of observable objects
max_magnitude - default is None, otherwise limits return to objects
brighter than or equal to this magnitude
(as calculated by PyEphem from the MPC data)
(TODO: confirm whether this is V-band, R-band,
or other...)
require_magnitude - default is True. If False and max_magnitude is None,
then return all objects, whether PyEphem can calculate
a magnitude or not.
max_zenithdistance_deg - default is 90 degrees (horizon)
min/max_heliocentric_distance_AU - defaults are None
min/max_topocentric_distance_AU - defaults are None
"""
obs_info = get_latlon_from_observatory_code(observatory_code)
obs = ephem.Observer()
obs.lat = np.radians(obs_info['latitude'])
obs.lon = np.radians(obs_info['longitude'])
obs.date = _convert_datetime_to_pyephem_date_string(in_datetime)
mpc_filename = _find_cached_file('MPCORB.DAT')
if mpc_filename == 'File Not Found':
raise Error("Problem reading MPCORB.DAT file from disk. \n"
"Most likely you need to go download a copy from: \n"
" http://www.minorplanetcenter.net/iau/MPCORB/MPCORB.DAT")
if max_magnitude is not None:
require_magnitude = True
matching_objects = []
with open(mpc_filename) as f:
in_header = True
for line in f:
if in_header is False and len(line) > 1:
if (not require_magnitude) or (require_magnitude and (line[8:13] != ' ')):
eph = ephem.readdb(convert_mpcorb_to_xephem(line).splitlines()[1])
eph.compute(obs)
if (max_magnitude is None) or (eph.mag <= max_magnitude):
if ((max_zenithdistance_deg is None) or
(np.degrees(np.pi/2. - eph.alt) <= max_zenithdistance_deg)):
if ((min_heliocentric_distance_AU is None) or
(eph.sun_distance >= min_heliocentric_distance_AU)):
if ((max_heliocentric_distance_AU is None) or
(eph.sun_distance <= max_heliocentric_distance_AU)):
if ((min_topocentric_distance_AU is None) or
(eph.earth_distance >= min_topocentric_distance_AU)):
if ((max_topocentric_distance_AU is None) or
(eph.earth_distance <= max_topocentric_distance_AU)):
matching_objects.append(eph)
else:
if line.startswith('-------------------'):
in_header = False
if max_objects is not None:
if len(matching_objects) >= max_objects:
break
name = [a.name for a in matching_objects]
d = {}
d['rise_time'] = [a.rise_time.datetime() if a.rise_time is not None else np.nan for a in matching_objects]
d['transit_time'] = [a.transit_time.datetime() if a.transit_time is not None else np.nan for a in matching_objects]
d['set_time'] = [a.set_time.datetime() if a.set_time is not None else np.nan for a in matching_objects]
d['raJ2000_deg'] = [np.degrees(a.a_ra) for a in matching_objects]
d['decJ2000_deg'] = [np.degrees(a.a_dec) for a in matching_objects]
d['mag'] = [a.mag for a in matching_objects]
d['R_AU'] = [a.sun_distance for a in matching_objects]
d['delta_AU'] = [a.earth_distance for a in matching_objects]
moon = ephem.Moon()
moon.compute(obs.date)
d['O-E-M_deg'] = [np.degrees(ephem.separation(moon, a)) for a in matching_objects]
output = DataFrame(d, index=name)
output = output[['rise_time', 'transit_time', 'set_time', 'raJ2000_deg', 'decJ2000_deg',
'mag', 'R_AU', 'delta_AU', 'O-E-M_deg']] # re-order columns to something sensible
return output
| 53.418338 | 250 | 0.565145 |
7d6278af283b8d74f950804bc1e7d3a988413e1b | 7,573 | py | Python | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
] | 1 | 2021-02-18T19:46:44.000Z | 2021-02-18T19:46:44.000Z | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
] | null | null | null | pcdet/models/backbones_3d/vfe/pillar_vfe.py | KPeng9510/OpenPCDet | 4bebf2f45a3193afb1ffe4f7ee1913afc0632e62 | [
"Apache-2.0"
] | 1 | 2022-01-23T13:37:49.000Z | 2022-01-23T13:37:49.000Z | import torch
from torch_geometric.nn import FeaStConv
from knn_cuda import KNN
from torch_cluster import fps
#from ....ops.roiaware_pool3d import roiaware_pool3d_utils
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
import sys
from lppproj import LocalityPreservingProjection
| 44.810651 | 193 | 0.629737 |
7d68c3cd5ebdfbe4a4f33c56583ea1d144745710 | 915 | py | Python | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | 2 | 2021-02-22T21:53:58.000Z | 2021-04-03T16:40:52.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | 1 | 2018-09-26T03:38:57.000Z | 2018-09-26T03:38:57.000Z | chess/pythonchess/docs/conf.py | mahakbansal/ChessAlphaZero | 2b3f823fdc252d7fd32de0b5e4e53aece9082dd5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
# Import the chess module.
sys.path.insert(0, os.path.abspath('..'))
import chess
# Autodoc.
extensions = ["sphinx.ext.autodoc"]
autodoc_member_order = 'bysource'
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "python-chess"
copyright = "20142018, Niklas Fiekas"
# The version.
version = chess.__version__
release = chess.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
| 22.875 | 74 | 0.747541 |
7d69ee0ea7680377c19eec8ca94d5abf487ee54e | 1,227 | py | Python | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
] | null | null | null | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
] | null | null | null | python/example.py | msmerlak/aa | 09ffdf7df582be9c83c7c9bfd873c55fddb65109 | [
"MIT"
] | null | null | null |
# min (1/2) x'Q'x - q'x
from __future__ import print_function
import numpy as np
import aa
dim = 1000
mems = [5, 10, 20, 50, 100]
N = int(1e4)
np.random.seed(1234)
Q = np.random.randn(dim,dim)
Q = Q.T.dot(Q)
q = np.random.randn(dim)
x_0 = np.random.randn(dim)
x_star = np.linalg.solve(Q, q)
step = 0.0005


def f(x):
    # quadratic objective from the comment at the top of the file:
    # f(x) = (1/2) x'Qx - q'x
    return 0.5 * x.dot(Q.dot(x)) - q.dot(x)


f_star = f(x_star)
print('f^* = ', f_star)
print('No acceleration')
x = x_0.copy()
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
for mem in mems:
print('Type-I acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, True, eta=1e-8)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
print('Type-II acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, False, eta=1e-10)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
| 22.309091 | 61 | 0.544417 |
7d6a2293f4de2609456441f4d1fef57b68982b63 | 2,193 | py | Python | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | MuonAnalysis/MuonAssociators/test/L1MuonMatcher/test.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
# initialize MessageLogger and output report
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.MessageLogger.cerr.INFO = cms.untracked.PSet(
default = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
PATSummaryTables = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
# source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
#'file:/afs/cern.ch/cms/PRS/top/cmssw-data/relval200-for-pat-testing/TauolaTTbar-Summer08_IDEAL_V9_v1-AODSIM.80.root'
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/1E84F77B-341C-DE11-8A99-0019DB29C5FC.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/34267FD6-1C1C-DE11-A836-001617C3B78C.root',
'/store/relval/CMSSW_2_2_7/RelValWM/GEN-SIM-RECO/STARTUP_V9_v1/0004/68BF59CF-1C1C-DE11-AFA9-000423D98BC4.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = cms.string('IDEAL_V9::All')
process.GlobalTag.globaltag = cms.string('STARTUP_V9::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# PAT Layer 0+1
process.load("PhysicsTools.PatAlgos.patSequences_cff")
process.load("MuonAnalysis.MuonAssociators.muonL1Match_cfi")
process.muonL1Match.preselection = cms.string("")
process.allLayer1Muons.trigPrimMatch = cms.VInputTag(
cms.InputTag("muonL1Match"),
cms.InputTag("muonL1Match","propagatedReco"),
)
## Put your EDAnalyzer here
## process.plots = cms.EDFilter("DataPlotter",
## muons = cms.InputTag("cleanLayer1Muons"),
## muonCut = cms.string("")
## )
process.p = cms.Path(
process.muonL1Match *
process.patDefaultSequence
# * process.plots
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("plots.root")
)
| 37.810345 | 125 | 0.75285 |
7d6a59d7fa23a596aac99cdbb9dd178d370f5c83 | 95 | py | Python | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
] | null | null | null | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
] | null | null | null | hydropy/__init__.py | GironsLopez/hydropy | 59cb29254e4a3f02f994e2d049e3c1135e9295a2 | [
"MIT"
] | null | null | null |
"""
Hydropy
=======
Provides functions to work with hydrological processes and equations
"""
| 11.875 | 68 | 0.705263 |
7d6a678fc2e4bddc6ad3dc6d90062ac0ebecff7e | 915 | py | Python | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
] | null | null | null | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
] | null | null | null | Desafios Finais Python - Cognizant Data Cloud Engineer #2/Preenchimento de Vetor I.py | italocreator/heros-journey | 76a867b3c9addf2c8b6c06999f9993e12a5b4e46 | [
"MIT"
] | null | null | null |
"""
Challenge
You received the challenge of reading a value and creating a program that places the value read into the first position of a vector N[10].
In each subsequent position, place double the value of the previous position.
For example, if the value read is 1, the values of the vector must be 1, 2, 4, 8 and so on.
Then display the vector.
Input
The input contains an integer value (V <= 50).
Output
For each position of the vector, write "N[i] = X", where i is the position of the vector and X is the value stored at position i.
The first number of vector N (N[0]) will receive the value of V.
Example Input      Example Output

1                  N[0] = 1
                   N[1] = 2
                   N[2] = 4
                   ...
"""
x = int(input())
n = list()
# TODO: Fill in the blanks with a possible solution to the problem.
for i in range(10):
n.append(x)
x = x*2
print(f"N[{i}] = {n[i]}")
| 30.5 | 123 | 0.632787 |
7d6a9fc0ae2c18fcc1e9420cc0d5c546fe26cbe4 | 1,267 | py | Python | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
] | null | null | null | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
] | null | null | null | Home_Work_2_B_Naychuk_Anastasiya/Task1.py | NaychukAnastasiya/goiteens-python3-naychuk | a79d0af238a15f58a822bb5d8e4d48227d4a7bc1 | [
"MIT"
] | null | null | null |
# Task 3
print("Enter the first number")
var1 = float(input())
print("Enter the second number")
var2 = float(input())
print("Enter the third number")
var3 = float(input())

# Avg = (var1+var2+var3)/3  # average, used by the alternative solution below
if ((var1 > var2) and (var1 < var3)) or ((var1 < var2) and (var1 > var3)):
    print("The middle value is", var1)
elif ((var2 > var1) and (var2 < var3)) or ((var2 < var1) and (var2 > var3)):
    print("The middle value is", var2)
else:
    print("The middle value is", var3)

# # Alternative solution: the middle value is the one closest to the average.
# if (abs(var1-Avg)) > (abs(var2-Avg)):
#     if (abs(var2-Avg)) > (abs(var3-Avg)):
#         print("The middle value is", var3)
#     else:  # (abs(var2-Avg)) < (abs(var3-Avg))
#         print("The middle value is", var2)
# else:  # (abs(var1-Avg)) < (abs(var2-Avg))
#     if (abs(var1-Avg)) > (abs(var3-Avg)):
#         print("The middle value is", var3)
#     else:  # (abs(var1-Avg)) < (abs(var3-Avg))
#         print("The middle value is", var1)
7d6ad190979d6481b1c2985d3daa77d4ce6fbfd1 | 5,689 | py | Python | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
] | null | null | null | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
] | null | null | null | src/paper_1/curriculum/main.py | ludwigflo/paper1 | 13202febdb01a76bbf115435ce9676f6b82e1393 | [
"MIT"
] | null | null | null |
from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader
from paper_1.utils import read_parameter_file, create_experiment_directory
from paper_1.evaluation.eval_utils import init_metrics_object
from paper_1.baseline.main import train as baseline_train
from paper_1.model.model_utils import initialize_model
from torch.utils.tensorboard import SummaryWriter
from train import select_splitted_pseudo_labels
from os.path import dirname, abspath
from torch.optim import Adam
import pandas as pd
import numpy as np
import random
import torch
import os
if __name__ == '__main__':
# set the seed for reproducability
seed_value = 0
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
# get the current and parent directory
current_file = abspath(__file__)
current_dir = dirname(current_file)
parent_dir = dirname(current_dir)
metric_param_file = parent_dir + '/parameters/metric_params.yaml'
model_param_file = parent_dir + '/parameters/model_params.yaml'
data_param_file = parent_dir + '/parameters/data_params.yaml'
main_param_file = current_dir + '/main_params.yaml'
# load the parameters
metric_params = read_parameter_file(metric_param_file)
model_params = read_parameter_file(model_param_file)
main_params = read_parameter_file(main_param_file)
data_params = read_parameter_file(data_param_file)
# define the domains, on which the models should be trained
source_domains = ['Race', 'Religion', 'Sexual Orientation']
target_domains = ['Race', 'Religion', 'Sexual Orientation']
for source_domain in source_domains:
for target_domain in target_domains:
if source_domain != target_domain:
main(main_params, data_params, metric_params, model_params, parent_dir, source_domain, target_domain)
| 40.347518 | 123 | 0.731763 |
7d6b4c15322d55cd0ce898e730c14103fb38d94b | 6,793 | py | Python | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
] | null | null | null | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
] | null | null | null | sfc/tests/functest/sfc_symmetric_chain.py | pkaralis/sfc | b2572f3e4e96ef82fbfd5b6233933f1eac5cb166 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2017 Ericsson AB and others. All rights reserved
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import sys
import threading
import logging
import urllib3
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
import sfc.lib.config as sfc_config
from sfc.tests.functest import sfc_parent_function
""" logging configuration """
logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
openstack_sfc = os_sfc_utils.OpenStackSFC()
if __name__ == '__main__':
# Disable InsecureRequestWarning errors when executing the SFC tests in XCI
urllib3.disable_warnings()
TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
supported_installers = ['fuel', 'apex', 'osa', 'compass']
vnf_names = ['testVNF1']
test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
vnf_names)
test_run.run()
| 35.196891 | 79 | 0.6227 |
7d6ecad90431713565bfe9a36d5edf9284440624 | 1,827 | py | Python | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 42 | 2017-01-23T22:36:03.000Z | 2021-11-14T21:22:17.000Z | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 6 | 2021-09-26T21:18:30.000Z | 2022-02-01T01:26:18.000Z | site-packages/offshoot/main.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 6 | 2017-04-14T13:07:27.000Z | 2020-06-17T06:24:18.000Z |
#!/usr/bin/env python
import sys
import os
import subprocess
import offshoot
valid_commands = ["init", "install", "uninstall"]
if __name__ == "__main__":
execute()
| 27.681818 | 103 | 0.636015 |
7d6f707bec1ef6f1945e2739232de8ac3b5e6c3e | 1,953 | py | Python | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 7 | 2019-08-20T02:43:44.000Z | 2019-12-13T14:26:05.000Z | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | null | null | null | samples/unsharp/unsharp.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 1 | 2019-07-25T21:46:50.000Z | 2019-07-25T21:46:50.000Z |
import heterocl as hcl
from math import sqrt
hcl.config.init_dtype = hcl.Float()
input_image = hcl.placeholder((480, 640, 3), name = "input")
output_image = hcl.placeholder((480, 640, 3), name = "output")
def unsharp(input_image, output_image):
"""
Helper Functions
"""
rx = hcl.reduce_axis(-4, 5, "rx")
ry = hcl.reduce_axis(-4, 5, "ry")
my = hcl.reduce_axis(0, 640, "my")
gray = hcl.compute((480, 640), lambda x, y: (input_image[x, y, 0] * 77 + input_image[x, y, 1] * 150 + input_image[x, y, 2] * 29) >> 8, name = "gray")
blur = hcl.compute(gray.shape, lambda x, y: hcl.sum(gray[rx+x, ry+y] * kernel(rx) * kernel(ry), axis = [rx, ry]), name = "blur")
sharpen = clamp2D(hcl.compute(gray.shape, lambda x, y: gray[x, y] * 2 - blur[x, y], name = "sharpen"), 0, 255)
ratio = clamp2D(hcl.compute(gray.shape, lambda x, y: sharpen[x, y] * 32 / hcl.max(gray[x, my], axis = my), name = "ratio"), 0, 255)
out = clamp3D(hcl.compute(output_image.shape, lambda x, y, c: ratio[x, y] * input_image[x, y, c] >> 5, name = "out"), 0, 255)
U = hcl.update(output_image, lambda x, y, c: out[x, y, c])
return U
s = hcl.make_schedule([input_image, output_image], unsharp)
print hcl.lower(s, [input_image, output_image])
| 39.06 | 151 | 0.620072 |
7d702e229890e1a0e38bb9dc45ff5dead9dc3d80 | 14,391 | py | Python | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | 2 | 2018-12-20T08:54:17.000Z | 2019-10-19T18:35:33.000Z | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | null | null | null | hatspil/core/utils.py | dodomorandi/hatspil | 99c4d255b3f9836b32506636c84b16b3456bd74c | [
"MIT"
] | null | null | null |
"""A collection of utility functions, shared across modules."""
import collections
import datetime
import gzip as gz
import logging
import os
import re
import shutil
import subprocess
from argparse import ArgumentTypeError
from copy import deepcopy
from logging import Logger
from typing import (Any, Callable, Dict, Generator, Iterable, List, Mapping,
Optional, Sequence, Tuple, TypeVar, Union, ValuesView,
cast)
from ..config import Config, KitData
from .barcoded_filename import BarcodedFilename
from .exceptions import AnnotationError, DataError
def get_current() -> str:
"""Get the current date in standard HaTSPiL format."""
today = datetime.date.today()
return "%04d_%02d_%02d" % (today.year, today.month, today.day)
def get_overridable_current_date(parameters: Dict[str, Any]) -> str:
"""Get an eventual overridden date.
If the `parameters` dict contains a `use_date` value, return it.
Otherwise return the result of `get_current`.
"""
if parameters["use_date"] is None:
return get_current()
else:
current_date = parameters["use_date"]
assert isinstance(current_date, str)
return current_date
def run_and_log(command: str, logger: Logger) -> int:
"""Run a command and log everything.
Use `subprocess.Popen` to run a command. The standard output and the
standard error are piped into the logger.
Args:
command: the command to run.
logger: the logger.
Returns:
int: the exit status of the process.
"""
logger.info("Running command: %s", command)
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True,
bufsize=1,
) as process:
(out, err) = process.communicate()
for line in out.split("\n"):
if line != "":
logger.info(line)
for line in err.split("\n"):
if line != "":
logger.warning(line)
return process.wait()
def get_sample_filenames(
obj: Union[Sequence[str], Mapping[str, List[str]], str],
split_by_organism: bool = False,
) -> Union[List[str], Mapping[str, List[str]]]:
"""Return the filenames organised in a different way.
Take a set of filenames in different possible shapes and reorganize
them depending on the content and the value of `split_by_organism`.
Args:
obj: the filenames. It can be a string for one single filename,
a list of filenames or a dict where each key is an organism
code (i.e.: hg19) and the relative value is a list of
filenames.
split_by_organism: whether the filenames must be split by
organism or they must be returned all
together.
Returns:
The input filenames with the desired shape. There are different
cases:
* If `obj` is a list and its length is greater than 1 and
`split_by_organism` is `True`, the organism for each file
is obtained using `get_organism_from_filename`. A dict is
created, where each organism maps to a list of filenames.
If the dict contains more than one organism, it is returned,
otherwise a list of the filenames is returned.
* If `obj` is a list but its length is not greater than 1 or
`split_by_organism` is `False`, a **copy** of `obj` is
returned.
* If `obj` is a dict and it contains more than one entry and
`split_by_organism` is `True`, a **deep copy** of `obj` is
returned.
* If `obj` is a dict but it contains less than two entries or
`split_by_organism` is `False`, a list of all the filenames
in `obj` is returned.
* If `obj` is a string and `split_by_organism` is `True`, the
organism is obtained using `get_organism_from_filename`. If
the organism is valid, a dict with the organism mapped to
a list of one element, `obj`, is returned. Otherwise, if the
organism is invalid (`None` or empty), a list of one element,
`obj`, is returned.
* If `obj` is a string but `split_by_organism` is `False`, a
list of one element, `obj`, is returned.
"""
if isinstance(obj, list):
if split_by_organism and len(obj) > 1:
filenames: Dict[str, List[str]] = {}
for filename in obj:
organism = get_organism_from_filename(filename)
if organism is None:
organism = ""
filenames.setdefault(organism, []).append(filename)
if len(filenames) > 1:
return filenames
else:
return list(next(iter(filenames.values())))
else:
return list(obj)
elif isinstance(obj, dict):
if split_by_organism and len(obj) > 1:
return deepcopy(obj)
else:
values = obj.values()
if not values:
return []
elif isinstance(next(iter(values)), list):
return [filename for filenames in values for filename in filenames]
elif isinstance(next(iter(values)), str):
return list(cast(ValuesView[str], values))
else:
raise DataError("unexpected filenames type")
else:
assert isinstance(obj, str)
if split_by_organism:
organism = get_organism_from_filename(obj)
if organism:
return {organism: [obj]}
else:
return [obj]
else:
return [obj]
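

# A quick illustration of the shapes handled above (hypothetical filenames,
# assuming their barcodes parse to the organisms shown):
#
#   get_sample_filenames("S1.hg19.bam", split_by_organism=True)
#       -> {"hg19": ["S1.hg19.bam"]}
#   get_sample_filenames(["S1.hg19.bam", "S2.mm10.bam"], split_by_organism=True)
#       -> {"hg19": ["S1.hg19.bam"], "mm10": ["S2.mm10.bam"]}
#   get_sample_filenames({"hg19": ["S1.hg19.bam"]}, split_by_organism=False)
#       -> ["S1.hg19.bam"]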
def get_organism_from_filename(filename: str) -> Optional[str]:
"""Get the organism from a filename.
Try to analyse the barcode of a filename, and return the organism
if available. Otherwise return `None`.
"""
try:
barcoded = BarcodedFilename(os.path.basename(filename))
return barcoded.organism
except Exception:
return None
def get_samples_by_organism(
obj: Union[List[str], Dict[str, List[str]], str], default_organism: str
) -> Dict[str, List[str]]:
"""Return the samples in a dict.
Create a organism-samples dict.
Args:
obj: the samples that are collected.
default_organism: when `obj` is not a dict, `default_organism`
is used as key for the output dict.
Returns:
A dictionary that maps organisms to lists of samples. If `obj`
is a dict, a copy of `obj` is returned. If `obj` is a list,
a dict with `default_organism` that maps to `obj` is returned.
If `obj` is a string, a dict with `default_organism` that maps
to a list of one element, `obj`, is returned.
"""
if isinstance(obj, list):
return {default_organism: obj}
elif isinstance(obj, dict):
return dict(obj)
else:
return {default_organism: [obj]}
def get_genome_ref_index_by_organism(config: Config, organism: str) -> Tuple[str, str]:
"""Return the reference file and the index file.
Select the `config.*_ref` and `config.*_index` depending on
`organism`.
"""
if organism == "hg19":
return (config.hg19_ref, config.hg19_index)
elif organism == "hg38":
return (config.hg38_ref, config.hg38_index)
elif organism == "mm9":
return (config.mm9_ref, config.mm9_index)
elif organism == "mm10":
return (config.mm10_ref, config.mm10_index)
else:
raise DataError("Invalid organism")
def get_dbsnp_by_organism(config: Config, organism: str) -> str:
"""Return the dbSNP filename.
Select the `config.dbsnp_*` depending on `organism`.
"""
if organism == "hg19":
return config.dbsnp_hg19
elif organism == "hg38":
return config.dbsnp_hg38
else:
raise DataError("Invalid organism")
def get_cosmic_by_organism(config: Config, organism: str) -> str:
"""Return the cosmic DB filename.
Select the `config.cosmic_*` depending on `organism`.
"""
if organism == "hg19":
return config.cosmic_hg19
elif organism == "hg38":
return config.cosmic_hg38
else:
raise DataError("Invalid organism")
def get_picard_max_records_string(max_records: str) -> str:
"""Get the max records string for Picard.
Create the 'MAX_RECORDS_IN_RAM' parameter using `max_records`. If
`max_records` is empty, an empty string is returned.
"""
if max_records is None or max_records == "":
return ""
else:
return " MAX_RECORDS_IN_RAM=%d" % int(max_records)
def find_fastqs_by_organism(
sample: str, fastq_dir: str, default_organism: str
) -> Dict[str, List[Tuple[str, int]]]:
"""Search for FASTQ files and group them by organism.
Find all the .fastq files inside `fastq_dir` that start with
`sample` and have a valid suffix. Group all the files by organism.
Args:
sample: the barcoded sample as string.
fastq_dir: the directory where the fastq files must be searched.
default_organism: the organism to use in case the organism field
in a filename is absent.
Returns:
A dict that maps an organism to a list of fastq files.
"""
re_fastq_filename = re.compile(
r"^%s(?:\.((?:hg|mm)\d+))?\.R([12])\.fastq(?:\.gz)?$" % sample, re.I
)
fastq_files = [
filename
for filename in os.listdir(fastq_dir)
if re_fastq_filename.match(filename)
]
fastqs: Dict[str, List[Tuple[str, int]]] = {}
for filename in fastq_files:
match = re_fastq_filename.match(filename)
assert match is not None
organism = match.group(1)
read_index = int(match.group(2))
if organism is None or organism == "":
organism = default_organism
if organism in fastqs:
fastqs[organism].append((filename, read_index))
else:
fastqs[organism] = [(filename, read_index)]
return fastqs
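

# For example, with sample "S1" and a directory containing
# "S1.hg19.R1.fastq.gz", "S1.hg19.R2.fastq.gz" and "S1.R1.fastq", the result
# (default organism "mm10") would be:
#   {"hg19": [("S1.hg19.R1.fastq.gz", 1), ("S1.hg19.R2.fastq.gz", 2)],
#    "mm10": [("S1.R1.fastq", 1)]}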
def gzip(filename: str) -> None:
"""Compress a file with GZ compression."""
compressed_filename = filename + ".gz"
with open(filename, "rb") as in_fd, gz.open(
compressed_filename, "wb", compresslevel=6
) as out_fd:
shutil.copyfileobj(in_fd, out_fd)
os.unlink(filename)
def gunzip(filename: str) -> None:
"""Decompress a GZ file."""
decompressed_filename = filename[:-3]
with open(decompressed_filename, "wb") as out_fd, gz.open(filename, "rb") as in_fd:
shutil.copyfileobj(in_fd, out_fd)
os.unlink(filename)
def check_gz(filename: str) -> bool:
"""Check if a GZ file is valid."""
chunk_size = 2 ** 20
with gz.open(filename, "rb") as fd:
try:
while fd.read(1):
fd.seek(chunk_size, os.SEEK_CUR)
return True
except Exception:
return False
def parsed_date(raw_date: str) -> str:
"""Parse a date in 'Y_M_D' format and return a std HaTSPiL date."""
try:
date = datetime.datetime.strptime(raw_date, "%Y_%m_%d")
except ValueError:
raise ArgumentTypeError("expected string in format YYYY_MM_DD")
return "%04d_%02d_%02d" % (date.year, date.month, date.day)
def get_human_annotation(config: Config) -> str:
"""Get the best human genome annotation available in config."""
if config.use_hg38:
return "hg38"
elif config.use_hg19:
return "hg19"
else:
raise AnnotationError("no available human annotation in config")
def get_mouse_annotation(config: Config) -> str:
"""Get the best murine genome annotation available in config."""
if config.use_mm10:
return "mm10"
elif config.use_mm9:
return "mm9"
else:
raise AnnotationError("no available mouse annotation in config")
reFloat = re.compile(r"^(\d+\.\d*|\.\d+)$")
reInt = re.compile(r"^(\d+)$")
def parse_as_number(s: str) -> Union[int, float, str]:
"""Try to parse a string as number.
If `s` matches a float format, a parsed float is returned. If `s`
matches an int, a parset int is returned. Otherwise `s` is returned.
"""
if reFloat.match(s):
return float(s)
elif reInt.match(s):
return int(s)
else:
return s
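

# e.g. parse_as_number("3.5") -> 3.5, parse_as_number("42") -> 42,
# parse_as_number("n/a") -> "n/a"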
T = TypeVar("T")
U = TypeVar("U")
def rfind_if(iterable: Sequence[T], fun: Callable[[T], bool]) -> Optional[int]:
"""Reverse find an object in an iterable that satisfies `fun`.
Args:
iterable: an iterable object.
fun: a function that returns `True` when the item is found.
    Returns:
        One past the index of the last element for which `fun` returns
        `True` (``len(iterable) - k``, where ``k`` counts from the end),
        or `None` if no element satisfies `fun`.
    """
for index, element in enumerate(reversed(iterable)):
if fun(element):
return len(iterable) - index
return None
def argmin(
iterable: Iterable[T], key: Optional[Callable[[T], U]] = None
) -> Optional[int]:
"""Like `min`, but return the index of the element found."""
best = min(
((index, element) for (index, element) in enumerate(iterable)),
key=lambda x: key(x[1]) if key else x[1],
)
if best is not None:
return best[0]
else:
return None
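

# e.g. argmin([3, 1, 2]) -> 1 and argmin(["bb", "a"], key=len) -> 1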
def create_logger(
logger_name: str, handler: Optional[logging.FileHandler] = None
) -> Logger:
"""Create a named logger and add a handler to this."""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
if handler:
logger.addHandler(handler)
return logger
def get_kit_from_barcoded(
config: Config, barcoded: BarcodedFilename
) -> Optional[KitData]:
"""Get a kit from the config given a barcoded filename."""
assert barcoded.kit is not None
assert barcoded.analyte is not None
return config.kits.get((barcoded.kit, barcoded.analyte))
| 32.485327 | 87 | 0.625599 |
7d7258deda24afb1f717d1778a24d42c5aaa3305 | 2,556 | py | Python | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null | DistrictData.py | robbierobinette/rcv-tensorflow | 984852902f465bb6f61ba863e4b76092249911d0 | [
"MIT"
] | null | null | null |
import csv
from typing import List
from CombinedPopulation import CombinedPopulation
from PopulationGroup import PopulationGroup, Democrats, Republicans, Independents
def main():
dd = DistrictData("data-5vPn3.csv")
print("got dd")
for k, v in dd.dvr.items():
v.print()
if __name__ == "__main__":
main()
| 29.72093 | 103 | 0.534429 |
7d72c0bcd96eb18d89e4b84f9f4aa4228039c607 | 102 | py | Python | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
] | 4 | 2015-04-10T10:41:18.000Z | 2016-06-16T01:19:15.000Z | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
] | 2 | 2015-12-18T12:24:05.000Z | 2015-12-18T17:00:27.000Z | urlmiddleware/base.py | dbramwell/django-urlmiddleware | 8f7f4a571730805cdd04f321548c8d1dc7751ec7 | [
"MIT"
] | 7 | 2015-11-17T17:53:37.000Z | 2016-03-29T06:21:17.000Z |
from django.core.urlresolvers import Resolver404
| 17 | 48 | 0.823529 |
7d745ae2b2c11edcf86ebca48a6d9d1699e9100c | 98 | py | Python | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
] | 8 | 2017-10-01T04:47:12.000Z | 2022-02-15T10:16:11.000Z | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
] | null | null | null | test.py | ifplusor/actrie | 54e9aff441594fbcd30a936d4fbc300ad81007b9 | [
"BSD-3-Clause"
] | 4 | 2018-04-06T08:27:02.000Z | 2021-05-11T07:56:17.000Z |
# coding=utf-8
from actrie.tests.test_matcher import test
if __name__ == "__main__":
test()
| 14 | 42 | 0.704082 |
7d7502212e99f51f8f089c24fff476d5cecb479f | 5,137 | py | Python | warehouse/email/services.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
] | 1 | 2022-03-29T11:56:45.000Z | 2022-03-29T11:56:45.000Z | warehouse/email/services.py | pradyunsg/warehouse | 82815b06d9f98deed5f205c66e054de59d22a10d | [
"Apache-2.0"
] | 358 | 2022-01-03T05:30:40.000Z | 2022-03-31T05:40:50.000Z | warehouse/email/services.py | anthonysidesap/warehouse | 140a2cc3cc007daca5f7fa2878a43e7e152d8959 | [
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from email.headerregistry import Address
from email.message import EmailMessage as RawEmailMessage
from email.utils import parseaddr
from typing import Optional
import premailer
from jinja2.exceptions import TemplateNotFound
from pyramid.renderers import render
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from zope.interface import implementer
from warehouse.email.interfaces import IEmailSender
from warehouse.email.ses.models import EmailMessage as SESEmailMessage
class ConsoleAndSMTPEmailSender(SMTPEmailSender):
    ...  # class body (and the SMTPEmailSender base) are not shown in this excerpt
| 32.308176 | 87 | 0.647265 |
7d762add2bb0e919d8e50f41074b703f99873c98 | 265 | py | Python | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
] | 47 | 2020-11-15T03:36:48.000Z | 2021-04-08T05:28:02.000Z | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
] | 78 | 2020-11-14T17:55:28.000Z | 2021-04-06T08:55:24.000Z | quickvision/pretrained/_pretrained_weights.py | zlapp/quickvision | cbf87756088bd7fe24d380ca831f5c1a204466f8 | [
"Apache-2.0"
] | 15 | 2020-11-14T18:01:04.000Z | 2021-02-16T14:50:12.000Z |
import torch
__all__ = ["_load_pretrained_weights"]
| 29.444444 | 109 | 0.8 |
7d762e8385c0a3df789a5bd08064a714cdafb006 | 2,420 | py | Python | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | 7 | 2022-01-28T06:50:00.000Z | 2022-02-14T11:34:32.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | 30 | 2022-01-26T17:54:48.000Z | 2022-03-21T12:33:53.000Z | woke/woke/a_config/data_model.py | Ackee-Blockchain/woke | 0d27de25720142beb9619a89619b7a94c3556af1 | [
"ISC"
] | null | null | null |
from typing import Optional, List
from pathlib import Path
from dataclasses import astuple
import re
from pydantic import BaseModel, Field, Extra, validator
from pydantic.dataclasses import dataclass
from woke.core.enums import EvmVersionEnum
from woke.c_regex_parsing.solidity_version import SolidityVersion
| 30.25 | 117 | 0.673554 |
7d765dcd0b83ec7b2f5cef707b8de57d0e0211e3 | 1,399 | py | Python | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null | model/rcnn/network.py | da-h/tf-boilerplate | ab8409c935d3fcbed07bbefd1cb0049d45283222 | [
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.layers as tfl
"""Copied from the almighty Christian Hundt;
CECAM/CSM/IRTG School 2018: Machine Learning in Scientific Computing
https://github.com/CECAML/school_nierstein_2018/blob/master/Convnet%20TF.ipynb
"""
| 31.795455 | 104 | 0.717655 |
7d76a9eff5e5d91d0da51d617aa1f132efbb6c52 | 517 | py | Python | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
] | null | null | null | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
] | null | null | null | app/application.py | dulin/tornado-test | 8ceeb9f2b50b4cd0f18baa9149140721feec1925 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-
import tornado.web
from app.views import HelloWorld
from app.ws.communication import CommunicationSocketHandler
| 23.5 | 63 | 0.599613 |
7d76d0e887ea0135157eb8f9b5b96280465e3061 | 31,326 | py | Python | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
] | 2 | 2020-02-07T19:02:07.000Z | 2021-05-28T15:44:48.000Z | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
] | null | null | null | python-fmclient/fmclient/fmclient/common/wrapping_formatters.py | starlingx/fault | 6105f83a85a8ca2e5ed8f33e0f5ed5455c8f0e17 | [
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Manages WrapperFormatter objects.
WrapperFormatter objects can be used for wrapping CLI column celldata in order
for the CLI table (using prettyTable) to fit the terminal screen
The basic idea is:
Once celldata is retrieved and ready to display, first iterate through the celldata
and word wrap it so that fits programmer desired column widths. The
WrapperFormatter objects fill this role.
Once the celldata is formatted to their desired widths, then it can be passed to
the existing prettyTable code base for rendering.
"""
import copy
import re
import six
import textwrap
from fmclient.common.cli_no_wrap import is_nowrap_set
from fmclient.common.cli_no_wrap import set_no_wrap
from prettytable import _get_size
from six.moves import range
UUID_MIN_LENGTH = 36
# monkey patch (customize) how the textwrap module breaks text into chunks
wordsep_re = re.compile(r'(\s+|' # any whitespace
r',|'
r'=|'
r'\.|'
r':|'
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
textwrap.TextWrapper.wordsep_re = wordsep_re
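

# `get_width` is referenced further below but its definition is not shown in
# this excerpt; a minimal sketch based on prettytable's `_get_size`, which
# returns a (width, height) pair for a possibly multi-line string:
def get_width(value):
    return _get_size(str(value))[0]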
def is_uuid_field(field_name):
"""
:param field_name:
:return: True if field_name looks like a uuid name
"""
if field_name is not None and field_name in ["uuid", "UUID"] or field_name.endswith("uuid"):
return True
return False
def field_value_function_factory(formatter, field):
"""Builds function for getting a field value from table cell celldata
As a side-effect, attaches function as the 'get_field_value' attribute
of the formatter
:param formatter:the formatter to attach return function to
:param field:
:return: function that returns cell celldata
"""
    def field_value_function_builder(data):
        # inner builder restored to match the docstring: fetch the cell
        # value for `field` from the given row object
        return getattr(data, field, "")

    formatter.get_field_value = field_value_function_builder
    return field_value_function_builder
def wrapper_formatter_factory(ctx, field, formatter):
"""
This function is a factory for building WrapperFormatter objects.
The function needs to be called for each celldata column (field)
that will be displayed in the prettyTable.
The function looks at the formatter parameter and based on its type,
determines what WrapperFormatter to construct per field (column).
ex:
formatter = 15 - type = int : Builds a WrapperFixedWidthFormatter that
will wrap at 15 chars
formatter = .25 - type = int : Builds a WrapperPercentWidthFormatter that
will wrap at 25% terminal width
formatter = type = callable : Builds a WrapperLambdaFormatter that
will call some arbitrary function
formatter = type = dict : Builds a WrapperWithCustomFormatter that
will call some arbitrary function to format
and then apply a wrapping formatter to the result
ex: this dict {"formatter" : captializeFunction,,
"wrapperFormatter": .12}
will apply the captializeFunction to the column
celldata and then wordwrap at 12 % of terminal width
:param ctx: the WrapperContext that the built WrapperFormatter will use
:param field: name of field (column_ that the WrapperFormatter will execute on
:param formatter: specifies type and input for WrapperFormatter that will be built
:return: WrapperFormatter
"""
if isinstance(formatter, WrapperFormatter):
return formatter
if callable(formatter):
return WrapperLambdaFormatter(ctx, field, formatter)
if isinstance(formatter, int):
return WrapperFixedWidthFormatter(ctx, field, formatter)
if isinstance(formatter, float):
return WrapperPercentWidthFormatter(ctx, field, formatter)
if isinstance(formatter, dict):
if "wrapperFormatter" in formatter:
embedded_wrapper_formatter = wrapper_formatter_factory(ctx, None,
formatter["wrapperFormatter"])
elif "hard_width" in formatter:
embedded_wrapper_formatter = WrapperFixedWidthFormatter(ctx, field, formatter["hard_width"])
embedded_wrapper_formatter.min_width = formatter["hard_width"]
else:
embedded_wrapper_formatter = WrapperFormatter(ctx, None) # effectively a NOOP width formatter
if "formatter" not in formatter:
return embedded_wrapper_formatter
custom_formatter = formatter["formatter"]
wrapper = WrapperWithCustomFormatter(ctx, field, custom_formatter, embedded_wrapper_formatter)
return wrapper
raise Exception("Formatter Error! Unrecognized formatter {} for field {}".format(formatter, field))
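

# For instance (ctx and fields hypothetical):
#   wrapper_formatter_factory(ctx, "uuid", 36)    # wrap at 36 characters
#   wrapper_formatter_factory(ctx, "text", 0.25)  # wrap at 25% of terminal width
#   wrapper_formatter_factory(ctx, "sev", {"formatter": str.capitalize,
#                                          "wrapperFormatter": 0.12})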
def build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters={}):
if objs is None or len(objs) == 0:
return {"stats": {},
"total_max_width": 0,
"total_avg_width": 0}
stats = {}
for i in range(0, len(fields)):
stats[fields[i]] = ColumnStats(fields[i], field_labels[i], custom_formatters.get(fields[i]))
for obj in objs:
for field in fields:
column_stat = stats[field]
column_stat.add_value(column_stat.get_field_value(obj))
total_max_width = sum([s.max_width for s in stats.values()])
total_avg_width = sum([s.average_width for s in stats.values()])
return {"stats": stats,
"total_max_width": total_max_width,
"total_avg_width": total_avg_width}
def build_best_guess_formatters_using_average_widths(objs, fields, field_labels, custom_formatters={}, no_wrap_fields=[]):
column_info = build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters)
format_spec = {}
total_avg_width = float(column_info["total_avg_width"])
if total_avg_width <= 0:
return format_spec
for f in [ff for ff in fields if ff not in no_wrap_fields]:
format_spec[f] = float(column_info["stats"][f].average_width) / total_avg_width # pylint: disable=old-division
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
# Handle no wrap fields by building formatters that will not wrap
for f in [ff for ff in fields if ff in no_wrap_fields]:
format_spec[f] = {"hard_width": column_info["stats"][f].max_width}
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
return format_spec
def build_best_guess_formatters_using_max_widths(objs, fields, field_labels, custom_formatters={}, no_wrap_fields=[]):
column_info = build_column_stats_for_best_guess_formatting(objs, fields, field_labels, custom_formatters)
format_spec = {}
for f in [ff for ff in fields if ff not in no_wrap_fields]:
format_spec[f] = float(column_info["stats"][f].max_width) / float(column_info["total_max_width"]) # pylint: disable=old-division
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
# Handle no wrap fields by building formatters that will not wrap
for f in [ff for ff in fields if ff in no_wrap_fields]:
format_spec[f] = {"hard_width": column_info["stats"][f].max_width}
custom_formatter = custom_formatters.get(f, None)
if custom_formatter:
format_spec[f] = {"formatter": custom_formatter, "wrapperFormatter": format_spec[f]}
return format_spec
def needs_wrapping_formatters(formatters, no_wrap=None):
no_wrap = is_nowrap_set(no_wrap)
if no_wrap:
return False
# handle easy case:
if not formatters:
return True
# If we have at least one wrapping formatter,
# then we assume we don't need to wrap
for f in formatters.values():
if WrapperFormatter.is_wrapper_formatter(f):
return False
# looks like we need wrapping
return True
def as_wrapping_formatters(objs, fields, field_labels, formatters, no_wrap=None, no_wrap_fields=[]):
"""This function is the entry point for building the "best guess"
word wrapping formatters. A best guess formatter guesses what the best
columns widths should be for the table celldata. It does this by collecting
various stats on the celldata (min, max average width of column celldata) and from
this celldata decides the desired widths and the minimum widths.
Given a list of formatters and the list of objects (objs), this function
first determines if we need to augment the passed formatters with word wrapping
formatters. If the no_wrap parameter or global no_wrap flag is set,
then we do not build wrapping formatters. If any of the formatters within formatters
is a word wrapping formatter, then it is assumed no more wrapping is required.
:param objs:
:param fields:
:param field_labels:
:param formatters:
:param no_wrap:
:param no_wrap_fields:
:return: When no wrapping is required, the formatters parameter is returned
-- effectively a NOOP in this case
When wrapping is required, best-guess word wrapping formatters are returned
with original parameter formatters embedded in the word wrapping formatters
"""
no_wrap = is_nowrap_set(no_wrap)
if not needs_wrapping_formatters(formatters, no_wrap):
return formatters
format_spec = build_best_guess_formatters_using_average_widths(objs, fields, field_labels, formatters, no_wrap_fields)
formatters = build_wrapping_formatters(objs, fields, field_labels, format_spec)
return formatters
def build_wrapping_formatters(objs, fields, field_labels, format_spec, add_blank_line=True,
no_wrap=None, use_max=False):
"""
A convenience function for building all wrapper formatters that will be used to
format a CLI's output when its rendered in a prettyTable object.
It iterates through the keys of format_spec and calls wrapperFormatterFactory to build
wrapperFormatter objects for each column.
Its best to show by example parameters:
field_labels = ['UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity']
fields = ['uuid', 'timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity']
format_spec = {
"uuid" : .10, # float = so display as 10% of terminal width
"timestamp" : .08,
"state" : .08,
"event_log_id" : .07,
"reason_text" : .42,
"entity_instance_id" : .13,
"severity" : {"formatter" : captializeFunction,
"wrapperFormatter": .12}
}
:param objs: the actual celldata that will get word wrapped
:param fields: fields (attributes of the celldata) that will be displayed in the table
:param field_labels: column (field headers)
:param format_spec: dict specify formatter for each column (field)
:param add_blank_line: default True, when tru adds blank line to column if it wraps, aids readability
:param no_wrap: default False, when True turns wrapping off but does not suppress other custom formatters
:param use_max
:return: wrapping formatters as functions
"""
no_wrap = set_no_wrap(no_wrap)
if objs is None or len(objs) == 0:
return {}
    biggest_word_pattern = re.compile("[\.:,;\!\?\\ =-\_]")

    def get_biggest_word(s):
        # helper restored (its original definition is not shown in this
        # excerpt): the longest fragment of s after splitting on the
        # separator pattern above
        return max(biggest_word_pattern.split(s), key=len)
wrapping_formatters_as_functions = {}
if len(fields) != len(field_labels):
raise Exception("Error in buildWrappingFormatters: "
"len(fields) = {}, len(field_labels) = {},"
" they must be the same length!".format(len(fields),
len(field_labels)))
field_to_label = {}
for i in range(0, len(fields)):
field_to_label[fields[i]] = field_labels[i]
ctx = WrapperContext()
ctx.set_num_columns(len(fields))
if not format_spec:
if use_max:
format_spec = build_best_guess_formatters_using_max_widths(objs, fields, field_labels)
else:
format_spec = build_best_guess_formatters_using_average_widths(objs, fields, field_labels)
for k in list(format_spec.keys()):
if k not in fields:
raise Exception("Error in buildWrappingFormatters: format_spec "
"specifies a field {} that is not specified "
"in fields : {}".format(k, fields))
format_spec_for_k = copy.deepcopy(format_spec[k])
if callable(format_spec_for_k):
format_spec_for_k = {"formatter": format_spec_for_k}
wrapper_formatter = wrapper_formatter_factory(ctx, k, format_spec_for_k)
if wrapper_formatter.min_width <= 0:
# need to specify min-width so that
# column is not unnecessarily squashed
if is_uuid_field(k): # special case
wrapper_formatter.set_min_width(UUID_MIN_LENGTH)
else:
# column width cannot be smaller than the widest word
column_data = [str(wrapper_formatter.get_unwrapped_field_value(data)) for data in objs]
widest_word_in_column = max([get_biggest_word(d) + " "
for d in column_data + [field_to_label[k]]], key=len)
wrapper_formatter.set_min_width(len(widest_word_in_column))
wrapper_formatter.header_width = get_width(field_to_label[k])
wrapper_formatter.add_blank_line = add_blank_line
wrapper_formatter.no_wrap = no_wrap
wrapping_formatters_as_functions[k] = wrapper_formatter.as_function()
ctx.add_column_formatter(k, wrapper_formatter)
return wrapping_formatters_as_functions
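

# Typical flow (mirroring _simpleTestHarness below): the float values are
# fractions of the terminal width, and `objs` are attribute-style rows.
#
#   spec = {"uuid": 0.10, "reason_text": 0.42, "severity": 0.12}
#   formatters = build_wrapping_formatters(objs, fields, field_labels, spec)
#   utils.print_list(objs, fields, field_labels, formatters=formatters)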
def set_no_wrap_on_formatters(no_wrap, formatters):
"""
Purpose of this function is to temporarily force
the no_wrap setting for the formatters parameter.
returns orig_no_wrap_settings defined for each formatter
Use unset_no_wrap_on_formatters(orig_no_wrap_settings) to undo what
this function does
"""
# handle easy case:
if not formatters:
return {}
formatter_no_wrap_settings = {}
global_orig_no_wrap = is_nowrap_set()
set_no_wrap(no_wrap)
for k, f in formatters.items():
if WrapperFormatter.is_wrapper_formatter(f):
formatter_no_wrap_settings[k] = (f.wrapper_formatter.no_wrap, f.wrapper_formatter)
f.wrapper_formatter.no_wrap = no_wrap
return {"global_orig_no_wrap": global_orig_no_wrap,
"formatter_no_wrap_settings": formatter_no_wrap_settings}
def unset_no_wrap_on_formatters(orig_no_wrap_settings):
"""
It only makes sense to call this function with the return value
from the last call to set_no_wrap_on_formatters(no_wrap, formatters).
It effectively undoes what set_no_wrap_on_formatters() does
"""
if not orig_no_wrap_settings:
return {}
global_orig_no_wrap = orig_no_wrap_settings["global_orig_no_wrap"]
formatter_no_wrap_settings = orig_no_wrap_settings["formatter_no_wrap_settings"]
formatters = {}
for k, v in formatter_no_wrap_settings.items():
formatters[k] = v[1]
formatters[k].no_wrap = v[0]
set_no_wrap(global_orig_no_wrap)
return formatters
def _simpleTestHarness(no_wrap):
from fmclient.common import utils
set_no_wrap(no_wrap)
field_labels = ['Time Stamp', 'State', 'Event Log ID', 'Reason Text',
'Entity Instance ID', 'Severity', 'Number']
fields = ['timestamp', 'state', 'event_log_id', 'reason_text',
'entity_instance_id', 'severity', 'number']
formatterSpecX = {"timestamp": 10,
"state": 8,
"event_log_id": 70,
"reason_text": 30,
"entity_instance_id": 30,
"severity": 12,
"number": 4}
formatterSpec = {}
for f in fields:
formatterSpec[f] = buildFormatter(f, formatterSpecX[f])
logs = []
for i in range(0, 30):
log = {}
for f in fields:
if f == 'number':
log[f] = i
else:
log[f] = "{}{}".format(f, i)
logs.append(utils.objectify(log))
formatterSpec = formatterSpecX
formatters = build_wrapping_formatters(logs, fields, field_labels, formatterSpec)
utils.print_list(logs, fields, field_labels, formatters=formatters, sortby=6,
reversesort=True, no_wrap_fields=['entity_instance_id'])
print("nowrap = {}".format(is_nowrap_set()))
if __name__ == "__main__":
_simpleTestHarness(True)
_simpleTestHarness(False)
| 38.721879 | 137 | 0.632925 |
7d77a229da1b2cdc8c56a9c402927cc2d1140814 | 2,139 | py | Python | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
] | null | null | null | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
] | null | null | null | simple.py | vaiorabbit/python-glfw | b5984650e976f4702c3dc06db7115aebc13698ca | [
"Zlib"
] | 1 | 2020-03-04T08:59:15.000Z | 2020-03-04T08:59:15.000Z |
# Ref.: https://github.com/vaiorabbit/ruby-opengl/blob/master/sample/simple.rb
from ctypes import *
from OpenGL.GL import *
import GLFW
from GLFW import *
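

# The callback and main loop are not shown in this excerpt; a minimal sketch
# following the referenced simple.rb (the glfw* names and GLFW_* constants
# are assumed to be exposed by this binding, mirroring the GLFW C API):
def key_callback_fn(window, key, scancode, action, mods):
    if key == GLFW_KEY_ESCAPE and action == GLFW_PRESS:
        glfwSetWindowShouldClose(window, 1)


def main():
    glfwInit()
    window = glfwCreateWindow(640, 480, b"Simple example", None, None)
    glfwMakeContextCurrent(window)
    glfwSetKeyCallback(window, key_callback)
    while glfwWindowShouldClose(window) == 0:
        glClear(GL_COLOR_BUFFER_BIT)
        glfwSwapBuffers(window)
        glfwPollEvents()
    glfwTerminate()
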
key_callback = GLFWkeyfun(key_callback_fn)
if __name__ == '__main__':
main()
| 28.905405 | 78 | 0.632071 |
7d77a393017f4de426158a54d01130a88642e6af | 34,661 | py | Python | market_sim/_agents/risk_model.py | quanttrade/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | 247 | 2017-09-14T03:26:39.000Z | 2022-03-30T10:23:02.000Z | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | null | null | null | market_sim/_agents/risk_model.py | Deeptradingfx/rl_trading | f4168c69f44fe5a11a06461387d4591426a43735 | [
"Apache-2.0"
] | 111 | 2017-10-18T07:47:07.000Z | 2022-03-30T10:18:49.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implement different methods to hedge positions and measure the risk of a zero
coupon bond portfolio
REFERENCE: Nawalkha, S. K; Soto, G. M.; Beliaeva, N. A., "Interest Rate Risk
Modeling, the fixed Income Valuation course". Wiley, 2005
@author: ucaiado
Created on 12/22/2016
"""
import numpy as np
import math
import pandas as pd
import pprint
'''
Begin help functions
'''
'''
End help functions
'''
def update_maxmin(f_frice, a):
'''
Update maximum and minimum price observed by the agent while positioned
:param f_frice: float.
:param a: agent object.
'''
if f_frice > a.current_max_price:
a.current_max_price = f_frice
if f_frice < a.current_min_price:
a.current_min_price = f_frice
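

# Tiny usage sketch (hypothetical agent object carrying the two attributes):
#
#   from types import SimpleNamespace
#   agent = SimpleNamespace(current_max_price=10.0, current_min_price=10.0)
#   update_maxmin(12.5, agent)  # agent.current_max_price becomes 12.5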
| 40.72973 | 79 | 0.578979 |
7d78430382af94d8d75d17a72371f34356ac1d39 | 193 | py | Python | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
] | null | null | null | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
] | null | null | null | hris/apps/jobs/admin.py | Minedomain/hris_backend | 90aab497c076c2d4ce4e05a441db0ee7a175df57 | [
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
| 24.125 | 80 | 0.73057 |
7d78bb6905459ba9f8b320facebb6b0cf69eca83 | 3,401 | py | Python | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
] | 52 | 2019-03-18T21:12:59.000Z | 2022-01-24T05:49:23.000Z | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
] | 173 | 2019-03-18T15:50:14.000Z | 2019-12-09T18:03:07.000Z | src/arche/readers/schema.py | WinterComes/arche | 6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e | [
"MIT"
] | 21 | 2019-03-20T17:14:22.000Z | 2022-01-30T18:33:22.000Z |
from collections import defaultdict
from enum import Enum
import json
import pprint
from typing import Dict, List, Union, Any, Set, DefaultDict
from arche.tools import s3
import perfect_jsonschema
EXTENDED_KEYWORDS = {"tag", "unique", "coverage_percentage"}
SchemaObject = Dict[str, Union[str, bool, int, float, None, List]]
RawSchema = Dict[str, SchemaObject]
SchemaSource = Union[str, RawSchema]
TaggedFields = Dict[str, List[str]]
| 31.490741 | 95 | 0.605116 |
7d7a5b43416629a61d913d56e3d15ecd4f2e0f5f | 5,620 | py | Python | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | 4 | 2019-03-07T05:15:13.000Z | 2019-06-13T20:35:45.000Z | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | 2 | 2019-08-01T18:31:41.000Z | 2019-08-01T19:42:15.000Z | tensorflow_probability/python/mcmc/eight_schools_hmc.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | 1 | 2019-09-18T15:17:53.000Z | 2019-09-18T15:17:53.000Z |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared library for `eight_schools_hmc_{graph,eager}_test.py`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
__all__ = [
'EightSchoolsHmcBenchmarkTestHarness',
'benchmark_eight_schools_hmc',
'eight_schools_joint_log_prob',
]
def mvn(*args, **kwargs):
"""Convenience function to efficiently construct a MultivariateNormalDiag."""
# Faster than using `tfd.MultivariateNormalDiag`.
return tfd.Independent(tfd.Normal(*args, **kwargs),
reinterpreted_batch_ndims=1)
def eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools joint log-prob."""
rv_avg_effect = tfd.Normal(loc=0., scale=10.)
rv_avg_stddev = tfd.Normal(loc=5., scale=1.)
rv_school_effects_standard = mvn(
loc=tf.zeros_like(school_effects_standard),
scale=tf.ones_like(school_effects_standard))
rv_treatment_effects = mvn(
loc=(avg_effect + tf.exp(avg_stddev) * school_effects_standard),
scale=treatment_stddevs)
return (
rv_avg_effect.log_prob(avg_effect) +
rv_avg_stddev.log_prob(avg_stddev) +
rv_school_effects_standard.log_prob(school_effects_standard) +
rv_treatment_effects.log_prob(treatment_effects))
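

# Quick sanity check of the joint log-prob at a toy point (eager mode; not
# part of the benchmark below):
#
#   te = tf.constant([28., 8., -3., 7., -1., 1., 18., 12.])
#   ts = tf.constant([15., 10., 16., 11., 9., 11., 10., 18.])
#   lp = eight_schools_joint_log_prob(te, ts, 0., 0., tf.zeros([8]))  # scalar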
def benchmark_eight_schools_hmc(
num_results=int(5e3),
num_burnin_steps=int(3e3),
num_leapfrog_steps=3,
step_size=0.4):
"""Runs HMC on the eight-schools unnormalized posterior."""
num_schools = 8
treatment_effects = tf.constant(
[28, 8, -3, 7, -1, 1, 18, 12],
dtype=np.float32,
name='treatment_effects')
treatment_stddevs = tf.constant(
[15, 10, 16, 11, 9, 11, 10, 18],
dtype=np.float32,
name='treatment_stddevs')
def unnormalized_posterior_log_prob(
avg_effect, avg_stddev, school_effects_standard):
"""Eight-schools unnormalized log posterior."""
return eight_schools_joint_log_prob(
treatment_effects, treatment_stddevs,
avg_effect, avg_stddev, school_effects_standard)
if tf.executing_eagerly():
sample_chain = tf.function(tfp.mcmc.sample_chain)
else:
sample_chain = tfp.mcmc.sample_chain
def computation():
"""The benchmark computation."""
_, kernel_results = sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=(
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps))
return kernel_results.is_accepted
# Let's force evaluation of graph to ensure build time is not part of our time
# trial.
is_accepted_tensor = computation()
if not tf.executing_eagerly():
session = tf.compat.v1.Session()
session.run(is_accepted_tensor)
start_time = time.time()
if tf.executing_eagerly():
is_accepted = computation()
else:
is_accepted = session.run(is_accepted_tensor)
wall_time = time.time() - start_time
num_accepted = np.sum(is_accepted)
acceptance_rate = np.float32(num_accepted) / np.float32(num_results)
return dict(
iters=(num_results + num_burnin_steps) * num_leapfrog_steps,
extras={'acceptance_rate': acceptance_rate},
wall_time=wall_time)
| 34.691358 | 80 | 0.724377 |
7d7a5e990271c6f1b8c5e7eefd58b31203c16bfb | 16,456 | py | Python | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
] | null | null | null | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
] | 1 | 2022-02-06T14:21:48.000Z | 2022-03-22T15:19:40.000Z | src/pyspex/dem_io.py | rmvanhees/pyspex | 1e1370e57d131dba6880bdf7a56808e5ce638ca5 | [
"BSD-3-Clause"
] | null | null | null |
"""
This file is part of pyspex
https://github.com/rmvanhees/pyspex.git
Python implementation to read SPEXone DEM output
Copyright (c) 2019-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from pathlib import Path
import numpy as np
from .lib.tmtc_def import tmtc_def
# - global parameters ------------------------------
# - local functions --------------------------------
def det_dtype():
"""
Returns numpy dtype with the registers of the SPEXone CMV4000 detector
"""
return np.dtype([
('UNUSED_000', 'u1'),
('NUMBER_LINES', 'u1', (2)),
('START1', 'u1', (2)),
('START2', 'u1', (2)),
('START3', 'u1', (2)),
('START4', 'u1', (2)),
('START5', 'u1', (2)),
('START6', 'u1', (2)),
('START7', 'u1', (2)),
('START8', 'u1', (2)),
('NUMBER_LINES1', 'u1', (2)),
('NUMBER_LINES2', 'u1', (2)),
('NUMBER_LINES3', 'u1', (2)),
('NUMBER_LINES4', 'u1', (2)),
('NUMBER_LINES5', 'u1', (2)),
('NUMBER_LINES6', 'u1', (2)),
('NUMBER_LINES7', 'u1', (2)),
('NUMBER_LINES8', 'u1', (2)),
('SUB_S', 'u1', (2)),
('SUB_A', 'u1', (2)),
('MONO', 'u1'), # 1 bits
('IMAGE_FLIPPING', 'u1'), # 2 bits
('INTE_SYNC', 'u1'), # 3 bits: Int_sync, Exp_dual, Exp_ext
('EXP_TIME', 'u1', (3)),
('EXP_STEP', 'u1', (3)),
('EXP_KP1', 'u1', (3)),
('EXP_KP2', 'u1', (3)),
('NR_SLOPES', 'u1'), # 2 bits
('EXP_SEQ', 'u1'),
('EXP_TIME2', 'u1', (3)),
('EXP_STEP2', 'u1', (3)),
('UNUSED_062', 'u1'),
('UNUSED_063', 'u1'),
('UNUSED_064', 'u1'),
('UNUSED_065', 'u1'),
('UNUSED_066', 'u1'),
('UNUSED_067', 'u1'),
('UNUSED_068', 'u1'),
('EXP2_SEQ', 'u1'),
('NUMBER_FRAMES', 'u1', (2)),
('OUTPUT_MODE', 'u1'), # 2 bits
('FOT_LENGTH', 'u1'),
('I_LVDS_REC', 'u1'), # 4 bits
('UNUSED_075', 'u1'),
('UNUSED_076', 'u1'),
('COL_CALIB', 'u1'), # 2 bits: Col_calib, ADC_calib
('TRAINING_PATTERN', 'u1', (2)), # 12 bits
('CHANNEL_EN', 'u1', (3)), # 19 bits
('I_LVDS', 'u1'), # 4 bits
('I_COL', 'u1'), # 4 bits
('I_COL_PRECH', 'u1'), # 4 bits
('I_ADC', 'u1'), # 4 bits
('I_AMP', 'u1'), # 4 bits
('VTF_L1', 'u1'), # 7 bits
('VLOW2', 'u1'), # 7 bits
('VLOW3', 'u1'), # 7 bits
('VRES_LOW', 'u1'), # 7 bits
('UNUSED_092', 'u1'),
('UNUSED_093', 'u1'),
('V_PRECH', 'u1'), # 7 bits
('V_REF', 'u1'), # 7 bits
('UNUSED_096', 'u1'),
('UNUSED_097', 'u1'),
('VRAMP1', 'u1'), # 7 bits
('VRAMP2', 'u1'), # 7 bits
('OFFSET', 'u1', (2)), # 14 bits
('PGA_GAIN', 'u1'), # 2 bits
('ADC_GAIN', 'u1'),
('UNUSED_104', 'u1'),
('UNUSED_105', 'u1'),
('UNUSED_106', 'u1'),
('UNUSED_107', 'u1'),
('T_DIG1', 'u1'), # 4 bits
('T_DIG2', 'u1'), # 4 bits
('UNUSED_110', 'u1'),
('BIT_MODE', 'u1'), # 1 bits
('ADC_RESOLUTION', 'u1'), # 2 bits
('PLL_ENABLE', 'u1'), # 1 bits
('PLL_IN_FRE', 'u1'), # 2 bits
('PLL_BYPASS', 'u1'), # 1 bits
('PLL_RANGE', 'u1'), # 8 bits: PLL range(1), out_fre(3), div(4)
('PLL_LOAD', 'u1'),
('DUMMY', 'u1'),
('UNUSED_119', 'u1'),
('UNUSED_120', 'u1'),
('BLACK_COL_EN', 'u1'), # 2 bits: Black_col_en, PGA_gain
('UNUSED_122', 'u1'),
('V_BLACKSUN', 'u1'), # 6 bits
('UNUSED_124', 'u1'),
('UNUSED_125', 'u1'),
('TEMP', 'u1', (2))
])
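# Example (a sketch, not part of the original file): one header record maps a
# raw register dump onto the named fields above; 'dem_header.bin' is a
# placeholder path.
#
#   hdr = np.fromfile('dem_header.bin', dtype=det_dtype(), count=1)
#   print(hdr['NUMBER_LINES'], hdr['EXP_TIME'])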
# - class DEMio -------------------------
def exp_time(self, t_mcp=1e-7):
"""
Returns pixel exposure time [s].
"""
# Nominal fot_length = 20, except for very short exposure_time
reg_fot = self.hdr['FOT_LENGTH']
reg_exptime = ((self.hdr['EXP_TIME'][2] << 16)
+ (self.hdr['EXP_TIME'][1] << 8)
+ self.hdr['EXP_TIME'][0])
return 129 * t_mcp * (0.43 * reg_fot + reg_exptime)
def fot_time(self, t_mcp=1e-7):
"""
Returns frame overhead time [s]
"""
# Nominal fot_length = 20, except for very short exposure_time
reg_fot = self.hdr['FOT_LENGTH']
return 129 * t_mcp * (reg_fot + 2 * (16 // self.number_channels))
def rot_time(self, t_mcp=1e-7):
"""
Returns image read-out time [s]
"""
return 129 * t_mcp * (16 // self.number_channels) * self.number_lines
def frame_period(self, n_coad=1):
"""
Returns frame period [s]
"""
return 2.38 + (n_coad
* (self.exp_time() + self.fot_time() + self.rot_time()))
def get_sci_hk(self):
"""
Returns Science telemetry, a subset of MPS and housekeeping parameters
Returns
-------
numpy array
"""
def convert_val(key):
"""
Convert byte array to integer
"""
val = 0
for ii, bval in enumerate(self.__hdr[0][key]):
val += bval << (ii * 8)
return val
# convert original detector parameter values to telemetry parameters
convert_det_params = {
'DET_NUMLINES': convert_val('NUMBER_LINES'),
'DET_START1': convert_val('START1'),
'DET_START2': convert_val('START2'),
'DET_START3': convert_val('START3'),
'DET_START4': convert_val('START4'),
'DET_START5': convert_val('START5'),
'DET_START6': convert_val('START6'),
'DET_START7': convert_val('START7'),
'DET_START8': convert_val('START8'),
'DET_NUMLINES1': convert_val('NUMBER_LINES1'),
'DET_NUMLINES2': convert_val('NUMBER_LINES2'),
'DET_NUMLINES3': convert_val('NUMBER_LINES3'),
'DET_NUMLINES4': convert_val('NUMBER_LINES4'),
'DET_NUMLINES5': convert_val('NUMBER_LINES5'),
'DET_NUMLINES6': convert_val('NUMBER_LINES6'),
'DET_NUMLINES7': convert_val('NUMBER_LINES7'),
'DET_NUMLINES8': convert_val('NUMBER_LINES8'),
'DET_SUBS': convert_val('SUB_S'),
'DET_SUBA': convert_val('SUB_A'),
'DET_MONO': self.__hdr[0]['MONO'],
'DET_IMFLIP': self.__hdr[0]['IMAGE_FLIPPING'],
'DET_EXPCNTR': self.__hdr[0]['INTE_SYNC'],
'DET_EXPTIME': convert_val('EXP_TIME'),
'DET_EXPSTEP': convert_val('EXP_STEP'),
'DET_KP1': convert_val('EXP_KP1'),
'DET_KP2': convert_val('EXP_KP2'),
'DET_NOFSLOPES': self.__hdr[0]['NR_SLOPES'],
'DET_EXPSEQ': self.__hdr[0]['EXP_SEQ'],
'DET_EXPTIME2': convert_val('EXP_TIME2'),
'DET_EXPSTEP2': convert_val('EXP_STEP2'),
'DET_EXP2_SEQ': self.__hdr[0]['EXP2_SEQ'],
'DET_NOFFRAMES': convert_val('NUMBER_FRAMES'),
'DET_OUTMODE': self.__hdr[0]['OUTPUT_MODE'],
'DET_FOTLEN': self.__hdr[0]['FOT_LENGTH'],
'DET_ILVDSRCVR': self.__hdr[0]['I_LVDS_REC'],
'DET_CALIB': self.__hdr[0]['COL_CALIB'],
'DET_TRAINPTRN': convert_val('TRAINING_PATTERN'),
'DET_CHENA': convert_val('CHANNEL_EN'),
'DET_ILVDS': self.__hdr[0]['I_LVDS'],
'DET_ICOL': self.__hdr[0]['I_COL'],
'DET_ICOLPR': self.__hdr[0]['I_COL_PRECH'],
'DET_IADC': self.__hdr[0]['I_ADC'],
'DET_IAMP': self.__hdr[0]['I_AMP'],
'DET_VTFL1': self.__hdr[0]['VTF_L1'],
'DET_VTFL2': self.__hdr[0]['VLOW2'],
'DET_VTFL3': self.__hdr[0]['VLOW3'],
'DET_VRSTL': self.__hdr[0]['VRES_LOW'],
'DET_VPRECH': self.__hdr[0]['V_PRECH'],
'DET_VREF': self.__hdr[0]['V_REF'],
'DET_VRAMP1': self.__hdr[0]['VRAMP1'],
'DET_VRAMP2': self.__hdr[0]['VRAMP2'],
'DET_OFFSET': convert_val('OFFSET'),
'DET_PGAGAIN': self.__hdr[0]['PGA_GAIN'],
'DET_ADCGAIN': self.__hdr[0]['ADC_GAIN'],
'DET_TDIG1': self.__hdr[0]['T_DIG1'],
'DET_TDIG2': self.__hdr[0]['T_DIG2'],
'DET_BITMODE': self.__hdr[0]['BIT_MODE'],
'DET_ADCRES': self.__hdr[0]['ADC_RESOLUTION'],
'DET_PLLENA': self.__hdr[0]['PLL_ENABLE'],
'DET_PLLINFRE': self.__hdr[0]['PLL_IN_FRE'],
'DET_PLLBYP': self.__hdr[0]['PLL_BYPASS'],
'DET_PLLRATE': self.__hdr[0]['PLL_RANGE'],
'DET_PLLLOAD': self.__hdr[0]['PLL_LOAD'],
'DET_DETDUM': self.__hdr[0]['DUMMY'],
'DET_BLACKCOL': self.__hdr[0]['BLACK_COL_EN'],
'DET_VBLACKSUN': self.__hdr[0]['V_BLACKSUN'],
'DET_T': convert_val('TEMP')
}
sci_hk = np.zeros((1,), dtype=np.dtype(tmtc_def(0x350)))
sci_hk[0]['REG_FULL_FRAME'] = 1
sci_hk[0]['REG_CMV_OUTPUTMODE'] = 3
for key, value in convert_det_params.items():
sci_hk[0][key] = value
return sci_hk
def get_data(self, numlines=None):
"""
Returns data of a detector frame (numpy uint16 array)
Parameters
----------
numlines : int, optional
Provide number of detector rows when no headerfile is present
"""
if numlines is None:
# obtain number of rows
numlines = self.number_lines
# Read binary big-endian data
return np.fromfile(self.bin_file, dtype='>u2').reshape(numlines, -1)
| 34.426778 | 79 | 0.49131 |
7d7bdf74580e44ae7e0eab89dc294d34670eb290 | 7,827 | py | Python | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 10 | 2020-09-29T06:36:45.000Z | 2022-03-14T18:15:50.000Z | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 53 | 2020-10-08T10:05:00.000Z | 2022-03-29T14:21:18.000Z | tests/util/test_parsing_helpers.py | lkattis-signal/SignalSDK | f085b9cae0495f4e016b9982df271efc6fd0a8f5 | [
"Apache-2.0"
] | 5 | 2020-09-25T07:48:04.000Z | 2021-11-23T07:08:56.000Z | from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Union, Type, List, Optional, Any, Tuple
import pytest
from signal_ocean.util import parsing_helpers
def test_parse_model():
data = {'ModelID': 1, 'ModelName': 'model1', 'ModelScore': .97,
'TouchedBy': 'signal',
'CreatedDate': '2010-01-01T01:00:00'}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1',
model_score=.97,
touched_by='signal',
created_date=datetime(2010, 1, 1, 1, 0, 0,
tzinfo=timezone.utc))
def test_parse_nested_model():
data = {'ModelID': 1, 'nested_model': {'ModelID': 3}}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, nested_model=TestNestedModel(3))
def test_parse_model_rename_key():
data = {'ModelID': 1, 'NAME': 'model1'}
rename_keys = {'NAME': 'model_name'}
parsed = parsing_helpers.parse_model(data, TestModel, rename_keys)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1')
def test_parse_model_extra_attributes_are_ignored():
data = {'ModelID': 1, 'ModelName': 'model1', 'ModelScore': .97,
'TouchedBy': 'signal', 'CreatedDate': '2010-01-01'}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='model1')
def test_parse_model_default():
data = {'ModelID': 1}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_name='a')
def test_parse_model_default_factory():
data = {'ModelID': 1}
parsed = parsing_helpers.parse_model(data, TestModel)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1, model_lists=[])
def test_parse_model_missing_attribute_raises_type_error():
data = {'ModelID': 1}
with pytest.raises(TypeError):
parsing_helpers.parse_model(data, TestModel)
def test_parse_model_rename_key_extra_attribute_ignored():
data = {'ModelID': 1}
rename_keys = {'NAME': 'model_name'}
parsed = parsing_helpers.parse_model(data, TestModel, rename_keys)
assert isinstance(parsed, TestModel)
assert parsed == TestModel(model_id=1)
| 36.919811 | 84 | 0.54542 |
7d7ca170be35a492481ffa204124b3d8dffb5cdc | 2,931 | py | Python | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
] | null | null | null | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
] | null | null | null | density-based/train.py | ramonpeter/UnbinnedMeasurements | 31c0a8125d48216718c22721cba63544d6b8897a | [
"MIT"
] | null | null | null | import tensorflow as tf
import pandas as pd
import numpy as np
import sys
import time
from cflow import ConditionalFlow
from MoINN.modules.subnetworks import DenseSubNet
from utils import train_density_estimation, plot_loss, plot_tau_ratio
# import data
tau1_gen = np.reshape(np.load("../data/tau1s_Pythia_gen.npy"), (-1,1))
tau2_gen = np.reshape(np.load("../data/tau2s_Pythia_gen.npy"), (-1,1))
tau1_sim = np.reshape(np.load("../data/tau1s_Pythia_sim.npy"), (-1,1))
tau2_sim = np.reshape(np.load("../data/tau2s_Pythia_sim.npy"), (-1,1))
data_gen = tf.convert_to_tensor(np.concatenate([tau1_gen,tau2_gen], axis=-1), dtype=tf.float32)
data_sim = tf.convert_to_tensor(np.concatenate([tau1_sim,tau2_sim], axis=-1), dtype=tf.float32)
train_gen, test_gen = np.split(data_gen, 2)
train_sim, test_sim = np.split(data_sim, 2)
# Get the flow
meta = {
"units": 16,
"layers": 4,
"initializer": "glorot_uniform",
"activation": "leakyrelu",
}
cflow = ConditionalFlow(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta, subnet_constructor=DenseSubNet)
# train the network
EPOCHS = 50
BATCH_SIZE = 1000
LR = 5e-3
DECAY_RATE=0.1
ITERS = len(train_gen)//BATCH_SIZE
DECAY_STEP=ITERS
#Prepare the tf.dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_gen, train_sim))
train_dataset = train_dataset.shuffle(buffer_size=500000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(LR, DECAY_STEP, DECAY_RATE)
opt = tf.keras.optimizers.Adam(lr_schedule)
train_losses = []
#train_all = np.concatenate([train_gen, train_sim], axis=-1)
start_time = time.time()
for e in range(EPOCHS):
batch_train_losses = []
# Iterate over the batches of the dataset.
for step, (batch_gen, batch_sim) in enumerate(train_dataset):
batch_loss = train_density_estimation(cflow, opt, batch_gen, [batch_sim])
batch_train_losses.append(batch_loss)
train_loss = tf.reduce_mean(batch_train_losses)
train_losses.append(train_loss)
if (e + 1) % 1 == 0:
# Print metrics
print(
"Epoch #{}: Loss: {}, Learning_Rate: {}".format(
e + 1, train_losses[-1], opt._decayed_lr(tf.float32)
)
)
end_time = time.time()
print("--- Run time: %s hour ---" % ((end_time - start_time)/60/60))
print("--- Run time: %s mins ---" % ((end_time - start_time)/60))
print("--- Run time: %s secs ---" % ((end_time - start_time)))
# Make plots and sample
plot_loss(train_losses, name="Log-likelihood", log_axis=False)
detector = tf.constant(test_sim, dtype=tf.float32)
unfold_gen = cflow.sample(int(5e5),[detector])
plot_tau_ratio(test_gen, unfold_gen, detector, name="tau_ratio")
unfold_gen = {}
for i in range(10):
unfold_gen[i] = cflow.sample(int(5e5),[detector])
unfold_pythia = np.stack([unfold_gen[i] for i in range(10)])
np.save("inn_pythia", unfold_pythia)
| 32.566667 | 113 | 0.702491 |
7d7cdf2a362ccd086f161b36591ea27b0857e365 | 2,408 | py | Python | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
] | null | null | null | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
] | null | null | null | assignment5/code/src/decoder.py | jschmidtnj/cs584 | d1d4d485d1fac8743cdbbc2996792db249dcf389 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
decoder file
decoder class
"""
import tensorflow as tf
| 31.684211 | 102 | 0.581395 |
7d7cfad6e60102e07f57c14396b2297a35ac5b1c | 2,203 | py | Python | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
] | 1 | 2022-01-18T09:43:24.000Z | 2022-01-18T09:43:24.000Z | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
] | null | null | null | camos/model/inputdata.py | danilexn/camos | 88d2457d3d71bb9f60a9b376a4b2dbeb611fd90d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
import camos.model.image as img
from camos.utils.apptools import getGui
| 34.968254 | 177 | 0.625057 |
7d7f83cb6c3e80ad4e030d0441da9a9587d821b7 | 10,462 | py | Python | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
] | null | null | null | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
] | null | null | null | src/compas_fab/backends/ros/messages/services.py | Kathrin3010/compas_fab | 18230b70479ab57635b24832762c340e41102c10 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from .geometry_msgs import PoseStamped
from .moveit_msgs import Constraints
from .moveit_msgs import MoveItErrorCodes
from .moveit_msgs import PlannerParams
from .moveit_msgs import PlanningScene
from .moveit_msgs import PlanningSceneComponents
from .moveit_msgs import PositionIKRequest
from .moveit_msgs import RobotState
from .moveit_msgs import RobotTrajectory
from .moveit_msgs import TrajectoryConstraints
from .moveit_msgs import WorkspaceParameters
from .std_msgs import Header
from .std_msgs import ROSmsg
| 45.290043 | 124 | 0.677882 |
7d803a9aa0c5e2c7510ceac09d326b16dcb098e1 | 9,946 | py | Python | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Ai/ExpertSystem/holmes/holmes2/forward.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | #
# module forward.py
#
# forward chaining inference engine
# see holmes/forward.py and holmes.doc for more info;
#
# optimization: uses known fact and rule 'if' indexes to avoid:
# a) exhaustive fact list search when matching an 'if'
# b) exhaustive fact list scan when seeing if fact redundant
# c) exhaustive fact list scan when seeing if should ask user
# d) reselecting and refiring rule/binding on each iteration
#
# only tries rules suggested (triggered) by facts added
# during the last iteration (restarts from top again);
#
# could be made slightly faster by using '(x,y)' tree rep
# for lists (proof list, etc.), but the gain would be minor
# compared to the index tree improvement;
#
# known fact list is now an index tree (members() generates
# the old list, but it is no longer in deduction-order);
###########################################################################
from match import *
from index import Index
from kbase import external, internal
from time import time
stop_chaining = 'stop_chaining'
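# Illustrative sketch (not holmes code): the agenda-style loop described in the
# header comments, where each pass re-examines only rules whose 'if' parts
# mention a fact added during the previous iteration.
def _naive_forward_chain(facts, rules):
    """facts: a set of atoms; rules: a list of (ifs, then) pairs."""
    new, known = set(facts), set()
    while new:
        known |= new
        triggered = [r for r in rules if any(f in r[0] for f in new)]
        new = {then for ifs, then in triggered
               if all(i in known for i in ifs) and then not in known}
    return known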
#######################################################
# create fact index and init iteration counts;
# store_unique would remove redundant initial facts;
#######################################################
#################################################
# add 'then' parts of matched rules/bindings
# store_unique() might speed finding duplicates;
#################################################
#############################################
# pick rules with matched 'if' parts;
# returns list with no redundant rules;
#############################################
trigger_id = 1
#####################################################
# generate bindings for rule's 'if' conjunction,
# for all rules triggered by latest deductions;
# note: 'not' goals must match explicitly asserted
# 'not' facts: we just match the whole 'not';
#####################################################
########################################################
# assorted stuff; dictionary copies should be built-in,
# since dictionary assignment 'shares' the same object;
########################################################
##########################################################
# the 'why' explanation in forward chaining just lists
# the rule containing the asked goal;
##########################################################
######################################################
# 'how' explanations require us to construct proof
# trees for each fact added to the known facts list;
######################################################
| 28.096045 | 81 | 0.478082 |
7d8144c38e98997db49f5fa507e926dc5ff5e76c | 979 | py | Python | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null | bert/tasks/read_file.py | rschoon/bert | 5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13 | [
"MIT"
] | null | null | null |
import tarfile
import tempfile
from . import Task, TaskVar
| 27.971429 | 81 | 0.544433 |
7d8289a62a068949c34be79180a4077eeeb19299 | 8,610 | py | Python | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
] | null | null | null | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
] | null | null | null | p2m/layers.py | dipaco/single-viewTo3D | 923a769afedd95651cc11c72bf4e744c783de87f | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
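# The Layer base class itself is not present in this row; a minimal sketch of
# how get_layer_uid() is typically consumed to auto-name layers (the class name
# and details here are assumptions, not the original implementation):
class _LayerSketch(object):
    def __init__(self, **kwargs):
        name = kwargs.get('name')
        if not name:
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}

    def _call(self, inputs):
        # identity by default; subclasses override with the real transform
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            return self._call(inputs)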
| 32.126866 | 111 | 0.617073 |
7d82c9d35fc41989289ca1ca70bcd714b7bacd76 | 6,477 | py | Python | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
] | 9 | 2019-10-29T13:30:57.000Z | 2022-01-30T14:23:26.000Z | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
] | 2 | 2021-06-08T22:11:11.000Z | 2022-03-12T00:44:37.000Z | models/swarm_algorithm.py | AlexanderKlanovets/swarm_algorithms | 8da851baccd4d074c747b7d2b4df9952918fab31 | [
"MIT"
] | 2 | 2020-02-11T09:26:48.000Z | 2020-05-11T17:47:22.000Z | from abc import ABC, abstractmethod
import numpy as np
| 28.407895 | 79 | 0.589007 |
7d8352a4615e2d80df5904ec6e1dc6850549b6ea | 1,376 | py | Python | Python-3/basic_examples/strings/python_str_to_datetime.py | ghiloufibelgacem/jornaldev | b9b27f9f7da595892520314b4ed1d2675556310a | [
"MIT"
] | 1,139 | 2018-05-09T11:54:36.000Z | 2022-03-31T06:52:50.000Z | Python-3/basic_examples/strings/python_str_to_datetime.py | iamharshverma/journaldev | af24242a1ac1b7dc3e8e2404ec916b77ccf5044a | [
"MIT"
] | 56 | 2018-06-20T03:52:53.000Z | 2022-02-09T22:57:41.000Z | Python-3/basic_examples/strings/python_str_to_datetime.py | iamharshverma/journaldev | af24242a1ac1b7dc3e8e2404ec916b77ccf5044a | [
"MIT"
] | 2,058 | 2018-05-09T09:32:17.000Z | 2022-03-29T13:19:42.000Z | from datetime import datetime
# string to datetime object
datetime_str = '09/19/18 13:55:26'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')
print(type(datetime_object))
print(datetime_object) # printed in default format
# string to date object
date_str = '09-19-2018'
date_object = datetime.strptime(date_str, '%m-%d-%Y').date()
print(type(date_object))
print(date_object) # printed in default formatting
# string to time object
time_str = '13::55::26'
time_object = datetime.strptime(time_str, '%H::%M::%S').time()
print(type(time_object))
print(time_object)
# time module
import time
time_obj = time.strptime(time_str, '%H::%M::%S')
print(type(time_obj))
print(time_obj)
# default formatting - "%a %b %d %H:%M:%S %Y"
print(time.strptime('Wed Sep 19 14:55:02 2018'))
# exception handling example
datetime_str = '09/19/18 13:55:26'
try:
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y')
except ValueError as ve:
print('ValueError Raised:', ve)
time_str = '99::55::26'
try:
time_object = time.strptime(time_str, '%H::%M::%S')
except ValueError as e:
print('ValueError:', e)
# str to datetime with locale
import locale
locale.setlocale(locale.LC_ALL, 'de_DE')
date_str_de_DE = '10-Dezember-2018 Montag' # de_DE locale
datetime_object = datetime.strptime(date_str_de_DE, '%d-%B-%Y %A')
print(datetime_object)
| 24.571429 | 70 | 0.713663 |
7d85c7a93fbd0155d7bd1fe3e1af5e36cc75c497 | 484 | py | Python | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
] | 5 | 2019-09-23T19:04:59.000Z | 2020-08-06T18:07:48.000Z | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
] | 1 | 2020-08-08T12:41:35.000Z | 2020-08-10T18:21:48.000Z | sshspawner/tests/__init__.py | 1kastner/SSHSpawner | 2634b3ed863f1dcbc3b48d7bee1ac3d98042e75e | [
"BSD-3-Clause"
] | 4 | 2020-02-25T22:37:02.000Z | 2021-04-13T14:43:16.000Z | ###############################################################################
# Copyright (c) 2018, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# Written by Thomas Mendoza mendoza33@llnl.gov
# LLNL-CODE-771750
# All rights reserved
#
# This file is part of SSHSpawner: https://github.com/LLNL/SSHSpawner
#
# SPDX-License-Identifier: BSD-3-Clause
###############################################################################
| 37.230769 | 79 | 0.520661 |
7d85e7f96f3d8e7fbfc3a65a4dfc184c2bae42cc | 7,697 | py | Python | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_bias_accu_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | from datetime import time
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from vnpy.app.cta_strategy.base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
import numpy as np
| 31.545082 | 127 | 0.572951 |
7d86bb1a8869218343e11c5b17e9cc10ddeac450 | 4,249 | py | Python | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
] | 1 | 2020-07-19T16:10:19.000Z | 2020-07-19T16:10:19.000Z | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
] | 46 | 2019-11-01T08:53:32.000Z | 2022-01-15T10:27:56.000Z | test/test-beam-dataflow-nlp.py | tarrade/proj_NLP_text_classification_with_GCP | ac09d6dbf8c07470d03cfb8140a26db7cd5bef9f | [
"Apache-2.0"
] | null | null | null | import sys
import os
import pathlib
import logging
import subprocess
import datetime
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
import src.preprocessing.preprocessing as pp
print(os.environ['PROJECT_ID'])
print(os.environ['BUCKET_NAME'])
print(os.environ['REGION'])
# define query table
table_schema = {'fields': [
{'name': 'id', 'type': 'NUMERIC', 'mode': 'REQUIRED'},
{'name': 'title', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'text_body', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'code_body', 'type': 'STRING', 'mode': 'NULLABLE'},
{"fields": [
{"mode": "NULLABLE",
"name": "value",
"type": "STRING"}
],
"mode": "REPEATED",
"name": "tags",
"type": "RECORD"
}
]}
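# create_query() is referenced below but not included in this row; a minimal
# sketch returning a query shaped like table_schema above (the dataset and
# table names are assumptions):
def create_query():
    return """
        SELECT id, title, text_body, code_body, tags
        FROM `{}.test.stackoverflow_posts`
        LIMIT 1000
    """.format(os.environ['PROJECT_ID'])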
def preprocess():
"""
Arguments:
-RUNNER: "DirectRunner" or "DataflowRunner". Specify whether to run the pipeline locally or on Google Cloud, respectively.
Side-effects:
-Creates and executes dataflow pipeline.
See https://beam.apache.org/documentation/programming-guide/#creating-a-pipeline
"""
job_name = 'test-stackoverflow' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
project = os.environ['PROJECT_ID']
region = os.environ['REGION']
output_dir = "gs://{0}/stackoverflow/".format(os.environ['BUCKET_NAME'])
# options
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = project
google_cloud_options.job_name = job_name
google_cloud_options.region = region
google_cloud_options.staging_location = os.path.join(output_dir, 'tmp', 'staging')
google_cloud_options.temp_location = os.path.join(output_dir, 'tmp')
# done by command line
#options.view_as(StandardOptions).runner = RUNNER
options.view_as(SetupOptions).setup_file=os.environ['DIR_PROJ']+'/setup.py'
# instantantiate Pipeline object using PipelineOptions
print('Launching Dataflow job {} ... hang on'.format(job_name))
p = beam.Pipeline(options=options)
table = p | 'Read from BigQuery' >> beam.io.Read(beam.io.BigQuerySource(
# query
query=create_query(),
# use standard SQL for the above query
use_standard_sql=True)
)
clean_text = table | 'Clean Text' >> beam.ParDo(pp.NLPProcessing())
clean_text | 'Write to BigQuery' >> beam.io.WriteToBigQuery(
# The table name is a required argument for the BigQuery
table='test_stackoverflow_beam_nlp',
dataset='test',
project=project,
# Here we use the JSON schema read in from a JSON file.
# Specifying the schema allows the API to create the table correctly if it does not yet exist.
schema=table_schema,
# Creates the table in BigQuery if it does not yet exist.
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
# Deletes all data in the BigQuery table before writing.
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
# not needed, from with clause
if options.view_as(StandardOptions).runner == 'DataflowRunner':
print('DataflowRunner')
p.run()
else:
print('Default: DirectRunner')
result = p.run()
result.wait_until_finish()
print('Done')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
print('Starting main process ...')
preprocess()
# Usage
# python3 test-beam-dataflow.py --runner DataflowRunner
# python3 test-beam-dataflow.py
# python3 test-beam-dataflow.py --runner DataflowRunner --no_use_public_ips --subnetwork https://www.googleapis.com/compute/v1/projects/xxx/regions/europe-west1/subnetworks/yyyy --region=europe-west1 --zone=europe-west1-b
| 35.408333 | 221 | 0.676865 |
7d87158e11ce4ed100a35dda4334c28bbf1bf852 | 3,882 | py | Python | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | slixmpp/plugins/xep_0405/mix_pam.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z | # Slixmpp: The Slick XMPP Library
# Copyright (C) 2020 Mathieu Pasquet <mathieui@mathieui.net>
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from typing import (
List,
Optional,
Set,
Tuple,
)
from slixmpp import JID, Iq
from slixmpp.exceptions import IqError, IqTimeout
from slixmpp.plugins import BasePlugin
from slixmpp.stanza.roster import RosterItem
from slixmpp.plugins.xep_0405 import stanza
from slixmpp.plugins.xep_0369 import stanza as mix_stanza
BASE_NODES = [
'urn:xmpp:mix:nodes:messages',
'urn:xmpp:mix:nodes:participants',
'urn:xmpp:mix:nodes:info',
]
| 34.660714 | 95 | 0.580629 |
7d872614c5ec53276181d661d5d56268e35d080a | 1,360 | py | Python | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
] | null | null | null | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
] | null | null | null | MoraisParkingPython/view/funcoes_areas.py | larissacauane/Morais-Parking-Python | 9063845cabef10459dde76b53d3a51975788a54d | [
"MIT"
] | null | null | null | from control.controller_veiculos import ControllerVeiculos
from control.controller_proprietario import ControllerProprietario
from control.controller_area import ControllerAreaEstacionamento
from model.constants import *
controller_veiculo = ControllerVeiculos()
controller_proprietario = ControllerProprietario()
controller_areas = ControllerAreaEstacionamento()
| 32.380952 | 73 | 0.675735 |
7d889fb0ab0b91db363297f53747bd0adaa5fe54 | 2,811 | py | Python | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
] | null | null | null | tests/gold_tests/h2/h2spec.test.py | a-canary/trafficserver | df01ace2b0bdffd3ddcc5b2c7587b6d6fed5234c | [
"Apache-2.0"
] | null | null | null | '''
Test HTTP/2 with h2spec
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test HTTP/2 with h2spec
'''
Test.SkipUnless(
Condition.HasProgram("h2spec", "h2spec needs to be installed on the system for this test to work"),
)
Test.ContinueOnFail = True
# ----
# Setup httpbin Origin Server
# ----
httpbin = Test.MakeHttpBinServer("httpbin")
# ----
# Setup ATS. Disable the cache to simplify the test.
# ----
ts = Test.MakeATSProcess("ts", enable_tls=True, enable_cache=False)
# add ssl materials like key, certificates for the server
ts.addDefaultSSLFiles()
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port)
)
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.records_config.update({
'proxy.config.http.insert_request_via_str': 1,
'proxy.config.http.insert_response_via_str': 1,
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'http',
})
# ----
# Test Cases
# ----
# In case you need to disable some of the tests, you can specify sections like http2/6.4.
h2spec_targets = "http2/1 http2/2 http2/3 http2/4 http2/5 http2/6 http2/7 http2/8 hpack"
test_run = Test.AddTestRun()
test_run.Processes.Default.Command = 'h2spec {0} -t -k --timeout 10 -p {1}'.format(h2spec_targets, ts.Variables.ssl_port)
test_run.Processes.Default.ReturnCode = 0
test_run.Processes.Default.StartBefore(httpbin, ready=When.PortOpen(httpbin.Variables.Port))
test_run.Processes.Default.StartBefore(Test.Processes.ts)
test_run.Processes.Default.Streams.stdout = "gold/h2spec_stdout.gold"
test_run.StillRunningAfter = httpbin
# Over riding the built in ERROR check since we expect some error cases
ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR: HTTP/2", "h2spec tests should have error log")
| 37.48 | 121 | 0.743863 |
7d8a92045f001897812e0811e27aaab163f27e32 | 576 | py | Python | examples/02/client.py | cjrh/aiosmartsock | a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f | [
"Apache-2.0"
] | 9 | 2019-03-25T23:25:08.000Z | 2022-01-17T00:49:26.000Z | examples/02/client.py | cjrh/aiomsg | 74b646675e3d7296f0334d3e17c1be0370c5d852 | [
"Apache-2.0"
] | 33 | 2019-04-13T02:31:07.000Z | 2022-03-21T19:12:14.000Z | examples/02/client.py | cjrh/aiosmartsock | a4ab5ffe5b673ada2a3002d7a9cb68ee1ea4a48f | [
"Apache-2.0"
] | 1 | 2021-04-26T09:07:36.000Z | 2021-04-26T09:07:36.000Z | import logging
import itertools
import asyncio
import random
import aiomsg
import aiorun
logging.basicConfig(level="DEBUG")
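# main() is awaited below but not present in this row. A minimal sketch,
# assuming aiomsg's Søcket API with connect()/send() coroutines:
async def main():
    sock = aiomsg.Søcket()
    await sock.connect()
    for i in itertools.count():
        await asyncio.sleep(random.random())
        await sock.send(f'msg {i}'.encode())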
aiorun.run(main())
| 19.2 | 59 | 0.65625 |
7d8b956b2e624082889be95139c9c63feed50163 | 1,901 | py | Python | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 14 | 2020-02-07T21:36:39.000Z | 2022-03-12T22:37:04.000Z | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 19 | 2019-05-18T23:58:30.000Z | 2022-01-09T16:45:35.000Z | data_structures/class_dependency_injection.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 5 | 2020-10-06T06:10:27.000Z | 2021-07-08T12:58:46.000Z | # Dependency injection:
# Technique where one object (or static method) supplies the dependencies of another object.
# The objective is to decouple objects to the extent that no client code has to be changed
# simply because an object it depends on needs to be changed to a different one.
# Dependency injection is one form of the broader technique of inversion of control.
# Theoretically, the client is not allowed to call the injector code; it is the injecting code
# that constructs the services and calls the client to inject them. This means the client code
# does not need to know about the injecting code, just the interfaces. This separates the
# responsibilities of use and construction.
# In Python there are not many frameworks for dependency injection: https://stackoverflow.com/questions/2461702/why-is-ioc-di-not-common-in-python
#
# source code: http://stackoverflow.com/a/3076636/5620182
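# The classes exercised below were not included in this row; a minimal sketch
# consistent with the printed results in __main__ (edge counts, messages and
# the ValueError text are inferred here, not the linked answer's exact code):
class Shape(object):
    def __init__(self, description=''):
        if description == "It's flat":
            self._edges = 1
        else:
            raise ValueError('Invalid description: %s.' % description)

    def number_of_edges(self):
        return self._edges


class Line(Shape):
    def __init__(self):
        self._edges = 1


class SomeShape(Shape):
    def __init__(self):
        pass

    def number_of_edges(self):
        return 'A shape can have many edges...'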
if __name__ == "__main__":
l1 = Shape("It's flat")
print(l1.number_of_edges()) # 1
l2 = Line()
print(l2.number_of_edges()) # 1
u = SomeShape()
print(u.number_of_edges()) # A shape can have many edges...
s = Shape("Hexagon") # ValueError: Invalid description: Hexagon.
| 35.203704 | 146 | 0.678064 |
7d8c2a23670b05afd3505faf37ad0aff75f308fd | 5,073 | py | Python | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | 7 | 2019-08-01T14:57:34.000Z | 2019-11-26T12:12:17.000Z | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | null | null | null | vcommand/libs/crypto.py | virink/vCommand | 328dd5a8bc9390c5edde80f5544d797f54690f91 | [
"MIT"
] | 2 | 2019-08-16T04:52:50.000Z | 2019-11-26T12:12:25.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Author : Virink <virink@outlook.com>
Date : 2019/04/18, 14:49
"""
import string
import re
L = string.ascii_lowercase
U = string.ascii_uppercase
A = string.ascii_letters
def func_atbash(*args):
""""""
arg = args[0]
arg = arg.lower().replace(' ', 'vvvzzzvvv')
res = [L[25 - j] for i in arg for j in range(26) if i == L[j]]
return ''.join(res).replace('eeeaaaeee', ' ')
def __caesar(offset, arg):
""" : """
result = ""
for ch in arg:
if ch.isupper():
result += U[((U.index(ch) + offset) % 26)]
elif ch.islower():
result += L[((L.index(ch) + offset) % 26)]
elif ch.isdigit():
result += ch
else:
result += ch
return result
def func_caesar(*args):
""""""
res = []
for offset in range(26):
res.append("[+] offset : %d\tresult : %s" %
(offset, __caesar(offset, args[0])))
return "\r\n".join(res)
def func_rot13(*args):
"""rot13"""
return __caesar(13, args[0])
def func_mpkc(*args):
""" Mobile Phone Keyboard Cipher"""
T = {
'A': 21, 'B': 22, 'C': 23, 'D': 31, 'E': 32, 'F': 33,
'G': 41, 'H': 42, 'I': 43, 'J': 51, 'K': 52, 'L': 53,
'M': 61, 'N': 62, 'O': 63, 'P': 71, 'Q': 72, 'R': 73, 'S': 74,
'T': 81, 'U': 82, 'V': 83, 'W': 91, 'X': 92, 'Y': 93, 'Z': 94
}
arg = args[0].upper()
if arg[0] in U:
return ','.join([str(T.get(i, i)) for i in arg])
else:
T = {str(T[k]): k for k in T}
if ',' in arg:
arg = arg.split(',')
elif ' ' in arg:
arg = arg.split(' ')
return ''.join([T.get(i, i) for i in arg])
def func_morse(*args):
""""""
T = {
'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.',
',': '--..--', '.': '.-.-.-', ':': '---...', ';': '-.-.-.',
'?': '..--..', '=': '-...-', "'": '.----.', '/': '-..-.',
'!': '-.-.--', '-': '-....-', '_': '..--.-', '(': '-.--.',
')': '-.--.-', '$': '...-..-', '&': '. . . .', '@': '.--.-.',
'{': '----.--', '}': '-----.-'
}
arg = args[0]
if re.match(r'^[\.\-\/ ]+$', arg):
T = {str(T[k]): k for k in T}
if len(args) > 1:
arg = ' '.join(args)
arg = arg.replace('/', ' ').split(' ')
# TODO: auto-decode Morse input that has no separators
# p = 0
# res = ''
# d = 5
# while p < (len(arg)+7) and d > 0:
# print("[D] len : %d p : %d" % (len(arg), p))
# for j in [6, 5, 4, 3, 2, 1, 0]:
# tmp = T.get(arg[p:p+j], None)
# print("[D] tmp = arg[%d:%s] = %s => %s" %
# (p, j, arg[p:p+j], tmp))
# if tmp:
# p = p+j
# res += tmp
# break
# # p = p+j-1
# # break
# d -= 1
# print("[D] Result : %s" % res)
return ''.join([T.get(i) for i in arg])
else:
return '/'.join([str(T.get(i, '?')) for i in arg.upper()])
def func_peigen(*args):
""""""
T = {
'H': 'aabbb', 'G': 'aabba', 'R': 'baaab', 'Q': 'baaaa',
'Z': 'bbaab', 'Y': 'bbaaa', 'N': 'abbab', 'M': 'abbaa',
'U': 'babaa', 'V': 'babab', 'I': 'abaaa', 'J': 'abaab',
'F': 'aabab', 'E': 'aabaa', 'A': 'aaaaa', 'B': 'aaaab',
'T': 'baabb', 'S': 'baaba', 'C': 'aaaba', 'D': 'aaabb',
'P': 'abbbb', 'O': 'abbba', 'K': 'ababa', 'L': 'ababb',
'W': 'babba', 'X': 'babbb'
}
arg = args[0]
if re.match(r'^[ab]+$', arg):
T = {str(T[k]): k for k in T}
return ''.join([T.get(arg[i:i+5]) for i in range(0, len(arg), 5)])
else:
return ''.join([T.get(i.upper()) for i in arg])
def __vigenere(s, key='virink', de=0):
""""""
s = str(s).replace(" ", "").upper()
key = str(key).replace(" ", "").upper()
res = ''
i = 0
while i < len(s):
j = i % len(key)
k = U.index(key[j])
m = U.index(s[i])
if de:
if m < k:
m += 26
res += U[m - k]
else:
res += U[(m + k) % 26]
i += 1
return res
def func_vigenere(*args):
""""""
if len(args) < 2:
return '[-] Vigenere Usage : command key text [isdecode]'
return __vigenere(args[1], args[0], 1 if len(args) >= 3 else 0)
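# Example (a sketch, not part of the original file): round-trip a message
# through the Vigenere helpers above.
#
#   cipher = func_vigenere('virink', 'attack at dawn')   # encode
#   plain = func_vigenere('virink', cipher, 'd')         # any third arg decodes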
| 30.196429 | 74 | 0.350089 |
7d8c33c577dc39007eec8277d366b069630608c1 | 1,773 | py | Python | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
] | 24 | 2021-09-13T06:16:44.000Z | 2022-01-08T08:56:04.000Z | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
] | 32 | 2021-09-28T05:33:00.000Z | 2021-12-12T09:51:09.000Z | backend/risk_factors/tasks.py | Doctorinna/backend | cfff4fe751d668dcaf4834ebb730f5158c26e201 | [
"MIT"
] | 1 | 2021-10-04T21:52:15.000Z | 2021-10-04T21:52:15.000Z | from .utils import (get_prescription, get_attributes, get_group)
from .models import Disease, Result, Score, Question, SurveyResponse
from .analysis import cardio_risk_group, diabetes_risk_group, stroke_risk_group
from statistics import mean
from celery import shared_task
| 34.764706 | 79 | 0.668359 |
7d8c64c1f1dba35610d7552ede42b4b2192a13c9 | 419 | py | Python | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | 3 | 2019-10-31T19:07:48.000Z | 2019-11-20T23:14:15.000Z | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | 3 | 2021-03-09T22:54:52.000Z | 2021-05-10T19:19:00.000Z | augur/routes/__init__.py | Nayan-Das/augur | 857f4a4e7d688fd54356aa0f546834071fbabbf2 | [
"MIT"
] | 4 | 2019-11-05T20:22:12.000Z | 2019-12-12T18:08:30.000Z | import importlib
import os
import glob
from .user import create_user_routes
from .repo import create_repo_routes
from .broker import create_broker_routes
| 26.1875 | 55 | 0.778043 |
7d8fe3a63259aba89e6864813dbcb43ee8122092 | 2,117 | py | Python | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 4 | 2020-03-10T15:28:17.000Z | 2021-10-02T11:41:17.000Z | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 1 | 2020-03-25T11:31:44.000Z | 2020-03-25T11:31:44.000Z | stests/chain/set_transfer_native.py | goral09/stests | 4de26485535cadf1b708188a7133a976536ccba3 | [
"Apache-2.0"
] | 9 | 2020-02-25T18:43:42.000Z | 2021-08-10T17:08:42.000Z | import json
import random
import subprocess
from stests.core.logging import log_event
from stests.chain.utils import execute_cli
from stests.chain.utils import DeployDispatchInfo
from stests.core.types.chain import Account
from stests.core.types.infra import Network
from stests.core.types.infra import Node
from stests.core.utils import paths
from stests.events import EventType
# Method upon client to be invoked.
_CLIENT_METHOD = "transfer"
# Maximum value of a transfer ID.
_MAX_TRANSFER_ID = (2 ** 63) - 1
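# The dispatch function itself is not included in this row; a minimal sketch of
# how the bound above is typically consumed (the helper name is hypothetical):
def _random_transfer_id() -> int:
    """Returns a random transfer identifier within the allowed bounds."""
    return random.randint(1, _MAX_TRANSFER_ID)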
| 34.704918 | 146 | 0.687293 |
7d90aa90743d9451f50ce626438114785520c9d1 | 1,143 | py | Python | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | null | null | null | Binary Search Tree/235. Lowest Common Ancestor of a Binary Search Tree.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z | """
235. Lowest Common Ancestor of a Binary Search Tree
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
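# The Solution class is not present in this row; the standard iterative BST
# walk solves it (a minimal sketch):
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        while root:
            if p.val < root.val and q.val < root.val:
                root = root.left      # both targets lie in the left subtree
            elif p.val > root.val and q.val > root.val:
                root = root.right     # both targets lie in the right subtree
            else:
                return root           # split point is the lowest common ancestor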
| 28.575 | 61 | 0.523185 |
7d9246bc05b6e5994b39b6b9455b5e82dd240f3c | 3,494 | py | Python | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
] | 324 | 2015-01-02T20:48:33.000Z | 2021-12-11T14:44:34.000Z | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
] | 103 | 2015-01-02T03:01:34.000Z | 2020-04-02T19:03:53.000Z | waliki/acl.py | sckevmit/waliki | 5baaf6f043275920a1174ff233726f7ff4bfb5cf | [
"BSD-3-Clause"
] | 84 | 2015-01-07T08:53:05.000Z | 2021-01-04T00:26:38.000Z | from functools import wraps
from collections import Iterable
from django.conf import settings
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six import string_types
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import resolve_url
from waliki.utils import is_authenticated
from .models import ACLRule
from .settings import (WALIKI_ANONYMOUS_USER_PERMISSIONS,
WALIKI_LOGGED_USER_PERMISSIONS,
WALIKI_RENDER_403)
def check_perms(perms, user, slug, raise_exception=False):
"""a helper user to check if a user has the permissions
for a given slug"""
if isinstance(perms, string_types):
perms = {perms}
else:
perms = set(perms)
allowed_users = ACLRule.get_users_for(perms, slug)
if allowed_users:
return user in allowed_users
if perms.issubset(set(WALIKI_ANONYMOUS_USER_PERMISSIONS)):
return True
if is_authenticated(user) and perms.issubset(set(WALIKI_LOGGED_USER_PERMISSIONS)):
return True
# First check if the user has the permission (even anon users)
if user.has_perms(['waliki.%s' % p for p in perms]):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
# As the last resort, show the login form
return False
def permission_required(perms, login_url=None, raise_exception=False, redirect_field_name=REDIRECT_FIELD_NAME):
"""
this is analog to django's builtin ``permission_required`` decorator, but
improved to check per slug ACLRules and default permissions for
anonymous and logged in users
if there is a rule affecting a slug, the user needs to be part of the
rule's allowed users. If there isn't a matching rule, defaults permissions
apply.
"""
return decorator
| 39.704545 | 111 | 0.690326 |
7d9293e84f4a03376c976e40854cc463c3d0b2fe | 529 | py | Python | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | null | null | null | 2808.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z |
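# conv() is used below but was not included in this row; a minimal sketch
# consistent with its call sites (assumption: it maps a chess square such as
# 'a1' to numeric (column, row) values):
def conv(p):
    return (ord(p[0]) - ord('a') + 1, int(p[1:]))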
e = str(input()).split()
a = conv(e[0])
b = conv(e[1])
ax = int(a[0])
ay = int(a[1])
bx = int(b[0])
by = int(b[1])
if (abs(ax - bx) == 1 and abs(ay - by) == 2) or (abs(ax - bx) == 2 and abs(ay - by) == 1):
print('VALIDO')
else: print('INVALIDO')
| 23 | 90 | 0.404537 |
7d92e1048d2857d5559e9d7bb1d06d56001488c0 | 4,095 | py | Python | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
] | null | null | null | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
] | null | null | null | RabiesRefNAAP_CLI.py | jiangweiyao/RabiesRefNAAP | bd10ca5d9b759381e09ecc25e1456370e94a0744 | [
"Apache-1.1"
] | 1 | 2021-03-01T22:20:26.000Z | 2021-03-01T22:20:26.000Z | #!/usr/bin/env python
import sys
import os
import glob
import re
from datetime import date
import argparse
import subprocess
from pathlib import Path
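# main() is called below but not included in this row; a minimal argparse
# sketch consistent with the imports above (flag names and pipeline steps are
# assumptions, not the real pipeline):
def main():
    parser = argparse.ArgumentParser(description='Rabies reference-based nanopore assembly pipeline')
    parser.add_argument('-i', '--input', required=True, help='folder containing input fastq files')
    parser.add_argument('-o', '--output', default='output_{}'.format(date.today()), help='output folder')
    args = parser.parse_args()
    Path(args.output).mkdir(parents=True, exist_ok=True)
    for fastq in glob.glob(os.path.join(args.input, '*.fastq')):
        # placeholder for the real per-sample pipeline steps
        subprocess.run(['echo', fastq], check=True)
    return 0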
if __name__ == "__main__":
sys.exit(main())
| 47.616279 | 224 | 0.668132 |
7d93db8015155beda4e7ca3caccf0926ce883652 | 8,887 | py | Python | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
] | 4 | 2016-01-05T12:21:39.000Z | 2016-12-22T15:56:37.000Z | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
] | 132 | 2015-06-10T09:53:14.000Z | 2022-02-01T17:35:54.000Z | mtp_cashbook/apps/disbursements/tests/test_search.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-cashbook | d35a621e21631e577faacaeacb5ab9f883c9b4f4 | [
"MIT"
] | 3 | 2015-07-07T14:40:33.000Z | 2021-04-11T06:20:14.000Z | import datetime
from django.test import SimpleTestCase
from django.urls import reverse
from django.utils.html import strip_tags
import responses
from cashbook.tests import MTPBaseTestCase, api_url
from disbursements.forms import SearchForm
| 47.271277 | 117 | 0.567008 |
7d93e9d98b1bfee0032c7712ee1027aadf9abac0 | 620 | py | Python | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
] | null | null | null | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
] | 10 | 2018-03-05T17:56:11.000Z | 2018-03-13T16:50:51.000Z | pipelines/pipeline_util/graphite_extract_utility.py | MatMoore/app-performance-summary | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | [
"MIT"
] | 1 | 2021-04-10T19:50:33.000Z | 2021-04-10T19:50:33.000Z | '''
Utility for extracting data from the graphite API
'''
import os
from urllib.parse import urlencode
import pandas as pd
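# A minimal sketch of pulling one target from the Graphite render API into a
# DataFrame (the GRAPHITE_URL environment variable, the function name, and the
# JSON layout are assumptions):
def fetch_target(target, days=7):
    base_url = os.environ['GRAPHITE_URL']
    query = urlencode({'target': target, 'from': '-{}days'.format(days), 'format': 'json'})
    raw = pd.read_json('{}/render?{}'.format(base_url, query))
    # Graphite returns a list of {target, datapoints} objects; datapoints are [value, ts]
    points = pd.DataFrame(raw.loc[0, 'datapoints'], columns=['value', 'timestamp'])
    points['timestamp'] = pd.to_datetime(points['timestamp'], unit='s')
    return points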
| 28.181818 | 98 | 0.662903 |
7d953acfe0d26007513dac6a05f6317497155128 | 712 | py | Python | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | backend/streetsignup/migrations/0002_auto_20200901_1758.py | nicoepp/the-prayer-walk | 6c8217c33f399cfe46dc23075e13ca9464079cae | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-01 17:58
from django.db import migrations, models
import django.db.models.deletion
| 28.48 | 132 | 0.634831 |
7d9767476bcf26c64a3560357db2dd0c005504a9 | 9,830 | py | Python | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/feat/molecule_featurizers/coulomb_matrices.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | """
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
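# A minimal sketch (not the featurizer class itself) of the matrix from
# Montavon et al. 2013: off-diagonal entries are Z_i * Z_j / |R_i - R_j| and
# diagonal entries are 0.5 * Z_i ** 2.4. The function name and array-based
# signature are assumptions.
def _coulomb_matrix_sketch(charges, coords):
    z = np.asarray(charges, dtype=float)
    r = np.asarray(coords, dtype=float)
    dist = np.linalg.norm(r[:, None, :] - r[None, :, :], axis=-1)
    with np.errstate(divide='ignore'):
        m = np.outer(z, z) / dist
    np.fill_diagonal(m, 0.5 * z ** 2.4)
    return m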
| 31.812298 | 88 | 0.653713 |
7d9822ec626534a501f48b72a69df1f8b8c72c49 | 2,882 | py | Python | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
] | 32 | 2019-06-28T06:04:30.000Z | 2022-03-11T10:44:44.000Z | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
] | 107 | 2019-07-10T19:09:51.000Z | 2022-03-10T22:52:58.000Z | edk2toollib/uefi/edk2/fmp_payload_header.py | mikeytdisco/edk2-pytool-library | eab28cab8cf26f1018f7cbfac510a503444f0f0d | [
"BSD-2-Clause-Patent"
] | 26 | 2019-07-24T03:27:14.000Z | 2022-03-11T10:44:49.000Z | ## @file
# Module that encodes and decodes a FMP_PAYLOAD_HEADER with a payload.
# The FMP_PAYLOAD_HEADER is processed by the FmpPayloadHeaderLib in the
# FmpDevicePkg.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
FmpPayloadHeader
'''
import struct
| 33.905882 | 112 | 0.651631 |
7d9846b8c90e6af12c68768b068248c24ba1f30a | 1,580 | py | Python | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
] | 8 | 2020-03-17T21:12:18.000Z | 2021-12-12T15:55:54.000Z | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
] | 2 | 2021-07-19T06:18:43.000Z | 2022-02-10T12:17:58.000Z | 21-fs-ias-lec/15-AudioLink/Testing.py | paultroeger/BACnet | 855b931f2a0e9b64e9571f41de2a8cd71d7a01f4 | [
"MIT"
] | 25 | 2020-03-20T09:32:45.000Z | 2021-07-18T18:12:59.000Z | from Sender import Sender
from Receiver import Receiver
import scipy
import numpy as np
import scipy.io
import scipy.io.wavfile
import matplotlib.pyplot as plt
from scipy import signal
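# readWav() and repdecode() are called below but were not included in this row;
# minimal sketches consistent with the call sites (the repetition factor is
# assumed to mean a majority vote over consecutive groups of bits):
def readWav(path):
    _, data = scipy.io.wavfile.read(path)
    return data

def repdecode(bits, factor):
    usable = len(bits) // factor * factor
    chunks = np.reshape(np.asarray(bits[:usable]), (-1, factor))
    return (np.mean(chunks, axis=1) > 0.5).astype(int)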
testData = readWav('testbitsnopilots.wav')
subset = readWav('wrongbitstest.wav')
r = Receiver()
rate = 160
corr = 235292
offset = r.findOffsetToFirstChange(testData)
truncated = r.truncateToTauS(testData, offset)
plt.plot(testData[corr - len(subset)//2:corr + len(subset)//2])
plt.show()
plt.plot(subset)
plt.show()
plt.plot(truncated)
plt.show()
demod = r.demodulate(truncated, 1/16, 1/40)
result = []
start = 0
for i in range(20):
if i == 2:
a = 5
plt.plot(truncated[start: start + 10 * 36 * 160])
plt.show
a = 6
#part_demod = r.demodulate(truncated[start: start + 10*36 * 160], 1/16, 1/40)
#result.append(list(r.repdecode(part_demod, 10)))
start = start + 10*36*160
print('result', result)
print(demod)
print(len(demod[1:]))
print(repdecode(demod[1:], 10))
sender = Sender()
demod = repdecode(demod, 10)
expected = sender.getTestDataAsBits()
error_sum = np.sum(np.abs(expected - demod))
print('error sum', error_sum)
print('error weight', np.sum(expected - demod))
print('error percentage', error_sum / len(expected) * 100) | 21.944444 | 81 | 0.68038 |
7d984b4f33bcef674a43431532ba484ab9af642d | 615 | py | Python | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
] | null | null | null | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
] | null | null | null | suppress.py | j0hntv/suppress | eea5dbdb904e67abdc792fd946ab51f4d550734f | [
"MIT"
] | null | null | null | """A simple wrapper around contextlib.suppress"""
import contextlib
from functools import wraps
__version__ = "0.1.1"
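# --- sketch (assumption): the decorator itself is missing from this copy; a
# minimal wrapper consistent with the docstring and the imports above. ---
def suppress(*exceptions):
    """Call the decorated function inside contextlib.suppress(*exceptions)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            with contextlib.suppress(*exceptions):
                return func(*args, **kwargs)
        return wrapper
    return decorator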
| 21.964286 | 50 | 0.604878 |
7d9928a0889c40b5a6ffd1d19e7ea9f5236cde32 | 7,015 | py | Python | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
] | null | null | null | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
] | null | null | null | anaconda_project/requirements_registry/requirements/conda_env.py | vertingo/Anaconda_Videos_Tutos | f30f2a0549a7b81c17f4d5d249edc59eb3c05458 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""Conda-env-related requirements."""
from __future__ import absolute_import, print_function
from os.path import join
from anaconda_project.requirements_registry.requirement import EnvVarRequirement, RequirementStatus
from anaconda_project.conda_manager import new_conda_manager, CondaManagerError
from anaconda_project.internal import conda_api
def check_status(self, environ, local_state_file, default_env_spec_name, overrides, latest_provide_result=None):
"""Override superclass to get our status."""
return self._create_status_from_analysis(environ,
local_state_file,
default_env_spec_name,
overrides=overrides,
provider_class_name=self._provider_class_name,
status_getter=self._status_from_analysis,
latest_provide_result=latest_provide_result)
class CondaBootstrapEnvRequirement(CondaEnvRequirement):
"""A requirement for CONDA_PREFIX to point to a conda env."""
_provider_class_name = 'CondaBootstrapEnvProvider'
def __init__(self, registry, env_specs=None):
"""Extend superclass to default to CONDA_PREFIX and carry environment information.
Args:
registry (RequirementsRegistry): plugin registry
env_specs (dict): dict from env name to ``CondaEnvironment``
"""
super(CondaBootstrapEnvRequirement, self).__init__(registry=registry, env_var="BOOTSTRAP_ENV_PREFIX")
self.env_specs = env_specs
self._conda = new_conda_manager()
def _status_from_analysis(self, environ, local_state_file, analysis):
config = analysis.config
assert 'source' in config
# we expect the bootstrap env to not be the env running the cmd
assert config['source'] in ['unset', 'environ', 'project']
env_name = 'bootstrap-env'
prefix = join(environ['PROJECT_DIR'], 'envs', env_name)
if config['source'] == 'environ':
assert config['value'] == prefix
environment_spec = self.env_specs[env_name]
try:
deviations = self._conda.find_environment_deviations(prefix, environment_spec)
if not deviations.ok:
return (False, deviations.summary)
except CondaManagerError as e:
return (False, str(e))
current_env_setting = environ.get(self.env_var, None)
if current_env_setting is None:
# this is our vaguest / least-descriptionful message so only if we didn't do better above
return (False, "%s is not set." % self.env_var)
else:
return (True, "Using Conda environment %s." % prefix)
def _create_status_from_analysis(self, environ, local_state_file, default_env_spec_name, overrides,
latest_provide_result, provider_class_name, status_getter):
provider = self.registry.find_provider_by_class_name(provider_class_name)
analysis = provider.analyze(self, environ, local_state_file, default_env_spec_name, overrides)
(has_been_provided, status_description) = status_getter(environ, local_state_file, analysis)
# hardcode bootstrap env name since it's a very especial case
env_spec_name = 'bootstrap-env'
return RequirementStatus(self,
has_been_provided=has_been_provided,
status_description=status_description,
provider=provider,
analysis=analysis,
latest_provide_result=latest_provide_result,
env_spec_name=env_spec_name)
| 41.264706 | 116 | 0.623236 |
7d9a43e7079b4241b2e56a68cd01b2edf6c43289 | 1,697 | py | Python | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 3faf90d704782e1d6a186b0c8ea7fb1e2ec97a2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Hieu Nguyen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from glob import glob
from PIL import Image
from torch.utils.data import Dataset
from ..transforms import get_transforms
from .build import DATASET_REGISTRY
| 26.936508 | 80 | 0.61815 |
7d9a756d138cef5d7f938318a3b5d1bd98451587 | 1,055 | py | Python | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
] | 4 | 2021-02-13T03:39:38.000Z | 2022-01-30T19:41:43.000Z | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
] | null | null | null | ohs/domain/create_component.py | codejsha/infrastructure | 01ff58fea0a7980fce30e37cb02a7c1217c46d9f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
domain_home = os.environ['DOMAIN_HOME']
node_manager_name = os.environ['NODE_MANAGER_NAME']
component_name = os.environ['COMPONENT_NAME']
component_admin_listen_address = os.environ['COMPONENT_ADMIN_LISTEN_ADDRESS']
component_admin_listen_port = os.environ['COMPONENT_ADMIN_LISTEN_PORT']
component_listen_address = os.environ['COMPONENT_LISTEN_ADDRESS']
component_listen_port = os.environ['COMPONENT_LISTEN_PORT']
component_ssl_listen_port = os.environ['COMPONENT_SSL_LISTEN_PORT']
######################################################################
readDomain(domain_home)
cd('/')
create(component_name, 'SystemComponent')
cd('/SystemComponent/' + component_name)
cmo.setComponentType('OHS')
set('Machine', node_manager_name)
cd('/OHS/' + component_name)
cmo.setAdminHost(component_admin_listen_address)
cmo.setAdminPort(component_admin_listen_port)
cmo.setListenAddress(component_listen_address)
cmo.setListenPort(component_listen_port)
cmo.setSSLListenPort(component_ssl_listen_port)
updateDomain()
closeDomain()
exit()
| 31.969697 | 77 | 0.777251 |
7d9ad66a69e3d43361db2e0fdcc4e1f1ce926057 | 1,965 | py | Python | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | ironicclient/tests/functional/test_driver.py | sapcc/python-ironicclient | 8dcbf5b6d0bc2c2dc3881dbc557e2e403e2fe2b4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional import base
| 34.473684 | 79 | 0.686514 |
7d9bd1161fcdf87364f5ca0317aac04cfac291b2 | 380 | py | Python | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | null | null | null | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 40 | 2021-12-30T15:57:10.000Z | 2022-01-26T16:44:24.000Z | hw2/2.3 - list.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 1 | 2022-03-12T19:17:26.000Z | 2022-03-12T19:17:26.000Z | # https://github.com/ArtemNikolaev/gb-hw/issues/18
seasons = [
    'winter',
    'spring',
    'summer',
    'autumn'
]
month = int(input('Enter the month number: '))
if month < 1 or month > 12:
    print('There are only 12 months. January is 1, December is 12')
else:
    seasonInt = (month % 12) // 3
    print('Season: ' + seasons[seasonInt])
7d9be08030c54e953623ba6d26f1efa4c9f9a3bb | 414 | py | Python | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
] | null | null | null | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
] | null | null | null | modoboa/admin/signals.py | vinaebizs/modoboa | fb1e7f4c023b7eb6be3aa77174bfa12fc653670e | [
"0BSD"
] | null | null | null | """Modoboa admin signals."""
import django.dispatch
use_external_recipients = django.dispatch.Signal(providing_args=["recipients"])
extra_domain_actions = django.dispatch.Signal(
providing_args=["user", "domain"])
extra_domain_dashboard_widgets = django.dispatch.Signal(
providing_args=["user", "domain"])
extra_account_dashboard_widgets = django.dispatch.Signal(
providing_args=["user", "account"])
| 34.5 | 79 | 0.772947 |
7d9c78ce7d3a0631fc266360f9979634e2fb0ff2 | 1,401 | py | Python | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
] | 2 | 2020-02-12T15:10:02.000Z | 2021-07-02T18:35:34.000Z | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
] | 2 | 2019-10-29T18:59:26.000Z | 2019-12-28T15:43:19.000Z | psono/restapi/tests/health_check.py | psono/psono-fileserver | 537fd392ea9b50807451dbb814266dfeed8c783b | [
"Apache-2.0"
] | 4 | 2019-10-04T00:41:27.000Z | 2021-04-28T13:25:37.000Z | from django.urls import reverse
from rest_framework import status
from .base import APITestCaseExtended
from mock import patch
from restapi import models
| 20.910448 | 82 | 0.631692 |
7d9d90a49a7ce7f5c4dc585757591fb9e4a928b7 | 1,217 | py | Python | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
] | null | null | null | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
] | null | null | null | conftest.py | elijahr/python-portaudio | 8434396cf7a9faa8934cab289749daf08b04d0b3 | [
"MIT"
] | null | null | null | import asyncio
import contextlib
import glob
import itertools
import logging
import os
import pytest
import uvloop
try:
import tracemalloc
tracemalloc.start()
except ImportError:
# Not available in pypy
pass
# clear compiled cython tests
for path in itertools.chain(
glob.glob(os.path.join('tests', '*.so')),
glob.glob(os.path.join('tests', '*.c'))):
os.unlink(path)
def event_loop(loop_mod):
loop = loop_mod.new_event_loop()
asyncio.set_event_loop(loop)
if loop_mod != uvloop:
# uvloop in debug mode calls extract_stack, which results in "ValueError: call stack is not deep enough"
# for Cython code
loop.set_debug(True)
with contextlib.closing(loop):
yield loop
def pytest_configure(config):
if config.getoption('verbose') > 0:
h = logging.StreamHandler()
h.setLevel(logging.DEBUG)
logger = logging.getLogger('portaudio')
logger.addHandler(h)
logger.setLevel(logging.DEBUG) | 21.350877 | 112 | 0.676253 |
7d9edb01d9ce450078aba93d6df890971eee58cc | 3,297 | py | Python | tests/test_storage.py | angru/datamodel | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
] | 2 | 2020-06-17T21:00:09.000Z | 2020-07-07T15:49:00.000Z | tests/test_storage.py | angru/datamodel | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
] | 14 | 2020-06-17T14:39:19.000Z | 2020-12-25T17:05:43.000Z | tests/test_storage.py | angru/corm | d242b393970dac1a8a53603454ed870fe70b27cf | [
"MIT"
] | null | null | null | from corm import Entity, Field, Storage, RelationType
| 22.127517 | 65 | 0.59721 |
7da0f8191abd59b72b6876b877822726d97f2ede | 2,268 | py | Python | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
] | null | null | null | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
] | null | null | null | server/test/test_serverInfoAPI.py | rmetcalf9/VirtualPresencePicture | 4822d2dac0be18d0da30bab9a4f7a8b34091799e | [
"MIT"
] | null | null | null | from TestHelperSuperClass import testHelperAPIClient, env
import unittest
import json
from appObj import appObj
import pytz
import datetime
serverInfoWithoutAnyPictures = {
'Server': {
'Version': env['APIAPP_VERSION']
},
'Pictures': []
}
samplePictureIdentifier = 'ABC123'
samplePictureContent = { 'SomeContent': 'abc' }
serverInfoWithSamplePictureContent = {
'Server': {
'Version': env['APIAPP_VERSION']
},
'Pictures': [{
'Identifier': samplePictureIdentifier,
'Expires': "2018-11-22T14:16:00+00:00",
'Contents': samplePictureContent
}]
}
| 36 | 111 | 0.738536 |
7da3966430bc2a6549730b528f313eb6f4d29793 | 7,990 | py | Python | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | zp_database/make_zp/create_hard_xray_zp.py | sajid-ali-nu/zone_plate_testing | c50afd575a6e733fce265db2ab8cc1c7b21cfe69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# This script generates a zone plate pattern (based on partial filling) given the material, energy, grid size and number of zones as input
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from joblib import Parallel, delayed
from tqdm import tqdm, trange
import urllib,os,pickle
from os.path import dirname as up
# Importing all the required libraries. Numba is used to optimize functions.
# In[2]:
# *repeat_pattern* : produces the zone plate pattern given the pattern in only one quadrant(X,Y>0) as input.
# * *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.
# * *Outputs* : Z itself is modified to reflect the repetition.
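# A minimal sketch (the original cell is missing from this copy): mirror the
# computed x >= y octant into the rest of the plane in place.
def repeat_pattern(X, Y, Z):
    Z[:, :] = np.maximum(Z, Z.T)           # complete the quadrant from its x >= y octant
    Z[:, :] = np.maximum(Z, np.flipud(Z))  # mirror into y < 0
    Z[:, :] = np.maximum(Z, np.fliplr(Z))  # mirror into x < 0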
# In[3]:
# *get_property* : gets delta and beta for a given material at the specified energy from Henke et al.
# * *Inputs* : mat - material, energy - energy in eV
# * *Outputs* : delta, beta
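# A minimal sketch (the original cell is missing from this copy): the real
# function looked the values up in the Henke et al. tables (note the urllib
# import above); the hard-coded entry below is only an approximate stand-in.
def get_property(mat, energy):
    # (material, energy in eV) -> (delta, beta); values are approximate
    henke = {('Au', 10000): (2.99e-05, 2.21e-06)}
    return henke[(mat, energy)]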
# In[4]:
# *partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.
# * *Inputs* : x,y - coordinates of the point, step - step size, r1,r2 - inner and outer radii of ring, n - resolution
# * *Outputs* : fill_factor - value of the pixel based on amount of ring passing through it
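# A minimal sketch (the original cell is missing from this copy): subsample
# the pixel n x n and return the fraction of subpixels inside the ring.
@njit
def partial_fill(x, y, step, r1, r2, n):
    count = 0
    for i in range(n):
        for j in range(n):
            x_ = x - step / 2 + (i + 0.5) * step / n
            y_ = y - step / 2 + (j + 0.5) * step / n
            r = np.sqrt(x_ * x_ + y_ * y_)
            if r >= r1 and r <= r2:
                count += 1
    return count / (n * n)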
# In[5]:
#find the radius of the nth zone
def zone_radius(n,f,wavel):
return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)
# *zone_radius* : function to find the radius of a zone given the zone number and wavelength
# * *Inputs* : n - zone number, f - focal length, wavel - wavelength
# * *Outputs* : radius of the zone as specified by the inputs
# In[6]:
# *make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number
# * *Inputs* : X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0), r1,r2 - inner and outer radii, n - parameter for the partial_fill function
# * *Outputs* : z - output pattern with one quadrant filled.
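# A minimal sketch (the original cell is missing from this copy): evaluate
# partial_fill at every octant point that can intersect the ring; step_xy is
# read from module level at call time.
def make_quadrant(X, Y, flag, r1, r2, n):
    z = np.zeros_like(X)
    for p, q in zip(*flag):
        r = np.sqrt(X[p, q]**2 + Y[p, q]**2)
        if r1 - step_xy <= r <= r2 + step_xy:
            z[p, q] = partial_fill(X[p, q], Y[p, q], step_xy, r1, r2, n)
    return z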
# In[7]:
#2D ZP
# *make_ring* : function used to create a ring given the relevant parameters
# * *Inputs* : i-zone number,radius - array of radii ,X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0),n - parameter for the partial_fill function
# * *Outputs* : None. Saves the rings to memory.
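# A minimal sketch (the original cell is missing from this copy): build one
# ring, mirror it to the full plate and save it; the file name is illustrative.
def make_ring(i):
    z = make_quadrant(X, Y, flag, radius[i - 1], radius[i], 10)
    repeat_pattern(X, Y, z)
    np.save('ring_' + str(i) + '.npy', z)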
# In[8]:
mat = 'Au'
energy = 10000 #Energy in eV
f = 10e-3 #focal length in meters
wavel = (1239.84/energy)*10**(-9) #Wavelength in meters
delta,beta = get_property(mat,energy)
zones = 700 #number of zones
radius = np.zeros(zones)
# Setting up the parameters and initializing the variables.
# In[9]:
for k in range(zones):
radius[k] = zone_radius(k,f,wavel)
# Filling the radius array with the radius of zones for later use in making the rings.
# In the next few code blocks, we check if the parameters of the simulation make sense. First we print out the input and output pixel sizes assuming we will be using the 1FT propagator. Then we see if the pixel sizes are small enough compared to the outermost zone width. Finally we check if the focal spot can be contained for the given amount of tilt angle.
# In[10]:
grid_size = 55296
input_xrange = 262e-6
step_xy = input_xrange/grid_size
L_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)
step_xy_output = L_out/grid_size
print(' Output L : ',L_out)
print(' output pixel size(nm) : ',step_xy_output*1e9)
print(' input pixel size(nm) : ',step_xy*1e9)
# In[11]:
drn = radius[-1]-radius[-2]
print(' maximum radius(um) : ',radius[-1]*1e6)
print(' outermost zone width(nm) :',drn*1e9)
# In[12]:
print(' max shift of focal spot(um) : ',(L_out/2)*1e6)
# invert the following to get max tilt allowance
# after which the focal spot falls off the
# simulation plane
# np.sin(theta*(np.pi/180))*f = (L_out/2)
theta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)
print(' max wavefield aligned tilt(deg) : ',theta_max)
# In[13]:
if step_xy > 0.25*drn :
    print(' WARNING ! input pixel size too large')
print(' ratio of input step size to outermost zone width', step_xy/drn)
if step_xy_output > 0.25*drn :
    print(' WARNING ! output pixel size too large')
print(' ratio of output step size to outermost zone width', step_xy_output/drn)
# In[14]:
zones_to_fill = []
for i in range(zones):
if i%2 == 1 :
zones_to_fill.append(i)
zones_to_fill = np.array(zones_to_fill)
# Making a list of zones to fill. (Since only alternate zones are filled in our case. This can be modified as per convenience)
# In[ ]:
try :
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
except :
os.mkdir(up(os.getcwd())+str('/hard_xray_zp'))
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
# Store the location of each ring of the zone plate separately in a sub directory. This is more efficient than storing the whole zone plate array !
# In[ ]:
x1 = input_xrange/2
x = np.linspace(-x1,x1,grid_size)
step_xy = x[-1]-x[-2]
zp_coords =[-x1,x1,-x1,x1]
# In[ ]:
X,Y = np.meshgrid(x,x)
flag = np.where((X>0)&(Y>0)&(X>=Y))
# Creating the input 1D array and setting the parameters for use by the make ring function.
# Note that X,Y,flag and step_xy will be read by multiple processes which we will spawn using joblib.
# In[ ]:
get_ipython().run_cell_magic('capture', '', 'from joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)')
# Creating the rings ! (Adjust the number of jobs depending on CPU cores.)
# In[ ]:
params = {'grid_size':grid_size,'step_xy':step_xy,'energy(in eV)':energy,'wavelength in m':wavel,'focal_length':f,'zp_coords':zp_coords,'delta':delta,'beta':beta}
pickle.dump(params,open('parameters.pickle','wb'))
# Pickling and saving all the associated parameters along with the rings for use in simulation!
| 29.592593 | 359 | 0.659324 |
7da45f218ab8516fdf8f91e39f9a7c42a449c690 | 1,740 | py | Python | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
] | null | null | null | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
] | null | null | null | model/kubernetes.py | adracus/cc-utils | dcd1ff544d8b18a391188903789d1cac929f50f9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.base import (
NamedModelElement,
ModelBase,
)
| 31.071429 | 99 | 0.7 |
7da75a749aad9d8e1c359fa964268c99722cc54e | 180 | py | Python | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
] | null | null | null | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
] | null | null | null | test/test.py | justifyzz/Python-Assignment-1 | 8386203a9cf7099754586c26ba6646ec77dc6165 | [
"MIT"
] | null | null | null | from pycoingecko import CoinGeckoAPI
cg = CoinGeckoAPI()
number = int(input('Enter the number of coins: '))
# assumption: the code that built these lists is missing from this copy; fetch
# the top coins by market cap from the CoinGecko markets endpoint instead
markets = cg.get_coins_markets(vs_currency='usd', per_page=number)
listOfNames = [coin['name'] for coin in markets]
listOfMarketCaps = [coin['market_cap'] for coin in markets]
length = len(markets)
for i in range(length):
    print(i + 1, ':', listOfNames[i], listOfMarketCaps[i])
| 22.5 | 62 | 0.672222 |
7da76f883c897444204f5a70123af7ff361ec610 | 2,528 | py | Python | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 1,520 | 2018-03-01T13:37:49.000Z | 2022-03-25T11:40:20.000Z | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 87 | 2018-03-03T15:12:50.000Z | 2022-02-21T15:24:12.000Z | pymagnitude/third_party/allennlp/tests/data/dataset_readers/snli_reader_test.py | tpeng/magnitude | aec98628b5547773ca8c4114ec6d1ad51e21b230 | [
"MIT"
] | 121 | 2018-03-03T08:40:53.000Z | 2022-03-16T05:19:38.000Z | # pylint: disable=no-self-use,invalid-name
from __future__ import division
from __future__ import absolute_import
import pytest
from allennlp.data.dataset_readers import SnliReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
| 52.666667 | 109 | 0.560918 |
7da9d5721ae20d0a2dd2bfb648ef9c35e133f2d4 | 4,362 | py | Python | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
] | null | null | null | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
] | null | null | null | binding/python/setup.py | pmateusz/libgexf | a25355db141a1d4e178553f42e37acfd9f485e3e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py file for Libgexf
"""
from setuptools import Extension, setup
#from distutils.core import Extension, setup
libgexf_module = Extension(
    '_libgexf', # generates a _libgexf.so
include_dirs=['/usr/include/libxml2'],
sources=[
#        'libgexf.i', # generates a libgexf.py (only works for C sources, not C++)
        # C sources: the .o files will be generated automatically,
        # and automatically linked into the module
#io::input
'../../libgexf/filereader.cpp',
'../../libgexf/abstractparser.cpp',
'../../libgexf/gexfparser.cpp',
'../../libgexf/legacyparser.cpp',
'../../libgexf/rngvalidator.cpp',
'../../libgexf/schemavalidator.cpp',
#io::output
'../../libgexf/filewriter.cpp',
'../../libgexf/legacywriter.cpp',
#io::utils
'../../libgexf/conv.cpp',
#db::topo
'../../libgexf/graph.cpp',
'../../libgexf/dynamicgraph.cpp',
'../../libgexf/directedgraph.cpp',
'../../libgexf/undirectedgraph.cpp',
'../../libgexf/nodeiter.cpp',
'../../libgexf/edgeiter.cpp',
#db::data
'../../libgexf/data.cpp',
'../../libgexf/metadata.cpp',
'../../libgexf/attributeiter.cpp',
'../../libgexf/attvalueiter.cpp',
#main
'../../libgexf/gexf.cpp',
'../../libgexf/memoryvalidator.cpp',
        # path of the wrapper generated automatically by SWIG (so this wrapper must already exist)
'libgexf_wrap.cpp',
],
    # optionally, the libraries to link against
    # for example if we need libxml2, this is where we specify it to the compiler
    # beware of gcc habits and command-line compilation conventions:
    # no need here to give the gcc-specific format ("-lpthread") or the visual studio-specific one, etc.
    # just put "pthread" and the python script will prepend the "-l" if necessary
libraries=[
'stdc++',
'xml2' #see xml2-config --libs to get the linker flags
        #'z', # zlib (compression) (unnecessary on ubuntu for example, since it is already part of the base development packaging)
        #'pthread' # Posix Threads (posix multithreading) (unnecessary on linux, since posix is already part of the system)
]
)
setup (
    name='libgexf', # important, this is the real module name, used when doing an "import libgexf;" for example
    # miscellaneous metadata
version='0.1.2',
author="Sebastien Heymann",
author_email="sebastien.heymann@gephi.org",
url="http://gexf.net",
description="""Toolkit library for GEXF file format.""",
long_description="""""",
    # list of modules to compile.
    # the "libgexf_module" module was defined at line 12
#
ext_modules=[ libgexf_module, ],
    # if we want to add a python package
    # for example
    # packages = ["monpackage"]
    # will add the package
    # monpackage/
    # since in python packages are in fact simply directories containing
    # a "constructor" file __init__.py (it is a bit like an object-oriented file system)
    # this will recursively add
    # monpackage/__init__.py
    # monpackage/sous/sous/sous/package/fichier.py
    # etc..
    #packages= ["monpackage", ], #
    # if we want to add extra python scripts as well
    # for example
    # py_modules = ["monmodule"]
    # will add the file
    # monmodule.py (in the current directory)
    # to the package
py_modules = ["libgexf"], # UNCOMMENT TO USE THE SWIG WRAPPER
    # various extra files can also be added (readme, examples, licenses, html doc etc..)
#data_files = [('share/libgexf-python/',['readme.txt']),],
    # more metadata, for the online python module database (python.org)
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: Free for non-commercial use",
"Operating System :: POSIX :: Linux",
"Topic :: Software Development :: Libraries :: Python Modules", ],
)
| 36.049587 | 128 | 0.618294 |
7da9f98f6db4dd526d7eaf26e1220f285a37877a | 7,933 | bzl | Python | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
] | 1 | 2017-06-12T02:10:48.000Z | 2017-06-12T02:10:48.000Z | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
] | null | null | null | util/import/raze/crates.bzl | silas-enf/rules_rust | 41b39f0c9951dfda3bd0a95df31695578dd3f5ea | [
"Apache-2.0"
] | null | null | null | """
@generated
cargo-raze generated Bazel file.
DO NOT EDIT! Replaced on runs of cargo-raze
"""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") # buildifier: disable=load
def rules_rust_util_import_fetch_remote_crates():
"""This function defines a collection of repos and should be called in a WORKSPACE file"""
maybe(
http_archive,
name = "rules_rust_util_import__aho_corasick__0_7_15",
url = "https://crates.io/api/v1/crates/aho-corasick/0.7.15/download",
type = "tar.gz",
sha256 = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5",
strip_prefix = "aho-corasick-0.7.15",
build_file = Label("//util/import/raze/remote:BUILD.aho-corasick-0.7.15.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__cfg_if__1_0_0",
url = "https://crates.io/api/v1/crates/cfg-if/1.0.0/download",
type = "tar.gz",
sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd",
strip_prefix = "cfg-if-1.0.0",
build_file = Label("//util/import/raze/remote:BUILD.cfg-if-1.0.0.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__env_logger__0_8_4",
url = "https://crates.io/api/v1/crates/env_logger/0.8.4/download",
type = "tar.gz",
sha256 = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3",
strip_prefix = "env_logger-0.8.4",
build_file = Label("//util/import/raze/remote:BUILD.env_logger-0.8.4.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__getrandom__0_2_3",
url = "https://crates.io/api/v1/crates/getrandom/0.2.3/download",
type = "tar.gz",
sha256 = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753",
strip_prefix = "getrandom-0.2.3",
build_file = Label("//util/import/raze/remote:BUILD.getrandom-0.2.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__lazy_static__1_4_0",
url = "https://crates.io/api/v1/crates/lazy_static/1.4.0/download",
type = "tar.gz",
sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646",
strip_prefix = "lazy_static-1.4.0",
build_file = Label("//util/import/raze/remote:BUILD.lazy_static-1.4.0.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__libc__0_2_112",
url = "https://crates.io/api/v1/crates/libc/0.2.112/download",
type = "tar.gz",
sha256 = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125",
strip_prefix = "libc-0.2.112",
build_file = Label("//util/import/raze/remote:BUILD.libc-0.2.112.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__log__0_4_14",
url = "https://crates.io/api/v1/crates/log/0.4.14/download",
type = "tar.gz",
sha256 = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710",
strip_prefix = "log-0.4.14",
build_file = Label("//util/import/raze/remote:BUILD.log-0.4.14.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__memchr__2_4_1",
url = "https://crates.io/api/v1/crates/memchr/2.4.1/download",
type = "tar.gz",
sha256 = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a",
strip_prefix = "memchr-2.4.1",
build_file = Label("//util/import/raze/remote:BUILD.memchr-2.4.1.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__proc_macro2__1_0_33",
url = "https://crates.io/api/v1/crates/proc-macro2/1.0.33/download",
type = "tar.gz",
sha256 = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a",
strip_prefix = "proc-macro2-1.0.33",
build_file = Label("//util/import/raze/remote:BUILD.proc-macro2-1.0.33.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__quickcheck__1_0_3",
url = "https://crates.io/api/v1/crates/quickcheck/1.0.3/download",
type = "tar.gz",
sha256 = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6",
strip_prefix = "quickcheck-1.0.3",
build_file = Label("//util/import/raze/remote:BUILD.quickcheck-1.0.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__quote__1_0_10",
url = "https://crates.io/api/v1/crates/quote/1.0.10/download",
type = "tar.gz",
sha256 = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05",
strip_prefix = "quote-1.0.10",
build_file = Label("//util/import/raze/remote:BUILD.quote-1.0.10.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__rand__0_8_4",
url = "https://crates.io/api/v1/crates/rand/0.8.4/download",
type = "tar.gz",
sha256 = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8",
strip_prefix = "rand-0.8.4",
build_file = Label("//util/import/raze/remote:BUILD.rand-0.8.4.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__rand_core__0_6_3",
url = "https://crates.io/api/v1/crates/rand_core/0.6.3/download",
type = "tar.gz",
sha256 = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7",
strip_prefix = "rand_core-0.6.3",
build_file = Label("//util/import/raze/remote:BUILD.rand_core-0.6.3.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__regex__1_4_6",
url = "https://crates.io/api/v1/crates/regex/1.4.6/download",
type = "tar.gz",
sha256 = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759",
strip_prefix = "regex-1.4.6",
build_file = Label("//util/import/raze/remote:BUILD.regex-1.4.6.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__regex_syntax__0_6_25",
url = "https://crates.io/api/v1/crates/regex-syntax/0.6.25/download",
type = "tar.gz",
sha256 = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b",
strip_prefix = "regex-syntax-0.6.25",
build_file = Label("//util/import/raze/remote:BUILD.regex-syntax-0.6.25.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__syn__1_0_82",
url = "https://crates.io/api/v1/crates/syn/1.0.82/download",
type = "tar.gz",
sha256 = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59",
strip_prefix = "syn-1.0.82",
build_file = Label("//util/import/raze/remote:BUILD.syn-1.0.82.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__unicode_xid__0_2_2",
url = "https://crates.io/api/v1/crates/unicode-xid/0.2.2/download",
type = "tar.gz",
sha256 = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3",
strip_prefix = "unicode-xid-0.2.2",
build_file = Label("//util/import/raze/remote:BUILD.unicode-xid-0.2.2.bazel"),
)
maybe(
http_archive,
name = "rules_rust_util_import__wasi__0_10_2_wasi_snapshot_preview1",
url = "https://crates.io/api/v1/crates/wasi/0.10.2+wasi-snapshot-preview1/download",
type = "tar.gz",
sha256 = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6",
strip_prefix = "wasi-0.10.2+wasi-snapshot-preview1",
build_file = Label("//util/import/raze/remote:BUILD.wasi-0.10.2+wasi-snapshot-preview1.bazel"),
)
| 41.103627 | 103 | 0.658767 |
7dab84050bffe62a65b369edcbc5f292e22e4734 | 747 | py | Python | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
] | null | null | null | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
] | null | null | null | scripts/print_thread_name.py | Satheeshcharon/Multithreading-python | 4dcc18d5d417701d8f67f4d92ffa915e5c051a60 | [
"MIT"
] | null | null | null | #!/usr/bin/python
## This program creates a thread,
## officially names it and
## tries to print the name
import threading
import time
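## --- sketch (assumption): Main() is missing from this copy; a minimal body
## consistent with the header comment above.
def print_name():
    # the name given at creation is reported by threading.current_thread()
    print(threading.current_thread().name)
    time.sleep(0.1)
def Main():
    worker = threading.Thread(target=print_name, name='officially-named-thread')
    worker.start()
    worker.join()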
if __name__ == "__main__":
Main()
| 16.977273 | 55 | 0.710843 |
7dac2231269fa172423e388357c676a691296ba3 | 6,241 | py | Python | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
] | 9 | 2019-09-23T16:21:50.000Z | 2021-11-23T13:14:27.000Z | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
] | null | null | null | scripts/first_trace_success_test.py | axelzedigh/DLSCA | f4a04bbc008784cb3f48832a2b4394850048f116 | [
"Unlicense"
] | 7 | 2019-07-12T06:30:23.000Z | 2021-11-23T13:14:29.000Z | import os.path
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from keras.losses import categorical_crossentropy
import tensorflow as tf
import heapq
import re
modelName = 'CW_validation.h5'
successResultsNPY = []
############################################################################################################
# #
# this test was designed to measure the first attempt success rate of classification, and thus of keybyte #
# recovery from a single trace. It plots this in terms of keybyte values to investigate if there is a #
# difference in performance depending on the value of the Sbox output. #
# #
############################################################################################################
Sbox = np.array([
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
])
#create a (256, 2) shaped matrix with "number of checks for each keybyte" as [:,0] and
#"number of successes" for [:,1]
#check first try accuracy of model against XMega2 test data
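#--- sketches (assumptions): load_traces() and check_model() are missing from
#--- this copy; the bodies below are reconstructions inferred from the comments
#--- above and from the call sites, not the original code.
def load_traces(tracefile, ptfile, keyfile):
    #assumption: the capture files are plain .npy dumps of matching length
    traces = np.load(tracefile)
    plaintext = np.load(ptfile)
    keys = np.load(keyfile)
    return traces, plaintext, keys
def check_model(model_path, traces, plaintext, keys):
    #rows are indexed by Sbox-output value: [:,0] counts checks, [:,1] successes
    results = np.zeros((256, 2))
    model = load_model(model_path)
    #assumption: the model consumes raw trace vectors without reshaping
    predictions = model.predict(traces)
    for i in range(len(traces)):
        label = Sbox[int(plaintext[i]) ^ int(keys[i])]
        results[label, 0] += 1
        if np.argmax(predictions[i]) == label:
            results[label, 1] += 1
    successResultsNPY.append(results)
    print(model_path, 'first trace success rate:', results[:, 1].sum() / results[:, 0].sum())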
############################
#CODE STARTS EXECUTING HERE#
############################
#=========================================#
#the interval size is by default set to 96
#which corresponds to the interval size
#of an ATxmega128D4 traces captured using
#ChipWhisperer. Analyze the trace if you
#are using something different and change
#this value!
#=========================================#
#******************
INTERVAL_SIZE = 96
#******************
#model can be hard coded here, but I recommend using the terminal instead
to_check_all = []
if len(sys.argv) >= 9:  # eight positional arguments (indices 1-8) are read below
numtraces = int(sys.argv[1])
numiter = int(sys.argv[2])
tracestart = int(sys.argv[3])
traceend = int(sys.argv[4])
keybytepos = int(sys.argv[5])
tracefile = sys.argv[6]
ptfile = sys.argv[7]
keyfile = sys.argv[8]
to_check_all = [i for i in sys.argv][9:]
to_check_all = [i for i in to_check_all if i[-3:] == ".h5"]
traces, plaintext, keys = load_traces(tracefile, ptfile, keyfile)
interval = slice(tracestart+INTERVAL_SIZE*keybytepos, traceend+INTERVAL_SIZE*keybytepos)
print(traces.shape)
print(plaintext.shape)
print(keys.shape)
traces = traces[:,interval]
plaintext = plaintext[:,keybytepos]
keys = keys[:,keybytepos]
# No argument: check all the trained models
for m in to_check_all:
check_model(m, traces, plaintext, keys)
try:
np.save("results/npyresults/first_trace_success_rates.npy",np.array(successResultsNPY))
print("results stored in the ./results folder")
input("Test finished, press enter to continue ...")
except SyntaxError:
pass
| 39.751592 | 108 | 0.628425 |
7dacf9f865f47f80badfe339d0f2b8574ea5fb66 | 360 | py | Python | raptrcontainer/appropriated/admin.py | richard-parks/RAPTR | ff1342af4ee6447ab9cc21735e79efb7623df805 | [
"Unlicense"
] | null | null | null | raptrcontainer/appropriated/admin.py | richard-parks/RAPTR | ff1342af4ee6447ab9cc21735e79efb7623df805 | [
"Unlicense"
] | 2 | 2018-11-29T21:03:54.000Z | 2018-12-02T04:41:36.000Z | raptrcontainer/appropriated/admin.py | NOAA-PMEL/Admin_RAPTR | 2353aaa9500dce2e2e65a8d21e802b37c6990054 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import AppropriatedHistory
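# Assumption: the registration call is missing from this copy; registering the
# model with the default admin site is the standard pattern.
admin.site.register(AppropriatedHistory)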
| 18.947368 | 49 | 0.644444 |