hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a4404fe1d92ad81158f3995d99e25353d3c8492 | 4,315 | py | Python | NitroGenerator.py | ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version | 65c6e6e18e640afb4fc433394a9e646c7fe4f4fa | [
"MIT"
] | 2 | 2021-07-27T06:57:36.000Z | 2021-08-16T04:17:41.000Z | NitroGenerator.py | ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version | 65c6e6e18e640afb4fc433394a9e646c7fe4f4fa | [
"MIT"
] | null | null | null | NitroGenerator.py | ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version | 65c6e6e18e640afb4fc433394a9e646c7fe4f4fa | [
"MIT"
] | 1 | 2021-11-06T05:32:40.000Z | 2021-11-06T05:32:40.000Z | import random
import sys
import subprocess

try:
    import requests
except ImportError:
    # Catch only a missing module: a bare `except` would also swallow
    # unrelated startup errors (KeyboardInterrupt, broken dependency, ...).
    print("'requests' module not found! Trying to install... ")
    # pip_install is expected to be defined elsewhere in this project -- TODO confirm.
    pip_install("requests")
    import requests

# print_header / get_nitro_type / get_code / check_code are project helpers
# defined elsewhere (not visible in this excerpt).
print_header()
user_nitro_type = get_nitro_type()

print("Enter the number of Nitro Codes you want: ")
amount = int(input("> "))

valid_codes = 0
invalid_codes = 0
unchecked_codes = 0

print()
print()

# `with` guarantees the results file is flushed and closed even if the loop
# raises; the original left the handle open for the life of the process.
with open("All_Nitro_Codes.txt", "w", encoding='utf-8') as f:
    for i in range(amount):
        user_nitro_code = get_code(nitro_type=user_nitro_type)
        # check_code reports validity as the *strings* "True"/"False"/"None".
        validity = check_code(nitro_code=user_nitro_code)

        display = None
        if validity == "True":
            display = f"Valid. | https://discord.com/gifts/{user_nitro_code}"
            valid_codes += 1
        elif validity == "False":
            display = f"Invalid. | https://discord.com/gifts/{user_nitro_code}"
            invalid_codes += 1
        elif validity == "None":
            display = f"Unchecked. Rate limited. | https://discord.com/gifts/{user_nitro_code}"
            unchecked_codes += 1

        # Any other validity value is silently skipped, as in the original.
        if display is not None:
            print(display)
            f.write(display + "\n")

print("\n\nSuccessfully generated Nitro Codes. ")
print("Valid Nitro Codes: " + str(valid_codes))
print("Invalid Nitro Codes: " + str(invalid_codes))
print("Unchecked Nitro Codes: " + str(unchecked_codes))

print("\nEnter any key to exit.")
input()
quit()
| 36.567797 | 145 | 0.526999 |
4a4408798c8290d4f3dfdd7e187e5ce0fde47eee | 1,018 | py | Python | 2015/main/13/part2.py | sgravrock/adventofcode | 1f5263ee242c8446ac1c08d2aef195a0a4595ccb | [
"MIT"
] | null | null | null | 2015/main/13/part2.py | sgravrock/adventofcode | 1f5263ee242c8446ac1c08d2aef195a0a4595ccb | [
"MIT"
] | null | null | null | 2015/main/13/part2.py | sgravrock/adventofcode | 1f5263ee242c8446ac1c08d2aef195a0a4595ccb | [
"MIT"
] | null | null | null | import sys
import itertools
if __name__ == "__main__":
    # Python 2 entry point; `optimal` and `readfile` are presumably defined
    # elsewhere in this file -- TODO confirm.
    print optimal(readfile(sys.stdin))
| 23.674419 | 76 | 0.650295 |
4a444c988302d74c981cef9771e8cb5c4e9d2945 | 29,855 | py | Python | networking/connection/stun_client.py | bcgrendel/python_networking | b4c847d9eeeea078868b8dcb3d385e02eb0b8e96 | [
"MIT"
] | null | null | null | networking/connection/stun_client.py | bcgrendel/python_networking | b4c847d9eeeea078868b8dcb3d385e02eb0b8e96 | [
"MIT"
] | null | null | null | networking/connection/stun_client.py | bcgrendel/python_networking | b4c847d9eeeea078868b8dcb3d385e02eb0b8e96 | [
"MIT"
] | null | null | null | import socket
import sys
import traceback
import struct
import threading;
from threading import Thread;
import time;
import datetime;
import json
#import buffered_message;
import hashlib
from Crypto.PublicKey import RSA
from connection_state import ConnectionState
# publickey = RSA.importKey(key_string)
import tcp;
import udp;
# *************
# EXAMPLE USAGE
# *************
'''
import socket
import tcp
import udp
import stun_client
import time
start_listening = True
local_ip = socket.gethostbyname(socket.gethostname())
local_port = 30779
server_ip = socket.gethostbyname(socket.gethostname())
server_port = 30788
socket_timeout = 3.0
peer_block_manager = None
client = stun_client.STUN_Client(start_listening, local_ip, local_port, server_ip, server_port, socket_timeout, peer_block_manager)
# Set your available listening port ranges
client.available_ports = [[35000, 35100], [36500, 36700],]
# Register a user acccount with the stun server.
class RegisterCallback:
def __init__(self):
self.error_message = ""
self.success = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Registration request to server has timed-out."
def complete_registration(self, success, error_message=""):
self.success = success
self.error_message = error_message
username = "test_user"
password = "test_pass123"
profile_map = {}
callback_object = RegisterCallback()
registration_type = "permanent"
client.register(username, password, profile_map, callback_object, registration_type)
response_check_interval = 0.5;
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
# Login with username and password.
class AuthCallback:
def __init__(self):
self.error_message = ""
self.success = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Authentication request to server has timed-out."
def complete_authentication(self, success, error_message=""):
self.success = success
self.error_message = error_message
callback_object = AuthCallback()
login = True # this authentication is to login. It'd be False if we wanted to log out.
client.authenticate(username, password, callback_object, login)
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
# Now we can access the list of peers connected to the server.
# Alternatively, assign a function reference to client.peer_map_callback (argument will be a reference to client.peer_map) to be notified of peer list updates as they are received.
#
# sample peer_map:
# ["test_user":["test_user", None], "another_user":["another_user", None],]
# Get a peer from the list.
peer_username = None;
for _username, data in client.peer_map.iteritems():
if username != _username:
peer_username = _username
break
# Connect to that peer (hole-punch)
class ConnectionCallback:
def __init__(self):
self.error_message = ""
self.success = None
self.client_key = None
def handle_timeout(self, params=None):
self.success = False
self.error_message = "Connection request to server has timed-out."
def complete_connection(self, peer_username, success, error_message=""):
self.success = success
if success:
self.client_key = error_message
else:
self.error_message = error_message
buffer_size = 128
callback_object = ConnectionCallback()
client.connect_to_peer(peer_username, buffer_size, callback_object)
while callback_object.success == None:
time.sleep(response_check_interval)
if not callback_object.success:
print "Error: %s" % callback_object.error_message
exit()
client_key = callback_object.client_key
udp_client = client.client_map[client_key]
# Now you can communicate with that peer.
udp_client.send_message("Greetings!")
udp_client.pop_all_messages()
'''
| 38.374036 | 197 | 0.73589 |
4a4611f60a1d159391b648d9954a9b9efff56f91 | 11,842 | py | Python | tools/wptserve/tests/functional/test_response.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | 1 | 2021-12-12T18:13:24.000Z | 2021-12-12T18:13:24.000Z | tools/wptserve/tests/functional/test_response.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | 112 | 2021-09-27T14:39:02.000Z | 2022-03-30T14:26:35.000Z | tools/wptserve/tests/functional/test_response.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | null | null | null | import os
import unittest
import json
import types
from http.client import BadStatusLine
from io import BytesIO
import pytest
wptserve = pytest.importorskip("wptserve")
from .base import TestUsingServer, TestUsingH2Server, doc_root
if __name__ == '__main__':
    # Allow running this test module directly instead of via a pytest invocation.
    unittest.main()
| 36.549383 | 123 | 0.616703 |
4a48326e1bcc0c4ce67dffee3193eed37eb8dfe4 | 2,881 | py | Python | bbc1/core/command.py | ks91/bbc1-pub | 6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0 | [
"Apache-2.0"
] | 89 | 2017-10-31T05:38:30.000Z | 2021-11-06T11:53:19.000Z | bbc1/core/command.py | ks91/bbc1-pub | 6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0 | [
"Apache-2.0"
] | 74 | 2017-11-07T13:06:33.000Z | 2021-05-06T14:26:19.000Z | bbc1/core/command.py | ks91/bbc1-pub | 6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0 | [
"Apache-2.0"
] | 56 | 2017-11-04T13:54:56.000Z | 2021-06-18T18:05:46.000Z | # -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import sys
sys.path.extend(["../../"])
from bbc1.core.bbc_config import DEFAULT_CORE_PORT, DEFAULT_P2P_PORT
DEFAULT_SERV_ADDR = '127.0.0.1'  # default server address (local loopback)
| 57.62 | 116 | 0.701493 |
4a4861b9f42f405c3f1bc83a1f33fe81d2ee9835 | 33,928 | py | Python | main.py | cmcquinn/cmake-uvision-syncer | 26f34b79b3102a326ced2b0bca2524a98b69abf4 | [
"MIT"
] | null | null | null | main.py | cmcquinn/cmake-uvision-syncer | 26f34b79b3102a326ced2b0bca2524a98b69abf4 | [
"MIT"
] | null | null | null | main.py | cmcquinn/cmake-uvision-syncer | 26f34b79b3102a326ced2b0bca2524a98b69abf4 | [
"MIT"
] | 1 | 2022-03-31T13:47:50.000Z | 2022-03-31T13:47:50.000Z | """
Usage:
main.py [<project>]
Options:
<project> Path to the .uvprojx file (Keil Vision5 Project File).
The .uvoptx file (Keil Vision5 Project Options file) will
be located automatically as it shall be adjacent to the
.uvprojx file, having the same filename.
If this is a directory, .uvprojx is found automatically (if
multiple found then the latest changed is chosen).
If not provided then the current working directory is chosen
as a project directory.
"""
import enum
import operator
import os
import warnings
from collections import defaultdict
from dataclasses import dataclass
from os import DirEntry
from pathlib import Path
from typing import List, Optional, Union, Iterable, Collection, Set, Tuple, Callable, Dict, Iterator
from docopt import docopt
from lxml import etree
__author__ = "Bojan Potonik"
UnknownInt = int
UnknownBool = bool
# region XML data structures for Project File
# endregion XML data structures for Project File
# region XML data structures for Project Options file
# endregion XML data structures for Project Options file
# region XML parsing helper functions
def text(element: etree.ElementBase, name: str, is_attribute: bool = False, nullable: bool = False) -> Optional[str]:
    """Return the text of the single child tag *name*, or the attribute *name*.

    When *nullable* is true a missing tag/attribute yields ``None`` instead of
    raising; more than one matching tag always raises ``ValueError``.
    """
    if is_attribute:
        attributes = element.attrib
        return attributes.get(name) if nullable else attributes[name]

    nodes = element.xpath(name)
    if nullable and not nodes:
        return None
    if len(nodes) != 1:
        raise ValueError(f"Only one '{name}' tag per tree is supported, {len(nodes)} found")
    return nodes[0].text
def strict_bool(element: etree.ElementBase, name: str, nullable: bool = False, *,
                false_value: str = "0", true_value: str = "1") -> Optional[bool]:
    """Parse the text of tag *name* as a strict boolean.

    Only *false_value*/*true_value* are accepted; a missing value maps to
    ``None`` when *nullable*; anything else raises ``ValueError``.
    """
    raw = text(element, name, nullable=nullable)
    if raw == false_value:
        return False
    if raw == true_value:
        return True
    if nullable and (raw is None):
        return None
    raise ValueError(f"'{raw}' (of {name}) is not valid boolean value")
def strict_hex(element: etree.ElementBase, name: str) -> int:
    """Parse the text of tag *name* as a mandatory ``0x``-prefixed hex number."""
    raw = text(element, name)
    if raw.startswith("0x"):
        return int(raw, 16)
    raise ValueError(f"'{raw}' (of {name}) is not valid hexadecimal value")
def add_defines(self, defines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
                comment: str = None) -> None:
    """Add preprocessor define(s) applying to the given language(s).

    Delegates to the `_add_values` helper (defined on this class, not
    visible here).
    """
    self._add_values(self.defines, defines, languages, comment)
def add_undefines(self, undefines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
                  comment: str = None) -> None:
    """Add preprocessor un-define(s) applying to the given language(s).

    Delegates to the `_add_values` helper (defined on this class, not
    visible here).
    """
    self._add_values(self.undefines, undefines, languages, comment)
def add_source_files(self, paths: Union[None, str, Iterable[str]],
                     languages: Union[Language, Collection[Language], None],
                     comment: str = None, include_in_build: bool = True) -> None:
    """Register source files under the given language(s).

    Files with ``include_in_build=False`` stay listed but are commented out;
    when *languages* is falsy the paths are filed under 'other files' instead.
    """
    cleaned = self._clean_paths(paths)

    if include_in_build is False:
        # Excluded-from-build files remain in the listing, as comments.
        cleaned = ["# " + path for path in cleaned]

    destination = self.source_file_paths if languages else self.other_file_paths
    self._add_values(destination, cleaned, languages, comment)
def add_other_files(self, paths: Union[str, Iterable[str]], comment: str = None) -> None:
    """Add files belonging to no language: forwards to `add_source_files`
    with ``languages=None``, which files them under the 'other files' list."""
    self.add_source_files(paths, None, comment)
def check_common(self) -> Set[Language]:
    """
    Mark every property as 'common' when it spans all used languages.

    :return: Set of all used languages (languages with at least one property)
    """
    all_props = (self.include_paths, self.defines, self.undefines, self.source_file_paths)

    # Union of every language referenced by at least one property.
    languages = set()
    for props in all_props:
        for prop in props:
            languages.update(prop.languages)

    # A property is common exactly when it covers that whole union.
    for props in all_props:
        for prop in props:
            prop.common = (prop.languages == languages)

    return languages
def __str__(self) -> str:
    """Render the collected build properties as CMake `set()` sections.

    Sections are grouped per property kind (defines, undefines, includes,
    sources) and, within each, per language scope (common first, then one
    sub-section per language). Formatting of each section is delegated to
    the module-level `_add_section_files` helper (not visible here).
    """
    languages = sorted(self.check_common(), key=operator.attrgetter('value'))

    # Fix: the original list had no comma between these two strings, so
    # implicit concatenation fused both header comments into one element.
    ret_str = [
        "# Made with CMake <> uVision project file synchronizer",
        "# https://github.com/bojanpotocnik/cmake-uvision-syncer",
    ]

    # Set of the build properties
    prop_sets: List[Tuple[str, str, List[CMake.String], str]] = [
        ("definitions", "DEFINES", self.defines, "-D"),
        ("un-defines", "UNDEFINES", self.undefines, ""),
        ("include directories", "INCLUDE_DIRS", self.include_paths, ""),
        ("source files", "SOURCES", self.source_file_paths, ""),
    ]
    # Set of the language configs per build property
    sub_prop_sets: List[Tuple[str, str, Callable[[CMake.String], bool]]] = [
        ("Common", "COMMON", lambda prop: prop.common),
        # lang_=lang binds the loop variable now (avoids late-binding closures).
        *((lang.value + " specific", lang.name,
           lambda prop, lang_=lang: (not prop.common) and (lang_ in prop.languages))
          for lang in languages)
    ]

    for section_comment, section_var_prefix, section_props, val_prefix in prop_sets:
        ss_str = []
        for prop_set_comment, var_suffix, filter_fun in sub_prop_sets:
            section_files = _add_section_files(
                comment=f"{prop_set_comment} {section_comment}",
                var_name=f"{section_var_prefix}_{var_suffix}",
                value_iterator=filter(filter_fun, section_props),
                value_prefix=val_prefix
            )
            if section_files is not None:
                ss_str.append(section_files)
        ret_str.append("\n\n".join(ss_str))

    other_files = _add_section_files(
        comment="Other files",
        var_name="OTHER_FILES",
        value_iterator=self.other_file_paths
    )
    if other_files is not None:
        ret_str.append(other_files)

    return "\n\n\n".join(ret_str)
def main() -> None:
    """Locate a .uvprojx project, parse it and emit a companion .cmake file."""
    # region Parse arguments
    arguments = docopt(__doc__)

    project_path: str = arguments["<project>"] or "."
    if not os.path.isfile(project_path):
        # A directory was given: find the .uvprojx file(s) inside it.
        with os.scandir(project_path) as dirs:  # type: Iterator[DirEntry]
            projects = [de.path for de in dirs if (de.is_file() and (os.path.splitext(de.name)[1] == ".uvprojx"))]
        if not projects:
            raise FileNotFoundError(f"Could not find any .uvprojx file in '{project_path}'")
        elif len(projects) > 1:
            # Choose the latest file by modification time.
            project_path = max(projects, key=os.path.getmtime)
        else:
            project_path = projects[0]
    project_path = os.path.realpath(project_path)
    # endregion Parse arguments

    print(f"Using Vision5 Project File '{project_path}'")

    # Parse uVision project XML files
    uvp = UVisionProject.new(project_path)

    # Generate CMake file and populate it with information from uVision project
    cmake = CMake()

    # Add Assembler properties
    cmake.add_include_paths(uvp.targets[0].build.asm.include_paths, Language.ASM)
    cmake.add_defines(uvp.targets[0].build.asm.defines, Language.ASM)
    cmake.add_undefines(uvp.targets[0].build.asm.undefines, Language.ASM)

    # Add C properties
    cmake.add_include_paths(uvp.targets[0].build.c.include_paths, Language.C)
    cmake.add_defines(uvp.targets[0].build.c.defines, Language.C)
    cmake.add_undefines(uvp.targets[0].build.c.undefines, Language.C)

    # Add source and other files
    for file, lang, comment in uvp.source_files():
        cmake.add_source_files(file.path, lang, comment, file.include_in_build)

    # Write the .cmake file next to the .uvprojx, with the same base name.
    fp_proj_cmake = os.path.join(os.path.dirname(uvp.project_file_path),
                                 os.path.splitext(os.path.basename(uvp.project_file_path))[0] + ".cmake")
    with open(fp_proj_cmake, 'w') as f:
        print(cmake, file=f)

    print(f"Generated CMake file '{fp_proj_cmake}'")


if __name__ == "__main__":
    main()
| 38.207207 | 122 | 0.528855 |
4a48ec3aeae99c16ed4de0cce8fcde590af1ac0c | 3,434 | py | Python | scipy/weave/base_spec.py | lesserwhirls/scipy-cwt | ee673656d879d9356892621e23ed0ced3d358621 | [
"BSD-3-Clause"
] | 8 | 2015-10-07T00:37:32.000Z | 2022-01-21T17:02:33.000Z | scipy/weave/base_spec.py | lesserwhirls/scipy-cwt | ee673656d879d9356892621e23ed0ced3d358621 | [
"BSD-3-Clause"
] | null | null | null | scipy/weave/base_spec.py | lesserwhirls/scipy-cwt | ee673656d879d9356892621e23ed0ced3d358621 | [
"BSD-3-Clause"
] | 8 | 2015-05-09T14:23:57.000Z | 2018-11-15T05:56:00.000Z |
import UserList
import base_info
| 35.040816 | 80 | 0.609785 |
4a48f5c7b324b298a0d8541fe2c9610bbecc1796 | 96 | py | Python | xception/test.py | latentai/model-zoo-models | 70a96e955b3b1245f8417613cd9debdae91b1d28 | [
"Apache-2.0"
] | 8 | 2020-05-16T20:14:27.000Z | 2020-07-08T09:23:24.000Z | inceptionv3/test.py | latentai/model-zoo-models | 70a96e955b3b1245f8417613cd9debdae91b1d28 | [
"Apache-2.0"
] | 9 | 2020-03-26T10:25:12.000Z | 2022-02-28T19:54:14.000Z | audio_recognition/test.py | latentai/model-zoo-models | 70a96e955b3b1245f8417613cd9debdae91b1d28 | [
"Apache-2.0"
] | 6 | 2020-03-19T20:52:09.000Z | 2022-03-06T01:33:29.000Z | #!/usr/bin/env python3
from utils.model_config_helpers import run_model_test
run_model_test()
| 16 | 53 | 0.822917 |
4a4b8d448257463b5f6347e3da0f24a94bac2394 | 10,816 | py | Python | mpunet/bin/cv_split.py | alexsosn/MultiPlanarUNet | 2d1cecdee391be8e9f72da95e33077ed82a2183a | [
"MIT"
] | null | null | null | mpunet/bin/cv_split.py | alexsosn/MultiPlanarUNet | 2d1cecdee391be8e9f72da95e33077ed82a2183a | [
"MIT"
] | null | null | null | mpunet/bin/cv_split.py | alexsosn/MultiPlanarUNet | 2d1cecdee391be8e9f72da95e33077ed82a2183a | [
"MIT"
] | 1 | 2020-10-07T12:44:47.000Z | 2020-10-07T12:44:47.000Z | from glob import glob
import sys
import os
import numpy as np
import random
from mpunet.utils import create_folders
import argparse
def _add_to_file_list_fallback(rel_image_path, image_path,
fname="LIST_OF_FILES.txt"):
"""
On some system synlinks are not supported, if --files_list flag is set,
uses this function to add each absolute file path to a list at the final
subfolder that is supposed to store images and label links or actual files
At run-time, these files must be loaded by reading in the path from these
files instead.
"""
# Get folder where list of files should be stored
folder = os.path.split(image_path)[0]
# Get absolute path to image
# We change dir to get the correct abs path from the relative
os.chdir(folder)
abs_file_path = os.path.abspath(rel_image_path)
# Get path to the list of files
list_file_path = os.path.join(folder, fname)
with open(list_file_path, "a") as out_f:
out_f.write(abs_file_path + "\n")
if __name__ == "__main__":
    # `entry_func` is presumably defined elsewhere in this file -- TODO confirm.
    entry_func()
4a4cc74674f055ddea956ccb55ba03b1e2719b21 | 1,964 | py | Python | src/client/pydaos/raw/conversion.py | gczsjdy/daos | abbd900010562f3acea9c6b1dc2ca98a8d3c71fa | [
"Apache-2.0"
] | 1 | 2021-12-04T14:57:48.000Z | 2021-12-04T14:57:48.000Z | src/client/pydaos/raw/conversion.py | gczsjdy/daos | abbd900010562f3acea9c6b1dc2ca98a8d3c71fa | [
"Apache-2.0"
] | 52 | 2019-12-04T05:47:10.000Z | 2020-06-09T03:26:12.000Z | src/client/pydaos/raw/conversion.py | gczsjdy/daos | abbd900010562f3acea9c6b1dc2ca98a8d3c71fa | [
"Apache-2.0"
] | 8 | 2019-12-04T08:26:00.000Z | 2020-06-09T07:40:11.000Z | #!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import ctypes
import uuid
def c_uuid_to_str(uuid):
    """ utility function to convert a C uuid (16-byte array) into the
    standard 8-4-4-4-12 uppercase hex string format """
    hex_digits = "".join("{:02X}".format(uuid[i]) for i in range(16))
    groups = (hex_digits[0:8], hex_digits[8:12], hex_digits[12:16],
              hex_digits[16:20], hex_digits[20:32])
    return "-".join(groups)
def c_uuid(p_uuid, c_uuid):
    """ utility function to fill a 16-byte C uuid array in place from a
    python uuid.UUID object """
    hex_digits = p_uuid.hex
    for byte_index in range(16):
        pair = hex_digits[2 * byte_index:2 * byte_index + 2]
        c_uuid[byte_index] = int(pair, 16)
def str_to_c_uuid(uuidstr):
    """ utility function to convert a string format uuid to a C uuid
    (ctypes array of 16 unsigned bytes) """
    py_uuid = uuid.UUID('{' + uuidstr + '}')
    c_buf = (ctypes.c_ubyte * 16)()
    c_uuid(py_uuid, c_buf)
    return c_buf
4a4d871b786cc8a162c159d5da63831c271b0be6 | 956 | py | Python | experiments/nmt/utils/vocabulary_coverage.py | lvapeab/GroundHog_INMT | d5ad1d466eaf5040e99b9aaaa1b28c96402436ce | [
"BSD-3-Clause"
] | null | null | null | experiments/nmt/utils/vocabulary_coverage.py | lvapeab/GroundHog_INMT | d5ad1d466eaf5040e99b9aaaa1b28c96402436ce | [
"BSD-3-Clause"
] | null | null | null | experiments/nmt/utils/vocabulary_coverage.py | lvapeab/GroundHog_INMT | d5ad1d466eaf5040e99b9aaaa1b28c96402436ce | [
"BSD-3-Clause"
] | null | null | null | import cPickle
import argparse

# CLI: measure what fraction of a corpus's vocabulary is covered by a pickled
# shortlist dictionary (cPickle is imported at the top of this file).
parser = argparse.ArgumentParser(
    "Computes the coverage of a shortlist in a corpus file")
parser.add_argument("--vocab",
                    required=True, help="Vocabulary to use (.pkl)")
# Fix: the original help text ("Beam size, turns on beam-search") was a
# copy-paste error from an unrelated script.
parser.add_argument("--text",
                    required=True, help="Text corpus file to measure coverage on")
args = parser.parse_args()

with open(args.vocab, 'rb') as f:
    d = cPickle.load(f)
with open(args.text, 'rb') as f:
    text = f.read().splitlines()

n_words = 0           # total number of tokens seen
n_unks = 0            # tokens missing from the shortlist vocabulary
split_vocab = 0       # number of distinct words in the corpus
split_vocabulary = {}

for line in text:
    for word in line.split():
        if split_vocabulary.get(word) is None:
            split_vocabulary[word] = split_vocab
            split_vocab += 1
        if d.get(word) is None:
            n_unks += 1
        n_words += 1

# Parenthesized print works identically on Python 2 for a single argument,
# and makes this file importable on Python 3 as well.
# NOTE(review): the ratio mixes a distinct-word count with a token count,
# exactly as the original did -- behaviour preserved, but worth confirming.
print("Coverage: %f (%d unknown words out of %d of a total of %d)"
      % (float(split_vocab - n_unks) / split_vocab, n_unks, split_vocab, n_words))
| 28.117647 | 140 | 0.621339 |
4a4d9078d162889cc7a0df9b67742f350806db8d | 13,952 | py | Python | stores/apps/inventory/migrations/0001_initial.py | diassor/CollectorCity-Market-Place | 892ad220b8cf1c0fc7433f625213fe61729522b2 | [
"Apache-2.0"
] | 135 | 2015-03-19T13:28:18.000Z | 2022-03-27T06:41:42.000Z | stores/apps/inventory/migrations/0001_initial.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | null | null | null | stores/apps/inventory/migrations/0001_initial.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | 83 | 2015-01-30T01:00:15.000Z | 2022-03-08T17:25:10.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 76.240437 | 181 | 0.573825 |
4a4e581c499165152bc4c54e7fe90ad3b4939698 | 48,733 | py | Python | src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py | vi4m/ralph | 2af767ee23d89be9e6cec0a537350a1ce8840bd1 | [
"Apache-2.0"
] | 1 | 2018-09-01T14:14:08.000Z | 2018-09-01T14:14:08.000Z | src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py | srikanth4372/sample | 127b5742ae464d42909a14d71e3c10c241ec3a23 | [
"Apache-2.0"
] | 1 | 2019-08-14T10:03:45.000Z | 2019-08-14T10:03:45.000Z | src/ralph/deployment/migrations/0005_auto__add_field_archiveddeployment_service__add_field_archiveddeployme.py | srikanth4372/sample | 127b5742ae464d42909a14d71e3c10c241ec3a23 | [
"Apache-2.0"
] | 1 | 2019-08-14T09:59:42.000Z | 2019-08-14T09:59:42.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 94.996101 | 239 | 0.572487 |
4a4f4bc06c12566c84246f7896cf490e49f35766 | 2,059 | py | Python | SPH/sphbwr_example2.py | RLReed/unotran | b317107e1a39490dda732f86a731872f5207a167 | [
"MIT"
] | null | null | null | SPH/sphbwr_example2.py | RLReed/unotran | b317107e1a39490dda732f86a731872f5207a167 | [
"MIT"
] | null | null | null | SPH/sphbwr_example2.py | RLReed/unotran | b317107e1a39490dda732f86a731872f5207a167 | [
"MIT"
] | 3 | 2019-12-02T23:01:24.000Z | 2022-01-26T04:48:41.000Z | import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')
from coarseBounds import computeBounds, Grouping
import pickle
from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm
if __name__ == '__main__':
    np.set_printoptions(precision=6)
    G = 44  # energy-group count used to pick the cross-section library -- TODO confirm
    # computeBounds comes from the project's coarseBounds module (imported above).
    dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
    fname = dgmstructure.fname
    xs_name = 'XS/{}gXS.anlxs'.format(G)
    pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    data_path = 'data2'
    # Get the homogenized cross sections
    refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))
    for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
        dgmstructure.fname = fname
        # makeDGMXS is presumably defined elsewhere in this file -- TODO confirm.
        XS = makeDGMXS(G, refXS, dgmstructure, basis)
        pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
4a4fd2b57960e4af2acbb3603c634154bea6e80b | 9,280 | py | Python | src/oci/management_agent/models/management_agent_aggregation_dimensions.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/management_agent/models/management_agent_aggregation_dimensions.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/management_agent/models/management_agent_aggregation_dimensions.py | CentroidChef/oci-python-sdk | fa406e27a52b40c70e220c20f52dfe2abe6236a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 37.57085 | 245 | 0.691487 |
4a5059beb09af2b372b1d15c442329a32a505195 | 1,770 | py | Python | py_buycoins/sending.py | Bashorun97/BuyCoins-Python-SDK | 5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b | [
"MIT"
] | 1 | 2021-02-16T14:26:30.000Z | 2021-02-16T14:26:30.000Z | py_buycoins/sending.py | Bashorun97/BuyCoins-Python-SDK | 5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b | [
"MIT"
] | null | null | null | py_buycoins/sending.py | Bashorun97/BuyCoins-Python-SDK | 5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b | [
"MIT"
] | null | null | null | from .gcore.queries import GetNetworkFee, GetBalance
from .gcore.mutations import SendCoin
from typing import List, Optional
from .exc import SendLimitError, InvalidClientObject
| 33.396226 | 114 | 0.627684 |
4a51566e6f537d3c7defee7d9f6dd2e1ce52fbb6 | 2,190 | py | Python | snippet/example/python/url.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 94 | 2016-09-22T09:13:19.000Z | 2022-03-30T07:35:35.000Z | snippet/example/python/url.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 1 | 2020-11-22T03:05:05.000Z | 2020-11-22T03:05:05.000Z | snippet/example/python/url.py | yp2800/snippet | 054af596655007cbec81340bd166489e706fffe6 | [
"MIT"
] | 38 | 2017-06-11T22:03:04.000Z | 2022-03-10T07:46:39.000Z | # -*- coding: utf-8 -*-
try:
from urlparse import urlparse, urlunsplit
except ImportError:
from urllib.parse import urlparse, urlunsplit
| 31.73913 | 94 | 0.581279 |
4a533004a2f846794254f71446a4268346a94d9f | 550 | py | Python | netvisor_api_client/services/dimension.py | tristen-tooming/netvisor-api-client | 37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69 | [
"MIT"
] | null | null | null | netvisor_api_client/services/dimension.py | tristen-tooming/netvisor-api-client | 37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69 | [
"MIT"
] | null | null | null | netvisor_api_client/services/dimension.py | tristen-tooming/netvisor-api-client | 37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69 | [
"MIT"
] | null | null | null | from .base import Service
from ..requests.dimension import CreateDimensionsRequest, DimensionsListRequest
| 28.947368 | 79 | 0.616364 |
4a54146d12e005b9045dcbb5b4f63178061f1a78 | 7,338 | py | Python | cishouseholds/filter.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | null | null | null | cishouseholds/filter.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | 252 | 2021-05-19T11:12:43.000Z | 2022-03-02T10:39:10.000Z | cishouseholds/filter.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | null | null | null | from typing import List
from typing import Union
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.sql.window import Window
def filter_all_not_null(df: DataFrame, reference_columns: List[str]) -> DataFrame:
    """
    Remove records whose *reference_columns* are all null.

    From households_aggregate_processes.xlsx, filter number 2.

    Parameters
    ----------
    df
        Input dataframe.
    reference_columns
        Columns to check for missing values in; a row is dropped only when
        every one of them is null.
    """
    # DataFrame.dropna is the documented alias of DataFrame.na.drop.
    return df.dropna(how="all", subset=reference_columns)
def filter_duplicates_by_time_and_threshold(
df: DataFrame,
first_reference_column: str,
second_reference_column: str,
third_reference_column: str,
fourth_reference_column: str,
time_threshold: float = 1.5,
float_threshold: float = 0.00001,
) -> DataFrame:
"""
Drop duplicates based on two identitical column values if third and fourth column and not both within
a threshold difference from the first duplicate record.
From households_aggregate_processes.xlsx, filter number 4.
Parameters
----------
df
first_reference_column
First column with duplicate value
second_reference_column
Second column with duplicate value
third_reference_column
Column used for time based threshold difference, timestamp
fourth_reference_column
Column used for numeric based threshold difference, float
"""
window = Window.partitionBy(first_reference_column, second_reference_column).orderBy(third_reference_column)
df = df.withColumn("duplicate_id", F.row_number().over(window))
df = df.withColumn(
"within_time_threshold",
(
F.abs(
F.first(third_reference_column).over(window).cast("long") - F.col(third_reference_column).cast("long")
)
/ (60 * 60)
)
< time_threshold,
)
df = df.withColumn(
"within_float_threshold",
F.abs(F.first(fourth_reference_column).over(window) - F.col(fourth_reference_column)) < float_threshold,
)
df = df.filter((F.col("duplicate_id") == 1) | ~(F.col("within_time_threshold") & (F.col("within_float_threshold"))))
return df.drop("duplicate_id", "within_time_threshold", "within_float_threshold")
def filter_by_cq_diff(
df: DataFrame, comparing_column: str, ordering_column: str, tolerance: float = 0.00001
) -> DataFrame:
"""
This function works out what columns have a float value difference less than 10-^5 or 0.00001
(or any other tolerance value inputed) given all the other columns are the same and
considers it to be the same dropping or deleting the repeated values and only keeping one entry.
Parameters
----------
df
comparing_column
ordering_column
tolerance
"""
column_list = df.columns
column_list.remove(comparing_column)
windowSpec = Window.partitionBy(column_list).orderBy(ordering_column)
df = df.withColumn("first_value_in_duplicates", F.first(comparing_column).over(windowSpec))
df = df.withColumn(
"duplicates_first_record", F.abs(F.col("first_value_in_duplicates") - F.col(comparing_column)) < tolerance
)
difference_window = Window.partitionBy(column_list + ["duplicates_first_record"]).orderBy(ordering_column)
df = df.withColumn("duplicate_number", F.row_number().over(difference_window))
df = df.filter(~(F.col("duplicates_first_record") & (F.col("duplicate_number") != 1)))
df = df.drop("first_value_in_duplicates", "duplicates_first_record", "duplicate_number")
return df
def assign_date_interval_and_flag(
df: DataFrame,
column_name_inside_interval: str,
column_name_time_interval: str,
start_datetime_reference_column: str,
end_datetime_reference_column: str,
lower_interval: Union[int, float],
upper_interval: Union[int, float],
interval_format: str = "hours",
) -> DataFrame:
"""
This function gives the time interval in either hours (by default) or days
in a column by given two date columns and says whether it is inside and
upper and lower interval. If the difference of dates is within the upper and
lower time intervals, the function will output None and an integer 1 if the
difference in dates are outside of those intervals.
Parameters
----------
df
column_name_inside_interval
Name of the column that returns whether the difference in dates are
within the upper/lower limits if within, it will return None, if outside
will return an integer 1.
column_name_time_interval
Name of the column that returns the difference between start and end
date and adds at the end of the column name whether it is in hours or
days
start_datetime_reference_column
Earliest date in string format yyyy-mm-dd hh:mm:ss.
end_datetime_reference_column
Latest date in string format yyyy-mm-dd hh:mm:ss.
lower_interval
Marks how much NEGATIVE time difference can have between
end_datetime_reference_column and start_datetime_reference_column.
Meaning how the end_datetime_reference_column can be earlier than
start_datetime_reference_column
upper_interval
Marks how much POSITIVE time difference can have between
end_datetime_reference_column and start_datetime_reference_column
interval_format
By default will be a string called 'hours' if upper and lower
intervals are input as days, define interval_format to 'days'.
These are the only two possible formats.
Notes
-----
Lower_interval should be a negative value if start_datetime_reference_column
is after end_datetime_reference_column."""
# by default, Hours but if days, apply change factor
if interval_format == "hours": # to convert hours to seconds
conversion_factor = 3600 # 1h has 60s*60min seconds = 3600 seconds
elif interval_format == "days":
conversion_factor = 86400 # 1 day has 60s*60min*24h seconds = 86400 seconds
column_name_time_interval = column_name_time_interval + "_" + interval_format
# FORMULA: (end_datetime_reference_column - start_datetime_reference_column) in
# seconds/conversion_factor in seconds
df = df.withColumn(
column_name_time_interval,
(
F.to_timestamp(F.col(end_datetime_reference_column)).cast("long")
- F.to_timestamp(F.col(start_datetime_reference_column)).cast("long")
)
/ conversion_factor, # 1 day has 60s*60min*24h seconds = 86400 seconds
)
return df.withColumn(
column_name_inside_interval,
F.when(~F.col(column_name_time_interval).between(lower_interval, upper_interval), 1).otherwise(None),
)
def file_exclude(df: DataFrame, source_file_col: str, files_to_exclude: list):
"""
Function to exclude specific files from pipeline processing
Parameters
--------
df
source_file_column = Column in input dataframe which contains the source file
files_to_exclude = List of files to exclude (feed in from config)
"""
for item in files_to_exclude:
df = df.filter(~F.col(source_file_col).isin(item))
return df
| 38.020725 | 120 | 0.710139 |
4a544c66c68a458b980a2174bdc25da63354dc6e | 6,088 | py | Python | cscs-checks/cuda/multi_gpu.py | hpc-unibe-ch/reframe | 07f97e25cf4e7319782c37dd1923f7e70a368b99 | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/cuda/multi_gpu.py | hpc-unibe-ch/reframe | 07f97e25cf4e7319782c37dd1923f7e70a368b99 | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/cuda/multi_gpu.py | hpc-unibe-ch/reframe | 07f97e25cf4e7319782c37dd1923f7e70a368b99 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe.utility.sanity as sn
import reframe as rfm
| 42.873239 | 79 | 0.53433 |
4a548d3916f1d9f7cfe21d9195722cae0fa08812 | 5,094 | py | Python | sympy/series/tests/test_demidovich.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/series/tests/test_demidovich.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/series/tests/test_demidovich.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | from sympy import (
limit,
Symbol,
oo,
sqrt,
Rational,
log,
exp,
cos,
sin,
tan,
pi,
asin,
together,
root,
S,
)
# Numbers listed with the tests refer to problem numbers in the book
# "Anti-demidovich, problemas resueltos, Ed. URSS"
x = Symbol("x")
| 30.686747 | 86 | 0.458186 |
4a54b5369073023cda9e88293fbf883952f8a99e | 493 | py | Python | notion/ctx.py | jfhbrook/notion-tools | dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d | [
"MIT"
] | 1 | 2022-01-19T22:24:35.000Z | 2022-01-19T22:24:35.000Z | notion/ctx.py | jfhbrook/notion-tools | dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d | [
"MIT"
] | 4 | 2021-12-28T05:15:49.000Z | 2021-12-28T05:18:25.000Z | notion/ctx.py | jfhbrook/notion-tools | dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d | [
"MIT"
] | null | null | null | from notion.client import NotionClient
from notion.settings import Settings
| 27.388889 | 84 | 0.6714 |
4a5505f918153846b19b1a912cedc52b11e1b4e9 | 1,552 | py | Python | setup.py | rgooler/bootstrap-pip | 34eaa648c81e3f8213b97cd33bda23b50743122a | [
"Unlicense"
] | null | null | null | setup.py | rgooler/bootstrap-pip | 34eaa648c81e3f8213b97cd33bda23b50743122a | [
"Unlicense"
] | null | null | null | setup.py | rgooler/bootstrap-pip | 34eaa648c81e3f8213b97cd33bda23b50743122a | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
install_requires = []
# install_requires = ['requests >= 2.1.0']
# For SNI support in Python 2, must install the following packages
# if sys.version_info[0] == 2:
# install_requires.append('pyOpenSSL >= 0.14')
# install_requires.append('ndg-httpsclient >= 0.3.3')
# install_requires.append('pyasn1 >= 0.1.7')
setup(
name='mymodule',
packages=['mymodule'],
version='0.1',
description='Desc',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
url='http://github.com/rgooler/bootstrap-pip/',
license='MIT',
author='Ryan Gooler',
author_email='ryan.gooler@gmail.com',
py_modules=['mymodule'],
install_requires=install_requires,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 30.431373 | 71 | 0.614046 |
4a553a81b1d7bdf7e54e2eefdce19b67fef643fd | 138 | py | Python | cfdata/tabular/converters/__init__.py | carefree0910/carefree-data | ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19 | [
"MIT"
] | 9 | 2020-10-25T11:52:34.000Z | 2022-01-23T02:45:41.000Z | cfdata/tabular/converters/__init__.py | carefree0910/carefree-data | ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19 | [
"MIT"
] | 2 | 2020-08-02T01:58:48.000Z | 2021-02-26T11:24:19.000Z | cfdata/tabular/converters/__init__.py | carefree0910/carefree-data | ae0f4ea5724b4efd5d76f2a9d420acf3322c1d19 | [
"MIT"
] | 1 | 2021-11-04T14:34:13.000Z | 2021-11-04T14:34:13.000Z | from .base import *
from .string import *
from .categorical import *
from .numerical import *
__all__ = ["Converter", "converter_dict"]
| 17.25 | 41 | 0.724638 |
4a579bbd92a904600d51867b452bca5458bcdea4 | 114 | py | Python | hello_world.py | BronWang/first_github | 9cdd40458014a448a5121268ebca907e3cba1eee | [
"MIT"
] | null | null | null | hello_world.py | BronWang/first_github | 9cdd40458014a448a5121268ebca907e3cba1eee | [
"MIT"
] | null | null | null | hello_world.py | BronWang/first_github | 9cdd40458014a448a5121268ebca907e3cba1eee | [
"MIT"
] | null | null | null | def hello_world():
"""Hello world"""
message = 'hello world'
print(message.title())
hello_world()
| 14.25 | 27 | 0.622807 |
4a5c703189126174bfc5a0bc0302603a5b45186d | 583 | py | Python | Python/Samples/Observer/UtObserver.py | plasroom46/DesignPattern.Sample | 86c05c5ae356cb01f3d075f248c45da3e6534d07 | [
"MIT"
] | 9 | 2019-03-14T01:54:31.000Z | 2021-11-26T13:00:32.000Z | Python/Samples/Observer/UtObserver.py | plasroom46/DesignPattern.Sample | 86c05c5ae356cb01f3d075f248c45da3e6534d07 | [
"MIT"
] | null | null | null | Python/Samples/Observer/UtObserver.py | plasroom46/DesignPattern.Sample | 86c05c5ae356cb01f3d075f248c45da3e6534d07 | [
"MIT"
] | 2 | 2019-08-19T06:00:04.000Z | 2021-07-15T01:23:52.000Z | import unittest
from Observers import Observer, ObserverMailServer, ObserverPbx
from Subjects import Subject, SubjectEflow
if __name__ == '__main__':
unittest.main()
| 21.592593 | 63 | 0.626072 |
4a5f2654f1609f5c5550084dae95f8a37c34d9e6 | 4,247 | py | Python | Python/2021/day_04/day_04.py | JonoRicci/Advent-Of-Code | 1c092410d6ece195f4689788af4b1091acf10fbb | [
"MIT"
] | null | null | null | Python/2021/day_04/day_04.py | JonoRicci/Advent-Of-Code | 1c092410d6ece195f4689788af4b1091acf10fbb | [
"MIT"
] | null | null | null | Python/2021/day_04/day_04.py | JonoRicci/Advent-Of-Code | 1c092410d6ece195f4689788af4b1091acf10fbb | [
"MIT"
] | null | null | null | """
Day 04
"""
from logger import logger
def main() -> None:
"""
Import the puzzle input, process and display the results.
"""
puzzle_input = import_list()
logger.debug(puzzle_input)
final_score = play_bingo(puzzle_input)
for result in final_score:
logger.info(f"The final score is: {result}.")
def import_list() -> list:
"""
Import the puzzle input and return a list.
:return: Puzzle input text file as list
:rtype: list
"""
file = open("puzzle-input", "r")
string_list = file.read().splitlines()
file.close()
return string_list
def play_bingo(bingo_cards: list) -> list:
"""
Extract winning numbers, bingo boards from input.
Make a separate 2D list tracking wins.
For each winning number, check every board row and column for a match.
Add matches to the 2D list tracking wins.
Once done, check 2D list for winning columns / rows.
Add winning boards to new list along with winning number.
Multiply to get score.
:param bingo_cards: puzzle input where each line is a string
:return: First and last winning board score
:rtype: list
"""
winning_numbers = [int(x) for x in bingo_cards[0].split(",")]
logger.debug(f" Winning numbers: {winning_numbers}")
single_board = []
all_boards = []
final_score_list = []
# Get Bingo Boards
for line in range(len(bingo_cards)):
if "," not in bingo_cards[line]:
row = [int(x) for x in bingo_cards[line].split()]
if row:
logger.debug(row)
single_board.append(row)
elif single_board:
all_boards.append(single_board)
single_board = []
# Set up separate 2D list tracking matches to winning numbers.
unmarked_tracker = []
for board in all_boards:
assert len(board) == 5 and len(board[0]) == 5
unmarked_tracker.append([[False for _ in range(5)] for _ in range(5)])
# Set up list to track winning boards.
winning_board = [False for _ in range(len(all_boards))]
for number in winning_numbers:
for index, board in enumerate(all_boards):
logger.debug(f"Checking board: {index} for {number}")
# Check for winning numbers.
for row in range(5):
for column in range(5):
if board[row][column] == number:
logger.debug(f"{unmarked_tracker[index][row][column]} "
f"is True.")
unmarked_tracker[index][row][column] = True
# Check for 5 in a row.
won = False
for row in range(5):
ok = True
for column in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for 5 in a column.
for column in range(5):
ok = True
for row in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for each winning board.
if won and not winning_board[index]:
winning_board[index] = True
winning_boards_count = len([j for j in range(len(all_boards))
if winning_board[j]])
# If first or last board.
if winning_boards_count == 1 or winning_boards_count == \
len(all_boards):
# Calculate all unmarked.
unmarked = 0
for row in range(5):
for column in range(5):
if not unmarked_tracker[index][row][column]:
unmarked += board[row][column]
final_score_list.append(unmarked * number)
logger.debug(f"The final score is: {final_score_list[-1]}, "
f"which is {unmarked} * {number}.")
return final_score_list
if __name__ == "__main__":
main()
| 32.419847 | 80 | 0.53732 |
4a612749e70c643dade9a21e3ef7dab25d3f46e9 | 1,982 | py | Python | timeeval_experiments/algorithms/eif.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | timeeval_experiments/algorithms/eif.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | timeeval_experiments/algorithms/eif.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
_eif_parameters: Dict[str, Dict[str, Any]] = {
"extension_level": {
"defaultValue": None,
"description": "Extension level 0 resembles standard isolation forest. If unspecified (`None`), then `extension_level=X.shape[1] - 1`.",
"name": "extension_level",
"type": "int"
},
"limit": {
"defaultValue": None,
"description": "The maximum allowed tree depth. This is by default set to average length of unsucessful search in a binary tree.",
"name": "limit",
"type": "int"
},
"max_samples": {
"defaultValue": None,
"description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`None`), then `max_samples=min(256, X.shape[0])`.",
"name": "max_samples",
"type": "float"
},
"n_trees": {
"defaultValue": 200,
"description": "The number of decision trees (base estimators) in the forest (ensemble).",
"name": "n_trees",
"type": "int"
},
"random_state": {
"defaultValue": 42,
"description": "Seed for random number generation.",
"name": "random_state",
"type": "int"
}
}
| 33.033333 | 180 | 0.676085 |
4a614519b633b8e43e30737c32c7066d2365e9ab | 5,548 | py | Python | deepchem/models/tf_new_models/graph_models.py | KEHANG/deepchem | 367bea14cab47b1093bf106e0c196bb02d55c755 | [
"MIT"
] | null | null | null | deepchem/models/tf_new_models/graph_models.py | KEHANG/deepchem | 367bea14cab47b1093bf106e0c196bb02d55c755 | [
"MIT"
] | null | null | null | deepchem/models/tf_new_models/graph_models.py | KEHANG/deepchem | 367bea14cab47b1093bf106e0c196bb02d55c755 | [
"MIT"
] | 1 | 2021-07-09T19:58:54.000Z | 2021-07-09T19:58:54.000Z | """
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology
| 32.635294 | 82 | 0.6469 |
4a618ed57cbfdde42c612f538425cdaf22f7923a | 20,082 | py | Python | yandex/cloud/access/access_pb2.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | 1 | 2019-06-07T10:45:58.000Z | 2019-06-07T10:45:58.000Z | yandex/cloud/access/access_pb2.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | null | null | null | yandex/cloud/access/access_pb2.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/access/access.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/access/access.proto',
package='yandex.cloud.access',
syntax='proto3',
serialized_options=_b('Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;access'),
serialized_pb=_b('\n yandex/cloud/access/access.proto\x12\x13yandex.cloud.access\x1a\x1dyandex/cloud/validation.proto\"-\n\x07Subject\x12\x14\n\x02id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x0c\n\x04type\x18\x02 \x01(\t\"_\n\rAccessBinding\x12\x19\n\x07role_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x33\n\x07subject\x18\x02 \x01(\x0b\x32\x1c.yandex.cloud.access.SubjectB\x04\xe8\xc7\x31\x01\"|\n\x19ListAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\"r\n\x1aListAccessBindingsResponse\x12;\n\x0f\x61\x63\x63\x65ss_bindings\x18\x01 \x03(\x0b\x32\".yandex.cloud.access.AccessBinding\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x80\x01\n\x18SetAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12\x41\n\x0f\x61\x63\x63\x65ss_bindings\x18\x02 \x03(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01\"0\n\x19SetAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x8e\x01\n\x1bUpdateAccessBindingsRequest\x12!\n\x0bresource_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\x12L\n\x15\x61\x63\x63\x65ss_binding_deltas\x18\x02 \x03(\x0b\x32\'.yandex.cloud.access.AccessBindingDeltaB\x04\xe8\xc7\x31\x01\"3\n\x1cUpdateAccessBindingsMetadata\x12\x13\n\x0bresource_id\x18\x01 \x01(\t\"\x96\x01\n\x12\x41\x63\x63\x65ssBindingDelta\x12>\n\x06\x61\x63tion\x18\x01 \x01(\x0e\x32(.yandex.cloud.access.AccessBindingActionB\x04\xe8\xc7\x31\x01\x12@\n\x0e\x61\x63\x63\x65ss_binding\x18\x02 \x01(\x0b\x32\".yandex.cloud.access.AccessBindingB\x04\xe8\xc7\x31\x01*Q\n\x13\x41\x63\x63\x65ssBindingAction\x12%\n!ACCESS_BINDING_ACTION_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02\x42@Z>github.com/yandex-cloud/go-genproto/yandex/cloud/access;accessb\x06proto3')
,
dependencies=[yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
_ACCESSBINDINGACTION = _descriptor.EnumDescriptor(
name='AccessBindingAction',
full_name='yandex.cloud.access.AccessBindingAction',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCESS_BINDING_ACTION_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADD', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REMOVE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1006,
serialized_end=1087,
)
_sym_db.RegisterEnumDescriptor(_ACCESSBINDINGACTION)
AccessBindingAction = enum_type_wrapper.EnumTypeWrapper(_ACCESSBINDINGACTION)
ACCESS_BINDING_ACTION_UNSPECIFIED = 0
ADD = 1
REMOVE = 2
_SUBJECT = _descriptor.Descriptor(
name='Subject',
full_name='yandex.cloud.access.Subject',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='yandex.cloud.access.Subject.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='yandex.cloud.access.Subject.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=133,
)
_ACCESSBINDING = _descriptor.Descriptor(
name='AccessBinding',
full_name='yandex.cloud.access.AccessBinding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='role_id', full_name='yandex.cloud.access.AccessBinding.role_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject', full_name='yandex.cloud.access.AccessBinding.subject', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=230,
)
_LISTACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='ListAccessBindingsRequest',
full_name='yandex.cloud.access.ListAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.ListAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\372\3071\006<=1000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.access.ListAccessBindingsRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\005<=100'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=356,
)
_LISTACCESSBINDINGSRESPONSE = _descriptor.Descriptor(
name='ListAccessBindingsResponse',
full_name='yandex.cloud.access.ListAccessBindingsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='access_bindings', full_name='yandex.cloud.access.ListAccessBindingsResponse.access_bindings', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.access.ListAccessBindingsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=358,
serialized_end=472,
)
_SETACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='SetAccessBindingsRequest',
full_name='yandex.cloud.access.SetAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_bindings', full_name='yandex.cloud.access.SetAccessBindingsRequest.access_bindings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=475,
serialized_end=603,
)
_SETACCESSBINDINGSMETADATA = _descriptor.Descriptor(
name='SetAccessBindingsMetadata',
full_name='yandex.cloud.access.SetAccessBindingsMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.SetAccessBindingsMetadata.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=605,
serialized_end=653,
)
_UPDATEACCESSBINDINGSREQUEST = _descriptor.Descriptor(
name='UpdateAccessBindingsRequest',
full_name='yandex.cloud.access.UpdateAccessBindingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_binding_deltas', full_name='yandex.cloud.access.UpdateAccessBindingsRequest.access_binding_deltas', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=656,
serialized_end=798,
)
_UPDATEACCESSBINDINGSMETADATA = _descriptor.Descriptor(
name='UpdateAccessBindingsMetadata',
full_name='yandex.cloud.access.UpdateAccessBindingsMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_id', full_name='yandex.cloud.access.UpdateAccessBindingsMetadata.resource_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=800,
serialized_end=851,
)
# Descriptor for the AccessBindingDelta message: an `action` enum plus an
# `access_binding` sub-message (both carry the serialized option bytes
# '\350\3071\001'). NOTE(review): protoc-generated — do not edit by hand.
_ACCESSBINDINGDELTA = _descriptor.Descriptor(
  name='AccessBindingDelta',
  full_name='yandex.cloud.access.AccessBindingDelta',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='action', full_name='yandex.cloud.access.AccessBindingDelta.action', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='access_binding', full_name='yandex.cloud.access.AccessBindingDelta.access_binding', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=_b('\350\3071\001'), file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=854,
  serialized_end=1004,
)
# Resolve cross-references between the generated descriptors: attach the
# message/enum descriptors to the fields that reference them.
_ACCESSBINDING.fields_by_name['subject'].message_type = _SUBJECT
_LISTACCESSBINDINGSRESPONSE.fields_by_name['access_bindings'].message_type = _ACCESSBINDING
_SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings'].message_type = _ACCESSBINDING
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas'].message_type = _ACCESSBINDINGDELTA
_ACCESSBINDINGDELTA.fields_by_name['action'].enum_type = _ACCESSBINDINGACTION
_ACCESSBINDINGDELTA.fields_by_name['access_binding'].message_type = _ACCESSBINDING
# Register every top-level message and enum type on the file descriptor,
# then register the file with the default symbol database.
DESCRIPTOR.message_types_by_name['Subject'] = _SUBJECT
DESCRIPTOR.message_types_by_name['AccessBinding'] = _ACCESSBINDING
DESCRIPTOR.message_types_by_name['ListAccessBindingsRequest'] = _LISTACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['ListAccessBindingsResponse'] = _LISTACCESSBINDINGSRESPONSE
DESCRIPTOR.message_types_by_name['SetAccessBindingsRequest'] = _SETACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['SetAccessBindingsMetadata'] = _SETACCESSBINDINGSMETADATA
DESCRIPTOR.message_types_by_name['UpdateAccessBindingsRequest'] = _UPDATEACCESSBINDINGSREQUEST
DESCRIPTOR.message_types_by_name['UpdateAccessBindingsMetadata'] = _UPDATEACCESSBINDINGSMETADATA
DESCRIPTOR.message_types_by_name['AccessBindingDelta'] = _ACCESSBINDINGDELTA
DESCRIPTOR.enum_types_by_name['AccessBindingAction'] = _ACCESSBINDINGACTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors above and
# register each one with the symbol database. NOTE(review): protoc-generated
# boilerplate — one class per message type declared in the .proto file.
Subject = _reflection.GeneratedProtocolMessageType('Subject', (_message.Message,), dict(
  DESCRIPTOR = _SUBJECT,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.Subject)
  ))
_sym_db.RegisterMessage(Subject)
AccessBinding = _reflection.GeneratedProtocolMessageType('AccessBinding', (_message.Message,), dict(
  DESCRIPTOR = _ACCESSBINDING,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBinding)
  ))
_sym_db.RegisterMessage(AccessBinding)
ListAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('ListAccessBindingsRequest', (_message.Message,), dict(
  DESCRIPTOR = _LISTACCESSBINDINGSREQUEST,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsRequest)
  ))
_sym_db.RegisterMessage(ListAccessBindingsRequest)
ListAccessBindingsResponse = _reflection.GeneratedProtocolMessageType('ListAccessBindingsResponse', (_message.Message,), dict(
  DESCRIPTOR = _LISTACCESSBINDINGSRESPONSE,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.ListAccessBindingsResponse)
  ))
_sym_db.RegisterMessage(ListAccessBindingsResponse)
SetAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('SetAccessBindingsRequest', (_message.Message,), dict(
  DESCRIPTOR = _SETACCESSBINDINGSREQUEST,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsRequest)
  ))
_sym_db.RegisterMessage(SetAccessBindingsRequest)
SetAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('SetAccessBindingsMetadata', (_message.Message,), dict(
  DESCRIPTOR = _SETACCESSBINDINGSMETADATA,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.SetAccessBindingsMetadata)
  ))
_sym_db.RegisterMessage(SetAccessBindingsMetadata)
UpdateAccessBindingsRequest = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsRequest', (_message.Message,), dict(
  DESCRIPTOR = _UPDATEACCESSBINDINGSREQUEST,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsRequest)
  ))
_sym_db.RegisterMessage(UpdateAccessBindingsRequest)
UpdateAccessBindingsMetadata = _reflection.GeneratedProtocolMessageType('UpdateAccessBindingsMetadata', (_message.Message,), dict(
  DESCRIPTOR = _UPDATEACCESSBINDINGSMETADATA,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.UpdateAccessBindingsMetadata)
  ))
_sym_db.RegisterMessage(UpdateAccessBindingsMetadata)
AccessBindingDelta = _reflection.GeneratedProtocolMessageType('AccessBindingDelta', (_message.Message,), dict(
  DESCRIPTOR = _ACCESSBINDINGDELTA,
  __module__ = 'yandex.cloud.access.access_pb2'
  # @@protoc_insertion_point(class_scope:yandex.cloud.access.AccessBindingDelta)
  ))
_sym_db.RegisterMessage(AccessBindingDelta)
# Clear the parsed options objects on the file and field descriptors; the
# serialized option bytes set above remain authoritative. NOTE(review):
# protoc-generated — regenerate instead of editing.
DESCRIPTOR._options = None
_SUBJECT.fields_by_name['id']._options = None
_ACCESSBINDING.fields_by_name['role_id']._options = None
_ACCESSBINDING.fields_by_name['subject']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['page_size']._options = None
_LISTACCESSBINDINGSREQUEST.fields_by_name['page_token']._options = None
_SETACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_SETACCESSBINDINGSREQUEST.fields_by_name['access_bindings']._options = None
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['resource_id']._options = None
_UPDATEACCESSBINDINGSREQUEST.fields_by_name['access_binding_deltas']._options = None
_ACCESSBINDINGDELTA.fields_by_name['action']._options = None
_ACCESSBINDINGDELTA.fields_by_name['access_binding']._options = None
# @@protoc_insertion_point(module_scope)
| 40.900204 | 1,963 | 0.7684 |
4a629d479574b8f27c92b3a96ac0d80522d6e255 | 992 | py | Python | questionbank/users/urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | 1 | 2018-04-17T23:58:46.000Z | 2018-04-17T23:58:46.000Z | questionbank/users/urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | 8 | 2019-12-04T23:08:00.000Z | 2022-02-13T22:48:26.000Z | questionbank/users/urls.py | SyafiqTermizi/questionbank | 33e58db1a1610a85bd30a85d2f52e819bc27058b | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
UserListView, UserUpdateView, UserProfileView, UserDeleteView,
AcceptInvitationView, SpecialtyListView, SpecialtyCreateView, SpecialtyUpdateView,
SpecialtyDeleteView
)
# URL namespace: routes here reverse as 'users:<name>'.
app_name = 'users'
urlpatterns = [
    # User administration: list, edit and delete by primary key.
    path('', UserListView.as_view(), name='list'),
    path('<int:pk>/', UserUpdateView.as_view(), name='update'),
    path('<int:pk>/delete/', UserDeleteView.as_view(), name='delete'),
    # The signed-in user's own profile page.
    path('profile/', UserProfileView.as_view(), name='profile'),
    # Invitation acceptance keyed by an opaque string token.
    path(
        'invite/<str:token>/', AcceptInvitationView.as_view(),
        name='accept_invite'
    ),
    # Specialty CRUD views.
    path('specialties/', SpecialtyListView.as_view(), name='specialty_list'),
    path('specialties/create/', SpecialtyCreateView.as_view(), name='specialty_create'),
    path('specialties/<int:pk>/update/', SpecialtyUpdateView.as_view(), name='specialty_update'),
    path('specialties/<int:pk>/delete/', SpecialtyDeleteView.as_view(), name='specialty_delete')
]
| 39.68 | 97 | 0.708669 |
4a6439ff07d926ead0739ddd1b337b6e86927570 | 8,197 | py | Python | qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | 1 | 2021-07-07T21:23:38.000Z | 2021-07-07T21:23:38.000Z | qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py | Zoufalc/qiskit-machine-learning | aae3941214cd9667a53b643f229d11d0bff32c60 | [
"Apache-2.0"
] | 1 | 2021-04-11T14:30:32.000Z | 2021-04-11T14:30:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Neural network regressor """
from typing import Union
import numpy as np
from qiskit.algorithms.optimizers import Optimizer
from ...exceptions import QiskitMachineLearningError
from ...neural_networks import NeuralNetwork
from ...utils.loss_functions import (Loss, L1Loss, L2Loss, CrossEntropyLoss,
CrossEntropySigmoidLoss)
def predict(self, X: np.ndarray) -> np.ndarray:  # pylint: disable=invalid-name
    """
    Predict target values for the given samples using the fitted network.

    Args:
        X: The input data.
    Raises:
        QiskitMachineLearningError: Model needs to be fit to some training data first
    Returns:
        The predicted values.
    """
    if self._fit_result is None:
        raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
    # Evaluate the underlying network with the weights produced by fitting.
    # TODO: proper handling of batching
    optimal_weights = self._fit_result[0]
    return self._neural_network.forward(X, optimal_weights)
def score(self, X: np.ndarray, y: np.ndarray) -> float:  # pylint: disable=invalid-name
    """
    Return R-squared on the given test data and targeted values.

    Args:
        X: Test samples.
        y: True target values given `X`.
    Raises:
        QiskitMachineLearningError: Model needs to be fit to some training data first
    Returns:
        R-squared value.
    """
    if self._fit_result is None:
        raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
    # Renamed from `predict` so the local does not shadow the predict() method.
    predictions = self.predict(X)
    # Compute R^2 = 1 - SS_res / SS_tot for the score; hoist the mean so it
    # is not recomputed once per element of y.
    y_mean = np.mean(y)
    ss_res = sum((target - prediction) ** 2 for target, prediction in zip(y, predictions))
    ss_tot = sum((target - y_mean) ** 2 for target in y)
    score = 1 - (ss_res / ss_tot)
    # When the residual arithmetic yields an array (e.g. column-shaped
    # predictions), unwrap it to a scalar.
    if len(np.array(score).shape) > 0:
        return score[0]
    return score
| 39.791262 | 99 | 0.595218 |
4a655d791ecdecd8d04559095721de06fb34dc2a | 2,380 | py | Python | residuals.py | fbob/mplFOAM | 90c9a970ba9975ce115ef5a66eb22fc463b54003 | [
"MIT"
] | 8 | 2016-11-01T05:43:48.000Z | 2022-01-27T02:12:29.000Z | residuals.py | fbob/mplFOAM | 90c9a970ba9975ce115ef5a66eb22fc463b54003 | [
"MIT"
] | null | null | null | residuals.py | fbob/mplFOAM | 90c9a970ba9975ce115ef5a66eb22fc463b54003 | [
"MIT"
] | 3 | 2016-11-01T05:44:01.000Z | 2019-05-15T04:04:57.000Z | #!/usr/bin/env python
# encoding: utf-8
import sys
import getopt
import re
import os
import pylab as plt
import numpy as np
# Define the variables for which the residuals will be plotted
variables = ["Ux", "Uy", "T", "p_rgh", "k", "epsilon"]


def usage():
    """Print a short usage message for this script.

    Previously usage() was called on '-h' and on bad arguments but never
    defined, which raised NameError instead of showing help.
    """
    print("Usage: residuals.py -l <logfile>")
    print("       residuals.py --logfile <logfile>")
    print("Plots solver residuals and continuity errors from an OpenFOAM log file.")


# Get the arguments of the script
log_file = None
try:
    options, args = getopt.getopt(sys.argv[1:], 'l:h', ['help', 'logfile='])
except getopt.GetoptError:
    usage()
    sys.exit(2)
for opt, arg in options:
    if opt in ("-l", "--logfile"):
        log_file = arg
    elif opt in ("-h", "--help"):
        usage()
        sys.exit(1)
if log_file is None:
    # No logfile supplied: fail with help text rather than a NameError below.
    usage()
    sys.exit(2)
# Get the lines of the logfile 'log_file'
lines = open(log_file, "r" ).readlines()
# Get the time and continuity values
time = [] # Time(s) or iterations counter
continuity = [] # Continuity values
for line in lines:
    if re.search(r"^Time = ", line): # Search for string 'Time' at the begining of the line in file
        start = 'Time = '
        value = line.split(start)[1] # Take the Time value as the string just after start
        # float() instead of np.float: the np.float alias was removed in NumPy 1.24.
        time.append(float(value)) # Transform the string in a float value
    elif re.search(r"continuity errors :", line): # Search for string 'continuity' in the lines of file 'log_file'
        start = 'sum local = '
        end = ', global'
        value = line.split(start)[1].split(end)[0] # Take the continuity value as string between start and end
        continuity.append(float(value)) # Transform the string in a float value
# Get the residual values for each variable
for variable in variables:
    data = []
    for line in lines:
        if re.search(r"Solving for " + variable, line):# Search for string variable in line of file 'log_file'
            start = 'Final residual = '
            end = ', No Iterations'
            value = line.split(start)[1].split(end)[0]
            data.append(float(value))
    plt.plot(np.array(time),np.array(data), label=variable) # Plot the residual values of variable
plt.plot(np.array(time),np.array(continuity), label="Continuity") # Plot the continuity values
# Plot
plt.title("Residuals plot:\n * logfile: " + log_file + "\n * case dir: " + os.getcwd().split('/')[-1], loc='left')
plt.xlabel("Time(s)/Iterations")
plt.ylabel("Residuals (Log Scale)")
plt.yscale('log')
plt.legend()
plt.grid()
plt.show()
| 34.492754 | 114 | 0.64958 |
4a6725140b49d63b56d6ce94163eb9cfc057133e | 4,295 | py | Python | content_generator/vitae.py | empiricalstateofmind/personal_website | cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c | [
"MIT"
] | null | null | null | content_generator/vitae.py | empiricalstateofmind/personal_website | cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c | [
"MIT"
] | 3 | 2015-09-10T09:26:29.000Z | 2015-10-30T10:47:33.000Z | content_generator/vitae.py | empiricalstateofmind/personal_website | cb361f9fd6bd1b1dc8dd39c87cc003d847ae4a2c | [
"MIT"
] | null | null | null | # Generate the vitae.json file used to populate the Vitae section of the website.
import pandas as pd
import re
from datetime import datetime
from collections import defaultdict
import json
# Publications
if __name__ == "__main__":
    # FILEPATH = "D:/Dropbox/projects/personal_cv/vitae.xlsx" # We can pass this as an argument later
    FILEPATH = "../../../Projects/personal_cv/vitae.xlsx"
    # Assemble every CV section from the spreadsheet into one payload.
    vitae = {
        'publications': create_publications(FILEPATH),
        'conferences': create_conferences(FILEPATH),
        'teaching': create_teaching(FILEPATH),
        'reviewing': create_reviewing(FILEPATH),
    }
    # The same JSON document is written to both locations.
    for output_path in ('../app/mod_home/static/vitae.json', '../app/static/vitae.json'):
        with open(output_path, 'w') as file:
            json.dump(vitae, file, sort_keys=True, indent=4)
4a6776593c88474050fcd17038b16a7c7bc8d4c6 | 7,509 | py | Python | cement/ext/ext_generate.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 826 | 2015-01-09T13:23:35.000Z | 2022-03-18T01:19:40.000Z | cement/ext/ext_generate.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 316 | 2015-01-14T10:35:22.000Z | 2022-03-08T17:18:10.000Z | cement/ext/ext_generate.py | tomekr/cement | fece8629c48bcd598fd61d8aa7457a5df4c4f831 | [
"BSD-3-Clause"
] | 112 | 2015-01-10T15:04:26.000Z | 2022-03-16T08:11:58.000Z | """
Cement generate extension module.
"""
import re
import os
import inspect
import yaml
import shutil
from .. import Controller, minimal_logger, shell
from ..utils.version import VERSION, get_version
LOG = minimal_logger(__name__)
| 35.088785 | 80 | 0.481555 |
4a681f9f92ee718dd3a1a15638701370f778139a | 169 | py | Python | ditto/core/__init__.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | ditto/core/__init__.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | 9 | 2015-11-10T15:17:22.000Z | 2015-11-12T11:07:02.000Z | ditto/core/__init__.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | from . import forms
from . import views
# Built-in role name constants.
ADMIN_ROLE = "Administrator"
MEMBER_ROLE = "Member"
GUEST_ROLE = "Guest"
# The three built-in roles defined above, bundled for convenience.
DEFAULT_ROLES = [ADMIN_ROLE, MEMBER_ROLE, GUEST_ROLE]
| 18.777778 | 53 | 0.763314 |
4a6a1474e56bbc2b491bd544f9d2c60a78d79285 | 1,216 | py | Python | training_stats/hrm.py | salwator/training_stats | 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e | [
"MIT"
] | 4 | 2018-01-02T01:10:03.000Z | 2019-02-09T23:37:13.000Z | training_stats/hrm.py | salwator/training_stats | 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e | [
"MIT"
] | 4 | 2018-01-05T16:46:35.000Z | 2019-03-19T22:10:36.000Z | training_stats/hrm.py | salwator/training_stats | 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e | [
"MIT"
] | 2 | 2016-12-09T22:36:58.000Z | 2018-07-22T12:58:06.000Z | from .gpxfile import get_hr_measurements
from .utils import interpolate
from operator import itemgetter
def __calculate_moving_sums(points, window):
    """Return (timestamp, moving_sum) pairs over `points`.

    `points` is a sequence of (time, hr) pairs; each output entry pairs a
    timestamp with the sum of `window` consecutive hr values starting there.
    NOTE(review): the slice `times[1:-1 * window]` stops one window short of
    the end, so the final possible window is not emitted — confirm intended.
    """
    times, rates = zip(*points)
    # Seed with the first full window, then slide it one sample at a time.
    running = sum(rates[:window])
    result = [(times[0], running)]
    for idx, stamp in enumerate(times[1:-1 * window]):
        running += rates[idx + window] - rates[idx]
        result.append((stamp, running))
    return result
def calculate_lactate_threshold(hrdata):
    """ Given list of (time, hr), returns lactate threshold and selected data"""
    test_period = 60 * 30  # length of the tempo-run test window, seconds
    measured_period = 60 * 20  # portion actually averaged, seconds
    samples = interpolate(hrdata)
    # Locate the 30-minute stretch with the highest total heart rate.
    best_start, _best_sum = max(
        __calculate_moving_sums(samples, test_period),
        key=lambda entry: entry[1])
    # your lactate threshold is average of last 20 in 30 minutes of tempo run
    start_measure = best_start + (test_period - measured_period)
    stop_measure = start_measure + measured_period
    measured_time, measured_hrs = zip(*samples[start_measure:stop_measure])
    lactate_thr = round(sum(measured_hrs) / measured_period)
    return (lactate_thr, measured_time, measured_hrs)
| 39.225806 | 80 | 0.693257 |
4a6b2e5b7cf0173afb424be4c44105af0dae9900 | 7,577 | py | Python | scripts/utils/import_languages.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | scripts/utils/import_languages.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | scripts/utils/import_languages.py | mozilla-releng/staging-mozilla-vpn-client | f31d3762a607ccf2d7c6a016f7b800305fbf0113 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import xml.etree.ElementTree as ET
import os
import sys
import shutil
import atexit
import subprocess
# Use the project root as the working directory
prevdir = os.getcwd()
workdir = os.path.join(os.path.dirname(__file__), '..', '..')
os.chdir(workdir)
atexit.register(os.chdir, prevdir)
# Include only locales above this threshold (e.g. 70%) in production
l10n_threshold = 0.70
parser = argparse.ArgumentParser()
parser.add_argument(
'-m', '--macos', default=False, action="store_true", dest="ismacos",
help='Include the MacOS bundle data')
parser.add_argument(
'-q', '--qt_path', default=None, dest="qtpath",
help='The QT binary path. If not set, we try to guess.')
args = parser.parse_args()
stepnum = 1
# Step 0
title("Find the Qt localization tools...")
qtbinpath = args.qtpath
if qtbinpath is None:
qtbinpath = qtquery('qmake', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake6', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake5', 'QT_INSTALL_BINS')
if qtbinpath is None:
qtbinpath = qtquery('qmake-qt5', 'QT_INSTALL_BINS')
if qtbinpath is None:
print('Unable to locate qmake tool.')
sys.exit(1)
if not os.path.isdir(qtbinpath):
print(f"QT path is not a diretory: {qtbinpath}")
sys.exit(1)
lupdate = os.path.join(qtbinpath, 'lupdate')
lconvert = os.path.join(qtbinpath, 'lconvert')
lrelease = os.path.join(qtbinpath, 'lrelease')
# Step 0
# Let's update the i18n repo
os.system(f"git submodule init")
os.system(f"git submodule update --remote --depth 1 i18n")
# Step 1
# Go through the i18n repo, check each XLIFF file and take
# note which locale is complete above the minimum threshold.
# Adds path of .xliff and .ts to l10n_files.
title("Validate the XLIFF file...")
l10n_files = []
for locale in os.listdir('i18n'):
# Skip non folders
if not os.path.isdir(os.path.join('i18n', locale)):
continue
# Skip hidden folders
if locale.startswith('.'):
continue
xliff_path = os.path.join('i18n', locale, 'mozillavpn.xliff')
# If it's the source locale (en), ignore parsing for completeness and
# add it to the list.
if locale == 'en':
print(f'OK\t- en added (reference locale)')
l10n_files.append({
'locale': 'en',
'ts': os.path.join('translations', 'generated', 'mozillavpn_en.ts'),
'xliff': xliff_path
})
continue
tree = ET.parse(xliff_path)
root = tree.getroot()
sources = 0
translations = 0
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}source'):
sources += 1
for element in root.iter('{urn:oasis:names:tc:xliff:document:1.2}target'):
translations += 1
completeness = translations/(sources*1.0)
# Ignore locale with less than 70% of completeness
if completeness < l10n_threshold:
print(f'KO\t- {locale} is translated at {round(completeness*100, 2)}%, at least {l10n_threshold*100}% is needed')
continue # Not enough translations next file please
print(f'OK\t- {locale} added ({round(completeness*100, 2)}% translated)')
l10n_files.append({
'locale': locale,
'ts': os.path.join('translations', 'generated', f'mozillavpn_{locale}.ts'),
'xliff': xliff_path
})
# Step 2
title("Create folders and localization files for the languages...")
for file in l10n_files:
locdirectory = os.path.join('translations', 'generated', file['locale'])
os.makedirs(locdirectory, exist_ok=True)
locversion = os.path.join(locdirectory, f'locversion.plist')
with open(locversion, 'w') as locversion_file:
locversion_file.write(f"""<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\"
\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>LprojCompatibleVersion</key>
<string>123</string>
<key>LprojLocale</key>
<string>{file['locale']}</string>
<key>LprojRevisionLevel</key>
<string>1</string>
<key>LprojVersion</key>
<string>123</string>
</dict>
</plist>""")
with open(os.path.join('translations', 'generated', 'macos.pri'), 'w') as macospri:
macospri.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
for file in l10n_files:
macospri.write(f"LANGUAGES_FILES_{file['locale']}.files += $$PWD/{file['locale']}/locversion.plist\n")
macospri.write(f"LANGUAGES_FILES_{file['locale']}.path = Contents/Resources/{file['locale']}.lproj\n")
macospri.write(f"QMAKE_BUNDLE_DATA += LANGUAGES_FILES_{file['locale']}\n\n")
# Step 3
title("Write resource file to import the locales that are ready...")
with open('translations/generated/translations.qrc', 'w') as qrcfile:
qrcfile.write('<!-- AUTOGENERATED! DO NOT EDIT!! -->\n')
qrcfile.write('<RCC>\n')
qrcfile.write(' <qresource prefix="/i18n">\n')
for file in l10n_files:
qrcfile.write(f' <file>mozillavpn_{file["locale"]}.qm</file>\n')
qrcfile.write(' </qresource>\n')
qrcfile.write('</RCC>\n')
# Step 4
title("Generate the Js/C++ string definitions...")
try:
subprocess.call([sys.executable, os.path.join('scripts', 'utils', 'generate_strings.py'),
'-o', os.path.join('translations', 'generated'),
os.path.join('translations', 'strings.yaml')])
except Exception as e:
print("generate_strings.py failed. Try with:\n\tpip3 install -r requirements.txt --user")
print(e)
exit(1)
# Build a dummy project to glob together everything that might contain strings.
title("Scanning for new strings...")
with open('translations/generated/dummy.pro', 'w') as dummyproj:
dummyproj.write('### AUTOGENERATED! DO NOT EDIT!! ###\n')
dummyproj.write(f"HEADERS += l18nstrings.h\n")
dummyproj.write(f"SOURCES += l18nstrings_p.cpp\n")
dummyproj.write(f"SOURCES += ../l18nstrings.cpp\n\n")
for l10n_file in l10n_files:
dummyproj.write(f"TRANSLATIONS += {os.path.basename(l10n_file['ts'])}\n")
dummyproj.write("\n")
scan_sources(dummyproj, '../../src')
scan_sources(dummyproj, '../../nebula')
# Step 5
title("Generate translation resources...")
for l10n_file in l10n_files:
os.system(f"{lconvert} -if xlf -i {l10n_file['xliff']} -o {l10n_file['ts']}")
os.system(f"{lupdate} translations/generated/dummy.pro")
for l10n_file in l10n_files:
os.system(f"{lrelease} -idbased {l10n_file['ts']}")
print(f'Imported {len(l10n_files)} locales')
git = os.popen(f'git submodule status i18n')
git_commit_hash = git.read().strip().replace("+","").split(' ')[0]
print(f'Current commit: https://github.com/mozilla-l10n/mozilla-vpn-client-l10n/commit/{git_commit_hash}')
| 35.57277 | 121 | 0.665171 |
4a6e93c38ff63c100497bb656432f8f40340791b | 1,026 | py | Python | cogs/filter.py | Velgaster/Discord-User-Vote | 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | [
"MIT"
] | null | null | null | cogs/filter.py | Velgaster/Discord-User-Vote | 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | [
"MIT"
] | null | null | null | cogs/filter.py | Velgaster/Discord-User-Vote | 4aacc0bf01a11b948fa5355a3775ef8c7ae9751e | [
"MIT"
] | null | null | null | from discord.ext import commands
import discord
| 35.37931 | 84 | 0.665692 |
4a6fe4cb292136ed5cb190cbef1dbace08d2c9c3 | 1,975 | py | Python | api/app.py | sai-krishna-msk/KickAssist | 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | [
"MIT"
] | null | null | null | api/app.py | sai-krishna-msk/KickAssist | 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | [
"MIT"
] | 7 | 2021-06-08T21:18:49.000Z | 2022-03-12T00:24:33.000Z | api/app.py | sai-krishna-msk/KickAssist | 7fb256e3ef4beff231332f6491ebb975f3fe4b43 | [
"MIT"
] | null | null | null | from ml_model.model import KickModel
import numpy as np
import pandas as pd
import eli5
import joblib
import flask
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
# Load the serialized estimators and encoders once at import time so they
# are not re-read from disk per request.
model_oh = joblib.load('ml_model/estimators/model_oh.sav')
model_hel = joblib.load('ml_model/estimators/model_hel.sav')
encoder_oh = joblib.load('ml_model/estimators/encoder_oh.sav')
encoder_hel = joblib.load('ml_model/estimators/encoder_hel.sav')
encoder_label = joblib.load('ml_model/estimators/encoder_label.sav')
if __name__=="__main__":
    # Development server only (debug=True); use a WSGI server in production.
    app.run(debug =True)
4a70669d9d055da240cf688e557bf0a87257569e | 2,810 | py | Python | snowddl/resolver/primary_key.py | littleK0i/SnowDDL | b24cb3676e41fec8876d61a101ba242e7272a18f | [
"Apache-2.0"
] | 21 | 2022-02-10T16:52:03.000Z | 2022-03-18T15:27:18.000Z | snowddl/resolver/primary_key.py | littleK0i/SnowDDL | b24cb3676e41fec8876d61a101ba242e7272a18f | [
"Apache-2.0"
] | null | null | null | snowddl/resolver/primary_key.py | littleK0i/SnowDDL | b24cb3676e41fec8876d61a101ba242e7272a18f | [
"Apache-2.0"
] | 1 | 2022-03-05T11:02:42.000Z | 2022-03-05T11:02:42.000Z | from snowddl.blueprint import PrimaryKeyBlueprint
from snowddl.resolver.abc_schema_object_resolver import AbstractSchemaObjectResolver, ResolveResult, ObjectType
| 36.973684 | 111 | 0.5879 |
4a713700e9c156f74125bcaeca0299290201d914 | 675 | py | Python | modules/module0/02_datastructures_and_geometry/datastructures_2b.py | tetov/ITA19 | 1af68a8885caf83acd98f4136d0286539ccbe63b | [
"MIT"
] | 7 | 2019-11-13T20:29:54.000Z | 2020-02-26T14:30:54.000Z | modules/module0/02_datastructures_and_geometry/datastructures_2b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 4 | 2019-11-07T20:57:51.000Z | 2020-03-04T11:43:18.000Z | modules/module0/02_datastructures_and_geometry/datastructures_2b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 6 | 2019-10-30T13:25:54.000Z | 2020-02-14T14:06:09.000Z | import os
import compas
from compas.datastructures import Mesh
from compas_rhino.artists import MeshArtist
# Resolve data/faces.obj relative to this script's location.
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
artist = MeshArtist(mesh, layer="Mesh")
# Draw all vertices, coloring those on the boundary red.
artist.draw_vertices(
    color={key: (255, 0, 0) for key in mesh.vertices_on_boundary()})
# Label every vertex with its vertex degree.
artist.draw_vertexlabels(
    text={key: str(mesh.vertex_degree(key)) for key in mesh.vertices()})
# Draw only the boundary edges, also in red.
artist.draw_edges(
    keys=list(mesh.edges_on_boundary()),
    color=(255, 0, 0))
# Color interior faces light green; boundary faces are left out of the map.
artist.draw_faces(
    color={key: (150, 255, 150) for key in mesh.faces() if not mesh.is_face_on_boundary(key)})
| 25 | 94 | 0.722963 |
4a7152ca8736c0b2b62e12278fe928d5690e8c0b | 461 | py | Python | OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | 2 | 2022-03-04T11:39:03.000Z | 2022-03-13T07:13:23.000Z | OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | OOP/Exercises/First_steps_in_OOP_Exercises/8_pokemon/project/pokemon.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | # The Pokemon class should receive a name (string) and health (int) upon initialization.
# It should also have a method called pokemon_details that returns the information about the pokemon:
# "{pokemon_name} with health {pokemon_health}"
| 35.461538 | 101 | 0.70282 |
4a71705f7aaede9643300a7a698cb26841f08adc | 1,936 | py | Python | tests/test_pandas.py | ONSdigital/ons_utils | 5ff0952c174984deb601af8ad4c21f26c7b24623 | [
"MIT"
] | null | null | null | tests/test_pandas.py | ONSdigital/ons_utils | 5ff0952c174984deb601af8ad4c21f26c7b24623 | [
"MIT"
] | null | null | null | tests/test_pandas.py | ONSdigital/ons_utils | 5ff0952c174984deb601af8ad4c21f26c7b24623 | [
"MIT"
] | 1 | 2022-03-17T08:03:17.000Z | 2022-03-17T08:03:17.000Z | """Tests for the pandas helpers in the pd_helpers.py module."""
import pytest
from pandas.testing import assert_frame_equal
from tests.conftest import create_dataframe
from ons_utils.pandas import *
def test_nested_dict_to_df():
    """Test for nested_dict_to_df."""
    nested = {
        'bones': {
            'femur': {'tendons': 24},
            'humerus': {'tendons': 14},
        },
        'muscles': {
            'gluteus_maximus': {'tendons': 18},
        },
        'cars': 7,
    }

    result = nested_dict_to_df(
        nested,
        columns=['number'],
        level_names=('a', 'b', 'c'),
    )

    target = create_dataframe([
        ('a', 'b', 'c', 'number'),
        ('bones', 'femur', 'tendons', 24),
        ('bones', 'humerus', 'tendons', 14),
        ('cars', None, None, 7),
        ('muscles', 'gluteus_maximus', 'tendons', 18),
    ])

    # Sort the result because dict order is not preserved, and index the
    # expected frame because the function returns a MultiIndexed frame.
    assert_frame_equal(
        result.sort_values(['a', 'b']),
        target.set_index(['a', 'b', 'c'])
    )
| 22.776471 | 63 | 0.591426 |
4a71f720f8188e39f1b7b64f6e15744bd236efe6 | 72 | py | Python | lsf_ibutils/ibsub/__init__.py | seanfisk/lsf-ibutils | a22c738376d656ab38f4bfa3572d4693288098cb | [
"MIT"
] | null | null | null | lsf_ibutils/ibsub/__init__.py | seanfisk/lsf-ibutils | a22c738376d656ab38f4bfa3572d4693288098cb | [
"MIT"
] | null | null | null | lsf_ibutils/ibsub/__init__.py | seanfisk/lsf-ibutils | a22c738376d656ab38f4bfa3572d4693288098cb | [
"MIT"
] | 1 | 2021-06-03T22:32:54.000Z | 2021-06-03T22:32:54.000Z | """:mod:`lsf_ibutils.ibsub` -- Interactive batch submission utility
"""
| 24 | 67 | 0.722222 |
4a72355337ea53a1937c776fab78aa381734b4c1 | 193 | py | Python | build/lib/configger/fishes/__init__.py | PaperDevil/pyconfigger | 75c6e3f74e6e70d8ec9565397e2be9ae8815d44e | [
"MIT"
] | 2 | 2021-02-04T14:29:19.000Z | 2021-03-04T12:56:58.000Z | build/lib/configger/fishes/__init__.py | PaperDevil/pyconfigger | 75c6e3f74e6e70d8ec9565397e2be9ae8815d44e | [
"MIT"
] | null | null | null | build/lib/configger/fishes/__init__.py | PaperDevil/pyconfigger | 75c6e3f74e6e70d8ec9565397e2be9ae8815d44e | [
"MIT"
] | 2 | 2020-08-19T21:50:30.000Z | 2020-11-04T03:51:33.000Z | import os
# Directory containing this package, resolved portably. The previous code
# split os.path.realpath(__file__) on '\\', which only works with
# Windows-style separators; on POSIX the split produced an empty path.
fish_path = os.path.dirname(os.path.realpath(__file__))
# Name and absolute location of the bundled fish.json data file.
fish_json_name = "fish.json"
fish_json_path = os.path.join(fish_path, fish_json_name)
| 24.125 | 58 | 0.735751 |
4a73c0e8a1979c239e091749b325602ad4a40468 | 5,620 | py | Python | setup.py | IntuitionEngineeringTeam/RedBlackPy | 99630408153bea7494415c402eb2d9881f3168ee | [
"Apache-2.0"
] | 12 | 2018-08-24T20:46:38.000Z | 2022-01-20T16:25:23.000Z | setup.py | IntuitionEngineeringTeam/RedBlackPy | 99630408153bea7494415c402eb2d9881f3168ee | [
"Apache-2.0"
] | 1 | 2019-04-02T04:19:58.000Z | 2019-04-02T04:19:58.000Z | setup.py | IntuitionEngineeringTeam/RedBlackPy | 99630408153bea7494415c402eb2d9881f3168ee | [
"Apache-2.0"
] | 3 | 2018-07-05T22:47:27.000Z | 2019-05-25T06:40:40.000Z | #
# Created by Soldoskikh Kirill.
# Copyright 2018 Intuition. All rights reserved.
#
import os
import platform
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.extension import Extension
from Cython.Build import cythonize
from rbp_setup_tools.code_generation import generate_from_cython_src
from rbp_setup_tools.types import TYPES
# Pick C++ compiler flags for the Cython extensions based on the build OS.
if platform.system() == 'Darwin':
    # macOS additionally pins the minimum deployment target to the build host.
    compile_opts = [ '-std=c++11',
                     '-mmacosx-version-min={:}'.format( platform.mac_ver()[0] ),
                     '-Ofast' ]
elif platform.system() == 'Linux':
    compile_opts = [ '-std=c++11',
                     '-Ofast' ]
elif platform.system() == 'Windows':
    # NOTE(review): these are GCC/Clang-style flags; MSVC would reject them,
    # so a MinGW-style toolchain is presumably assumed on Windows -- confirm.
    compile_opts = [ '-std=c++11',
                     '-Ofast' ]
else:
    # Any other platform is unsupported for this build.
    raise EnvironmentError( 'Not supported platform: {plat}'.format(plat=platform.system()) )
#--------------------------------------------------------------------------------------------
# Generate cython code for all supporting types
#--------------------------------------------------------------------------------------------
src_1 = './redblackpy/cython_source/__dtype_tree_processing.pxi'
src_2 = './redblackpy/cython_source/__tree_series_dtype.pxi'
src_3 = './redblackpy/cython_source/__interpolation.pxi'
src_4 = './redblackpy/cython_source/__arithmetic.pxi'
src_1 = open(src_1, 'r')
src_2 = open(src_2, 'r')
src_3 = open(src_3, 'r')
src_4 = open(src_4, 'r')
output_1 = open('./redblackpy/cython_source/dtype_tree_processing.pxi', 'w')
output_2 = open('./redblackpy/cython_source/tree_series_dtype.pxi', 'w')
output_3 = open('./redblackpy/cython_source/interpolation.pxi', 'w')
output_4 = open('./redblackpy/cython_source/arithmetic.pxi', 'w')
generate_from_cython_src(src_1, output_1, TYPES[:-1], 0)
generate_from_cython_src(src_2, output_2, TYPES, 14)
generate_from_cython_src(src_3, output_3, TYPES, 0)
generate_from_cython_src(src_4, output_4, TYPES, 0)
src_1.close()
src_2.close()
src_3.close()
src_4.close()
output_1.close()
output_2.close()
output_3.close()
output_4.close()
#--------------------------------------------------------------------------------------------
ext_modules=[ Extension( "redblackpy.series.tree_series",
sources=["redblackpy/series/tree_series.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp'
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.series.series_iterator",
sources=["redblackpy/series/series_iterator.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'],
depends=[ 'core/tree/tree.hpp',
'core/tree/rb_tree.tpp'
'core/tree/rb_node.tpp',
'core/tree/rb_node_valued.tpp',
'core/trees_iterator/iterator.hpp',
'core/trees_iterator/iterator.tpp' ], ),
Extension( "redblackpy.benchmark.timer",
sources=["redblackpy/benchmark/timer.pyx"],
extra_compile_args=compile_opts,
extra_link_args=compile_opts[:-1],
language = "c++",
include_dirs=['./redblackpy'] ) ]
setup( name='redblackpy',
ext_modules = cythonize(ext_modules),
version='0.1.3.0',
author='Solodskikh Kirill',
author_email='hypo@intuition.engineering',
maintainer='Intuition',
maintainer_email='dev@intuition.engineering',
install_requires=['cython'],
description='Data structures based on red-black trees.',
url='https://intuitionengineeringteam.github.io/RedBlackPy/',
download_url='https://github.com/IntuitionEngineeringTeam/RedBlackPy/archive/master.zip',
zip_safe=False,
packages=[ 'redblackpy', 'redblackpy.series',
'redblackpy.benchmark', 'redblackpy.tree_cython_api'],
package_data={'redblackpy.series': ['*.pxd']},
include_package_data=True,
license='Apache License 2.0',
long_description='RedBlackPy is a light Python library that provides data structures \
aimed to fast insertion, removal and self sorting to manipulating ordered data in efficient way.\
The core part of the library had been written on C++ and then was wrapped in Cython. \
Hope that many would find the primary data structures of this library very handy in working \
with time series. One of the main feature of this structures is an access by arbitrary \
key using interpolation, what makes processing of multiple non synchronized time series very simple.\
All data structures based on red black trees.',
classifiers = [ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3' ] )
| 44.251969 | 108 | 0.57242 |
4a73d46ee78874a78fab6b3b0aaa918a453b1649 | 8,296 | py | Python | source/accounts/views.py | kishan2064/hashpy1 | 2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264 | [
"BSD-3-Clause"
] | null | null | null | source/accounts/views.py | kishan2064/hashpy1 | 2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264 | [
"BSD-3-Clause"
] | 5 | 2020-02-11T22:31:59.000Z | 2021-06-10T17:45:14.000Z | source/accounts/views.py | kishan2064/hashpy1 | 2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import PasswordResetView as BasePasswordResetView, SuccessURLAllowedHostsMixin
from django.shortcuts import get_object_or_404, resolve_url
from django.utils.crypto import get_random_string
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.utils.translation import gettext_lazy as _
from django.views.generic import RedirectView
from django.views.generic.edit import FormView
from django.conf import settings
from .utils import (
get_login_form, send_activation_email, get_password_reset_form, send_reset_password_email,
send_activation_change_email, is_username_disabled, get_resend_ac_form
)
from .forms import SignUpForm, ProfileEditForm, ChangeEmailForm
from .models import Activation
UserModel = get_user_model()
| 32.155039 | 118 | 0.690333 |
4a7405fc354c53785ef8307b7ce20355175f5c8f | 7,320 | py | Python | conversationkg/kgs/writers.py | INDElab/conversationkg | 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | [
"Apache-2.0"
] | 3 | 2021-01-18T10:07:44.000Z | 2021-05-27T07:39:35.000Z | conversationkg/kgs/writers.py | INDElab/conversationkg | 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | [
"Apache-2.0"
] | 3 | 2020-12-09T23:20:27.000Z | 2021-03-06T11:08:24.000Z | conversationkg/kgs/writers.py | INDElab/conversationkg | 8bfe09b0afb4954f633a9287f723c61dcd21ce46 | [
"Apache-2.0"
] | 1 | 2021-02-19T12:10:11.000Z | 2021-02-19T12:10:11.000Z | from ..conversations.corpus import Conversation
from ..conversations.emails import Email
from collections import Counter
import matplotlib
import pandas as pd
import json
from neo4j import GraphDatabase
| 30.247934 | 124 | 0.454645 |
4a74f67398645a5ea142cd4ebc8cc51cbdd14233 | 590 | py | Python | model-test.py | shikew/Handwriting-calculator | 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | [
"Apache-2.0"
] | null | null | null | model-test.py | shikew/Handwriting-calculator | 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | [
"Apache-2.0"
] | null | null | null | model-test.py | shikew/Handwriting-calculator | 5e0da9f8ceac6dcc815139c6855dfc6fb5af909f | [
"Apache-2.0"
] | 1 | 2019-09-11T11:48:47.000Z | 2019-09-11T11:48:47.000Z | import numpy as np
from PIL import Image
from keras.models import load_model
# Load the sample digit image; np.array turns the PIL image into pixel data.
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('shape',number.flatten().shape)
print('number:',number)
# Match the training preprocessing: float32 pixels scaled from 0-255 down to
# the 0-1 range, then flattened into a 1-D feature vector.
number = number.astype('float32')
number = number/255 # scale 8-bit pixel values into [0, 1]
number = number.flatten()
print('number.shape:',number.shape)
# Restore the trained dense network and classify the prepared vector.
# NOTE(review): the model presumably expects a 28x28 single-channel input
# (MNIST-style) -- confirm 1002.png matches that shape.
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
# return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
#print('',test_target[8000]) | 28.095238 | 48 | 0.749153 |
4a752e0adb3dfdb8832eacdb68f81c47021fa651 | 378 | gyp | Python | deps/libgdal/gyp-formats/ogr_mem.gyp | khrushjing/node-gdal-async | 6546b0c8690f2db677d5385b40b407523503b314 | [
"Apache-2.0"
] | 42 | 2021-03-26T17:34:52.000Z | 2022-03-18T14:15:31.000Z | deps/libgdal/gyp-formats/ogr_mem.gyp | khrushjing/node-gdal-async | 6546b0c8690f2db677d5385b40b407523503b314 | [
"Apache-2.0"
] | 29 | 2021-06-03T14:24:01.000Z | 2022-03-23T15:43:58.000Z | deps/libgdal/gyp-formats/ogr_mem.gyp | khrushjing/node-gdal-async | 6546b0c8690f2db677d5385b40b407523503b314 | [
"Apache-2.0"
] | 8 | 2021-05-14T19:26:37.000Z | 2022-03-21T13:44:42.000Z | {
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_ogr_mem_frmt",
"type": "static_library",
"sources": [
"../gdal/ogr/ogrsf_frmts/mem/ogrmemdatasource.cpp",
"../gdal/ogr/ogrsf_frmts/mem/ogrmemlayer.cpp",
"../gdal/ogr/ogrsf_frmts/mem/ogrmemdriver.cpp"
],
"include_dirs": [
"../gdal/ogr/ogrsf_frmts/mem"
]
}
]
}
| 18.9 | 55 | 0.595238 |
4a75b7b70277fd3cd807924be5321a95f06ea318 | 72,121 | py | Python | iblviewer/volume.py | nantille/iblviewer | a5dad67e8f4b99a535297ba0803caf07b1107ca1 | [
"MIT"
] | null | null | null | iblviewer/volume.py | nantille/iblviewer | a5dad67e8f4b99a535297ba0803caf07b1107ca1 | [
"MIT"
] | null | null | null | iblviewer/volume.py | nantille/iblviewer | a5dad67e8f4b99a535297ba0803caf07b1107ca1 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import logging
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
def blend_maps(map1, map2, time, total_time):
    """
    Blend two color maps by elapsed time.

    ``map1`` is weighted by the remaining time (``total_time - time``) and
    ``map2`` by the elapsed ``time``; both weights are clamped at zero so
    they never go negative.
    """
    weights = (max(0.0, total_time - time), max(0.0, time))
    return map1 * weights[0] + map2 * weights[1]
| 39.867883 | 119 | 0.609892 |
4a76ff4e7600c0692264f843891e33f896e8b3a4 | 12,670 | py | Python | modeling/dataset.py | LaudateCorpus1/ml-cread | b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae | [
"AML"
] | 18 | 2021-05-25T17:06:46.000Z | 2021-11-08T09:47:48.000Z | modeling/dataset.py | LaudateCorpus1/ml-cread | b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae | [
"AML"
] | null | null | null | modeling/dataset.py | LaudateCorpus1/ml-cread | b5d5aa87faa0ddad0b41b6b0672395a8bf6147ae | [
"AML"
] | 6 | 2021-06-03T21:29:34.000Z | 2022-03-26T11:38:37.000Z | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2021 Apple Inc. All Rights Reserved.
#
'''
Dataset file
'''
import sys
import time
import json
import copy
from itertools import chain
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, RandomSampler
# Special tokens added to the tokenizer vocabulary. <USR>/<SYS> presumably
# mark user and system dialogue turns, and <M>/</M>, <R>/</R>, <CUR> delimit
# spans in the serialized conversation -- confirm against the model code
# that consumes these markers.
SPECIAL_TOKENS = {
    "bos_token": "<BOS>",
    "eos_token": "<EOS>",
    "pad_token": "<PAD>",
    "sep_token": "<SEP>",
    "additional_special_tokens": ["<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
}
# Flat list of the same tokens; keep in sync with SPECIAL_TOKENS above.
SPECIAL_TOKENS_VALUES = ["<BOS>", "<EOS>", "<PAD>", "<SEP>", "<USR>", "<SYS>", "<M>", "</M>", "<R>", "</R>", "<CUR>"]
if __name__ == '__main__':
pass
| 38.510638 | 139 | 0.728808 |
4a770f589bb75a8f2ce9da24f74f5b68103d69bf | 2,431 | py | Python | hy/lex/lexer.py | schuster-rainer/hy | d969ed63d67c4a9070fd41a8fbff35da845e0619 | [
"MIT"
] | 12 | 2015-01-01T21:21:31.000Z | 2021-06-14T19:51:59.000Z | hy/lex/lexer.py | schuster-rainer/hy | d969ed63d67c4a9070fd41a8fbff35da845e0619 | [
"MIT"
] | null | null | null | hy/lex/lexer.py | schuster-rainer/hy | d969ed63d67c4a9070fd41a8fbff35da845e0619 | [
"MIT"
] | 2 | 2016-01-17T21:59:29.000Z | 2016-09-06T20:56:41.000Z | # Copyright (c) 2013 Nicolas Dandrimont <nicolas.dandrimont@crans.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply import LexerGenerator
lg = LexerGenerator()
# A regexp for something that should end a quoting/unquoting operator
# i.e. a space or a closing brace/paren/curly
end_quote = r'(?![\s\)\]\}])'
# Paired delimiters; HLCURLY is the '#{' opener.
lg.add('LPAREN', r'\(')
lg.add('RPAREN', r'\)')
lg.add('LBRACKET', r'\[')
lg.add('RBRACKET', r'\]')
lg.add('LCURLY', r'\{')
lg.add('RCURLY', r'\}')
lg.add('HLCURLY', r'#\{')
# Quote operators only match when followed by something other than
# whitespace or a closing delimiter (see end_quote above).
lg.add('QUOTE', r'\'%s' % end_quote)
lg.add('QUASIQUOTE', r'`%s' % end_quote)
# UNQUOTESPLICE is registered before UNQUOTE so '~@' is not consumed as '~'.
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
# Shebang lines; HASHREADER is '#' followed by any char except '{'
# (which belongs to HLCURLY above).
lg.add('HASHBANG', r'#!.*[^\r\n]')
lg.add('HASHREADER', r'#[^{]')
# A regexp which matches incomplete strings, used to support
# multi-line strings in the interpreter
partial_string = r'''(?x)
(?:u|r|ur|ru)? # prefix
" # start string
(?:
| [^"\\] # non-quote or backslash
| \\(.|\n) # or escaped single character or newline
| \\x[0-9a-fA-F]{2} # or escaped raw character
| \\u[0-9a-fA-F]{4} # or unicode escape
| \\U[0-9a-fA-F]{8} # or long unicode escape
)* # one or more times
'''
# A complete STRING is a partial string plus its closing double quote.
lg.add('STRING', r'%s"' % partial_string)
lg.add('PARTIAL_STRING', partial_string)
# Identifiers: anything that is not a delimiter, quote, whitespace or ';'.
lg.add('IDENTIFIER', r'[^()\[\]{}\'"\s;]+')
# Skip line comments (';' to end of line) and whitespace.
lg.ignore(r';.*(?=\r|\n|$)')
lg.ignore(r'\s+')
lexer = lg.build()
| 34.239437 | 76 | 0.667626 |
4a77208eebfdf92ef53ffabde97b664e8625e12d | 1,319 | py | Python | week6/shuffle.py | solideveloper/afs-210 | 2ba0bb7c7617cd3169907458f657696a6987689d | [
"Apache-2.0"
] | 1 | 2022-01-06T01:22:17.000Z | 2022-01-06T01:22:17.000Z | week6/shuffle.py | solideveloper/afs-210 | 2ba0bb7c7617cd3169907458f657696a6987689d | [
"Apache-2.0"
] | null | null | null | week6/shuffle.py | solideveloper/afs-210 | 2ba0bb7c7617cd3169907458f657696a6987689d | [
"Apache-2.0"
] | null | null | null | # Python provides a built-in method called random.shuffle that will shuffle the list data type. Do not use this.
# For this assignment, you are to create your own shuffle algorithm that will take as input a sorted list and randomly shuffle the items before returning the list. Try to make your algorithm as efficient as possible.
# Add a comment to your code stating what the time complexity of your algorithm is and why.
# Display list before and after shuffle. Call your shuffle function multiple times, each time on the original sorted list to show the random order of the list items.
data = [7, 20, 26, 31, 40, 51, 55, 63, 74, 81]
ndata = len(data)
import random


def shuffleAlgorithm(items, n):
    """Return a randomly shuffled copy of the first n items of a list.

    Fisher-Yates shuffle: walk from the last index down to 1, swapping
    each position with a uniformly chosen index that has not been fixed
    yet. Time complexity is O(n): one pass with an O(1) swap per element.
    Shuffling a copy leaves the caller's list in its original sorted
    order, so repeated calls each reshuffle the original data.
    """
    shuffled = list(items[:n])
    for last in range(n - 1, 0, -1):
        pick = random.randint(0, last)
        shuffled[last], shuffled[pick] = shuffled[pick], shuffled[last]
    return shuffled


# NOTE(review): the original file called shuffleAlgorithm without defining
# it (NameError at runtime); the definition above restores the intended
# Fisher-Yates behaviour described by the comments below.
print(data)
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))
print(shuffleAlgorithm(data, ndata))
# Fisher-Yates is O(n) time: each index is visited once, each swap is O(1).
# swapping the last item with a random -not previously selected- item and repeating until all items in list have been selected | 54.958333 | 217 | 0.749052 |
4a78cf1db1ffe2307d7c782737a9e5d96a2685ca | 1,254 | py | Python | workbox/workbox/lib/helpers.py | pr3sto/workbox | 558147a1a387dcfbe03be0fbc366d1d793364da6 | [
"MIT"
] | null | null | null | workbox/workbox/lib/helpers.py | pr3sto/workbox | 558147a1a387dcfbe03be0fbc366d1d793364da6 | [
"MIT"
] | null | null | null | workbox/workbox/lib/helpers.py | pr3sto/workbox | 558147a1a387dcfbe03be0fbc366d1d793364da6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Template Helpers used in workbox"""
import logging
import socket
from datetime import datetime
from markupsafe import Markup
import psutil
import tg
log = logging.getLogger(__name__)
def current_year():
    """ Return the current year as a 4-digit string (e.g. '2024'). """
    return datetime.now().strftime('%Y')
def is_docker_enabled():
    """ Detect if the docker service is started.

    True when any running process has 'docker' in its name.
    """
    return any('docker' in proc.name() for proc in psutil.process_iter())
def get_server_load_value():
    """ Get server load value.

    The load metric is the system-wide virtual-memory usage percentage
    reported by psutil.
    """
    return psutil.virtual_memory().percent
def get_free_port():
    """ Find and return a currently free TCP port number.

    Binds a throwaway socket to port 0 so the OS picks an unused port,
    reads the chosen port back, and closes the socket before returning.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
def get_vagrantfiles_base_folder():
    """ Return base folder for vagrantfiles.

    Read from the TurboGears config key 'workbox.vagrantfiles.basefolder';
    tg.config.get returns None when the key is not configured.
    """
    return tg.config.get('workbox.vagrantfiles.basefolder')
def get_hostname():
    """ Return hostname.

    Read from the TurboGears config key 'workbox.hostname'; tg.config.get
    returns None when the key is not configured.
    """
    return tg.config.get('workbox.hostname')
try:
from webhelpers2 import date, html, number, misc, text
except SyntaxError:
log.error("WebHelpers2 helpers not available with this Python Version")
| 22.392857 | 75 | 0.679426 |
4a79466df9295fa5ad7c3a62c359310229ec684a | 5,647 | py | Python | tadataka/dataset/new_tsukuba.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 54 | 2019-11-15T16:30:34.000Z | 2022-01-13T15:18:54.000Z | tadataka/dataset/new_tsukuba.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 11 | 2019-02-28T08:28:24.000Z | 2020-04-07T04:47:12.000Z | tadataka/dataset/new_tsukuba.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 1 | 2020-02-26T13:59:40.000Z | 2020-02-26T13:59:40.000Z | import csv
import os
from pathlib import Path
from xml.etree import ElementTree as ET
from tqdm import tqdm
from scipy.spatial.transform import Rotation
from skimage.io import imread
import numpy as np
from tadataka.camera import CameraModel, CameraParameters, FOV
from tadataka.dataset.frame import Frame
from tadataka.dataset.base import BaseDataset
from tadataka.pose import Pose
# TODO download and set dataset_root automatically
| 34.644172 | 81 | 0.673278 |
4a798e4f49354ed1b300d7ffad5bbb4e1e929e1a | 2,015 | py | Python | krogon/maybe.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | 1 | 2020-03-02T14:17:02.000Z | 2020-03-02T14:17:02.000Z | krogon/maybe.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | null | null | null | krogon/maybe.py | enamrik/krogon | a41a10ed346b7198509929ed9ba1e9fcf778dc78 | [
"MIT"
] | null | null | null | from typing import Callable, TypeVar, Union, Tuple
from krogon.infix import Infix
A = TypeVar('A')
B = TypeVar('B')
E = TypeVar('E')
Maybe = Union[Tuple['just', A], Tuple['nothing']]
def _cast_to_maybe(result):
    """Coerce a raw value into a Maybe tuple.

    None becomes nothing(); a well-formed ('just'/'nothing', value) pair is
    passed through untouched; anything else is wrapped with just().
    """
    if result is None:
        return nothing()
    # NOTE(review): a bare ('nothing',) 1-tuple fails the length check and
    # gets re-wrapped by just() -- confirm that is intended.
    looks_like_maybe = isinstance(result, tuple) and len(result) == 2
    if looks_like_maybe and result[0] in ("just", "nothing"):
        return result
    return just(result)
| 24.573171 | 93 | 0.629777 |
4a7b9c4a8cadc7353c88a38e25dd9423d5d9fd02 | 1,224 | py | Python | Python (desafios)/desafio 009.py | EbersonDias/html-css | b05ec122dc7649656bcfce92dc92ded127bbb2cf | [
"MIT"
] | null | null | null | Python (desafios)/desafio 009.py | EbersonDias/html-css | b05ec122dc7649656bcfce92dc92ded127bbb2cf | [
"MIT"
] | null | null | null | Python (desafios)/desafio 009.py | EbersonDias/html-css | b05ec122dc7649656bcfce92dc92ded127bbb2cf | [
"MIT"
] | null | null | null | # Desafio 009
# Write a program that reads any whole number
# and shows its multiplication table on screen.
n = int(input('digite um numero. '))
# First approach: compute every product up front into its own variable.
r1 = n * 1
r2 = (n * 2)
r3 = (n * 3)
r4 = (n * 4)
r5 = (n * 5)
r6 = (n * 6)
r7 = (n * 7)
r8 = (n * 8)
r9 = (n * 9)
r10 = (n * 10)
print('A Tabuada de {} '.format(n))
print ('{} x 1 = {}'.format(n,r1))
print ('{} x 2 = {}'.format(n,r2))
print ('{} x 3 = {}'.format(n,r3))
print ('{} x 4 = {}'.format(n,r4))
print ('{} x 5 = {}'.format(n,r5))
print ('{} x 6 = {}'.format(n,r6))
print ('{} x 7 = {}'.format(n,r7))
print ('{} x 8 = {}'.format(n,r8))
print ('{} x 9 = {}'.format(n,r9))
print ('{} x 10 = {}'.format(n,r10))
# Another way to do it: compute each product inline and use '{:2}' to
# right-align the multiplier column.
n = int(input('Quanto a Tabuada de '))
print('A Tabuada de {} '.format(n))
print('-'*12)
print ('{} x {:2} = {}'.format(n, 1, n*1))
print ('{} x {:2} = {}'.format(n, 2, n*2))
print ('{} x {:2} = {}'.format(n, 3, n*3))
print ('{} x {:2} = {}'.format(n, 4, n*4))
print ('{} x {:2} = {}'.format(n, 5, n*5))
print ('{} x {:2} = {}'.format(n, 6, n*6))
print ('{} x {:2} = {}'.format(n, 7, n*7))
print ('{} x {:2} = {}'.format(n, 8, n*8))
print ('{} x {:2} = {}'.format(n, 9, n*9))
print ('{} x {:2} = {}'.format(n, 10, n*10))
print('-'*12) | 29.142857 | 54 | 0.476307 |
4a7be356f01ce20843ac2c23c55739f318ee8ab2 | 110 | py | Python | tools/__init__.py | supercatex/TelloEdu | 8f434dbc9866be3025cb119175c40f1d2d7fb5f3 | [
"MIT"
] | 1 | 2019-12-04T04:30:06.000Z | 2019-12-04T04:30:06.000Z | tools/__init__.py | supercatex/TelloEdu | 8f434dbc9866be3025cb119175c40f1d2d7fb5f3 | [
"MIT"
] | null | null | null | tools/__init__.py | supercatex/TelloEdu | 8f434dbc9866be3025cb119175c40f1d2d7fb5f3 | [
"MIT"
] | null | null | null | from tools.TelloEdu import TelloEdu
from tools.Controller import *
from tools.SocketObject import SocketClient | 36.666667 | 43 | 0.863636 |
4a7c28f2d0e401facd4b7a43c6ef059a3a83d500 | 1,193 | py | Python | neutron/agent/ovsdb/native/helpers.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,080 | 2015-01-04T08:35:00.000Z | 2022-03-27T09:15:52.000Z | neutron/agent/ovsdb/native/helpers.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 24 | 2015-02-21T01:48:28.000Z | 2021-11-26T02:38:56.000Z | neutron/agent/ovsdb/native/helpers.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,241 | 2015-01-02T10:47:10.000Z | 2022-03-27T09:42:23.000Z | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers
# Register the OVS agent and ML2 driver option groups so the cfg.CONF.OVS.*
# values referenced below are defined.
agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)
# Privileged enable_connection_uri pre-bound with the agent's standard OVSDB
# settings. The inactivity probe is multiplied by 1000 -- presumably a
# seconds-to-milliseconds conversion; confirm against the privileged
# helper's expected unit.
enable_connection_uri = functools.partial(
    priv_helpers.enable_connection_uri,
    log_fail_as_error=False, check_exit_code=False,
    timeout=cfg.CONF.OVS.ovsdb_timeout,
    inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
| 37.28125 | 78 | 0.776194 |
4a7c2f9da4b8409fc6aa35e9cf22595f9dcc870b | 302 | py | Python | conlo/serializer/json_serializer.py | kira607/config_loader | 024f33d48fee1635dfa9ed286f84bb96f22c134a | [
"MIT"
] | null | null | null | conlo/serializer/json_serializer.py | kira607/config_loader | 024f33d48fee1635dfa9ed286f84bb96f22c134a | [
"MIT"
] | null | null | null | conlo/serializer/json_serializer.py | kira607/config_loader | 024f33d48fee1635dfa9ed286f84bb96f22c134a | [
"MIT"
] | null | null | null | import json
from .base_serializer import BaseSerializer
| 21.571429 | 56 | 0.675497 |
4a7c6a7695f0b0415525906b878d73cc448533e5 | 264 | py | Python | console_weather.py | AlBan52/API_weather | 86779a2da622ad7a4537070e5c28a04235415161 | [
"MIT"
] | null | null | null | console_weather.py | AlBan52/API_weather | 86779a2da622ad7a4537070e5c28a04235415161 | [
"MIT"
] | null | null | null | console_weather.py | AlBan52/API_weather | 86779a2da622ad7a4537070e5c28a04235415161 | [
"MIT"
] | null | null | null | import requests
# NOTE(review): every location is an empty string -- the city names were
# apparently lost to an encoding/extraction problem. Restore real locations.
locations = ['', '', '']
# Query parameters for wttr.in; 'lang': 'ru' asks for Russian output.
# NOTE(review): 'mnTq' is not an obvious wttr.in option and also looks
# garbled -- verify the intended parameter name.
payload = {'mnTq': '', 'lang': 'ru'}
for location in locations:
    # One request per location; fail loudly on HTTP errors, then dump the
    # plain-text weather report.
    response = requests.get(f'http://wttr.in/{location}', params=payload)
    response.raise_for_status()
    print(response.text)
| 26.4 | 73 | 0.681818 |
4a7c6e1277408f69b722e24dda7d218cc70dda0f | 1,192 | py | Python | migrations/versions/576712576c48_added_model_for_photo_comments.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | migrations/versions/576712576c48_added_model_for_photo_comments.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | 93 | 2017-09-01T22:24:10.000Z | 2021-12-22T14:07:06.000Z | migrations/versions/576712576c48_added_model_for_photo_comments.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | """Added model for photo comments
Revision ID: 576712576c48
Revises: 75bb906df167
Create Date: 2018-03-30 02:06:22.877079
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '576712576c48'
down_revision = '75bb906df167'
branch_labels = None
depends_on = None
| 30.564103 | 81 | 0.672819 |
4a7c8678af28d04fe1e6fb14eef66f905c9017b0 | 164 | py | Python | __init__.py | m3sserschmitt/basic-http | bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82 | [
"MIT"
] | null | null | null | __init__.py | m3sserschmitt/basic-http | bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82 | [
"MIT"
] | null | null | null | __init__.py | m3sserschmitt/basic-http | bc09a888b44a11154e2cc9bfaf46fc9fd3a79b82 | [
"MIT"
] | null | null | null | import basic_http.session
# Library version tag, stored on the session module.
basic_http.session.LIB_VERSION = 'v0.0.4-beta'
# Default agent identifier ('basic-http version <ver>'); presumably sent as
# the HTTP User-Agent header -- confirm in the session module.
basic_http.session.DEFAULT_AGENT = 'basic-http version ' + basic_http.session.LIB_VERSION
| 32.8 | 89 | 0.810976 |
4a7f99985562db134bffd977ed750d635522a7a2 | 12,364 | py | Python | usaspending_api/etl/helpers.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | usaspending_api/etl/helpers.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | 3 | 2020-02-12T01:16:46.000Z | 2021-06-10T20:36:57.000Z | usaspending_api/etl/helpers.py | truthiswill/usaspending-api | bd7d915442e2ec94cc830c480ceeffd4479be6c0 | [
"CC0-1.0"
] | null | null | null | from datetime import datetime
import warnings
import logging
from django.db.models import Q, Case, Value, When
from django.core.cache import caches, CacheKeyWarning
import django.apps
from usaspending_api.references.models import Agency, Location, RefCountryCode
from usaspending_api.references.helpers import canonicalize_location_dict
from usaspending_api.submissions.models import SubmissionAttributes
from usaspending_api.data.daims_maps import daims_maps
warnings.simplefilter("ignore", CacheKeyWarning)
def cleanse_values(row):
    """
    Remove textual quirks from CSV values: surrounding whitespace is
    stripped, and the literal string 'null' (any casing) becomes None.
    """
    cleaned = {}
    for key, value in row.items():
        stripped = value.strip()
        cleaned[key] = None if stripped.lower() == 'null' else stripped
    return cleaned
def get_subtier_agency_dict():
    """Returns a dictionary with key = subtier agency code and value = agency id."""
    # subtier_code has no unique constraint; ordering by update_date means
    # that for duplicate codes the most recently updated row overwrites the
    # earlier ones during dict construction.
    agencies = Agency.objects.all().values(
        'id',
        'subtier_agency__subtier_code').order_by('subtier_agency__update_date')
    return {
        record['subtier_agency__subtier_code']: record['id']
        for record in agencies
    }
location_cache = caches['locations']
def up2colon(input_string):
    """Return the part of a string before the first ':', stripped of
    surrounding whitespace; '' for empty or None input."""
    if not input_string:
        return ''
    head, _sep, _rest = input_string.partition(':')
    return head.strip()
def get_fiscal_quarter(fiscal_reporting_period):
    """
    Return the fiscal quarter (1-4) for a reporting period, or None when the
    period falls outside 1-12.
    Note: the reporting period being passed should already be in "federal
    fiscal format", where period 1 = Oct. and period 12 = Sept.
    """
    period_groups = ((1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12))
    for quarter, periods in enumerate(period_groups, start=1):
        if fiscal_reporting_period in periods:
            return quarter
    return None
def get_previous_submission(cgac_code, fiscal_year, fiscal_period):
    """
    For the specified CGAC (e.g., department/top-tier agency) and specified
    fiscal year and quarter, return the previous quarterly submission within
    the same fiscal year (the one with the highest reporting period that is
    still below fiscal_period), or None when there is none.
    """
    earlier_quarters = SubmissionAttributes.objects.filter(
        cgac_code=cgac_code,
        reporting_fiscal_year=fiscal_year,
        reporting_fiscal_period__lt=fiscal_period,
        quarter_format_flag=True,
    ).order_by('-reporting_fiscal_period')
    return earlier_quarters.first()
def update_model_description_fields():
    """
    This method searches through every model Django has registered, checks if it
    belongs to a list of apps we should update, and updates all fields with
    '_description' at the end with their relevant information.

    Dictionaries for DAIMS definitions should be stored in:
    usaspending_api/data/daims_maps.py

    Each map should be <field_name>_map for discoverability.

    If there are conflicting maps (i.e., two models use type_description, but
    different enumerations) prepend the map name with the model name and a dot.

    For examples of these situations, see the documentation in daims_maps.py
    """
    logger = logging.getLogger('console')

    # This is a list of apps whose models will be checked for description fields
    updatable_apps = [
        "accounts",
        "awards",
        "common",
        "financial_activities",
        "references",
        "submissions",
    ]

    # This iterates over every model that Django has registered
    for model in django.apps.apps.get_models():
        # This checks the app_label of the model, and thus we can skip it if it
        # is not in one of our updatable_apps. Thus, we'll skip any django
        # admin apps, like auth, corsheaders, etc.
        if model._meta.app_label not in updatable_apps:
            continue
        if model.__name__[:10] == "Historical":
            continue

        model_fields = [f.name for f in model._meta.get_fields()]

        # This supports multi-case DAIMS
        # We must filter on the model level rather than add them to the when
        # clauses, because if there is a FK in the when clause Django is not
        # guaranteed to join on that table properly.
        #
        # This is an array of tuples of the following format
        # (Q object of filter, field_names -> case objects map for this filter)
        #
        # It is initialized with a blank filter and empty list, which is where
        # default updates are stored
        model_filtered_update_case_map = [(Q(), {})]

        # Fields whose final underscore chunk is a prefix of "description"
        # (this also matches truncated spellings such as *_desc).
        desc_fields = [field for field in model_fields if field.split('_')[-1] ==
                       "description"[:len(field.split('_')[-1])]]
        non_desc_fields = [field for field in model_fields if field not in desc_fields]

        # Map each description field to the source field it describes; a
        # prefix match is used when the exact name is absent.
        desc_fields_mapping = {}
        for desc_field in desc_fields:
            actual_field_short = "_".join(desc_field.split('_')[:-1])
            actual_field = None
            for field in non_desc_fields:
                if actual_field_short == field:
                    actual_field = field
                elif actual_field_short == field[:len(actual_field_short)]:
                    actual_field = field
            desc_fields_mapping[desc_field] = actual_field

        # Loop through each of the models fields to construct a case for each
        # applicable field
        for field in model_fields:
            # We're looking for field names ending in _description
            split_name = field.split("_")
            # If the last element in our split name isn't description, skip it
            if len(split_name) == 1 or split_name[-1] != "description"[:len(split_name[-1])]:
                continue

            source_field = "_".join(split_name[:-1])
            destination_field = field
            # Prefer the source field resolved through the mapping built above
            source_field = desc_fields_mapping[field] if field in desc_fields_mapping else source_field

            # This is the map name, prefixed by model name for when there are
            # non-unique description fields
            model_map_name = "{}.{}_map".format(model.__name__, source_field)
            map_name = "{}_map".format(source_field)

            # This stores a direct reference to the enumeration mapping
            code_map = None

            # Validate we have the source field
            if source_field not in model_fields:
                logger.debug("Tried to update '{}' on model '{}', but source field '{}' does not exist.".
                             format(destination_field, model.__name__, source_field))
                continue

            # Validate we have a map
            # Prefer model_map_name over map_name
            if model_map_name in daims_maps:
                code_map = daims_maps[model_map_name]
            elif map_name in daims_maps:
                code_map = daims_maps[map_name]
            else:
                # Logger.warn is a deprecated alias; use warning()
                logger.warning("Tried to update '{}' on model '{}', but neither map '{}' nor '{}' exists.".
                               format(destination_field, model.__name__, model_map_name, map_name))
                continue

            # Cases start from 1
            case_number = 1
            case_name = "case_1"
            case_map = "case_1_map"
            while case_name in code_map:
                case_object = create_case(code_map[case_map], source_field)
                # Construct a Q filter for this case
                case_filter = Q(**code_map[case_name])

                # See if we already have a tuple for this filter
                case_tuple = [x for x in model_filtered_update_case_map if x[0] == case_filter]
                if len(case_tuple) == 0:
                    # We don't, so create the tuple
                    model_filtered_update_case_map.append((case_filter, {field: case_object}))
                else:
                    # We do, so just add our case object to that dictionary
                    case_tuple[0][1][field] = case_object

                # Check for the next case
                case_number += 1
                case_name = "case_{}".format(case_number)
                case_map = "case_{}_map".format(case_number)

            # If our case number is still 1, then we didn't have any cases.
            # Therefore, we perform the default
            if case_number == 1:
                case_object = create_case(code_map, source_field)
                # Grab the first tuple, which has no filters
                case_tuple = model_filtered_update_case_map[0]
                # Add it to our dictionary
                case_tuple[1][field] = case_object

        # One bulk UPDATE per distinct filter for this model
        for filter_tuple in model_filtered_update_case_map:
            # For each filter tuple, check if the dictionary has any entries
            if len(filter_tuple[1].keys()) > 0:
                print("Updating model {}\n FILTERS:\n {}\n FIELDS:\n {}".
                      format(model.__name__, str(filter_tuple[0]), "\n ".join(filter_tuple[1].keys())))
                try:
                    model.objects.filter(filter_tuple[0]).update(**filter_tuple[1])
                except django.db.utils.ProgrammingError as e:
                    logger.warning(str(e))
                    logger.warning("(OK if invoked from a migration, when the table may not yet have been created)")
# Utility method for update_model_description_fields, creates the Case object
| 38.397516 | 119 | 0.63604 |
4a7ff589828eca63a17e67bce0eb8c34992e953a | 158 | py | Python | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/extensions.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | from flask_marshmallow import Marshmallow{% if cookiecutter.use_celery == 'yes'%}
from celery import Celery
celery = Celery(){% endif %}
ma = Marshmallow()
| 22.571429 | 81 | 0.740506 |
4a80119456047b966a3757d7fd0f105dc0f5c4f6 | 9,193 | py | Python | code/mapplot.py | young-astronomer/vlpy | 7fd434d307a7cc3593f84a7c6c2f4a4a86865afe | [
"Apache-2.0"
] | null | null | null | code/mapplot.py | young-astronomer/vlpy | 7fd434d307a7cc3593f84a7c6c2f4a4a86865afe | [
"Apache-2.0"
] | null | null | null | code/mapplot.py | young-astronomer/vlpy | 7fd434d307a7cc3593f84a7c6c2f4a4a86865afe | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is use to plot polarization map from vlbi fits image.
You should specify the input fits images by -i or --infile,
output file by -o or --output,
contour levs by -l or --levs
contour base by -c or --cmul
polarization parameters by -p or --pol: "icut pcut inc scale"
plot window by -w or --win
restore beam position by -b or --bpos
figsize by -f or --figsize
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set envioment parameters
Add the following line to ~/.bashrc
export PATH=$PATH:/home/usename/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py i <input file list> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: Li, Xiaofeng
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: lixf@shao.ac.cn; 1650152531@qq.com
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
if __name__ == '__main__' :
main(sys.argv[1:]) | 30.042484 | 101 | 0.617753 |
4a81890c9e9eec4855a38a91238cf619244d9278 | 2,174 | py | Python | umbrella/api/v1/router.py | pizhi/umbrella | 95027e6e11a6c8df2ab5f7c202b0c1d2183f839a | [
"Apache-2.0"
] | 1 | 2018-01-13T11:45:24.000Z | 2018-01-13T11:45:24.000Z | umbrella/api/v1/router.py | pizhi/umbrella | 95027e6e11a6c8df2ab5f7c202b0c1d2183f839a | [
"Apache-2.0"
] | null | null | null | umbrella/api/v1/router.py | pizhi/umbrella | 95027e6e11a6c8df2ab5f7c202b0c1d2183f839a | [
"Apache-2.0"
] | 2 | 2018-01-01T11:39:49.000Z | 2018-08-07T07:16:45.000Z | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from umbrella.api.v1 import api
from umbrella.common import wsgi
| 36.233333 | 78 | 0.559798 |
4a8279873b5f73ab9eb14c009ec624c039c590a5 | 943 | py | Python | exemples/test_thomson_simu.py | butala/TomograPy | a1da41f1e0b7406a1b770e56428789c54175de20 | [
"CECILL-B"
] | 7 | 2016-07-05T08:31:42.000Z | 2022-03-31T20:24:13.000Z | exemples/test_thomson_simu.py | esoubrie/TomograPy | a1da41f1e0b7406a1b770e56428789c54175de20 | [
"CECILL-B"
] | null | null | null | exemples/test_thomson_simu.py | esoubrie/TomograPy | a1da41f1e0b7406a1b770e56428789c54175de20 | [
"CECILL-B"
] | 4 | 2018-08-14T01:54:21.000Z | 2022-03-10T19:44:43.000Z | #!/usr/bin/env python
import time
import numpy as np
import tomograpy
import lo
# object
obj = tomograpy.centered_cubic_map(10, 64)
obj[:] = tomograpy.phantom.shepp_logan(obj.shape)
# data
radius = 200
a = tomograpy.fov(obj, radius)
data = tomograpy.centered_stack(a, 128, n_images=60, radius=radius, max_lon=np.pi)
# model
kwargs = {"pb":"pb", "obj_rmin":1.5, "data_rmin":1.5}
P, D, obj_mask, data_mask = tomograpy.models.thomson(data, obj, u=.5, **kwargs)
# projection
t = time.time()
data[:] = (P * obj.ravel()).reshape(data.shape)
print("projection time : " + str(time.time() - t))
# data
# backprojection
t = time.time()
x0 = P.T * data.ravel()
bpj = x0.reshape(obj.shape)
print("backprojection time : " + str(time.time() - t))
# inversion using scipy.sparse.linalg
t = time.time()
sol = lo.acg(P, data.ravel(), D, 1e-3 * np.ones(3), maxiter=100, tol=1e-8)
sol = sol.reshape(obj.shape)
print("inversion time : " + str(time.time() - t))
| 30.419355 | 82 | 0.680806 |
4a82ccd998802091de5e9ed946344d30c5ebeba5 | 8,124 | py | Python | geopy/geocoders/google.py | ulope/geopy | 605d0d84137a93949ad03820fa31dc2dab77f089 | [
"MIT"
] | 1 | 2021-03-12T15:31:30.000Z | 2021-03-12T15:31:30.000Z | geopy/geocoders/google.py | ulope/geopy | 605d0d84137a93949ad03820fa31dc2dab77f089 | [
"MIT"
] | null | null | null | geopy/geocoders/google.py | ulope/geopy | 605d0d84137a93949ad03820fa31dc2dab77f089 | [
"MIT"
] | null | null | null | import logging
from urllib import urlencode
from urllib2 import urlopen
import simplejson
import xml
from xml.parsers.expat import ExpatError
from geopy.geocoders.base import Geocoder
from geopy import Point, Location, util
| 41.238579 | 139 | 0.601674 |
4a845cfff802e634071ade849b849c82adc47ef1 | 395 | py | Python | interactive_grabcut/repo/drag2draw.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | interactive_grabcut/repo/drag2draw.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | interactive_grabcut/repo/drag2draw.py | hiankun/py_sandbox | 6623edd0c8ab17641e1ce09fba7da34c4865fc4f | [
"MIT"
] | null | null | null | # source: https://www.youtube.com/watch?v=U0sVp1xLiyo
from tkinter import *
master = Tk()
c = Canvas(master, width=600, height=400, bg='white')
c.pack(expand=True, fill=BOTH)
c.bind('<B1-Motion>', paint)
master.mainloop()
| 21.944444 | 55 | 0.648101 |
4a85a5edb74a35f6879d8683f009ca6b7f10f18c | 194 | py | Python | migrations/20220114_03_Heqaz-insert-default-serverinfo.py | lin483/Funny-Nations | 2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6 | [
"MIT"
] | 126 | 2022-01-15T02:29:07.000Z | 2022-03-30T09:57:40.000Z | migrations/20220114_03_Heqaz-insert-default-serverinfo.py | lin483/Funny-Nations | 2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6 | [
"MIT"
] | 18 | 2022-01-11T22:24:35.000Z | 2022-03-16T00:13:01.000Z | migrations/20220114_03_Heqaz-insert-default-serverinfo.py | lin483/Funny-Nations | 2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6 | [
"MIT"
] | 25 | 2022-01-22T15:06:27.000Z | 2022-03-01T04:34:19.000Z | """
insert default serverInfo
"""
from yoyo import step
__depends__ = {'20220114_02_lHBKM-new-table-serverinfo'}
steps = [
step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);")
]
| 16.166667 | 65 | 0.695876 |
4a861f0810192c03917c1a4cb2de99fa5681f49e | 14,913 | py | Python | neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | slawqo/python-neutronclient | ee08644c5f2424a40c70010dcf0fa2ad84809bfc | [
"Apache-2.0"
] | 120 | 2015-01-07T00:38:58.000Z | 2021-12-26T13:05:53.000Z | neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | slawqo/python-neutronclient | ee08644c5f2424a40c70010dcf0fa2ad84809bfc | [
"Apache-2.0"
] | 1 | 2021-08-11T18:42:30.000Z | 2021-08-11T22:25:21.000Z | neutronclient/osc/v2/vpnaas/ipsec_site_connection.py | slawqo/python-neutronclient | ee08644c5f2424a40c70010dcf0fa2ad84809bfc | [
"Apache-2.0"
] | 153 | 2015-01-05T16:50:50.000Z | 2021-09-13T12:01:23.000Z | # Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.cli import format_columns
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_formatters = {
'peer_cidrs': format_columns.ListColumn
}
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('peer_address', 'Peer Address', column_util.LIST_BOTH),
('auth_mode', 'Authentication Algorithm', column_util.LIST_BOTH),
('status', 'Status', column_util.LIST_BOTH),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('peer_cidrs', 'Peer CIDRs', column_util.LIST_LONG_ONLY),
('vpnservice_id', 'VPN Service', column_util.LIST_LONG_ONLY),
('ipsecpolicy_id', 'IPSec Policy', column_util.LIST_LONG_ONLY),
('ikepolicy_id', 'IKE Policy', column_util.LIST_LONG_ONLY),
('mtu', 'MTU', column_util.LIST_LONG_ONLY),
('initiator', 'Initiator', column_util.LIST_LONG_ONLY),
('admin_state_up', 'State', column_util.LIST_LONG_ONLY),
('description', 'Description', column_util.LIST_LONG_ONLY),
('psk', 'Pre-shared Key', column_util.LIST_LONG_ONLY),
('route_mode', 'Route Mode', column_util.LIST_LONG_ONLY),
('local_id', 'Local ID', column_util.LIST_LONG_ONLY),
('peer_id', 'Peer ID', column_util.LIST_LONG_ONLY),
('local_ep_group_id', 'Local Endpoint Group ID',
column_util.LIST_LONG_ONLY),
('peer_ep_group_id', 'Peer Endpoint Group ID', column_util.LIST_LONG_ONLY),
)
| 39.768 | 79 | 0.632468 |
4a866ef89141cc4c966674193758ad5f52e83702 | 551 | py | Python | Arknights/flags.py | AlaricGilbert/ArknightsAutoHelper | 9e2db0c4e0d1be30856df731ab192da396121d94 | [
"MIT"
] | null | null | null | Arknights/flags.py | AlaricGilbert/ArknightsAutoHelper | 9e2db0c4e0d1be30856df731ab192da396121d94 | [
"MIT"
] | 1 | 2019-09-10T13:58:24.000Z | 2019-09-10T13:58:24.000Z | Arknights/flags.py | AlaricGilbert/ArknightsAutoHelper | 9e2db0c4e0d1be30856df731ab192da396121d94 | [
"MIT"
] | null | null | null | TINY_WAIT = 1
# Wait durations between automation steps.
# NOTE(review): units are presumably seconds — confirm against call sites.
SMALL_WAIT = 3
MEDIUM_WAIT = 5
BIG_WAIT = 10
SECURITY_WAIT = 15
# Battle-detection timing knobs (same presumed unit as the waits above).
BATTLE_FINISH_DETECT = 12
BATTLE_NONE_DETECT_TIME = 90
BATTLE_END_SIGNAL_MAX_EXECUTE_TIME = 15
#
# (x, y) offsets applied when tapping specific UI targets.
# NOTE(review): exact semantics (pixels vs. scaled units) not visible here.
FLAGS_START_BATTLE_BIAS = (50, 25)
FLAGS_ENSURE_TEAM_INFO_BIAS = (25, 50)
#
# (x, y) click-jitter magnitudes, ordered from tiny to huge.
FLAGS_CLICK_BIAS_TINY = (3, 3)
FLAGS_CLICK_BIAS_SMALL = (5, 5)
FLAGS_CLICK_BIAS_MEDIUM = (10, 10)
FLAGS_CLICK_BIAS_BIG = (15, 15)
FLAGS_CLICK_BIAS_HUGE = (30, 30)
#
#
# Swipe bias pairs: ((start x, start y), (end x, end y)) offsets — TODO confirm.
FLAGS_SWIPE_BIAS_TO_LEFT = ((1, 1), (1, 1))
FLAGS_SWIPE_BIAS_TO_RIGHT = ((1, 1), (1, 1))
| 21.192308 | 44 | 0.751361 |
4a868fe7e98135f318566006794d9b95f620108a | 3,229 | py | Python | elasticsearch/client/shutdown.py | Conky5/elasticsearch-py | 93543a7fee51c0da6e898c9155bdb5f965c5bb53 | [
"Apache-2.0"
] | 4 | 2021-05-31T19:34:27.000Z | 2021-06-01T18:14:31.000Z | elasticsearch/client/shutdown.py | Conky5/elasticsearch-py | 93543a7fee51c0da6e898c9155bdb5f965c5bb53 | [
"Apache-2.0"
] | 22 | 2021-05-15T00:01:49.000Z | 2022-02-26T00:08:00.000Z | elasticsearch/client/shutdown.py | Conky5/elasticsearch-py | 93543a7fee51c0da6e898c9155bdb5f965c5bb53 | [
"Apache-2.0"
] | null | null | null | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params
| 33.989474 | 85 | 0.637039 |
4a893fcf944a3942d0a9e7e6cc93c141d9894e31 | 13,620 | py | Python | sushichef.py | RechercheTech/sushi-chef-arvind-gupta-toys | 2b381d8942c16ed16b4a44d8fc020fe0a81a18c0 | [
"MIT"
] | 1 | 2020-05-10T06:16:48.000Z | 2020-05-10T06:16:48.000Z | sushichef.py | RechercheTech/sushi-chef-arvind-gupta-toys | 2b381d8942c16ed16b4a44d8fc020fe0a81a18c0 | [
"MIT"
] | 5 | 2019-10-04T11:35:21.000Z | 2020-05-25T14:19:41.000Z | sushichef.py | RechercheTech/sushi-chef-arvind-gupta-toys | 2b381d8942c16ed16b4a44d8fc020fe0a81a18c0 | [
"MIT"
] | 3 | 2019-09-24T00:15:00.000Z | 2020-02-06T16:25:36.000Z | #!/usr/bin/env python
import os
import requests
import re
import shutil
from arvind import ArvindVideo, ArvindLanguage, YOUTUBE_CACHE_DIR
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from ricecooker.chefs import SushiChef
from ricecooker.classes.files import YouTubeVideoFile
from ricecooker.classes.licenses import get_license
from ricecooker.classes.nodes import VideoNode, TopicNode
ARVIND = "Arvind Gupta Toys"
ARVIND_URL = "http://www.arvindguptatoys.com/films.html"
ROOT_DIR_PATH = os.getcwd()
DOWNLOADS_PATH = os.path.join(ROOT_DIR_PATH, "downloads")
DOWNLOADS_VIDEOS_PATH = os.path.join(DOWNLOADS_PATH, "videos/")
SKIP_VIDEOS_PATH = os.path.join(ROOT_DIR_PATH, "skip_videos.txt")
# These are the languages that has no sub topics on its videos.
SINGLE_TOPIC_LANGUAGES = [
"bhojpuri; bajpuri; bhojapuri", # actual lang_obj.name in le-utils
"bhojpuri", # future-proofing for upcoming lang_obj.name changes
"nepali",
"malayalam",
"telugu",
"bengali",
"odiya",
"punjabi",
"marwari; marwadi", # actual lang_obj.name in le-utils
"marwari", # future-proofing for upcoming lang_obj.name changes
"assamese",
"urdu",
"spanish",
"chinese",
"indonesian",
"sci_edu",
"science/educational",
]
# List of multiple languages on its topics
MULTI_LANGUAGE_TOPIC = ["russian", "french",]
# This are the estimate total count of arvind gupta toys language contents
TOTAL_ARVIND_LANG = 23
SINGLE_TOPIC = "single"
STANDARD_TOPIC = "standard"
MULTI_LANGUAGE = "multi"
YOUTUBE_DOMAINS = ["youtu.be", "youtube.com"]
DEBUG_MODE = True # Print extra debug info durig the chef run (disable in prod)
def download_video_topics(data, topic, topic_node, lang_obj):
"""
Scrape, collect, and download the videos and their thumbnails.

NOTE(review): the indentation of this function was lost upstream; tokens
are preserved as-is. Per video entry under data[topic]: build an
ArvindVideo, fetch its info, and either attach it to topic_node or record
it as skipped.
"""
# Track source ids already attached so duplicates are not added twice.
video_source_ids = []
for vinfo in data[topic]:
try:
video = ArvindVideo(
url=vinfo['video_url'],
title=vinfo['video_title'],
language=lang_obj.code)
# download_info() appears to fetch video metadata; falsy => skip.
if video.download_info():
# Only include videos whose license is accepted (license_common).
if video.license_common:
video_source_id = 'arvind-video-{0}'.format(video.uid)
if video_source_id not in video_source_ids:
include_video_topic(topic_node, video, lang_obj)
video_source_ids.append(video_source_id)
else:
print('Skipping duplicate video: ' + str(vinfo['video_url']))
else:
# Non-acceptable license: record for the skip report.
save_skip_videos(video, topic, lang_obj)
else:
# Metadata fetch failed: record for the skip report.
save_skip_videos(video, topic, lang_obj)
except Exception as e:
# Broad catch keeps one bad entry from aborting the whole topic.
print('Error downloading this video:', e)
def create_language_data(lang_data, lang_obj):
"""
Process the list of elements in `lang_data` to extract video links.

NOTE(review): the indentation of this function was lost upstream; tokens
are preserved as-is. Returns a dict mapping topic name -> list of
{'video_url', 'video_title'} dicts. Topic boundaries are detected either
by headings containing ':' or, for multi-language pages, by the first
word of each video title.
"""
topic_contents = {}
initial_topics = []
prev_topic = ""
# first_count acts as a "no topic seen yet" flag (1 = not yet).
first_count = 1
total_loop = len(lang_data)
lang_name = lang_obj.name.lower()
for item in lang_data:
total_loop -= 1
if isinstance(item, NavigableString) or item.name == 'br':
continue # skip whitespace and <br/> tags
try:
title = item.text.rstrip().strip()
video_link = ""
try:
video_a_tag = item.find('a')
if video_a_tag:
video_link = video_a_tag.get("href") # for videos
else:
video_link = "" # for headings
topic_details = {}
# Only collect links hosted on YouTube domains.
if any(ytd in video_link for ytd in YOUTUBE_DOMAINS):
if lang_name in MULTI_LANGUAGE_TOPIC:
# Multi-language pages: topic = first word of the title.
current_lang = title.split()[0].lower()
if first_count == 1:
first_count = 0
prev_topic = current_lang
topic_details['video_url'] = video_link
topic_details['video_title'] = title
if lang_name in MULTI_LANGUAGE_TOPIC:
# Topic changed: flush accumulated videos under the old topic.
if prev_topic != current_lang:
topic_contents[prev_topic] = initial_topics
initial_topics = []
prev_topic = current_lang
initial_topics.append(topic_details)
except Exception as e:
print('>> passing on', e)
pass
# Heading handling: a ':' marks a topic header on standard pages.
if first_count == 1:
if ":" in title:
first_count = 0
prev_topic = title.replace(":", "").strip()
if video_link == "":
if ":" in title:
# New heading: store the previous topic's videos, start fresh.
topic_contents[prev_topic] = initial_topics
prev_topic = title.replace(":", "").strip()
initial_topics = []
except Exception as e:
print('>>> passing on', e)
pass
# This wasn't working (last topic in each standard language was missing) ...
# if total_loop == 0:
# topic_contents[prev_topic] = initial_topics
# ... so changed to this:
topic_contents[prev_topic] = initial_topics
return topic_contents
if __name__ == "__main__":
"""
Run this script on the command line using:
python sushichef.py -v --reset --token=YOURTOKENHERE9139139f3a23232
"""
chef = ArvindChef()
chef.main()
| 37.01087 | 113 | 0.606608 |
4a89792f0a691e63a2efbaa3d996bdb8f827265c | 1,170 | py | Python | api/views/domain.py | lndba/apasa_backend | e0bb96e22a22f6e2a5a2826f225388113473e7e2 | [
"Apache-2.0"
] | 1 | 2019-08-06T07:31:40.000Z | 2019-08-06T07:31:40.000Z | api/views/domain.py | lndba/apasa_backend | e0bb96e22a22f6e2a5a2826f225388113473e7e2 | [
"Apache-2.0"
] | null | null | null | api/views/domain.py | lndba/apasa_backend | e0bb96e22a22f6e2a5a2826f225388113473e7e2 | [
"Apache-2.0"
] | null | null | null | from rest_framework.viewsets import ModelViewSet,GenericViewSet
from rest_framework.response import Response
from api.serializers.domain import *
from api.pagination.page import MyPageNumberPagination
from api.models import *
| 35.454545 | 82 | 0.737607 |
4a89890f028ab800ae7dcb96dcff01c0b7e8d98a | 1,184 | py | Python | 90-subsets-ii.py | yuenliou/leetcode | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | [
"MIT"
] | null | null | null | 90-subsets-ii.py | yuenliou/leetcode | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | [
"MIT"
] | null | null | null | 90-subsets-ii.py | yuenliou/leetcode | e8a1c6cae6547cbcb6e8494be6df685f3e7c837c | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from typing import List
'''90. II
nums
:
: [1,2,2]
:
[
[2],
[1],
[1,2,2],
[2,2],
[1,2],
[]
]
LeetCode
https://leetcode-cn.com/problems/subsets-ii
'''
if __name__ == '__main__':
main()
| 19.733333 | 112 | 0.516047 |
4a8a19db97a47f9f1fc1395728868b9d716366fe | 450 | py | Python | tools/output_tool.py | climberwb/bert-pli | 0e6eda7a23b7502c86eab4c0d889fad1bbb57155 | [
"MIT"
] | 5 | 2020-12-24T01:46:40.000Z | 2022-03-18T19:15:10.000Z | tools/output_tool.py | climberwb/bert-pli | 0e6eda7a23b7502c86eab4c0d889fad1bbb57155 | [
"MIT"
] | 1 | 2021-04-05T14:27:24.000Z | 2021-04-05T14:27:24.000Z | tools/output_tool.py | climberwb/bert-pli | 0e6eda7a23b7502c86eab4c0d889fad1bbb57155 | [
"MIT"
] | 4 | 2020-12-28T09:20:13.000Z | 2021-12-10T13:33:21.000Z | import json
from .accuracy_tool import gen_micro_macro_result
| 25 | 77 | 0.653333 |
4a8ae0336fc8e8f4551cb0d621a28672bac709c0 | 27,100 | py | Python | python/drydock_provisioner/ingester/plugins/deckhand.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 14 | 2018-05-19T11:58:22.000Z | 2019-05-10T12:31:36.000Z | python/drydock_provisioner/ingester/plugins/deckhand.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 10 | 2019-11-12T17:21:16.000Z | 2021-11-10T18:16:06.000Z | python/drydock_provisioner/ingester/plugins/deckhand.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 11 | 2018-06-05T16:21:18.000Z | 2019-04-03T11:44:34.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import jsonschema
import os
import pkg_resources
import copy
import hashlib
import drydock_provisioner.objects.fields as hd_fields
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
cache_opts = {
'cache.type': 'memory',
'expire': 1800,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
| 37.225275 | 108 | 0.573579 |
4a8ca4ac28e4f99e7596ac67b54b694b5e38191d | 5,517 | py | Python | porting_tools/package_xml_porter.py | nreplogle/ros2-migration-tools | 8e422731dea52df19da6de780319a17516f60f7c | [
"Apache-2.0"
] | 92 | 2018-10-17T22:18:01.000Z | 2022-03-19T22:03:16.000Z | porting_tools/package_xml_porter.py | nreplogle/ros2-migration-tools | 8e422731dea52df19da6de780319a17516f60f7c | [
"Apache-2.0"
] | 12 | 2019-02-21T22:29:15.000Z | 2021-06-28T22:33:31.000Z | porting_tools/package_xml_porter.py | nreplogle/ros2-migration-tools | 8e422731dea52df19da6de780319a17516f60f7c | [
"Apache-2.0"
] | 19 | 2018-10-18T11:47:07.000Z | 2022-02-04T18:41:03.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
""" Contains a class and method for porting a package.xml file from catkin to ament"""
import xml.etree.ElementTree as etree
from .constants import CatkinToAmentMigration, PACKAGE_XML_ELEMENT_ORDER
from .utils import get_functions_with
def new_element(tag, text="", tail="\n", attrib=None):
    """Build an ``etree.Element`` with its text and tail set in one call.

    A falsy *attrib* (including the default ``None``) is treated as an
    element with no attributes.
    """
    elem = etree.Element(tag, attrib=attrib or {})
    elem.text = text
    elem.tail = tail
    return elem
def tag_order(tag):
    """Return the sort index of *tag* within PACKAGE_XML_ELEMENT_ORDER.

    Tags not present in the canonical ordering sort last (+infinity).
    """
    try:
        return PACKAGE_XML_ELEMENT_ORDER.index(tag)
    except ValueError:
        return float("inf")
if __name__ == '__main__':
tree = etree.parse("package.xml")
PackageXMLPorter.port(tree=tree)
tree.write("updated_package.xml", encoding="utf-8", xml_declaration=True)
| 39.12766 | 104 | 0.649085 |
4a8eaddf7ae51bc116bee8d180b8c5c1f2cfecaf | 4,739 | py | Python | endpoints/api/permission_models_interface.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | endpoints/api/permission_models_interface.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | endpoints/api/permission_models_interface.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from six import add_metaclass
| 21.15625 | 99 | 0.556447 |
4a8f1c2b21e9f7321bc8056b973b7bad4e6c12de | 754 | py | Python | configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py | sht47/mmtracking | 5a25e418e9c598d1b576bce8702f5e156cbbefe7 | [
"Apache-2.0"
] | 12 | 2021-09-05T20:47:16.000Z | 2022-03-23T07:00:35.000Z | configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py | hellock/mmtracking | a22a36b2055d80cf4a7a5ef3913849abb56defcb | [
"Apache-2.0"
] | 2 | 2021-09-06T13:20:09.000Z | 2022-01-13T05:36:14.000Z | configs/mot/tracktor/tracktor_faster-rcnn_r50_fpn_4e_mot17-public.py | hellock/mmtracking | a22a36b2055d80cf4a7a5ef3913849abb56defcb | [
"Apache-2.0"
] | 1 | 2021-07-15T00:26:35.000Z | 2021-07-15T00:26:35.000Z | _base_ = ['./tracktor_faster-rcnn_r50_fpn_4e_mot17-public-half.py']
model = dict(
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-ffa52ae7.pth' # noqa: E501
))
data_root = 'data/MOT17/'
test_set = 'test'
data = dict(
train=dict(ann_file=data_root + 'annotations/train_cocoformat.json'),
val=dict(
ann_file=data_root + 'annotations/train_cocoformat.json',
detection_file=data_root + 'annotations/train_detections.pkl'),
test=dict(
ann_file=data_root + f'annotations/{test_set}_cocoformat.json',
img_prefix=data_root + test_set,
detection_file=data_root + f'annotations/{test_set}_detections.pkl'))
| 41.888889 | 123 | 0.708223 |
4a8f5a90f2c6e24db504d3e023a88b1bddaccca9 | 2,277 | py | Python | browserstack/first_sample_build.py | Shaimyst/scrive_test | 38e3ea0192885d1776d24afdbea110d73adc4e8b | [
"MIT"
] | null | null | null | browserstack/first_sample_build.py | Shaimyst/scrive_test | 38e3ea0192885d1776d24afdbea110d73adc4e8b | [
"MIT"
] | null | null | null | browserstack/first_sample_build.py | Shaimyst/scrive_test | 38e3ea0192885d1776d24afdbea110d73adc4e8b | [
"MIT"
] | null | null | null | from threading import Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# This array 'caps' defines the capabilities browser, device and OS combinations where the test will run
caps=[{
'os_version': '10',
'os': 'Windows',
'browser': 'ie',
'browser_version': '11.0',
'name': 'Parallel Test1', # test name
'build': 'browserstack-build-1' # Your tests will be organized within this build
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'chrome',
'browser_version': '95.0',
'name': 'Parallel Test2',
'build': 'browserstack-build-1'
},
{
'os_version': 'Big Sur',
'os': 'OS X',
'browser': 'firefox',
'browser_version': '93.0',
'name': 'Parallel Test3',
'build': 'browserstack-build-1'
}]
#run_session function searches for 'BrowserStack' on google.com
#The Thread function takes run_session function and each set of capability from the caps array as an argument to run each session parallelly
for cap in caps:
Thread(target=run_session, args=(cap,)).start() | 42.166667 | 149 | 0.700044 |
4a912235328cee2f8b87e2aaba4351c27f0e4c61 | 2,396 | py | Python | sanitizers/mvj.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 1 | 2021-01-12T08:14:10.000Z | 2021-01-12T08:14:10.000Z | sanitizers/mvj.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 249 | 2017-04-18T14:00:13.000Z | 2022-03-30T12:18:03.000Z | sanitizers/mvj.py | suutari-ai/mvj | c39dbc692afcb3b26366783414c2d5a88a57b25a | [
"MIT"
] | 7 | 2017-04-18T08:43:54.000Z | 2021-07-28T07:29:30.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
from random import choice
from string import digits
from faker import Faker
fake = Faker("fi_FI")
| 18.866142 | 79 | 0.736227 |
4a914cd003bec02fcf0ace8e2f7e5de8208c8146 | 11,024 | py | Python | ISM_catalog_profile/scripts/ISM/ISM.py | rhmdnd/compliance-trestle-demos | 1d92c91cca1d23cf707f82f035b2d58ec67c953a | [
"Apache-2.0"
] | 10 | 2021-09-03T05:07:19.000Z | 2022-03-26T13:24:51.000Z | ISM_catalog_profile/scripts/ISM/ISM.py | rhmdnd/compliance-trestle-demos | 1d92c91cca1d23cf707f82f035b2d58ec67c953a | [
"Apache-2.0"
] | null | null | null | ISM_catalog_profile/scripts/ISM/ISM.py | rhmdnd/compliance-trestle-demos | 1d92c91cca1d23cf707f82f035b2d58ec67c953a | [
"Apache-2.0"
] | 4 | 2021-12-14T22:15:06.000Z | 2022-03-29T16:16:19.000Z | #!/usr/bin/env python3
# # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# limitations under the License.
"""Create ISM catalogs.
This script is used to convert Australian Government Information Security Manual (ISM) into OSCAL formats.
The ISM is the equivalent of NIST 800-53 / FedRAMP / IL6 and similar documents in the USA. The goal is to produce a
similar set OSCAL documents to what NIST and FedRAMP are currently publishing.
It does this via pulling the ISM xml doc and creating:
1 Catalog for all the controls
4 profiles (Official, protected, secret, TS)
Ideally this would be a cron job based script, however, as ACSC publish revisions
with specific names this would need to be discovered by crawling. This will be a potential future enhancement.
This script pulls down the controls in a 'dumb' way from the xml to get the actual controls. A full featured catalog
will need to parse appropriate word / xml documents to provide groups /guidance.
"""
import io
import json
import logging
import pathlib
import sys
import urllib.request
import zipfile
from datetime import datetime
from uuid import uuid4
from ilcli import Command
import trestle.oscal.catalog as catalog
import trestle.oscal.common as common
import trestle.oscal.profile as profile
import xmltodict
# Globally define logging behaviour.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
remarks_tuple = '\n'.join(
[
'This is not an official version of the Australian Government Information Security Manual.',
'',
'Find the official versions here: https://www.cyber.gov.au/acsc/view-all-content/ism',
'This content was generated using scrips/ISM/ISM.py'
]
)
if __name__ == '__main__':
sys.exit(ISM().run())
| 42.4 | 119 | 0.642144 |
4a9299355672fcaae0c15f4927fbd9d0f9c5887b | 642 | py | Python | logistics/permissions.py | geo2tag-logistics/main | 3b55185ea97481bbabe38497e4608abefbf1ece1 | [
"Apache-2.0"
] | null | null | null | logistics/permissions.py | geo2tag-logistics/main | 3b55185ea97481bbabe38497e4608abefbf1ece1 | [
"Apache-2.0"
] | 5 | 2016-09-25T20:01:43.000Z | 2016-09-25T20:32:44.000Z | logistics/permissions.py | geo2tag-logistics/main | 3b55185ea97481bbabe38497e4608abefbf1ece1 | [
"Apache-2.0"
] | null | null | null | from rest_framework import permissions
| 25.68 | 64 | 0.755452 |
4a92c1904e0ba01d29ac9f188cf088ddb5d2ab71 | 1,488 | py | Python | src/python/reduce_fps_parallel.py | blancKaty/alignmentFralework_and_classif | 192565a928dad0d98553e0602e91eed59c4a193d | [
"Apache-2.0"
] | null | null | null | src/python/reduce_fps_parallel.py | blancKaty/alignmentFralework_and_classif | 192565a928dad0d98553e0602e91eed59c4a193d | [
"Apache-2.0"
] | null | null | null | src/python/reduce_fps_parallel.py | blancKaty/alignmentFralework_and_classif | 192565a928dad0d98553e0602e91eed59c4a193d | [
"Apache-2.0"
] | 1 | 2019-10-05T05:40:08.000Z | 2019-10-05T05:40:08.000Z | import os
import shutil
import sys
import multiprocessing
import glob
if __name__ == '__main__':
main()
| 24 | 72 | 0.608871 |
4a93ca990a939c4bbe34b2ca2569173da90ecbc7 | 3,598 | py | Python | ansible/utils/module_docs_fragments/docker.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2021-04-02T08:08:39.000Z | 2021-04-02T08:08:39.000Z | ansible/utils/module_docs_fragments/docker.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | ansible/utils/module_docs_fragments/docker.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2020-05-03T01:13:16.000Z | 2020-05-03T01:13:16.000Z | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
| 38.276596 | 118 | 0.660645 |
4a9405edbd8cfdcda2cba6e2d4bef4fc6c17c93b | 806 | py | Python | setup.py | cyberjunky/python-garminconnect-aio | fb913a15107edee5c5530f3bded7c553ec57923b | [
"MIT"
] | 11 | 2021-06-08T14:55:33.000Z | 2022-02-03T03:12:14.000Z | setup.py | cyberjunky/python-garminconnect-aio | fb913a15107edee5c5530f3bded7c553ec57923b | [
"MIT"
] | 1 | 2021-08-07T09:24:35.000Z | 2021-08-07T17:30:40.000Z | setup.py | cyberjunky/python-garminconnect-aio | fb913a15107edee5c5530f3bded7c553ec57923b | [
"MIT"
] | 2 | 2021-06-04T15:34:22.000Z | 2021-10-02T19:48:13.000Z | #!/usr/bin/env python
from setuptools import setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="Ron Klinkien",
author_email="ron@cyberjunky.nl",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
description="Asynchronous Garmin Connect Python 3 API wrapper",
name="garminconnect_aio",
keywords=["garmin connect", "api", "client"],
license="MIT license",
install_requires=["aiohttp >= 3.6", "yarl", "brotlipy"],
long_description_content_type="text/markdown",
long_description=readme,
url="https://github.com/cyberjunky/python-garminconnect-aio",
packages=["garminconnect_aio"],
version="0.1.4",
)
| 29.851852 | 67 | 0.666253 |
4a940fa45e0ab9b5f708abce624a09bc0ed42b1a | 9,513 | py | Python | nova/tests/unit/virt/libvirt/fake_imagebackend.py | ChameleonCloud/nova | 4bb9421b02b71f2b218278aa6f97abace871b111 | [
"Apache-2.0"
] | 1 | 2016-07-18T22:05:01.000Z | 2016-07-18T22:05:01.000Z | nova/tests/unit/virt/libvirt/fake_imagebackend.py | ChameleonCloud/nova | 4bb9421b02b71f2b218278aa6f97abace871b111 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/fake_imagebackend.py | ChameleonCloud/nova | 4bb9421b02b71f2b218278aa6f97abace871b111 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:55:41.000Z | 2021-11-12T03:55:41.000Z | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import os
import fixtures
import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
| 41.723684 | 79 | 0.654157 |
4a94f180b8cf8188f6c73d72cff2209e4e3ba275 | 8,504 | py | Python | tests/test_email_subscriptions.py | coolboi567/dnstwister | b809ca721a13efc6b59e11587c582f6ba4b11587 | [
"Unlicense"
] | null | null | null | tests/test_email_subscriptions.py | coolboi567/dnstwister | b809ca721a13efc6b59e11587c582f6ba4b11587 | [
"Unlicense"
] | null | null | null | tests/test_email_subscriptions.py | coolboi567/dnstwister | b809ca721a13efc6b59e11587c582f6ba4b11587 | [
"Unlicense"
] | null | null | null | """Tests of the email subscription mechanism."""
import binascii
import flask_webtest
import mock
import pytest
import webtest.app
import dnstwister
import dnstwister.tools
import patches
def test_bad_error_codes(webapp):
"""Test the email error codes being weird doesn't break the page."""
normal_html = webapp.get('/email/subscribe/7777772e6578616d706c652e636f6d').html
assert webapp.get(
'/email/subscribe/7777772e6578616d706c652e636f6d/9',
expect_errors=True
).html == normal_html
| 32.212121 | 118 | 0.688147 |
4a95eafd7882de8499fc568c3c76a78f53505995 | 6,671 | py | Python | ershoufang/crawler_v2.py | zlikun/python-crawler-lianjia | 7e7bf0cbd333486ee62ac015e72b96d6003c8713 | [
"Apache-2.0"
] | 2 | 2018-10-25T05:52:33.000Z | 2021-12-22T06:39:30.000Z | ershoufang/crawler_v2.py | zlikun/python-crawler-lianjia | 7e7bf0cbd333486ee62ac015e72b96d6003c8713 | [
"Apache-2.0"
] | null | null | null | ershoufang/crawler_v2.py | zlikun/python-crawler-lianjia | 7e7bf0cbd333486ee62ac015e72b96d6003c8713 | [
"Apache-2.0"
] | 2 | 2019-02-02T14:38:26.000Z | 2020-07-21T01:57:17.000Z | """
1.
2.
3.
"""
import csv
import datetime
import logging
import multiprocessing as mp
import re
import time
from collections import OrderedDict
import requests
from pyquery import PyQuery
from requests import RequestException
base_url = r'https://sh.lianjia.com/ershoufang'
# URL+
seen_urls = set()
lock = mp.Lock()
#
retries = 3
#
today = datetime.date.today()
# URL
list_page_pattern = '^{}/(pg\d+/)?$'.format(base_url)
item_page_pattern = '^{}/\d+.html$'.format(base_url)
#
csv_file_path = r'../.data/ershoufang-{}.csv'.format(today)
#
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(process)05d - %(levelname)s - %(message)s')
def start_download_job(data_writer, init_tasks):
"""
:param data_writer:
:param init_tasks:
:return:
"""
# CPU44CPU
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
pool = mp.Pool(pool_size)
#
(task_reader, task_writer) = mp.Pipe(duplex=False)
#
#
task_writer.send(init_tasks)
#
while True:
# URL
urls = task_reader.recv()
# URLurl
for url in urls:
#
with lock:
if url in seen_urls:
continue
else:
seen_urls.add(url)
#
pool.apply_async(download, (url, task_writer, data_writer))
pool.close()
pool.join()
def download(url, task_writer, data_writer):
"""
3
:param url: url
:param task_writer:
:param data_writer:
:return:
"""
for _ in range(retries + 1):
try:
logging.info('download page {}'.format(url))
content = requests.get(url).text
if content is None:
continue
#
if is_list_page(url):
links = parse_list_page(content, url)
#
if links and len(links) > 0:
task_writer.send(links)
else:
data_writer.send((content, url))
return
except RequestException:
# 2
time.sleep(2)
#
logging.error('{}{}'.format(retries, url))
# url
task_writer.send(set([url]))
def is_list_page(url):
"""
:param url:
:return:
"""
return re.match(list_page_pattern, url)
def parse_list_page(content, url):
"""
:param content:
:param url:
:return:
"""
pq = PyQuery(content, url=url)
return set([li.attr('href') for li in pq('ul.sellListContent div.title > a').items()])
def parse_item_page(content, url):
"""
:param content:
:param url:
:return:
"""
pq = PyQuery(content, url=url)
return OrderedDict({'title': pq('div.content > div.title > h1').text().strip(),
'sub_title': pq('div.content > div.title > div.sub').text().strip(),
'price': pq('div.price > span.total').text().strip(),
'unit_price': pq('div.unitPrice > span.unitPriceValue').text().replace('/', '').strip(),
'down_payment_info': pq('div.tax > span.taxtext').text().strip(),
'area': re.search('(\d+\.?\d*)', pq('div.area > div.mainInfo').text()).group(1),
'year_info': pq('div.area > div.subInfo').text().strip(),
'house_type': pq('div.room > div.mainInfo').text().strip(),
'floor': pq('div.room > div.subInfo').text().strip(),
'towards': pq('div.type > div.mainInfo').text().strip(),
'housing_estate': pq('div.communityName > a:first').text().strip(),
'housing_estate_link': pq('div.communityName > a:first').attr('href'),
'location': tuple([i.text().strip() for i in pq('div.areaName > span > a').items()]),
'broker': pq('div.brokerName > a').text().strip(),
'broker_homepage': pq('div.brokerName > a').attr('href'),
'number': pq('div.houseRecord > span.info').text().replace('', '').strip()})
def start_parse_job(data_reader):
"""
:param data_reader:
:return:
"""
# CPU44CPU
pool_size = mp.cpu_count() > 4 and mp.cpu_count() or 4
#
pool = mp.Pool(pool_size // 2)
while True:
args = data_reader.recv()
if args is not None:
pool.apply_async(parse, args, callback=process)
pool.close()
pool.join()
def parse(content, url):
"""
:param content:
:param url:
:return:
"""
if content is None or url is None:
return
try:
#
return parse_item_page(content, url)
except Exception as e:
logging.error(e)
def process(data):
"""
:param data:
:return:
"""
if data is None:
return
#
#
if 'housing_estate_link' in data and not data['housing_estate_link'].startswith('https://'):
data['housing_estate_link'] = 'https://sh.lianjia.com' + data['housing_estate_link']
#
#
if 'house_type' in data:
data['house_type'] = (data['house_type'].split('')[0], data['house_type'])
# CSV
with open(csv_file_path,
'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(data.values())
if __name__ == '__main__':
#
init_tasks = set([base_url + '/'] + ['{}/pg{}/'.format(base_url, i) for i in range(2, 101)])
#
(data_reader, data_writer) = mp.Pipe(duplex=False)
#
mp.Process(target=start_download_job, args=(data_writer, init_tasks)).start()
#
mp.Process(target=start_parse_job, args=(data_reader,)).start()
logging.info('--running--')
| 28.75431 | 115 | 0.582072 |
4a960357ff5666b9fe043faf558321c7ac02d8e5 | 8,415 | py | Python | desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | desktop/core/ext-py/pyu2f-0.1.4/pyu2f/convenience/customauthenticator.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to offload the end to end flow of U2F signing."""
import base64
import hashlib
import json
import os
import struct
import subprocess
import sys
from pyu2f import errors
from pyu2f import model
from pyu2f.convenience import baseauthenticator
SK_SIGNING_PLUGIN_ENV_VAR = 'SK_SIGNING_PLUGIN'
U2F_SIGNATURE_TIMEOUT_SECONDS = 5
SK_SIGNING_PLUGIN_NO_ERROR = 0
SK_SIGNING_PLUGIN_TOUCH_REQUIRED = 0x6985
SK_SIGNING_PLUGIN_WRONG_DATA = 0x6A80
| 34.487705 | 80 | 0.684848 |
4a9667d37782748097516470365e83980101a92e | 1,681 | py | Python | kive/portal/management/commands/graph_kive.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T06:10:01.000Z | 2021-12-22T06:10:01.000Z | kive/portal/management/commands/graph_kive.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | null | null | null | kive/portal/management/commands/graph_kive.py | dmacmillan/Kive | 76bc8f289f66fb133f78cb6d5689568b7d015915 | [
"BSD-3-Clause"
] | null | null | null | import itertools
import os
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
| 38.204545 | 86 | 0.543129 |
4a971f56d894bd93c1e6642fd2fd7e799cec7a1d | 8,543 | py | Python | summary.py | rpls/openlane_summary | 5057fab80a4acaf08e6503ced7abb932684145a5 | [
"Apache-2.0"
] | null | null | null | summary.py | rpls/openlane_summary | 5057fab80a4acaf08e6503ced7abb932684145a5 | [
"Apache-2.0"
] | null | null | null | summary.py | rpls/openlane_summary | 5057fab80a4acaf08e6503ced7abb932684145a5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import glob
import csv
import sys
import re
from shutil import which
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="OpenLANE summary tool")
group = parser.add_mutually_exclusive_group(required=True)
# either choose the design and interation
group.add_argument('--design', help="only run checks on specific design", action='store')
# or show standard cells
group.add_argument('--show-sky130', help='show all standard cells', action='store_const', const=True)
# optionally choose different name for top module and which run to use (default latest)
parser.add_argument('--top', help="name of top module if not same as design", action='store')
parser.add_argument('--run', help="choose a specific run. If not given use latest. If not arg, show a menu", action='store', default=-1, nargs='?', type=int)
# what to show
parser.add_argument('--drc', help='show DRC report', action='store_const', const=True)
parser.add_argument('--summary', help='show violations, area & status from summary report', action='store_const', const=True)
parser.add_argument('--full-summary', help='show the full summary report csv file', action='store_const', const=True)
parser.add_argument('--synth', help='show post techmap synth', action='store_const', const=True)
parser.add_argument('--yosys-report', help='show cell usage after yosys synth', action='store_const', const=True)
# klayout for intermediate files
parser.add_argument('--floorplan', help='show floorplan', action='store_const', const=True)
parser.add_argument('--pdn', help='show PDN', action='store_const', const=True)
parser.add_argument('--global-placement', help='show global placement PDN', action='store_const', const=True)
parser.add_argument('--detailed-placement', help='show detailed placement', action='store_const', const=True)
parser.add_argument('--gds', help='show final GDS', action='store_const', const=True)
# GDS3D for 3d view
parser.add_argument('--gds-3d', help='show final GDS in 3D', action='store_const', const=True)
parser.add_argument('--caravel', help='use caravel directory structure instead of standard openlane', action='store_const', const=True)
args = parser.parse_args()
if not args.top:
args.top = args.design
if not 'OPENLANE_ROOT' in os.environ:
exit("pls set OPENLANE_ROOT to where your OpenLANE is installed")
klayout_def = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_def.xml')
klayout_gds = os.path.join(os.path.dirname(sys.argv[0]), 'klayout_gds.xml')
gds3d_tech = os.path.join(os.path.dirname(sys.argv[0]), 'sky130.txt')
# if showing off the sky130 cells
if args.show_sky130:
if not os.environ['PDK_ROOT']:
exit("pls set PDK_ROOT to where your PDK is installed")
path = check_path(os.path.join(os.environ['PDK_ROOT'], "sky130A", "libs.ref", "sky130_fd_sc_hd", "gds", "sky130_fd_sc_hd.gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
exit()
# otherwise need to know where openlane and the designs are
openlane_designs = ''
if args.caravel:
if os.path.exists('openlane'):
openlane_designs = 'openlane'
else:
openlane_designs = '.'
run_dir = os.path.join(openlane_designs, args.design, 'runs/*')
else:
openlane_designs = os.path.join(os.environ['OPENLANE_ROOT'], 'designs')
run_dir = os.path.join(openlane_designs, args.design, 'runs/*-*')
list_of_files = glob.glob(run_dir)
if len(list_of_files) == 0:
exit("couldn't find that design")
list_of_files.sort(key=openlane_date_sort)
# what run to show?
if args.run == -1:
# default is to use the latest
print("using latest run:")
run_path = max(list_of_files, key=os.path.getctime)
elif args.run is None:
# UI for asking for which run to use
for run_index, run in enumerate(list_of_files):
print("\n%2d: %s" % (run_index, os.path.basename(run)), end='')
print(" <default>\n")
n = input("which run? <enter for default>: ") or run_index
run_path = list_of_files[int(n)]
else:
# use the given run
print("using run %d:" % args.run)
run_path = list_of_files[args.run]
print(run_path)
if args.summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
summary_report(path)
if args.full_summary:
path = check_path(os.path.join(run_path, 'reports', 'final_summary_report.csv'))
full_summary_report(path)
if args.drc:
path = os.path.join(run_path, 'logs', 'magic', 'magic.drc') # don't check path because if DRC is clean, don't get the file
if os.path.exists(path):
drc_report(path)
else:
print("no DRC file, DRC clean?")
if args.synth:
path = check_path(os.path.join(run_path, "tmp", "synthesis", "post_techmap.dot")) # post_techmap is created by https://github.com/efabless/openlane/pull/282
os.system("xdot %s" % path)
if args.yosys_report:
filename = "*yosys_*.stat.rpt"
path = check_path(os.path.join(run_path, "reports", "synthesis", filename))
os.system("cat %s" % path)
if args.floorplan:
path = check_path(os.path.join(run_path, "results", "floorplan", args.top + ".floorplan.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.pdn:
filename = "*pdn.def"
path = check_path(os.path.join(run_path, "tmp", "floorplan", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.global_placement:
filename = "*replace.def"
path = check_path(os.path.join(run_path, "tmp", "placement", filename))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.detailed_placement:
path = check_path(os.path.join(run_path, "results", "placement", args.top + ".placement.def"))
os.system("klayout -l %s %s" % (klayout_def, path))
if args.gds:
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("klayout -l %s %s" % (klayout_gds, path))
if args.gds_3d:
if not is_tool('GDS3D'):
exit("pls install GDS3D from https://github.com/trilomix/GDS3D")
path = check_path(os.path.join(run_path, "results", "magic", args.top + ".gds"))
os.system("GDS3D -p %s -i %s" % (gds3d_tech, path))
| 40.29717 | 164 | 0.628 |
4a9a27b8be786f9438239fbfe717a4e94dce8571 | 992 | py | Python | var/spack/repos/builtin/packages/py-cupy/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/py-cupy/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/py-cupy/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 35.428571 | 95 | 0.704637 |
4a9acf16b780b19cf351bb2d89e76f1956c1db38 | 1,742 | py | Python | simple_rest_client/decorators.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | null | null | null | simple_rest_client/decorators.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | null | null | null | simple_rest_client/decorators.py | cfytrok/python-simple-rest-client | 4896e8226ffe194625c63773ea6f49531293b308 | [
"MIT"
] | null | null | null | import logging
from functools import wraps
import status
from httpx import exceptions
from .exceptions import AuthError, ClientConnectionError, ClientError, NotFoundError, ServerError
logger = logging.getLogger(__name__)
| 29.033333 | 97 | 0.675086 |
4a9ad45bc6d5f8001c81f4145b812d1bf0d096f9 | 100 | py | Python | HPOBenchExperimentUtils/resource_manager/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | null | null | null | HPOBenchExperimentUtils/resource_manager/__init__.py | PhMueller/TrajectoryParser | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | 1 | 2021-09-01T16:35:21.000Z | 2021-11-05T19:53:25.000Z | HPOBenchExperimentUtils/resource_manager/__init__.py | automl/HPOBenchExperimentUtils | 9c19d37a3ff29a593c9b6d3e7fd3857e8c2d724f | [
"Apache-2.0"
] | null | null | null | from HPOBenchExperimentUtils.resource_manager.file_resource_manager import FileBasedResourceManager
| 50 | 99 | 0.94 |