hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f599143770c118307d670eee5b87e03976f168c | 9,712 | py | Python | scluster/aws_create_resources.py | dorgun/ncluster | 20ba95fb7250a5f7239d704b01bf468a57e8fb7b | [
"MIT"
] | null | null | null | scluster/aws_create_resources.py | dorgun/ncluster | 20ba95fb7250a5f7239d704b01bf468a57e8fb7b | [
"MIT"
] | null | null | null | scluster/aws_create_resources.py | dorgun/ncluster | 20ba95fb7250a5f7239d704b01bf468a57e8fb7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Creates resources
# This script creates VPC/security group/keypair if not already present
import logging
import os
import sys
import time
from . import aws_util as u
from . import util
DRYRUN = False
DEBUG = True
# Names of Amazon resources that are created. These settings are fixed across
# all runs, and correspond to resources created once per user per region.
PUBLIC_TCP_RANGES = [
22, # ssh
(8888, 8899), # ipython notebook ports
6379, # redis port
(6006, 6016) # tensorboard ports
]
PUBLIC_UDP_RANGES = [(60000, 61000)] # mosh ports
logger = logging.getLogger(__name__)
def network_setup():
    """Ensure the VPC and its networking pieces exist.

    Reuses an existing VPC / internet gateway / security group when one with
    the expected name is found, otherwise creates it and configures it for
    public internet access. Returns the (vpc, security_group) pair.
    """
    ec2 = u.get_ec2_resource()
    client = u.get_ec2_client()
    existing_vpcs = u.get_vpc_dict()
    zones = u.get_zones()

    # create VPC from scratch. Remove this if default VPC works well enough.
    vpc_name = u.get_vpc_name()
    if u.get_vpc_name() in existing_vpcs:
        logger.info("Reusing VPC " + vpc_name)
        vpc = existing_vpcs[vpc_name]
    else:
        logger.info("Creating VPC " + vpc_name)
        vpc = ec2.create_vpc(CidrBlock='192.168.0.0/16')

        # new VPCs need DNS hostnames/support switched on explicitly
        resp = vpc.modify_attribute(EnableDnsHostnames={"Value": True})
        assert u.is_good_response(resp)
        resp = vpc.modify_attribute(EnableDnsSupport={"Value": True})
        assert u.is_good_response(resp)

        vpc.create_tags(Tags=u.create_name_tags(vpc_name))
        vpc.wait_until_available()

    gateways = u.get_gateway_dict(vpc)
    gateway_name = u.get_gateway_name()
    if gateway_name in gateways:
        logger.info("Reusing gateways " + gateway_name)
    else:
        logger.info("Creating internet gateway " + gateway_name)
        ig = ec2.create_internet_gateway()
        ig.attach_to_vpc(VpcId=vpc.id)
        ig.create_tags(Tags=u.create_name_tags(gateway_name))

        # check that attachment succeeded
        attach_state = u.extract_attr_for_match(ig.attachments, State=-1, VpcId=vpc.id)
        assert attach_state == 'available', "vpc %s is in state %s" % (vpc.id, attach_state)

        # route all outbound traffic through the new gateway
        route_table = vpc.create_route_table()
        route_table_name = u.get_route_table_name()
        route_table.create_tags(Tags=u.create_name_tags(route_table_name))
        dest_cidr = '0.0.0.0/0'
        route_table.create_route(DestinationCidrBlock=dest_cidr, GatewayId=ig.id)

        # carve one public /20 subnet per availability zone out of the /16
        assert len(zones) <= 16  # for cidr/20 to fit into cidr/16
        ip = 0
        for zone in zones:
            cidr_block = '192.168.%d.0/20' % (ip,)
            ip += 16
            logging.info("Creating subnet %s in zone %s" % (cidr_block, zone))
            subnet = vpc.create_subnet(CidrBlock=cidr_block, AvailabilityZone=zone)
            subnet.create_tags(Tags=[{'Key': 'Name', 'Value': f'{vpc_name}-subnet'},
                                     {'Key': 'Region', 'Value': zone}])
            resp = client.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True},
                                                  SubnetId=subnet.id)
            assert u.is_good_response(resp)
            u.wait_until_available(subnet)
            assert subnet.map_public_ip_on_launch, "Subnet doesn't enable public IP by default, why?"
            route_table.associate_with_subnet(SubnetId=subnet.id)

    existing_security_groups = u.get_security_group_dict(vpc.id)
    security_group_name = u.get_security_group_name()
    if security_group_name in existing_security_groups:
        logger.info("Reusing security group " + security_group_name)
        security_group = existing_security_groups[security_group_name]
        assert security_group.vpc_id == vpc.id, f"Found security group {security_group} " \
            f"attached to {security_group.vpc_id} but expected {vpc.id}"
    else:
        logging.info("Creating security group " + security_group_name)
        security_group = ec2.create_security_group(
            GroupName=security_group_name, Description=security_group_name,
            VpcId=vpc.id)
        # operator may restrict inbound traffic to one CIDR via this env var
        cidr_ip = os.environ.get('SCLUSTER_SECURITY_GROUP_CidrIp', '0.0.0.0/0')
        security_group.create_tags(Tags=u.create_name_tags(security_group_name))

        # allow ICMP access for public ping
        security_group.authorize_ingress(
            CidrIp='0.0.0.0/0',
            IpProtocol='icmp',
            FromPort=-1,
            ToPort=-1
        )

        # open public ports
        # always include SSH port which is required for basic functionality
        assert 22 in PUBLIC_TCP_RANGES, "Must enable SSH access"
        for proto, port_ranges in (("tcp", PUBLIC_TCP_RANGES), ("udp", PUBLIC_UDP_RANGES)):
            for port in port_ranges:
                # an entry is either a single port or a (from, to) pair
                if util.is_iterable(port):
                    assert len(port) == 2
                    from_port, to_port = port
                else:
                    from_port, to_port = port, port
                resp = security_group.authorize_ingress(IpProtocol=proto,
                                                        CidrIp=cidr_ip,
                                                        FromPort=from_port,
                                                        ToPort=to_port)
                assert u.is_good_response(resp)

    return vpc, security_group
def keypair_setup():
    """Creates keypair if necessary and saves the private key locally.

    Returns the keypair object (the existing one when reused, otherwise the
    newly created one). The original docstring claimed the pem-file contents
    were returned, which did not match the code.
    """
    # os.makedirs replaces the shell 'mkdir -p' call (portable, no subshell)
    os.makedirs(u.PRIVATE_KEY_LOCATION, exist_ok=True)

    keypair_name = u.get_keypair_name()
    keypair = u.get_keypair_dict().get(keypair_name, None)
    keypair_fn = u.get_keypair_fn()
    if keypair:
        print("Reusing keypair " + keypair_name)
        # check that local pem file exists and is readable
        assert os.path.exists(
            keypair_fn), "Keypair %s exists, but corresponding .pem file %s is not found, delete keypair %s through " \
                         "console and run again to recreate keypair/.pem together" % (
            keypair_name, keypair_fn, keypair_name)
        with open(keypair_fn) as f:
            keypair_contents = f.read()
        assert len(keypair_contents) > 0
    else:
        print("Creating keypair " + keypair_name)
        ec2 = u.get_ec2_resource()
        assert not os.path.exists(
            keypair_fn), "previous keypair exists, delete it with 'sudo rm %s' and also delete corresponding " \
                         "keypair through console" % (keypair_fn)
        keypair = ec2.create_key_pair(KeyName=keypair_name)
        # save the private key and restrict it to owner read-only (ssh
        # refuses world-readable keys); os.chmod replaces 'chmod 400'
        with open(keypair_fn, 'w') as f:
            f.write(keypair.key_material)
        os.chmod(keypair_fn, 0o400)
    return keypair
def placement_group_setup(group_name):
    """Ensure a 'cluster'-strategy placement group named group_name exists.

    Reuses an existing group when present, otherwise creates one. Returns
    the placement group object. (The previous docstring claimed a True/False
    return value, which did not match the code.)
    """
    existing_placement_groups = u.get_placement_group_dict()
    group = existing_placement_groups.get(group_name, None)
    if group:
        # sanity-check the group we are about to reuse
        assert group.state == 'available'
        assert group.strategy == 'cluster'
        print("Reusing group ", group.name)
        return group
    print("Creating group " + group_name)
    ec2 = u.get_ec2_resource()
    group = ec2.create_placement_group(GroupName=group_name, Strategy='cluster')
    return group
if __name__ == '__main__':
create_resources()
| 37.9375 | 198 | 0.631899 |
3f59a6465e2607784678cb918b686e6250106802 | 142 | py | Python | ex005-antecessorSucessor/005.py | KaiqueCassal/cursoEmVideoPython | 9d37563045091e4d558e283d47a5a49378e9df71 | [
"MIT"
] | 1 | 2021-08-11T04:38:33.000Z | 2021-08-11T04:38:33.000Z | ex005-antecessorSucessor/005.py | KaiqueCassal/cursoEmVideoPython | 9d37563045091e4d558e283d47a5a49378e9df71 | [
"MIT"
] | null | null | null | ex005-antecessorSucessor/005.py | KaiqueCassal/cursoEmVideoPython | 9d37563045091e4d558e283d47a5a49378e9df71 | [
"MIT"
] | null | null | null | num = int(input('Digite um nmero inteiro: '))
print(f'O nmero: {num}'
f'\nO antecessor: {num - 1}'
f'\nO sucessor: {num + 1}')
| 23.666667 | 46 | 0.56338 |
3f59d7981fb0df6af9168f9da7f7187aa7eb35ac | 2,253 | py | Python | homeassistant/components/ihc/binary_sensor.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/ihc/binary_sensor.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 6 | 2021-02-08T20:54:31.000Z | 2022-03-12T00:50:43.000Z | homeassistant/components/ihc/binary_sensor.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | [
"Apache-2.0"
] | 2 | 2020-04-19T13:35:24.000Z | 2020-04-19T13:35:51.000Z | """Support for IHC binary sensors."""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_TYPE
from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_INVERTING
from .ihcdevice import IHCDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IHC binary sensor platform."""
    if discovery_info is None:
        return
    sensors = []
    for name, device in discovery_info.items():
        product_cfg = device["product_cfg"]
        # Find controller that corresponds with device id
        controller_key = f"ihc{device['ctrl_id']}"
        controller_data = hass.data[controller_key]
        sensors.append(
            IHCBinarySensor(
                controller_data[IHC_CONTROLLER],
                name,
                device["ihc_id"],
                controller_data[IHC_INFO],
                product_cfg.get(CONF_TYPE),
                product_cfg[CONF_INVERTING],
                device["product"],
            )
        )
    add_entities(sensors)
| 28.884615 | 74 | 0.624057 |
3f5c9bb50fc14ea221608e07d43fdec0123aef80 | 1,009 | py | Python | script/TuneLR.py | yipeiw/parameter_server | 07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff | [
"Apache-2.0"
] | null | null | null | script/TuneLR.py | yipeiw/parameter_server | 07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff | [
"Apache-2.0"
] | null | null | null | script/TuneLR.py | yipeiw/parameter_server | 07cbfbf2dc727ee0787d7e66e58a1f7fd8333aff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os.path as path
import sys

# Directories where the generated per-learning-rate config and log files go.
tmpDir = '../config/tmp/'
logDir = '../config/tmp/log/'

# usage: TuneLR.py <base config file> <output run script>
conffile = sys.argv[1]
runfile = sys.argv[2]

# Learning rates to sweep over; one config/log/command per value.
lr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]

confname = path.splitext(path.basename(conffile))[0]
loglist = confname + '.meta.log'

# Emit the bash run script and the list of log files it will produce.
# Context managers replace the manual close() calls (and the previously
# never-closed base-config handle below).
fws = {}
with open(runfile, 'w') as fout, open(loglist, 'w') as fl:
    fout.write("#!/bin/bash\n\n\n")
    for i in range(len(lr)):
        filename = confname + '_' + str(lr[i])
        tmpfile = path.join(tmpDir, filename + '.conf')
        logfile = path.join(logDir, filename + '.txt')
        fws[i] = open(tmpfile, 'w')
        fout.write("echo \"" + "./local.sh 1 4 " + tmpfile + " 2>" + logfile + '\"\n\n')
        fout.write("./local.sh 1 4 " + tmpfile + " 2>" + logfile + '\n\n\n')
        fl.write(logfile + '\n')

# Copy the base config into every variant, overriding the 'eta' line with
# that variant's learning rate.
with open(conffile) as fin:
    for line in fin:
        if line.startswith("eta"):
            for i in range(len(lr)):
                fws[i].write("eta: " + str(lr[i]) + '\n')
        else:
            for i in range(len(lr)):
                fws[i].write(line)

for i in range(len(lr)):
    fws[i].close()
| 22.422222 | 73 | 0.607532 |
3f5ccedf65dad52ce01efa31808cc2b63ebe8af6 | 9,770 | py | Python | pc.py | Omar8345/tic-tac-toe | e7fe6d9699bef3297227058defbb6d4ff35f10f4 | [
"MIT"
] | null | null | null | pc.py | Omar8345/tic-tac-toe | e7fe6d9699bef3297227058defbb6d4ff35f10f4 | [
"MIT"
] | null | null | null | pc.py | Omar8345/tic-tac-toe | e7fe6d9699bef3297227058defbb6d4ff35f10f4 | [
"MIT"
] | 1 | 2022-02-10T17:47:27.000Z | 2022-02-10T17:47:27.000Z | # Tic Tac Toe Game
# Original repository: (https://github.com/Omar8345/tic-tac-toe)
# Author: Omar Mostafa
# Date: 08/02/2022
# Version: 1.0
# Description: Tic Tac Toe Game made using Python Tkitner (Open Source)
# This game is a simple game that can be played with two players
# and can be played with a computer.
##### CODING STARTS HERE #####
# Importing the necessary libraries
from itertools import tee
import tkinter
import random
import time
from tkinter import messagebox
from numpy import empty
from time import sleep as sleep
try:
# Tkinter
window = tkinter.Tk()
window.title("Tic Tac Toe")
window.resizable(0, 0) # It makes everything needed to fit the window! WoW!
# Window icon
window.iconbitmap("img\XO.ico")
# Tkinter game buttons
# create 9 tkinter buttons
b1 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b1))
b1.grid(row=1, column=0)
b2 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b2))
b2.grid(row=1, column=1)
b3 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b3))
b3.grid(row=1, column=2)
b4 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b4))
b4.grid(row=2, column=0)
b5 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b5))
b5.grid(row=2, column=1)
b6 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b6))
b6.grid(row=2, column=2)
b7 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b7))
b7.grid(row=3, column=0)
b8 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b8))
b8.grid(row=3, column=1)
b9 = tkinter.Button(window, text=" ", font=('Times 26 bold'), height=4, width=8, command=lambda: btn_click(b9))
b9.grid(row=3, column=2)
# create a list to store the buttons
buttons = [b1, b2, b3, b4, b5, b6, b7, b8, b9]
# create a list to store the values of the buttons
values = []
# when button clicked, it puts X in the button
except:
None
if __name__ == "__main__":
run_game()
else:
print("If you run the game using the launcher.py or launcher.exe.")
sleep(1)
print('Ignore this message, thank you.')
print('------------------------------------------------------')
print("Error: This is a module and not a script.")
sleep(2)
print("Please run this module as a script.")
sleep(2)
print("If you actually did run it as a script, please report this bug.")
sleep(2)
print("Raise an issue on GitHub. More details:")
sleep(2)
print("__name__ != __main__")
sleep(2)
print(" __name__ does not equal __main__ and this was made to prevent errors.")
sleep(2)
print("If you are a developer and you are seeing this message, please report this bug and (if possible, more details).")
sleep(2)
print("If you are not a developer and you are seeing this message, please report the details gaven above.")
sleep(2)
print("Thank you.")
sleep(2)
print("Omar Mostafa")
sleep(2)
print("Hope you in good health. Stay safe.")
sleep(1) | 39.877551 | 125 | 0.523439 |
3f5d917c88eccde66a033389ff984f57d3efa801 | 2,336 | py | Python | nvtabular/utils.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | nvtabular/utils.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | nvtabular/utils.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
try:
from numba import cuda
except ImportError:
cuda = None
try:
import psutil
except ImportError:
psutil = None
| 31.567568 | 99 | 0.675086 |
3f5e544534b4294729194a2befc9af168507a49c | 1,908 | py | Python | tests/segmentation/segmanagetest.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | tests/segmentation/segmanagetest.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | tests/segmentation/segmanagetest.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | [
"BSD-3-Clause"
] | null | null | null | import unittest
from maskgen import image_wrap
import numpy
from maskgen.segmentation.segmanage import select_region,segmentation_classification,convert_color
from tests.test_support import TestSupport
if __name__ == '__main__':
unittest.main()
| 44.372093 | 109 | 0.682914 |
3f5ea42854995b843b23cfd97be8ee560fd1c66b | 6,932 | py | Python | aimacode/tests/test_text.py | juandarr/AIND-planning | f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d | [
"MIT"
] | null | null | null | aimacode/tests/test_text.py | juandarr/AIND-planning | f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d | [
"MIT"
] | null | null | null | aimacode/tests/test_text.py | juandarr/AIND-planning | f74d41657d6f3d95a3b57ec4fd6e766d894d2f0d | [
"MIT"
] | null | null | null | import pytest
import os
import random
from text import * # noqa
from utils import isclose, DataFile
# TODO: for .ipynb
"""
>>> P1.samples(20)
'you thought known but were insides of see in depend by us dodecahedrons just but i words are instead degrees'
>>> P2.samples(20)
'flatland well then can anything else more into the total destruction and circles teach others confine women must be added'
>>> P3.samples(20)
'flatland by edwin a abbott 1884 to the wake of a certificate from nature herself proving the equal sided triangle'
"""
if __name__ == '__main__':
pytest.main()
| 33.326923 | 124 | 0.542268 |
3f5f2e64673d1e50e2bb1b2fb203375596490210 | 5,983 | py | Python | implementations/python3/pysatl/apdu_tool.py | sebastien-riou/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 4 | 2020-05-13T10:13:55.000Z | 2021-10-20T04:43:07.000Z | implementations/python3/pysatl/apdu_tool.py | TiempoSecure/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 4 | 2020-07-22T16:06:31.000Z | 2021-07-25T19:51:41.000Z | implementations/python3/pysatl/apdu_tool.py | TiempoSecure/SATL | b95d0e784d2e8e1384381d4d5b8b448d3d1798cf | [
"Apache-2.0"
] | 2 | 2019-05-12T21:15:00.000Z | 2020-09-23T09:05:24.000Z | import re
import argparse
import os
import sys
import logging
import traceback
import pysatl
if __name__ == "__main__":
ApduTool(sys.argv)
| 35.194118 | 91 | 0.503594 |
3f5f46e8ad8a46b0f24a67ae6817aebda546ccdc | 11,979 | py | Python | host-software/easyhid.py | kavka1983/key | 9185c156fd40a1cab358b2464af4b27cacf71935 | [
"MIT"
] | 1 | 2020-06-14T22:31:20.000Z | 2020-06-14T22:31:20.000Z | host-software/easyhid.py | kavka1983/key | 9185c156fd40a1cab358b2464af4b27cacf71935 | [
"MIT"
] | null | null | null | host-software/easyhid.py | kavka1983/key | 9185c156fd40a1cab358b2464af4b27cacf71935 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
import cffi
import ctypes.util
import platform
ffi = cffi.FFI()
ffi.cdef("""
struct hid_device_info {
char *path;
unsigned short vendor_id;
unsigned short product_id;
wchar_t *serial_number;
unsigned short release_number;
wchar_t *manufacturer_string;
wchar_t *product_string;
unsigned short usage_page;
unsigned short usage;
int interface_number;
struct hid_device_info *next;
};
typedef struct hid_device_ hid_device;
int hid_init(void);
int hid_exit(void);
struct hid_device_info* hid_enumerate(unsigned short, unsigned short);
void hid_free_enumeration (struct hid_device_info *devs);
hid_device* hid_open (unsigned short vendor_id, unsigned short product_id, const wchar_t *serial_number);
hid_device* hid_open_path (const char *path);
int hid_write (hid_device *device, const unsigned char *data, size_t length);
int hid_read_timeout (hid_device *dev, unsigned char *data, size_t length, int milliseconds);
int hid_read (hid_device *device, unsigned char *data, size_t length);
int hid_set_nonblocking (hid_device *device, int nonblock);
int hid_send_feature_report (hid_device *device, const unsigned char *data, size_t length);
int hid_get_feature_report (hid_device *device, unsigned char *data, size_t length);
void hid_close (hid_device *device);
int hid_get_manufacturer_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_product_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_serial_number_string (hid_device *device, wchar_t *string, size_t maxlen);
int hid_get_indexed_string (hid_device *device, int string_index, wchar_t *string, size_t maxlen);
const wchar_t* hid_error (hid_device *device);
""")
# Load the native hidapi shared library. Try the conventional library name
# first and fall back to ctypes' search logic if the direct dlopen fails.
# ffi.dlopen raises OSError on failure; catching only OSError (instead of the
# previous bare except) no longer swallows KeyboardInterrupt/SystemExit.
if "Windows" in platform.platform():
    try:
        hidapi = ffi.dlopen('hidapi.dll')
    except OSError:
        hidapi = ffi.dlopen(ctypes.util.find_library('hidapi.dll'))
else:
    try:
        hidapi = ffi.dlopen('hidapi-libusb')
    except OSError:
        hidapi = ffi.dlopen(ctypes.util.find_library('hidapi-libusb'))
def _hid_enumerate(vendor_id=0, product_id=0):
    """
    Enumerates all the hid devices for VID:PID. Returns a list of `Device`.
    If vendor_id is 0, then match any vendor id. Similarly, if product_id is
    0, match any product id. If both are zero, enumerate all HID devices.
    """
    start = hidapi.hid_enumerate(vendor_id, product_id)
    result = []
    # Walk the C linked list, wrapping each entry in a Python Device.
    # (The previous version allocated a throwaway struct with ffi.new()
    # and immediately overwrote the reference; that dead store is removed.)
    cur = start
    while cur != ffi.NULL:
        result.append(Device(cur))
        cur = cur.next
    # Free the C memory backing the enumeration.
    hidapi.hid_free_enumeration(start)
    return result
# def hid_open(vendor_id, product_id, serial=None):
# """
# """
# if serial == None:
# serial = ffi.NULL
# else:
# if type(serial) == bytes or type(serial) == bytearray:
# serial = serial.decode('utf-8')
# serial = ffi.new("wchar_t[]", serial)
# dev = hidapi.hid_open(vendor_id, product_id, serial)
# if dev:
# return Device(dev)
# else:
# None
if __name__ == "__main__":
# Examples
from easyhid import Enumeration
# Stores an enumertion of all the connected USB HID devices
en = Enumeration()
# return a list of devices based on the search parameters
devices = en.find(manufacturer="Company", product="Widget", interface=3)
# print a description of the devices found
for dev in devices:
print(dev.description())
# open a device
dev.open()
# write some bytes to the device
dev.write(bytearray([0, 1, 2, 3]))
# read some bytes
print(dev.read())
# close a device
dev.close()
| 31.690476 | 105 | 0.622339 |
3f6013f8688ca16e27ef6533df61114f0ade964b | 22,620 | py | Python | rules/check_imported_dlls.py | deptofdefense/SalSA | 7ef771398e3d59597bade95d0a23540de0842e2a | [
"MIT"
] | 84 | 2018-01-07T19:43:45.000Z | 2021-12-23T14:17:44.000Z | rules/check_imported_dlls.py | deptofdefense/SalSA | 7ef771398e3d59597bade95d0a23540de0842e2a | [
"MIT"
] | 7 | 2018-04-02T20:24:28.000Z | 2019-06-07T21:48:04.000Z | rules/check_imported_dlls.py | deptofdefense/SalSA | 7ef771398e3d59597bade95d0a23540de0842e2a | [
"MIT"
] | 18 | 2017-12-26T19:44:46.000Z | 2021-09-13T12:21:02.000Z | """
Align imported dlls/functions to executable functionality.
"""
import sys
# import all supported ordinal decodings
from rules.ordinal_mappings import advapi32
from rules.ordinal_mappings import cabinet
from rules.ordinal_mappings import comctl32
from rules.ordinal_mappings import mfc42
from rules.ordinal_mappings import msvbvm60
from rules.ordinal_mappings import ntdll
from rules.ordinal_mappings import odbc32
from rules.ordinal_mappings import oleaut32
from rules.ordinal_mappings import oledlg
from rules.ordinal_mappings import propsys
from rules.ordinal_mappings import shell32
from rules.ordinal_mappings import shlwapi
from rules.ordinal_mappings import ws2_32
from rules.ordinal_mappings import wsock32
# create ordinal mappings dictionary
# One dict literal keyed by lower-case DLL file name; each value is that
# module's ordinal -> function-name table.
ords2names = {
    'advapi32.dll': advapi32.mapping,
    'cabinet.dll': cabinet.mapping,
    'comctl32.dll': comctl32.mapping,
    'mfc42.dll': mfc42.mapping,
    'msvbvm60.dll': msvbvm60.mapping,
    'ntdll.dll': ntdll.mapping,
    'odbc32.dll': odbc32.mapping,
    'oleaut32.dll': oleaut32.mapping,
    'oledlg.dll': oledlg.mapping,
    'propsys.dll': propsys.mapping,
    'shell32.dll': shell32.mapping,
    'shlwapi.dll': shlwapi.mapping,
    'ws2_32.dll': ws2_32.mapping,
    'wsock32.dll': wsock32.mapping,
}
# list of targeted functions and their descriptions
targets = {
'accept': 'This function is used to listen for incoming connections. This function indicates that the program will listen for incoming connections on a socket. It is mostly used by malware to communicate with their Command and Communication server.',
'AdjustTokenPrivileges': 'This function is used to enable or disable specific access privileges. In a process injection attack, this function is used by malware to gain additional permissions.',
'AttachThreadInput': 'This function attaches the input processing from one thread to another so that the second thread receives input events such as keyboard and mouse events. Keyloggers and other spyware use this function.',
'bind': 'This function is used to associate a local address to a socket in order to listen for incoming connections.',
'BitBlt': 'This function is used to copy graphic data from one device to another. Spyware sometimes uses this function to capture screenshots.',
'CertOpenSystemStore': 'This function is used to access the certificates stored on the local system.',
'CheckRemoteDebuggerPresent': 'Determines whether the specified process is being debugged. Used by malware to detect and evade reversing.',
'connect': 'This function is used to connect to a remote socket. Malware often uses low-level functionality to connect to a command-and-control server. It is mostly used by malware to communicate with their Command and Communication server.',
'ConnectNamedPipe': 'This function is used to create a server pipe for interprocess communication that will wait for a client pipe to connect. Backdoors and reverse shells sometimes use ConnectNamedPipe to simplify connectivity to a command-and-control server.',
'ControlService': 'This function is used to start, stop, modify, or send a signal to a running service. If malware is using its own malicious service, code needs to be analyzed that implements the service in order to determine the purpose of the call.',
'CreateFile': 'Creates a new file or opens an existing file.',
'CreateFileMapping': 'This function is used to create a handle to a file mapping that loads a file into memory and makes it accessible via memory addresses. Launchers, loaders, and injectors use this function to read and modify PE files.',
'CreateMutex': 'This function creates a mutual exclusion object that can be used by malware to ensure that only a single instance of the malware is running on a system at any given time. Malware often uses fixed names for mutexes, which can be good host-based indicators to detect additional installations of the malware.',
'CreateProcess': 'This function creates and launches a new process. If malware creates a new process, new process needs to be analyzed as well.',
'CreateRemoteThread': 'This function is used to start a thread in a remote process. Launchers and stealth malware use CreateRemoteThread to inject code into a different process.',
'CreateService': 'This function is used to create a service that can be started at boot time. Malware uses CreateService for persistence, stealth, or to load kernel drivers.',
'CreateToolhelp32Snapshot': 'This function is used to create a snapshot of processes, heaps, threads, and modules. Malware often uses this function as part of code that iterates through processes or threads.',
'CryptAcquireContext': 'This function is often the first function used by malware to initialize the use of Windows encryption.',
'DeviceIoControl': 'This function sends a control message from user space to a device driver. Kernel malware that needs to pass information between user space and kernel space often use this function.',
'DllFunctionCall': 'THis function is used to import a DLL within a visual basic executable. This indicates malware with visual basic functionality.',
'EnableExecuteProtectionSupport': 'This function is used to modify the Data Execution Protection (DEP) settings of the host, making it more susceptible to attack.',
'EnumProcesses': 'This function is used to enumerate through running processes on the system. Malware often enumerates through processes to find a process into which to inject.',
'EnumProcessModules': 'This function is used to enumerate the loaded modules (executables and DLLs) for a given process. Malware enumerates through modules when doing an injection.',
'FindFirstFile': 'This function is used to search through a directory and enumerate the file system.',
'FindNextFile': 'This function is used to search through a directory and enumerate the file system.',
'FindResource': 'This function is used to find a resource in an executable or loaded DLL. Malware sometimes uses resources to store strings, configuration information, or other malicious files. If this function is used, then check for an .rsrc section in the malware`s PE header.',
'FindWindow': 'This function is used to search for an open window on the desktop. Sometimes this function is used as an anti-debugging technique to search for OllyDbg windows.',
'FtpOpenFile': 'This function is used to open a file on a remote FTP server.',
'FtpPutFile': 'This function is used to upload a file to remote FTP server.',
'GetAdaptersInfo': 'This function is used to obtain information about the network adapters on the system. Backdoors sometimes call GetAdaptersInfo in the information-gathering phase to gather information about infected machines. In some cases, it`s used to gather MAC addresses to check for VMware as part of anti-virtual machine techniques.',
'GetAsyncKeyState': 'This function is used to determine whether a particular key is being pressed. Malware sometimes uses this function to implement a keylogger.',
'GetClipboardData': 'This function is used to read user clipboard data and is sometimes used in keyloggers.',
'GetDC': 'This function returns a handle to a device context for a window or the whole screen. Spyware that takes screen captures often uses this function.',
'GetForegroundWindow': 'This function returns a handle to the window currently in the foreground of the desktop. Keyloggers commonly use this function to determine in which window the user is entering his keystrokes.',
'gethostbyname': 'This function is used to perform a DNS lookup on a particular hostname prior to making an IP connection to a remote host. Hostnames that serve as command-and-control servers often make good network-based signatures.',
'gethostname': 'This function is used to retrieve the hostname of the computer. Backdoors sometimes use gethostname in information gathering phase of the victim machine.',
'GetKeyState': 'This function is used by keyloggers to obtain the status of a particular key on the keyboard.',
'GetModuleFilename': 'This function returns the filename of a module that is loaded in the current process. Malware can use this function to modify or copy files in the currently running process.',
'GetModuleHandle': 'This function is used to obtain a handle to an already loaded module. Malware may use GetModuleHandle to locate and modify code in a loaded module or to search for a good location to inject code.',
'GetProcAddress': 'This function is used to retrieve the address of a function in a DLL loaded into memory. This is used to import functions from other DLLs in addition to the functions imported in the PE file header.',
'GetStartupInfo': 'This function is used to retrieve a structure containing details about how the current process was configured to run, such as where the standard handles are directed.',
'GetSystemDefaultLangId': 'This function returns the default language settings for the system. These are used by malwares by specifically designed for region-based attacks.',
'GetTempPath': 'This function returns the temporary file path. If malware call this function, check whether it reads or writes any files in the temporary file path.',
'GetThreadContext': 'This function returns the context structure of a given thread. The context for a thread stores all the thread information, such as the register values and current state.',
'GetVersionEx': 'This function returns information about which version of Windows is currently running. This can be used as part of a victim survey, or to select between different offsets for undocumented structures that have changed between different versions of Windows.',
'GetWindowDC': 'This function retrieves the device context (DC) for the entire window, including title bar, menus, and scroll bars. Used to take a screenshot of a particular GUI window (like a browser).',
'GetWindowsDirectory': 'This function returns the file path to the Windows directory (usually C:\\Windows). Malware sometimes uses this call to determine into which directory to install additional malicious programs.',
'GetWindowText': 'This function gets the title of all program windows for the current user. Used to enumerate processes that have a GUI interface.',
'HttpOpenRequest': 'This function sets up the OS resources for an HTTP request.',
'HttpSendRequest': 'This function actually makes an outgoing HTTP connection.',
'inet_addr': 'This function converts an IP address string like 127.0.0.1 so that it can be used by functions such as connect. The string specified can sometimes be used as a network-based signature.',
'InternetOpen': 'This function initializes the high-level Internet access functions from WinINet, such as InternetOpenUrl and InternetReadFile. Searching for InternetOpen is a good way to find the start of Internet access functionality. One of the parameters to InternetOpen is the User-Agent, which can sometimes make a good network-based signature.',
'InternetOpenUrl': 'This function opens a specific URL for a connection using FTP, HTTP, or HTTPS.URLs, if fixed, can often be good network-based signatures.',
'InternetReadFile': 'This function reads data from a previously opened URL.',
'InternetWriteFile': 'This function writes data to a previously opened URL.',
'IsDebuggerPresent': 'Determines whether the calling process is being debugged by a user-mode debugger. Used by malware to detect and evade reversing.',
'IsNTAdmin': 'This function checks if the user has administrator privileges.',
'IsUserAnAdmin': 'This function checks if the user has administrator privileges.',
'IsWoW64Process': 'This function is used by a 32-bit process to determine if it is running on a 64-bit operating system.',
'LdrLoadDll': 'This is a low-level function to load a DLL into a process, just like LoadLibrary. Normal programs use LoadLibrary, and the presence of this import may indicate a program that is attempting to be stealthy.',
'LoadLibrary': 'This is the standard fucntion to load a DLL into a process at runtime.',
'LoadResource': 'This function loads a resource from a PE file into memory. Malware sometimes uses resources to store strings, configuration information, or other malicious files.',
'LsaEnumerateLogonSessions': 'This function is used to enumerate through logon sessions on the current system, which can be used as part of a credential stealer.',
'MapViewOfFile': 'This function is used to map a file into memory and makes the contents of the file accessible via memory addresses. Launchers, loaders, and injectors use this function to read and modify PE files. By using MapViewOfFile, the malware can avoid using WriteFile to modify the contents of a file.',
'MapVirtualKey': 'This function is used to translate a virtual-key code into a character value. It is often used by keylogging malware.',
'Module32First/Module32Next': 'This function is used to enumerate through modules loaded into a process. Injectors use this function to determine where to inject code.',
'NetScheduleJobAdd': 'This function submits a request for a program to be run at a specified date and time. Malware can use NetScheduleJobAdd to run a different program. This is an important indicator to see the program that is scheduled to run at future time.',
'NetShareEnum': 'This function is used to enumerate network shares.',
'NtQueryDirectoryFile': 'This function returns information about files in a directory. Rootkits commonly hook this function in order to hide files.',
'NtQueryInformationProcess': 'This function is used to return various information about a specified process. This function is sometimes used as an anti-debugging technique because it can return the same information as CheckRemoteDebuggerPresent.',
'NtSetInformationProcess': 'This function is used to change the privilege level of a program or to bypass Data Execution Prevention (DEP).',
'OpenMutex': 'This function opens a handle to a mutual exclusion object that can be used by malware to ensure that only a single instance of malware is running on a system at any given time. Malware often uses fixed names for mutexes, which can be good host-based indicators.',
'OpenProcess': 'This function is used to open a handle to another process running on the system. This handle can be used to read and write to the other process memory or to inject code into the other process.',
'OutputDebugString': 'This function is used to output a string to a debugger if one is attached. This can be used as an anti-debugging technique.',
'PeekNamedPipe': 'This function is used to copy data from a named pipe without removing data from the pipe. This function is popular with reverse shells.',
'Process32First': 'This function is used to begin enumerating processes from a previous call to CreateToolhelp32Snapshot. Malware often enumerates through processes to find a process into which to inject.',
'Process32Next': 'This function is used to begin enumerating processes from a previous call to CreateToolhelp32Snapshot. Malware often enumerates through processes to find a process into which to inject.',
'QueueUserAPC': 'This function is used to execute code for a different thread. Malware sometimes uses QueueUserAPC to inject code into another process.',
'ReadProcessMemory': 'This function is used to read the memory of a remote process.',
'recv': 'This function is used to receive data from a remote machine. Malware often uses this function to receive data from a remote command-and-control server.',
'RegCreateKey': 'This function is used to create a handle to a new registry key for reading and editing. Registry keys are sometimes written as a way for software to achieve persistence on a host. The registry also contains a whole host of operating system and application setting information.',
'RegisterHotKey': 'This function is used to register a handler to be notified anytime a user enters a particular key combination (like CTRL-ALT-J), regardless of which window is active when the user presses the key combination. This function is sometimes used by spyware that remains hidden from the user until the key combination is pressed.',
'RegOpenKey': 'This function is used to open a handle to a registry key for reading and editing. Registry keys are sometimes written as a way for software to achieve persistence on a host. The registry also contains a whole host of operating system and application setting information.',
'ResumeThread': 'This function is used to resume a previously suspended thread. ResumeThread is used as part of several injection techniques.',
'RtlCreateRegistryKey': 'This function is used to create a registry from kernel-mode code.',
'RtlWriteRegistryValue': 'This function is used to write a value to the registry from kernel-mode code.',
'SamIConnect': 'This function is used to connect to the Security Account Manager (SAM) in order to make future calls that access credential information. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
'SamIGetPrivateData': 'This function is used to query the private information about a specific user from the Security Account Manager (SAM) database. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
'SamQueryInformationUse': 'This function is used to query information about a specific user in the Security Account Manager (SAM) database. Hash-dumping programs access the SAM database in order to retrieve the hash of users` login passwords.',
'send': 'This function is used to send data to a remote machine. It is often used by malwares to send data to a remote command-and-control server.',
'SetFileTime': 'This function is used to modify the creation, access, or last modified time of a file. Malware often uses this function to conceal malicious activity.',
'SetThreadContext': 'This function is used to modify the context of a given thread. Some injection techniques use SetThreadContext.',
'SetWindowsHookEx': 'This function is used to set a hook function to be called whenever a certain event is called. Commonly used with keyloggers and spyware, this function also provides an easy way to load a DLL into all GUI processes on the system. This function is sometimes added by the compiler.',
'SfcTerminateWatcherThread': 'This function is used to disable Windows file protection and modify files that otherwise would be protected.',
'ShellExecute': 'This function is used to execute another program.',
'StartServiceCtrlDispatcher': 'This function is used by a service to connect the main thread of the process to the service control manager. Any process that runs as a service must call this function within 30 seconds of startup. Locating this function in malware will tell that the function should be run as a service.',
'SQLConnect': 'This function establishes a connection with a driver and data source to allow for data to be shared with the driver/data source.',
'SuspendThread': 'This function is used to suspend a thread so that it stops running. Malware will sometimes suspend a thread in order to modify it by performing code injection.',
'System': 'This function is used to run another program provided by some C runtime libraries. On Windows, this function serves as a wrapper function to CreateProcess.',
'Thread32First/Thread32Next': 'This function is used to iterate through the threads of a process. Injectors use these functions to find an appropriate thread into which to inject.',
'ThunRTMain': 'Thsi function is used as the entry point to a visual basic executable. This indicates malware with visual basic functionality.',
'Toolhelp32ReadProcessMemory': 'This function is used to read the memory of a remote process.',
'URLDownloadToFile': 'This function is used to download a file from a web server and save it to disk. This function is popular with downloaders because it implements all the functionality of a downloader in one function call.',
'VirtualAllocEx': 'This function is a memory-allocation routine that can allocate memory in a remote process. Malware sometimes uses VirtualAllocEx as part of process injection.',
'VirtualProtectEx': 'This function is used to change the protection on a region of memory. Malware may use this function to change a read-only section of memory to an executable.',
'WideCharToMultiByte': 'This function is used to convert a Unicode string into an ASCII string.',
'WinExec': 'This function is used to execute another program.',
'WriteProcessMemory': 'This function is used to write data to a remote process. Malware uses WriteProcessMemory as part of process injection.',
'WSAStartup': 'This function is used to initialize low-level network functionality. Finding calls to WSAStartup can often be an easy way to locate the start of network related functionality.',
'Zombie_AddRef': 'This function is used to make a call to a visual basic subroutine. This indicates malware with visual basic functionality.'
}
# constant for an unknown import by ordinal
ORDINAL_DESC = 'Ordinal is decoded at runtime. To see ordinal mapping, Download the DLL and use the parse_exports() method of the PE class.'
| 119.052632 | 354 | 0.780858 |
3f6145c13e10fe4a1dbf8c0b4288b82e127765e5 | 4,046 | py | Python | mqttVec.py | Hamlet3000/mqttVec | 65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67 | [
"CC0-1.0"
] | null | null | null | mqttVec.py | Hamlet3000/mqttVec | 65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67 | [
"CC0-1.0"
] | null | null | null | mqttVec.py | Hamlet3000/mqttVec | 65b02446d23ce7c4583b4bf5c7cbe7a84cab0c67 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import anki_vector
import paho.mqtt.client as mqtt
import time
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == "__main__":
main()
| 31.364341 | 108 | 0.579832 |
3f6337b447bfd743f2d66c927077a80e24dcc381 | 428 | py | Python | examples/scanner_ibeacon_example.py | hbcho/beacontools1 | 82606d871c72bccb8962f50cb374595bcc2ab77d | [
"MIT"
] | null | null | null | examples/scanner_ibeacon_example.py | hbcho/beacontools1 | 82606d871c72bccb8962f50cb374595bcc2ab77d | [
"MIT"
] | null | null | null | examples/scanner_ibeacon_example.py | hbcho/beacontools1 | 82606d871c72bccb8962f50cb374595bcc2ab77d | [
"MIT"
] | null | null | null | import time
from beacontools import BeaconScanner, IBeaconFilter
# scan for all iBeacon advertisements from beacons with the specified uuid
scanner = BeaconScanner(callback,
device_filter=IBeaconFilter(uuid="e2c56db5-dffb-48d2-b060-d0f5a71096e0")
)
scanner.start()
time.sleep(10)
scanner.stop()
| 30.571429 | 76 | 0.764019 |
3f69604976a8b164013d06794b381ad086d3bb5d | 499 | py | Python | desafio61.py | rafarbop/Python | e304993a6c73eacd8fffd7c67286206516e5faea | [
"MIT"
] | null | null | null | desafio61.py | rafarbop/Python | e304993a6c73eacd8fffd7c67286206516e5faea | [
"MIT"
] | null | null | null | desafio61.py | rafarbop/Python | e304993a6c73eacd8fffd7c67286206516e5faea | [
"MIT"
] | null | null | null | # Desafio 61 Curso em Video Python
# By Rafabr
from estrutura_modelo import cabecalho, rodape
# Print the banner, read the first term and common difference (re-prompting
# until both parse as floats), then print the first 10 terms of the
# arithmetic progression.
cabecalho(61, "Termos de uma Progresso Aritmtica - II")
while True:
    try:
        p0 = float(input('Digite o Termo inicial da PA: '))
        r = float(input('Digite a razo da PA: '))
    except ValueError:
        # Bad input: report and ask again.
        print('Voe digitou um valor indevido!\n')
    else:
        break
print()
# Terms 1..10: term(n) = p0 + (n-1)*r
for n in range(1, 11):
    print(f'Termo {n}:'.ljust(10) + f'{p0 + (n-1)*r}')
rodape()
| 18.481481 | 59 | 0.607214 |
3f69df2d4bab9abaeedc1a340100793f3dcde991 | 485 | py | Python | setup.py | leandron/steinlib | bbc0295fb1ebf8dce7d06c750db126f6240b4617 | [
"MIT"
] | 4 | 2017-04-02T14:14:32.000Z | 2021-10-12T23:22:17.000Z | setup.py | leandron/steinlib | bbc0295fb1ebf8dce7d06c750db126f6240b4617 | [
"MIT"
] | null | null | null | setup.py | leandron/steinlib | bbc0295fb1ebf8dce7d06c750db126f6240b4617 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packages needed only for running the test suite (used via tests_require).
tests_require = [
    'cov-core',
    'mock',
    'nose2',
]

# Standard setuptools metadata for the `steinlib` package; tests are
# collected through nose2's collector (see test_suite below).
setup(name='steinlib',
      version='0.1',
      description='Python bindings for Steinlib format.',
      url='http://github.com/leandron/steinlib',
      author='Leandro Nunes',
      author_email='leandron85@gmail.com',
      license='MIT',
      packages=['steinlib'],
      tests_require=tests_require,
      test_suite='nose2.collector.collector',
      zip_safe=False)
| 24.25 | 57 | 0.618557 |
3f6a18b0d4c80fcdd062def647e4e3d88b2df3b9 | 55,602 | py | Python | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | 3 | 2021-03-10T00:34:18.000Z | 2021-10-14T02:52:41.000Z | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | null | null | null | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | null | null | null | from pxr import *
import os, os.path
import numpy
import re
import usdUtils
import math
import imp
usdStageWithFbxLoaded = True
try:
imp.find_module('fbx')
import fbx
except ImportError:
usdUtils.printError("Failed to import fbx module. Please install FBX Python bindings from http://www.autodesk.com/fbx and add path to FBX Python SDK to your PYTHONPATH")
usdStageWithFbxLoaded = False
def usdStageWithFbx(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
    """Convert an FBX file to a USD stage.

    Returns the stage produced by FbxConverter.makeUsdStage(), or None when
    either the fbx Python bindings failed to import (usdStageWithFbxLoaded is
    False) or the conversion raised ConvertError.  Any other exception
    propagates to the caller unchanged.
    """
    # Bail out early if the fbx module could not be imported at module load.
    if not usdStageWithFbxLoaded:
        return None
    try:
        fbxConverter = FbxConverter(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose)
        return fbxConverter.makeUsdStage()
    except ConvertError:
        # Conversion failures are reported by returning None; the original
        # bare `except: raise` was a no-op (uncaught exceptions propagate
        # anyway) and the trailing `return None` after it was unreachable.
        return None
| 45.168156 | 176 | 0.635229 |
3f6b95561ed162423b6adee3e5e40b725abe8dde | 3,291 | py | Python | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautokn | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | [
"MIT"
] | 2 | 2021-02-19T19:55:21.000Z | 2021-10-13T23:55:56.000Z | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautoknit | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | [
"MIT"
] | null | null | null | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautoknit | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | [
"MIT"
] | null | null | null | # PYTHON STANDARD LIBRARY IMPORTS ----------------------------------------------
from __future__ import absolute_import
from __future__ import division
# LOCAL MODULE IMPORTS ---------------------------------------------------------
from ghautoknit.StoredConstraint import StoredConstraint
# ALL LIST ---------------------------------------------------------------------
__all__ = [
"EmbeddedConstraint"
]
# ACTUAL CLASS -----------------------------------------------------------------
# MAIN -------------------------------------------------------------------------
if __name__ == '__main__':
pass
| 35.387097 | 80 | 0.485567 |
3f6d7159e38e2302b0b79887ec33606e37733f75 | 1,516 | py | Python | vr/server/tests/test_build.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | null | null | null | vr/server/tests/test_build.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 3 | 2016-12-15T21:55:02.000Z | 2019-02-13T11:43:29.000Z | vr/server/tests/test_build.py | isabella232/vr.server | 705511f8176bda0627be1ae86a458178589ee3db | [
"MIT"
] | 2 | 2017-01-16T09:31:03.000Z | 2022-03-26T09:21:36.000Z | import tempfile
import pytest
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from django.core.files import File
from vr.server import models
from vr.server.tests import randurl
from vr.common.utils import randchars
# Apply BOTH fixtures to every test in this module.  The original code
# assigned `pytestmark` twice, so the 'postgresql' mark was silently
# overwritten by the 'gridfs' one; pytest documents a list as the way to
# attach multiple module-level marks.
pytestmark = [
    pytest.mark.usefixtures('postgresql'),
    pytest.mark.usefixtures('gridfs'),
]
| 25.694915 | 70 | 0.622691 |
3f6f2cf82e789df4f6fa6a684b85e6cb348c35e2 | 629 | py | Python | apps/api/v1/pagination.py | asmuratbek/oobamarket | 1053976a13ea84b9aabfcbbcbcffd79549ce9538 | [
"MIT"
] | null | null | null | apps/api/v1/pagination.py | asmuratbek/oobamarket | 1053976a13ea84b9aabfcbbcbcffd79549ce9538 | [
"MIT"
] | 7 | 2020-06-05T23:36:01.000Z | 2022-01-13T01:42:07.000Z | apps/api/v1/pagination.py | asmuratbek/oobamarket | 1053976a13ea84b9aabfcbbcbcffd79549ce9538 | [
"MIT"
] | null | null | null | from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
| 24.192308 | 81 | 0.779014 |
3f715eb5609a277ea4d93cce4d190d4b920a7477 | 7,796 | py | Python | GraphOfDocs_Representation/graph_algos.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | GraphOfDocs_Representation/graph_algos.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | GraphOfDocs_Representation/graph_algos.py | imis-lab/book-chapter | 8260a60ec91dd29616eeed80f34bdea00fb73cd7 | [
"MIT"
] | null | null | null | import time
import json
import traceback
import numpy as np
from statistics import mean
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
# Automatic cleanup of the created graph of this class.
    def __exit__(self, exc_type, exc_value, tb):
        """Context-manager exit hook.

        If the ``with`` block raised, print the traceback for visibility.
        Implicitly returns None (falsy), so the exception is NOT suppressed
        and still propagates to the caller after being printed.
        """
        if exc_type is not None:
            traceback.print_exception(exc_type, exc_value, tb)
| 41.913978 | 124 | 0.608261 |
3f730b00ede0a815c4c62737f803ff84e093f24f | 3,124 | py | Python | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | rtobar/askapsoft | 6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | 1 | 2020-06-18T08:37:43.000Z | 2020-06-18T08:37:43.000Z | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | ATNF/askapsoft | d839c052d5c62ad8a511e58cd4b6548491a6006f | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | ATNF/askapsoft | d839c052d5c62ad8a511e58cd4b6548491a6006f | [
"BSL-1.0",
"Apache-2.0",
"OpenSSL"
] | null | null | null | # regression tests with gridders taking w-term into account
# some fixed parameters are given in wtermtest_template.in
from synthprogrunner import *
def analyseResult(spr, checkWeights=True):
    '''
    Validate the images produced by a w-term imaging run.

    spr - synthesis program runner (to run imageStats)
    checkWeights - if True, also require the weight image to be constant
                   (the expectation stated below for WProject/WStack)

    throws exceptions if something is wrong, otherwise just
    returns
    '''
    # Source offset: 0.006 rad converted to degrees, applied on both axes.
    src_offset = 0.006/math.pi*180.
    psf_peak=[-172.5,-45]
    true_peak=sinProjection(psf_peak,src_offset,src_offset)
    # Restored image: peak must sit at the true source position, ~1 Jy flux.
    stats = spr.imageStats('image.field1.restored')
    print "Statistics for restored image: ",stats
    disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
    if disterr > 8:
        raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    if abs(stats['peak']-1.)>0.1:
        raise RuntimeError, "Peak flux in the image is notably different from 1 Jy, F=%f" % stats['peak']
    # Model image: position check only.
    stats = spr.imageStats('image.field1')
    print "Statistics for modelimage: ",stats
    disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
    if disterr > 8:
        raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    # PSF image: peak compared against psf_peak.
    # NOTE(review): the error messages below format true_peak even though the
    # distance is measured against psf_peak -- worth confirming the message.
    stats = spr.imageStats('psf.field1')
    print "Statistics for psf image: ",stats
    disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
    if disterr > 8:
        raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    # Preconditioned PSF: position plus unit peak with a tighter 1% tolerance.
    stats = spr.imageStats('psf.image.field1')
    print "Statistics for preconditioned psf image: ",stats
    disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
    if disterr > 8:
        raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
    if abs(stats['peak']-1.)>0.01:
        raise RuntimeError, "Peak flux in the preconditioned psf image is notably different from 1.0, F=%f" % stats['peak']
    if checkWeights:
        # Weight image: rms, peak and median must all agree (flat image).
        stats = spr.imageStats('weights.field1')
        print "Statistics for weight image: ",stats
        if abs(stats['rms']-stats['peak'])>0.1 or abs(stats['rms']-stats['median'])>0.1 or abs(stats['peak']-stats['median'])>0.1:
            raise RuntimeError, "Weight image is expected to be constant for WProject and WStack gridders"
    # Residual image: rms and median must both be small.
    stats = spr.imageStats('residual.field1')
    print "Statistics for residual image: ",stats
    if stats['rms']>0.01 or abs(stats['median'])>0.0001:
        raise RuntimeError, "Residual image has too high rms or median. Please verify"
spr.runSimulator()
spr.addToParset("Cimager.gridder = WProject")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WStack")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WProject")
spr.addToParset("Cimager.gridder.snapshotimaging = true")
spr.addToParset("Cimager.gridder.snapshotimaging.wtolerance = 500")
spr.runImager()
analyseResult(spr,False)
| 42.216216 | 142 | 0.717029 |
3f731bc8d56706afd6b8d8a2244161c707b604bd | 6,047 | py | Python | manage/fuzzytranslation.py | Acidburn0zzz/browser-update | fed7b4c52deccd582fcf8b8cca4809607bbb32cd | [
"MIT"
] | 2 | 2017-10-06T15:53:23.000Z | 2017-10-06T15:53:38.000Z | manage/fuzzytranslation.py | Acidburn0zzz/browser-update | fed7b4c52deccd582fcf8b8cca4809607bbb32cd | [
"MIT"
] | null | null | null | manage/fuzzytranslation.py | Acidburn0zzz/browser-update | fed7b4c52deccd582fcf8b8cca4809607bbb32cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 12 14:21:31 2016
@author: TH
"""
#%%
import polib
#%%
#old (translated) string
#new renamed string
pairs="""
An initiative by web designers to inform users about browser-updates
An initiative by websites to inform users to update their web browser
If you are on a computer that is maintained by an admin and you cannot install a new browser, ask your admin about it.
Ask your admin to update your browser if you cannot install updates yourself.
blaasdasdfsdaf
faselsdfsadf""";
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/update.mo')
#%%
pairs="""aaa
bbb
Subtle
Unobtrusive
bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
po = polib.pofile('lang/de_DE/LC_MESSAGES/site.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print("replacing", entry.msgid[:10], "with",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/de_DE/LC_MESSAGES/site.mo')
#%%
pot = polib.pofile('lang/update.pot')
for entry in pot:
print (entry.msgid, entry.msgstr)
#%%
#%% display old translations
po = polib.pofile('lang/de_DE/LC_MESSAGES/update.po')
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
print(entry.msgid)
#%%
#%% getting files
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
for p in paths:
print("updating %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/site.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/site.mo'%p)
#%% updating all update.po
for p in paths:
print("updating %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/update.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/update.mo'%p)
#%%
pairs="""aaa
bbb
Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. Please <a%s>update</a>"
Optionally include up to two placeholders "%s" which will be replaced with the browser version and contents of the link tag. Example: "Your browser (%s) is old. Please <a%s>update</a>"
bla
fasel"""
pairs=pairs.replace("\r","")[1:-1].split("\n\n")
mappings={s.split("\n")[0]:s.split("\n")[1] for s in pairs}
#%%
from glob import glob
paths = glob('lang/*/LC_MESSAGES/')
paths=[p[5:10] for p in paths]
paths
#%% updating all site.po
for p in paths:
print("customize %s"%p)
try:
po = polib.pofile('lang/%s/LC_MESSAGES/customize.po'%p)
except OSError:
print("no file found")
continue
valid_entries = [e for e in po if not e.obsolete]
for entry in valid_entries:
#print(entry.msgid)
if entry.msgid in mappings:
print(" ", entry.msgid[:10], "-->",mappings[entry.msgid][:10])
entry.msgid=mappings[entry.msgid]
po.save()
po.save_as_mofile('lang/%s/LC_MESSAGES/customize.mo'%p)
#%% extract strings
import subprocess
subprocess.call(['xgettext',
"header.php",
"footer.php",
"update-browser.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-update",
"--language=PHP",
"--output=lang/update.pot"])
#%% extract site strings
import subprocess
subprocess.call(['xgettext',
"blog.php",
"stat.php",
"index.php",
"contact.php",
"update.testing.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-site",
"--language=PHP",
"--output=lang/site.pot"])
#%% extract customize strings
import subprocess
subprocess.call(['xgettext',
"customize.php",
"--keyword=T_gettext",
"--keyword=T_",
"--keyword=T_ngettext:1,2",
"--from-code=utf-8",
"--package-name=browser-update-customize",
"--language=PHP",
"--output=lang/customize.pot"])
#%% upload new sources for translations
import subprocess
subprocess.call(['crowdin-cli-py', 'upload', 'sources'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources','--config','manage\crowdin.yaml'])
#subprocess.call(['java', '-jar', 'manage\crowdin-cli.jar', 'upload', 'sources']) | 29.21256 | 197 | 0.599471 |
58ac40be7eaf9e54cb6bdb3fcb14931b75949783 | 2,401 | py | Python | scripts/test_template.py | 1466899531/auto_api_test | cb0f474394ef776d4b7710821b74065307a551b2 | [
"MIT"
] | 16 | 2021-01-21T05:07:15.000Z | 2022-02-20T06:34:28.000Z | scripts/test_template.py | liucong233/auto_api_test | 303dc2962ad9b9917cb6a31e311e4ca711f7c7bf | [
"MIT"
] | null | null | null | scripts/test_template.py | liucong233/auto_api_test | 303dc2962ad9b9917cb6a31e311e4ca711f7c7bf | [
"MIT"
] | 7 | 2021-01-21T05:07:42.000Z | 2022-01-04T03:56:25.000Z | # -*- coding:utf-8 -*-
"""
@File : test_template
@Author : Chen
@Contact : nonevxx@gmail.com
@Date : 2021/1/20 20:09
@Desc :
"""
#
import pytest
import requests
from time import sleep
from api.template_api import TemplateAPI
from tools.get_log import GetLog
from tools.read_file import read_json
import allure
#
log = GetLog.get_log()
| 28.583333 | 92 | 0.634736 |
58af6add6122e05924d2e3ff9cd5a71f9446a5c7 | 5,090 | py | Python | dockerfilegenerator/generator.py | ccurcanu/aws-serverless-dockerfile-generator | cb1e272d21337074870c0b1f17c7535cd1e7c217 | [
"Apache-2.0"
] | 2 | 2019-08-05T02:24:56.000Z | 2020-04-30T22:16:00.000Z | dockerfilegenerator/generator.py | ccurcanu/aws-serverless-dockerfile-generator | cb1e272d21337074870c0b1f17c7535cd1e7c217 | [
"Apache-2.0"
] | 1 | 2020-04-11T16:34:08.000Z | 2020-04-11T19:29:26.000Z | dockerfilegenerator/generator.py | ccurcanu/aws-serverless-dockerfile-generator | cb1e272d21337074870c0b1f17c7535cd1e7c217 | [
"Apache-2.0"
] | 1 | 2019-08-05T02:24:59.000Z | 2019-08-05T02:24:59.000Z | # -*- coding: utf-8 -*-
import botocore.exceptions
import logging
import dockerfilegenerator.lib.constants as constants
import dockerfilegenerator.lib.exceptions as exceptions
import dockerfilegenerator.lib.versions as versions
import dockerfilegenerator.lib.jsonstore as jsonstore
import dockerfilegenerator.lib.s3store as s3store
import dockerfilegenerator.lib.github as github
logger = logging.getLogger()
TRACKED_TOOLS = {
"terraform": versions.get_latest_hashicorp_terraform_version,
"packer": versions.get_latest_hashicorp_packer_version,
"go": versions.get_latest_golango_go_version
}
class DockerfileGeneratorLambda(UtilsMixin):
| 39.765625 | 78 | 0.662279 |
58af85873ae33fb22513395345bd12fb92d5791b | 1,179 | py | Python | ajustes_UM/tesis/main/urls.py | abelgonzalez/ajustes | f6f99aea18cfb82750805321abfc822d8a6ec5ed | [
"MIT"
] | 1 | 2015-03-04T13:04:33.000Z | 2015-03-04T13:04:33.000Z | ajustes_UM/tesis/main/urls.py | abelgonzalez/ajustes | f6f99aea18cfb82750805321abfc822d8a6ec5ed | [
"MIT"
] | null | null | null | ajustes_UM/tesis/main/urls.py | abelgonzalez/ajustes | f6f99aea18cfb82750805321abfc822d8a6ec5ed | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url
from main import views
urlpatterns = patterns('',
url(r'^$', views.inicio, name='inicio'),
url(r'^acerca/', views.acerca, name='acerca'),
url(r'^contacto/', views.contacto, name='contacto'),
url(r'^autenticar/', views.autenticar, name='autenticar'),
url(r'^cerrar_sesion/', views.cerrar_sesion, name='cerrar_sesion'),
url(r'^tiempo/', views.tiempo, name='tiempo'),
url(r'^perfil/(?P<usuario>\d+)/$', views.perfil, name='perfil'),
url(r'^imprimir_ajuste/', views.imprimir_ajuste, name='imprimir_ajuste'),
url(r'^imprimir_ajusteId/(?P<ajusteEstudianteId>\d+)/$', views.imprimir_ajusteId,
name='imprimir_ajusteId'),
url(r'^imprimir_expediente/', views.imprimir_expediente, name='imprimir_expediente'),
url(r'^imprimir_expedienteId/(?P<expedienteEstudianteId>\d+)/$', views.imprimir_expedienteId,
name='imprimir_expedienteId'),
) | 56.142857 | 116 | 0.546226 |
58b28e8645b762a35a626046be48d346a6bd215a | 15,595 | py | Python | test/test_views.py | Nemoden/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | [
"BSD-3-Clause"
] | 53 | 2015-02-01T14:06:48.000Z | 2022-01-02T15:46:00.000Z | test/test_views.py | Aaron1992/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | [
"BSD-3-Clause"
] | null | null | null | test/test_views.py | Aaron1992/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | [
"BSD-3-Clause"
] | 23 | 2015-01-04T08:11:27.000Z | 2019-11-24T13:18:25.000Z | # -*- coding: utf-8 -*-
"""
Simblin Test Views
~~~~~~~~~~~~~~~~~~
Test the different views of the blogging application.
:copyright: (c) 2010 by Eugen Kiss.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import flask
from simblin.extensions import db
from simblin.models import Post, Tag, Category, post_tags, post_categories, Admin
from nose.tools import assert_equal, assert_true, assert_false
from test import TestCase
| 35.93318 | 81 | 0.58833 |
58b31cded44ccfc6677efd1c2715c62d51feaad9 | 14,174 | py | Python | freeclimb/models/message_result.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | null | null | null | freeclimb/models/message_result.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | 6 | 2020-03-03T20:14:26.000Z | 2021-12-06T22:11:15.000Z | freeclimb/models/message_result.py | FreeClimbAPI/python-sdk | 1ec89eddc0069a39989579552b979a9d21418117 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
FreeClimb API
FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@freeclimb.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from freeclimb.configuration import Configuration
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MessageResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MessageResult):
return True
return self.to_dict() != other.to_dict()
| 31.851685 | 667 | 0.605686 |
58b39d610eae8b36afa5ec0f450ede4efe4c78d4 | 342 | py | Python | blog/views.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | blog/views.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | blog/views.py | artkapl/django-blog-project | 16494465042dd6846f3a2cd560c0cfe7737cc8e0 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Post
| 24.428571 | 95 | 0.701754 |
58b410177d19ca32957d1f156f14c3e7bd5b9089 | 718 | bzl | Python | az/private/common/utils.bzl | jullianoacqio/rules_microsoft_azure | 85f8d633db46c7c6aefd5c9c1856aa57473d10fb | [
"Apache-2.0"
] | 4 | 2020-08-05T18:21:43.000Z | 2020-11-10T19:42:48.000Z | terraform/private/common/utils.bzl | acqio/rules_hashicorp_terraform | add89d62abf3739dbd8908a43df366511027e4fc | [
"Apache-2.0"
] | 3 | 2020-09-01T14:35:18.000Z | 2021-03-29T17:27:12.000Z | az/private/common/utils.bzl | acqio/rules_microsoft_azure | 85f8d633db46c7c6aefd5c9c1856aa57473d10fb | [
"Apache-2.0"
] | 7 | 2020-08-04T20:14:10.000Z | 2021-02-18T17:10:55.000Z |
utils = struct(
resolve_stamp = _resolve_stamp,
check_stamping_format = _check_stamping_format,
)
| 28.72 | 62 | 0.628134 |
58b4a5438c1537dcf99f56657476da7aa2cae99e | 4,166 | py | Python | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | [
"MIT"
] | null | null | null | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | [
"MIT"
] | null | null | null | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | [
"MIT"
] | null | null | null | from config import Config
import flask
import json
import os
from ssdp import SSDP
from threading import Thread
import urllib3
config = None
config_file_paths = [
os.path.dirname(os.path.realpath(__file__)) + "/config/default.cfg.local",
"/etc/hue-adapter/default.cfg.local",
]
for config_file_path in config_file_paths:
if os.path.isfile(config_file_path):
config = Config(file(config_file_path))
if not config:
print "Cannot find configuration file"
exit(1)
app = flask.Flask(__name__)
if __name__ == "__main__":
ssdp = SSDP(config.web.addr, config.web.port)
ssdp_thread = Thread(target=ssdp.run)
ssdp_thread.setDaemon(True)
ssdp_thread.start()
app.run(host=config.web.addr, port=config.web.port)
| 26.877419 | 87 | 0.520163 |
58b676c8df734180c643826f2bc368889a0790b4 | 2,820 | py | Python | safe/geokdbush/kdbushTest.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 1 | 2019-05-06T19:40:43.000Z | 2019-05-06T19:40:43.000Z | safe/geokdbush/kdbushTest.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 9 | 2019-12-04T22:57:46.000Z | 2022-02-10T07:15:11.000Z | safe/geokdbush/kdbushTest.py | s-a-f-e/backend | 6018f51466df9abd58f25729d91856842eee9509 | [
"MIT"
] | 3 | 2019-05-01T20:41:33.000Z | 2019-10-03T20:57:00.000Z | from kdbush import KDBush
# test data
points = [
[54,1],[97,21],[65,35],[33,54],[95,39],[54,3],[53,54],[84,72],[33,34],[43,15],[52,83],[81,23],[1,61],[38,74],
[11,91],[24,56],[90,31],[25,57],[46,61],[29,69],[49,60],[4,98],[71,15],[60,25],[38,84],[52,38],[94,51],[13,25],
[77,73],[88,87],[6,27],[58,22],[53,28],[27,91],[96,98],[93,14],[22,93],[45,94],[18,28],[35,15],[19,81],[20,81],
[67,53],[43,3],[47,66],[48,34],[46,12],[32,38],[43,12],[39,94],[88,62],[66,14],[84,30],[72,81],[41,92],[26,4],
[6,76],[47,21],[57,70],[71,82],[50,68],[96,18],[40,31],[78,53],[71,90],[32,14],[55,6],[32,88],[62,32],[21,67],
[73,81],[44,64],[29,50],[70,5],[6,22],[68,3],[11,23],[20,42],[21,73],[63,86],[9,40],[99,2],[99,76],[56,77],
[83,6],[21,72],[78,30],[75,53],[41,11],[95,20],[30,38],[96,82],[65,48],[33,18],[87,28],[10,10],[40,34],
[10,20],[47,29],[46,78]]
ids = [
97, 74, 95, 30, 77, 38, 76, 27, 80, 55, 72, 90, 88, 48, 43, 46, 65, 39, 62, 93, 9, 96, 47, 8, 3, 12, 15, 14, 21, 41, 36, 40, 69, 56, 85, 78, 17, 71, 44,
19, 18, 13, 99, 24, 67, 33, 37, 49, 54, 57, 98, 45, 23, 31, 66, 68, 0, 32, 5, 51, 75, 73, 84, 35, 81, 22, 61, 89, 1, 11, 86, 52, 94, 16, 2, 6, 25, 92,
42, 20, 60, 58, 83, 79, 64, 10, 59, 53, 26, 87, 4, 63, 50, 7, 28, 82, 70, 29, 34, 91]
coords = [
10,20,6,22,10,10,6,27,20,42,18,28,11,23,13,25,9,40,26,4,29,50,30,38,41,11,43,12,43,3,46,12,32,14,35,15,40,31,33,18,
43,15,40,34,32,38,33,34,33,54,1,61,24,56,11,91,4,98,20,81,22,93,19,81,21,67,6,76,21,72,21,73,25,57,44,64,47,66,29,
69,46,61,38,74,46,78,38,84,32,88,27,91,45,94,39,94,41,92,47,21,47,29,48,34,60,25,58,22,55,6,62,32,54,1,53,28,54,3,
66,14,68,3,70,5,83,6,93,14,99,2,71,15,96,18,95,20,97,21,81,23,78,30,84,30,87,28,90,31,65,35,53,54,52,38,65,48,67,
53,49,60,50,68,57,70,56,77,63,86,71,90,52,83,71,82,72,81,94,51,75,53,95,39,78,53,88,62,84,72,77,73,99,76,73,81,88,
87,96,98,96,82]
index = KDBush(points)
result = index.range(20, 30, 50, 70)
print(result) # [60, 20, 45, 3, 17, 71, 44, 19, 18, 15, 69, 90, 62, 96, 47, 8, 77, 72]
for id in result:
p = points[id]
if p[0] < 20 or p[0] > 50 or p[1] < 30 or p[1] > 70:
print("FAIL")
for id in result:
p = points[id]
if id not in result and p[0] >= 20 and p[0] <= 50 and p[1] >= 30 and p[1] <= 70:
print("FAIL: outside point not in range")
index2 = KDBush(points)
qp = [50, 50]
r = 20
r2 = 20 * 20
result = index.within(qp[0], qp[1], r)
print(result) # [60, 6, 25, 92, 42, 20, 45, 3, 71, 44, 18, 96]
for id in result:
p = points[id]
if (sqDist2(p, qp) > r2): print('FAIL: result point in range')
for id in result:
p = points[id]
if (id not in result and sqDist2(p, qp) <= r2):
print('FAIL: result point not in range')
| 46.229508 | 156 | 0.537234 |
58b7a94417cb2c171bbf2548469ad555f0dc6eca | 6,662 | py | Python | buckit/compiler.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | buckit/compiler.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | buckit/compiler.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
import subprocess
import platform
from constants import BUCKCONFIG_LOCAL
from configure_buck import update_config
def configure_compiler(project_root):
"""
Sets up .buckconfig.local in the root project with
basic c++/c compiler settings. More advanced probing
will probably be done in the future
"""
buckconfig_local = os.path.join(project_root, BUCKCONFIG_LOCAL)
logging.info("{bold}Detecting compiler{clear}")
current_platform = get_current_platform_flavor()
cc = detect_cc()
cxx = detect_cxx()
if not cc or not cxx:
logging.warn("Could not find clang or g++ in PATH")
return 0
c_standard = detect_c_standard(cc)
if c_standard:
cflags = [c_standard]
else:
cflags = []
cxx_standard = detect_cxx_standard(cxx)
if cxx_standard:
cxxflags = [cxx_standard]
else:
cxxflags = []
py2 = detect_py2()
py3 = detect_py3()
py2_include = detect_python_include(py2)
py2_libs = detect_python_libs(py2)
py3_include = detect_python_include(py3)
py3_libs = detect_python_libs(py3)
to_set = {
'cxx': {
'cflags': cflags + ['-pthread', '-g'],
'cxxflags': cxxflags + ['-pthread', '-g'],
'ldflags': ['-pthread'],
'cxx': [cxx],
'cc': [cc],
},
}
to_set['cxx#' + current_platform] = to_set['cxx'].copy()
to_set['cxx']['default_platform'] = current_platform
py2_settings = {
'interpreter': py2,
'includes': py2_include,
'libs': py2_libs,
}
py3_settings = {
'interpreter': py3,
'includes': py3_include,
'libs': py3_libs,
}
if py2:
to_set['python#py2'] = py2_settings
to_set['python#py2-%s' % current_platform] = py2_settings
if py3:
to_set['python#py3'] = py3_settings
to_set['python#py3-%s' % current_platform] = py3_settings
to_set['buckit'] = {'system_lib_paths': ','.join(get_system_lib_paths())}
update_config(project_root, buckconfig_local, to_set)
return 0
| 25.427481 | 80 | 0.564545 |
58b7d71f96b456407bfe5eac83c75c43ba5fd90a | 8,002 | py | Python | test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | null | null | null | test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | 14 | 2021-02-09T10:40:43.000Z | 2022-02-18T12:24:39.000Z | test/python/.dbwebb/test/suite.d/kmom06/analyzer/test_analyzer.py | AndreasArne/python-examination | a24297f3f73b181e64e744d0b8b52d88d03e844b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Contains testcases for the individual examination.
"""
import unittest
from io import StringIO
import os
import sys
from unittest.mock import patch
from examiner import ExamTestCase, ExamTestResult, tags
from examiner import import_module, find_path_to_assignment
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_PATH = find_path_to_assignment(FILE_DIR)
if REPO_PATH not in sys.path:
sys.path.insert(0, REPO_PATH)
# Path to file and basename of the file to import
main = import_module(REPO_PATH, "main")
if __name__ == '__main__':
runner = unittest.TextTestRunner(resultclass=ExamTestResult, verbosity=2)
unittest.main(testRunner=runner, exit=False)
| 28.784173 | 86 | 0.530242 |
58b8667325936944d69237ad194f47d738bc7912 | 831 | py | Python | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | null | null | null | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | null | null | null | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | 1 | 2022-02-20T18:43:45.000Z | 2022-02-20T18:43:45.000Z | from django.contrib import admin
from olha_boca.infratores.models import Infratores
# Register your models here.
admin.site.register(Infratores, InfratoresAdmin) | 33.24 | 84 | 0.688327 |
58b8a93616ab18e3bdc1bf278f2e0062041167f0 | 6,241 | py | Python | model.py | ogugugugugua/Cycle-Gan-Pytorch-Implementation | 119c7c8b3061a14f0ca988672458351d00f144aa | [
"MIT"
] | null | null | null | model.py | ogugugugugua/Cycle-Gan-Pytorch-Implementation | 119c7c8b3061a14f0ca988672458351d00f144aa | [
"MIT"
] | null | null | null | model.py | ogugugugugua/Cycle-Gan-Pytorch-Implementation | 119c7c8b3061a14f0ca988672458351d00f144aa | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import functools
import torch.nn as nn
from torch.nn import init
import torch.functional as F
from torch.autograd import Variable
print('ok')
print ('kkk')
# class te(nn.Module):
# def __init__(self):
# super(te,self).__init__()
# norm_layer=nn.InstanceNorm2d
# kw = 4
# padw = 1
# input_nc=3
# n_layers=3
# ndf=64
# use_bias = False
# sequence = [
# nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
# nn.LeakyReLU(0.2, True)
# ]
#
# nf_mult = 1
# nf_mult_prev = 1
# for n in range(1, n_layers):
# nf_mult_prev = nf_mult
# nf_mult = min(2**n, 8)
# sequence += [
# nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
# kernel_size=kw, stride=2, padding=padw, bias=use_bias),
# norm_layer(ndf * nf_mult),
# nn.LeakyReLU(0.2, True)
# ]
#
# nf_mult_prev = nf_mult
# nf_mult = min(2**n_layers, 8)
# sequence += [
# nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
# kernel_size=kw, stride=1, padding=padw, bias=use_bias),
# norm_layer(ndf * nf_mult),
# nn.LeakyReLU(0.2, True)
# ]
#
# sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
#
# self.model1 = nn.Sequential(*sequence)
# def forward(self,x):
# return self.model1(x)
| 36.497076 | 94 | 0.552155 |
58b91dc41ea5155b61915a8cc460140b8ef148b0 | 15,872 | py | Python | hansberger/analysis/migrations/0001_initial.py | 097475/hansberger | bb4ba1cbc410e7242a12f841e447b4d68f4298f6 | [
"MIT"
] | 1 | 2019-04-03T13:44:38.000Z | 2019-04-03T13:44:38.000Z | hansberger/analysis/migrations/0001_initial.py | sebastianoverdolini/hansberger | bb4ba1cbc410e7242a12f841e447b4d68f4298f6 | [
"MIT"
] | 4 | 2019-05-22T09:43:09.000Z | 2019-05-29T12:22:00.000Z | hansberger/analysis/migrations/0001_initial.py | 097475/hansberger | bb4ba1cbc410e7242a12f841e447b4d68f4298f6 | [
"MIT"
] | 2 | 2019-04-17T09:23:32.000Z | 2019-05-03T10:38:16.000Z | # Generated by Django 2.0.13 on 2019-06-27 17:04
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
| 99.823899 | 951 | 0.599609 |
58b94a18dc5fb864b68af5b8440bfcf6bdf51d21 | 1,589 | py | Python | src/DOMObjects/schema.py | villagertech/DOMObjects | 6a86caca5160d2488cc19823e71e9e7ba99a4a0c | [
"MIT"
] | null | null | null | src/DOMObjects/schema.py | villagertech/DOMObjects | 6a86caca5160d2488cc19823e71e9e7ba99a4a0c | [
"MIT"
] | null | null | null | src/DOMObjects/schema.py | villagertech/DOMObjects | 6a86caca5160d2488cc19823e71e9e7ba99a4a0c | [
"MIT"
] | null | null | null | __author__ = "Rob MacKinnon <rome@villagertech.com>"
__package__ = "DOMObjects"
__name__ = "DOMObjects.schema"
__license__ = "MIT"
| 33.104167 | 71 | 0.574575 |
58ba74567e6fec0a65ad5136fbd9ca609c0ebda8 | 416 | py | Python | Python/6 - kyu/6 kyu - Detect Pangram.py | danielbom/codewars | d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27 | [
"MIT"
] | null | null | null | Python/6 - kyu/6 kyu - Detect Pangram.py | danielbom/codewars | d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27 | [
"MIT"
] | null | null | null | Python/6 - kyu/6 kyu - Detect Pangram.py | danielbom/codewars | d45b5a813c6f1d952a50d22f0b2fcea4ef3d0e27 | [
"MIT"
] | null | null | null | # https://www.codewars.com/kata/detect-pangram/train/python
# My solution
import string
# ...
import string
# ...
import string
| 23.111111 | 80 | 0.658654 |
58bb3b75ffbc07bac010c5a20ea7da7ddc296fd6 | 3,709 | py | Python | L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | L1Trigger/L1TCalorimeter/python/customiseReEmulateCaloLayer2.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
| 44.686747 | 117 | 0.758425 |
58bb3bdee68016c8f1865176bbbb0531b4055727 | 855 | py | Python | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:49.000Z | 2021-01-08T06:57:49.000Z | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | null | null | null | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:52.000Z | 2021-01-08T06:57:52.000Z | """
1375. Substring With At Least K Distinct Characters
"""
| 28.5 | 91 | 0.512281 |
58bb7d04b96141208c9caee423f5f2553e1e7354 | 390 | py | Python | ex29_half.py | youknowone/learn-python3-thw-code-ko | 3b7fccaf3eed7427e437004cfe3c4908823f5e41 | [
"MIT"
] | null | null | null | ex29_half.py | youknowone/learn-python3-thw-code-ko | 3b7fccaf3eed7427e437004cfe3c4908823f5e41 | [
"MIT"
] | null | null | null | ex29_half.py | youknowone/learn-python3-thw-code-ko | 3b7fccaf3eed7427e437004cfe3c4908823f5e41 | [
"MIT"
] | null | null | null | people = 20
cats = 30
dogs = 15
if people < cats:
print(" ! !")
if people > cats:
print(" ! !")
if people < dogs:
print(" !")
if people > dogs:
print(" !")
dogs += 5
if people >= dogs:
print(" ")
if people <= dogs:
print(" .")
if people == dogs:
print(" .")
| 13 | 36 | 0.584615 |
58bbe52ab96a55b367459bffd53e878ab429b0e4 | 1,019 | py | Python | env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | """ Tests for the import manager. """
from traits.util.api import import_symbol
from traits.testing.unittest_tools import unittest
if __name__ == "__main__":
unittest.main()
#### EOF ######################################################################
| 22.644444 | 79 | 0.614328 |
58bc5c4ebc0423782cb9f4ff3dd73ea7e914bd8c | 7,692 | py | Python | cubecode/二阶段算法合集/python版/RubiksCube-TwophaseSolver-master/client_gui.py | YuYuCong/Color-recognition-of-Rubik-s-Cube | 35d5af5383ed56d38e596983aaeda98540fdb646 | [
"CC0-1.0"
] | 11 | 2018-07-28T03:20:26.000Z | 2022-02-18T07:36:35.000Z | cubecode/二阶段算法合集/python版/RubiksCube-TwophaseSolver-master/client_gui.py | technicianliu/Color-recognition-of-Rubik-s-Cube | 35d5af5383ed56d38e596983aaeda98540fdb646 | [
"CC0-1.0"
] | null | null | null | cubecode/二阶段算法合集/python版/RubiksCube-TwophaseSolver-master/client_gui.py | technicianliu/Color-recognition-of-Rubik-s-Cube | 35d5af5383ed56d38e596983aaeda98540fdb646 | [
"CC0-1.0"
] | 9 | 2018-07-28T03:20:29.000Z | 2021-05-09T05:54:30.000Z | # ################ A simple graphical interface which communicates with the server #####################################
from tkinter import *
import socket
import face
import cubie
# ################################## some global variables and constants ###############################################
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '8080'
width = 60 # width of a facelet in pixels
facelet_id = [[[0 for col in range(3)] for row in range(3)] for face in range(6)]
colorpick_id = [0 for i in range(6)]
curcol = None
t = ("U", "R", "F", "D", "L", "B")
cols = ("yellow", "green", "red", "white", "blue", "orange")
########################################################################################################################
# ################################################ Diverse functions ###################################################
def show_text(txt):
"""Displays messages."""
print(txt)
display.insert(INSERT, txt)
root.update_idletasks()
def create_facelet_rects(a):
"""Initializes the facelet grid on the canvas."""
offset = ((1, 0), (2, 1), (1, 1), (1, 2), (0, 1), (3, 1))
for f in range(6):
for row in range(3):
y = 10 + offset[f][1] * 3 * a + row * a
for col in range(3):
x = 10 + offset[f][0] * 3 * a + col * a
facelet_id[f][row][col] = canvas.create_rectangle(x, y, x + a, y + a, fill="grey")
if row == 1 and col == 1:
canvas.create_text(x + width // 2, y + width // 2, font=("", 14), text=t[f], state=DISABLED)
for f in range(6):
canvas.itemconfig(facelet_id[f][1][1], fill=cols[f])
def create_colorpick_rects(a):
"""Initializes the "paintbox" on the canvas"""
global curcol
global cols
for i in range(6):
x = (i % 3)*(a+5) + 7*a
y = (i // 3)*(a+5) + 7*a
colorpick_id[i] = canvas.create_rectangle(x, y, x + a, y + a, fill=cols[i])
canvas.itemconfig(colorpick_id[0], width=4)
curcol = cols[0]
def get_definition_string():
"""Generates the cube definition string from the facelet colors."""
color_to_facelet = {}
for i in range(6):
color_to_facelet.update({canvas.itemcget(facelet_id[i][1][1], "fill"): t[i]})
s = ''
for f in range(6):
for row in range(3):
for col in range(3):
s += color_to_facelet[canvas.itemcget(facelet_id[f][row][col], "fill")]
return s
########################################################################################################################
# ############################### Solve the displayed cube with a local or remote server ###############################
def solve():
    """Connect to the solver server, send the cube definition and display the answer.

    All failures are reported via show_text(); the function returns early on
    error instead of raising.
    """
    display.delete(1.0, END)  # clear output window
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except OSError:  # socket.error is an alias of OSError
        show_text('Failed to create socket')
        return
    try:
        # host = 'f9f0b2jt6zmzyo6b.myfritz.net'  # my RaspberryPi, if online
        host = txt_host.get(1.0, END).rstrip()  # default is localhost
        port = int(txt_port.get(1.0, END))  # default is port 8080
        try:
            remote_ip = socket.gethostbyname(host)
        except socket.gaierror:
            show_text('Hostname could not be resolved.')
            return
        try:
            s.connect((remote_ip, port))
        except OSError:  # fix: was a bare except that also swallowed KeyboardInterrupt
            show_text('Cannot connect to server!')
            return
        show_text('Connected with ' + remote_ip + '\n')
        try:
            defstr = get_definition_string() + '\n'
        except KeyError:  # raised when a facelet color is not a center color
            show_text('Invalid facelet configuration.\nWrong or missing colors.')
            return
        show_text(defstr)
        try:
            # NOTE: defstr already ends in '\n'; the extra '\n' here preserves the
            # original wire format (a double newline terminator).
            s.sendall((defstr + '\n').encode())
        except OSError:
            show_text('Cannot send cube configuration to server.')
            return
        show_text(s.recv(2048).decode())
    finally:
        s.close()  # fix: the socket was previously leaked on every call
########################################################################################################################
# ################################# Functions to change the facelet colors #############################################
def clean():
    """Reset every facelet of each face to that face's center color (a solved cube)."""
    for face in range(6):
        # The center color never changes inside the loop, so look it up once.
        center_fill = canvas.itemcget(facelet_id[face][1][1], "fill")
        for row in range(3):
            for col in range(3):
                canvas.itemconfig(facelet_id[face][row][col], fill=center_fill)
def empty():
    """Grey out all facelets except the six fixed center facelets."""
    for face in range(6):
        for row in range(3):
            for col in range(3):
                if (row, col) != (1, 1):  # keep the center facelet's color
                    canvas.itemconfig(facelet_id[face][row][col], fill="grey")
def random():
    """Color the facelets according to a freshly generated random cube.

    NOTE(review): this name shadows the builtin ``random`` module within this
    file; it is kept because the "Random" Button command below refers to it.
    """
    cube = cubie.CubieCube()
    cube.randomize()
    facelets = cube.to_facelet_cube()
    pos = 0  # running index into the flat 54-element facelet array
    for face in range(6):
        for row in range(3):
            for col in range(3):
                canvas.itemconfig(facelet_id[face][row][col], fill=cols[facelets.f[pos]])
                pos += 1
########################################################################################################################
# ################################### Edit the facelet colors ##########################################################
def click(event):
    """Handle a left mouse click on the canvas.

    Clicking a paintbox swatch selects it as the current color; clicking any
    other item (a facelet) paints it with the current color.
    """
    global curcol
    hit = canvas.find_withtag("current")
    if not hit:
        return  # click landed outside every canvas item
    if hit[0] in colorpick_id:
        curcol = canvas.itemcget("current", "fill")
        for idx in range(6):
            canvas.itemconfig(colorpick_id[idx], width=1)  # clear the old selection mark
        canvas.itemconfig("current", width=5)  # mark the newly selected swatch
    else:
        canvas.itemconfig("current", fill=curcol)
########################################################################################################################
# ###################################### Generate and display the TK_widgets ##########################################
# Build the Tk main window and the canvas that hosts the cube net, the
# paintbox and all control widgets. Statement order matters: the functions
# wired to the buttons must already be defined, and the entry fields must
# exist before solve() reads them.
root = Tk()
root.wm_title("Solver Client")
canvas = Canvas(root, width=12 * width + 20, height=9 * width + 20)
canvas.pack()
# Buttons are embedded in the canvas via create_window so they share its coordinates.
bsolve = Button(text="Solve", height=2, width=10, relief=RAISED, command=solve)
bsolve_window = canvas.create_window(10 + 10.5 * width, 10 + 6.5 * width, anchor=NW, window=bsolve)
bclean = Button(text="Clean", height=1, width=10, relief=RAISED, command=clean)
bclean_window = canvas.create_window(10 + 10.5 * width, 10 + 7.5 * width, anchor=NW, window=bclean)
bempty = Button(text="Empty", height=1, width=10, relief=RAISED, command=empty)
bempty_window = canvas.create_window(10 + 10.5 * width, 10 + 8 * width, anchor=NW, window=bempty)
brandom = Button(text="Random", height=1, width=10, relief=RAISED, command=random)
brandom_window = canvas.create_window(10 + 10.5 * width, 10 + 8.5 * width, anchor=NW, window=brandom)
# Text widget used by show_text() for status messages and the solver output.
display = Text(height=7, width=39)
text_window = canvas.create_window(10 + 6.5 * width, 10 + .5 * width, anchor=NW, window=display)
# Host/port entry fields for the solver-server connection, read by solve().
hp = Label(text=' Hostname and Port')
hp_window = canvas.create_window(10 + 0 * width, 10 + 0.6 * width, anchor=NW, window=hp)
txt_host = Text(height=1, width=20)
txt_host_window = canvas.create_window(10 + 0 * width, 10 + 1 * width, anchor=NW, window=txt_host)
txt_host.insert(INSERT, DEFAULT_HOST)
txt_port = Text(height=1, width=20)
txt_port_window = canvas.create_window(10 + 0 * width, 10 + 1.5 * width, anchor=NW, window=txt_port)
txt_port.insert(INSERT, DEFAULT_PORT)
# Wire up mouse handling, draw the cube net and the paintbox, then enter the event loop.
canvas.bind("<Button-1>", click)
create_facelet_rects(width)
create_colorpick_rects(width)
root.mainloop()
########################################################################################################################
| 40.484211 | 120 | 0.518201 |
58bc76fe979d8a17599711a8021f4425b357315a | 1,159 | py | Python | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | """Abstraction layer to deal with Django related changes in order to keep
compatibility with several Django versions simultaneously."""
from __future__ import unicode_literals
from django.conf import settings as django_settings
USER_MODEL = getattr(django_settings, 'AUTH_USER_MODEL', 'auth.User')
# Django 1.11 Widget.build_attrs has a different signature, designed for the new
# template based rendering. The previous version was more useful for our needs,
# so we restore that version.
# When support for Django < 1.11 is dropped, we should look at using the
# new template based rendering, at which point this probably won't be needed at all.
try:
# Python 3
from urllib.parse import urljoin # noqa
except ImportError:
# Python 2
from urlparse import urljoin # noqa @UnusedImport
| 36.21875 | 84 | 0.734254 |
58bd378d11da26df2129a68edbd47f10e2375dc5 | 10,649 | py | Python | ethereum.py/ethereum/clients/ethereum.py | dixonwhitmire/connect-clients | 09bf6f53f0a4fc923d1fb18f75ce86521880517c | [
"Apache-2.0"
] | null | null | null | ethereum.py/ethereum/clients/ethereum.py | dixonwhitmire/connect-clients | 09bf6f53f0a4fc923d1fb18f75ce86521880517c | [
"Apache-2.0"
] | 6 | 2021-07-13T19:58:58.000Z | 2021-11-02T21:25:14.000Z | ethereum.py/ethereum/clients/ethereum.py | dixonwhitmire/connect-clients | 09bf6f53f0a4fc923d1fb18f75ce86521880517c | [
"Apache-2.0"
] | 1 | 2021-07-13T19:22:04.000Z | 2021-07-13T19:22:04.000Z | """
ethereum.py
ethereum.py contains an EthereumClient class that provides functions for interacting
with the Coverage.sol solidity contract on an Ethereum blockchain network.
"""
import asyncio
import datetime
import json
import logging
import os
from ethereum.clients.nats import get_nats_client
from ethereum.config import get_settings, nats_eligibility_subject
from ethereum.exceptions import EthereumNetworkConnectionError
from hexbytes import HexBytes
from typing import Optional, Any, List
from web3 import Web3
logger = logging.getLogger(__name__)
# client instance
eth_client = None
def get_ethereum_client() -> Optional[EthereumClient]:
    """Return the process-wide EthereumClient, creating and caching it on first use.

    :return: a connected EthereumClient instance
    """
    global eth_client
    if not eth_client:
        settings = get_settings()
        # Load the contract ABI file named by the configuration.
        abi_file: str = os.path.join(settings.ethereum_config_directory, settings.ethereum_contract_abi)
        # fix: close the ABI file deterministically instead of leaking the handle
        with open(abi_file) as abi_fp:
            contract_info = json.load(abi_fp)
        eth_client = EthereumClient(
            eth_network_uri=settings.ethereum_network_uri,
            contract_address=settings.ethereum_contract_address,
            contract_abi=contract_info["abi"],
            event_poll_interval=settings.ethereum_event_poll_seconds
        )
    return eth_client
| 42.426295 | 137 | 0.582966 |
58bd44d4180b36c4fc8b719cc1462f1b22fa94a6 | 1,759 | py | Python | cli/actions/mc_combination_action.py | daneshvar-amrollahi/polar | b72254e1a8354e6a10135cd3990b8edfda02559e | [
"MIT"
] | 1 | 2021-11-14T05:52:21.000Z | 2021-11-14T05:52:21.000Z | cli/actions/mc_combination_action.py | daneshvar-amrollahi/polar | b72254e1a8354e6a10135cd3990b8edfda02559e | [
"MIT"
] | null | null | null | cli/actions/mc_combination_action.py | daneshvar-amrollahi/polar | b72254e1a8354e6a10135cd3990b8edfda02559e | [
"MIT"
] | null | null | null | from argparse import Namespace
from .action import Action
from symengine.lib.symengine_wrapper import sympify
from termcolor import colored
from program.mc_comb_finder import MCCombFinder
from cli.common import prepare_program
| 37.425532 | 114 | 0.637862 |
58c073b6ae4e5dbeb4eb910c743f8e1c8773b328 | 494 | py | Python | docker/gunicorn.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | docker/gunicorn.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | docker/gunicorn.py | admariner/madewithwagtail | a43b3263c0f151ece4994fccd561b0575db4979f | [
"MIT"
] | null | null | null | import gunicorn
# Send access and error logs to stdout/stderr ("-" means standard streams).
accesslog = "-"
errorlog = "-"
# Combined-style access log line, extended with the X-Forwarded-For header.
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" "%({X-Forwarded-For}i)s"'
# Redirect the application's stdout/stderr into the error log.
capture_output = True
# Trust X-Forwarded-* headers from any upstream address.
forwarded_allow_ips = "*"
# Requests carrying "X-CLOUDFRONT: yes" are treated as HTTPS — presumably set
# by the CDN in front of this service; verify against the upstream config.
secure_scheme_headers = {"X-CLOUDFRONT": "yes"}
workers = 2
worker_class = "gthread"
worker_connections = 5
bind = ":8000"
# Keep idle keep-alive connections open for 75 seconds.
keep_alive = 75
# Working directory of the application.
chdir = "/madewithwagtail"
# Obfuscate the Server header (to the md5sum of "Springload")
gunicorn.SERVER_SOFTWARE = "04e96149a2f64d6135c82d199ab62122"
| 27.444444 | 106 | 0.690283 |
58c135e6998a8525b0faabf5c07d8105ddf708e8 | 1,596 | py | Python | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | # -- encoding:utf-8 -- #
'''
Crie uma varivel com a string instituto de cincias matemticas e de computao e faa:
a. Concatene (adicione) uma outra string chamada usp
b. Concatene (adicione) uma outra informao: 2021
c. Verifique o tamanho da nova string (com as informaes adicionadas das questes a e b), com referncia a caracteres e espaos
d. Transforme a string inteiramente em maisculo
e. Transforme a string inteiramente em minsculo
f. Retire o espao que est no incio da string e imprima a string
g. Substitua todas as letras a por x
h. Separe a string em palavras nicas
i. Verifique quantas palavras existem na string
j. Separe a string por meio da palavra de
k. Verifique agora quantas palavras/frases foram formadas quando houve a separao pela palavra de
l. Junte as palavras que foram separadas (pode usar a separao resultante da questo h ou j)
m. Junte as palavras que foram separadas, mas agora separadas por uma barra invertida, no por espaos (pode usar a separao resultante da questo h ou j)
'''
texto = " instituto de cincias matemticas e de computao"
#a)
texto = texto + " usp"
print(texto)
#b)
texto = texto + " 2021"
print(texto)
#c)
tamanho = len(texto)
print(tamanho)
#d)
print(texto.upper())
#e)
print(texto.lower())
#f)
print(texto[1:])
print(texto.strip())
#g)
print(texto.replace('a', 'x'))
#h
separar = texto.split()  # split the sentence on whitespace into a word list
print(separar)
#i)
# fix: item (i) asks for the NUMBER of words; the original printed the list again
print(len(separar))
#j)
separar2 = texto.split('de')
print(separar2)
#k)
print(len(separar2))
#l)
juntar = " ".join(separar)
print(juntar)
#m)
juntar2 = "/".join(separar)
print(juntar2)
| 24.181818 | 155 | 0.734962 |
58c1cf2e7948459916f7782bc8f6b76e361226be | 753 | py | Python | breadcrumbs/templatetags/breadcrumbs_tags.py | LinuxOSsk/Shakal-NG | c4091c7972cffd86f64aa9f9a058d2907a56e5eb | [
"MIT"
] | 10 | 2016-02-06T15:40:30.000Z | 2018-09-27T15:15:13.000Z | breadcrumbs/templatetags/breadcrumbs_tags.py | LinuxOSsk/Shakal-NG | c4091c7972cffd86f64aa9f9a058d2907a56e5eb | [
"MIT"
] | 94 | 2016-02-04T18:39:36.000Z | 2022-01-20T05:25:00.000Z | breadcrumbs/templatetags/breadcrumbs_tags.py | LinuxOSsk/Shakal-NG | c4091c7972cffd86f64aa9f9a058d2907a56e5eb | [
"MIT"
] | 8 | 2016-05-10T20:29:53.000Z | 2021-02-07T00:50:31.000Z | # -*- coding: utf-8 -*-
from django.shortcuts import resolve_url
from django.template.loader import render_to_string
from django_jinja import library
from jinja2 import contextfunction
| 25.1 | 61 | 0.755644 |
58c23fc6ab7f8d080ab7dfae6e27ec6257ea2869 | 1,334 | py | Python | contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | [
"Apache-2.0"
] | 650 | 2017-07-09T02:08:10.000Z | 2022-03-22T20:39:54.000Z | contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | [
"Apache-2.0"
] | 735 | 2017-07-26T01:15:16.000Z | 2022-03-29T20:17:20.000Z | contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py | Flared/opencensus-python | e2535e688a50c7a06be8af93ca3b987d387da605 | [
"Apache-2.0"
] | 256 | 2017-07-24T18:29:15.000Z | 2022-03-15T15:33:03.000Z | import platform
import requests
| 28.382979 | 83 | 0.595202 |
58c4071d4471ff72fd95738a79b453160bfc2e4b | 252 | py | Python | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | from typing import List
| 25.2 | 83 | 0.650794 |
58c4179e5713c05abfe6169f74df8cd9ca6987a4 | 5,558 | py | Python | model/vgg_deeplab.py | ireina7/zero-shot-segmentation | 870d08ad7ea3965f006d0eb44667f6ecd87ef205 | [
"MIT"
] | null | null | null | model/vgg_deeplab.py | ireina7/zero-shot-segmentation | 870d08ad7ea3965f006d0eb44667f6ecd87ef205 | [
"MIT"
] | null | null | null | model/vgg_deeplab.py | ireina7/zero-shot-segmentation | 870d08ad7ea3965f006d0eb44667f6ecd87ef205 | [
"MIT"
] | null | null | null | import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == "__main__":
net = Vgg_Deeplab(3, 10)
in_ten = torch.randn(1, 3, 224, 224)
out = net(in_ten)
print(net)
print(out.size())
in_ten = torch.randn(1, 3, 64, 64)
mod = nn.Conv2d(3,
512,
kernel_size=3,
stride=1,
padding=2,
dilation=2)
out = mod(in_ten)
print(out.shape)
| 35.858065 | 84 | 0.526268 |
58c5f8a4b5b023272ffe37f64fa23b186bdc1cd0 | 510 | py | Python | web/app.py | erberlin/themepark-times-API | 0b6a44224c3d5e1f98399ac30e96e35bad38849e | [
"MIT"
] | 7 | 2019-04-19T16:32:24.000Z | 2019-07-10T12:41:50.000Z | web/app.py | erberlin/themepark-times-API | 0b6a44224c3d5e1f98399ac30e96e35bad38849e | [
"MIT"
] | null | null | null | web/app.py | erberlin/themepark-times-API | 0b6a44224c3d5e1f98399ac30e96e35bad38849e | [
"MIT"
] | 1 | 2019-07-10T04:36:43.000Z | 2019-07-10T04:36:43.000Z | # -*- coding: utf-8 -*-
"""
This module defines a connexion app object and configures the API
endpoints based the swagger.yml configuration file.
copyright: 2019 by Erik R Berlin.
license: MIT, see LICENSE for more details.
"""
import connexion
app = connexion.App(__name__, specification_dir="./")
app.app.url_map.strict_slashes = False
app.add_api("swagger.yml")
if __name__ == "__main__":
# FLASK_ENV=development & FLASK_DEBUG=1 w/ Docker don't seem to enable debug mode.
app.run(debug=True)
| 25.5 | 86 | 0.731373 |
58c6d6c03c23a334c302f4903855ceb65421ce9b | 2,341 | py | Python | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | null | null | null | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | null | null | null | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | 1 | 2021-12-16T04:56:09.000Z | 2021-12-16T04:56:09.000Z | # -*- coding: utf-8 -*-
"""
17 June 2020
Author: Xiandi Ooi
Visualizing the types of pollutants.
"""
import pandas as pd
from plotly.offline import plot
import plotly.graph_objects as go
# Get the file from us
# fix: the URL literal was missing its opening quote, a SyntaxError
df = pd.read_csv("https://www.dropbox.com/s/u0ymg0ufne0an60/api-20200713.csv?dl=1", sep = ";")
# Make the selection
selected_area = "Sandakan"
df_select = df.loc[(df.Area == selected_area),
["Area", "Dominant", "Datetime"]]
# Data wrangling for this particular visual
df_update = df_select.set_index(pd.DatetimeIndex(df_select["Datetime"]))
df_update.drop(df_update.columns[2], axis = 1, inplace = True)
# Wrangling
df_group_time = df_update.groupby(pd.Grouper(freq = "Q")).size().reset_index(name = "Total")
df_group = df_update.groupby([pd.Grouper(freq = "Q"),
pd.Grouper("Dominant")]).size().reset_index(name = "Count")
df_output = df_group.set_index("Datetime").join(df_group_time.set_index("Datetime"))
df_output["Frequency"] = df_output["Count"] / df_output["Total"]
# Creating df subset for the stacked bars, here we are only dealing with the main dominant pollutants
df_pm2_5 = df_output.loc[(df_output.Dominant == "**")]
df_pm10 = df_output.loc[(df_output.Dominant == "*")]
df_so2 = df_output.loc[(df_output.Dominant == "a")]
df_no2 = df_output.loc[(df_output.Dominant == "b")]
df_o3 = df_output.loc[(df_output.Dominant == "c")]
df_co = df_output.loc[(df_output.Dominant == "d")]
# Now comes the bar chart
fig = go.Figure()
fig.add_trace(go.Bar(x = df_pm2_5.index,
y = df_pm2_5["Frequency"],
name = "PM 2.5"))
fig.add_trace(go.Bar(x = df_pm10.index,
y = df_pm10["Frequency"],
name = "PM 10"))
fig.add_trace(go.Bar(x = df_so2.index,
y = df_so2["Frequency"],
name = "SO2"))
fig.add_trace(go.Bar(x = df_no2.index,
y = df_no2["Frequency"],
name = "NO2"))
fig.add_trace(go.Bar(x = df_o3.index,
y = df_o3["Frequency"],
name = "O3"))
fig.add_trace(go.Bar(x = df_co.index,
y = df_co["Frequency"],
name = "CO"))
fig.update_layout(barmode = "stack", title_text="Frequency of Detected Pollutants")
plot(fig)
| 32.971831 | 101 | 0.612986 |
58c6e236acba1419c8019e6e9d0019c26bbbfc7f | 3,977 | py | Python | tests/bs3/test_block_fields.py | rpkilby/django-template-forms | 5099d87d661a6a313df49fa484afd94f145e65bc | [
"BSD-3-Clause"
] | 1 | 2021-01-29T11:53:32.000Z | 2021-01-29T11:53:32.000Z | tests/bs3/test_block_fields.py | rpkilby/django-template-forms | 5099d87d661a6a313df49fa484afd94f145e65bc | [
"BSD-3-Clause"
] | 5 | 2017-11-29T11:01:56.000Z | 2018-02-05T23:34:08.000Z | tests/bs3/test_block_fields.py | rpkilby/django-template-forms | 5099d87d661a6a313df49fa484afd94f145e65bc | [
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.test import TestCase
from template_forms import bs3
| 30.829457 | 103 | 0.538094 |
58c773feba3d980c07a404541fff29ea0e07df10 | 19,463 | py | Python | bzt/modules/java.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 1 | 2018-02-17T16:00:34.000Z | 2018-02-17T16:00:34.000Z | bzt/modules/java.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 5 | 2018-03-10T20:50:24.000Z | 2021-08-20T15:07:32.000Z | bzt/modules/java.py | 3dgiordano/taurus | 77cb31b6f0e5c27545094f600ac2b595fa76d992 | [
"Apache-2.0"
] | 1 | 2018-05-04T23:06:15.000Z | 2018-05-04T23:06:15.000Z | """
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import shutil
import subprocess
import time
from os import listdir
from os.path import join
from bzt import ToolError, TaurusConfigError
from bzt.engine import HavingInstallableTools, Scenario
from bzt.modules import SubprocessedExecutor
from bzt.utils import get_full_path, shell_exec, TclLibrary, JavaVM, RequiredTool, MirrorsManager
SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/3.6/" \
"selenium-server-standalone-3.6.0.jar"
SELENIUM_VERSION = "3.6" # FIXME: unused, remove it
JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
"{version}/junit-{version}.jar"
JUNIT_VERSION = "4.12"
JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
"junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
TESTNG_VERSION = "6.8.5"
TESTNG_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/testng/testng/" \
"{version}/testng-{version}.jar".format(version=TESTNG_VERSION)
HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \
"/1.3/hamcrest-core-1.3.jar"
JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar"
| 41.946121 | 121 | 0.640138 |
58c8432548a967e56cf908c27cbcc2cdbca067b8 | 1,434 | py | Python | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | """
Maybe not so relevant in Python due to the possibility to use multiple inheritance...
"""
from abc import ABC, abstractmethod
if __name__ == "__main__":
amazon = Amazon()
dropbox = Dropbox()
amazon.get_file("Baba")
dropbox.store_file("Baba")
| 22.40625 | 85 | 0.642259 |
58c8441bd96dbfec3988f61c1477017eb7ba3344 | 3,536 | py | Python | SCSCons/Variables/PackageVariable.py | Relintai/pandemonium_engine | 3de05db75a396b497f145411f71eb363572b38ae | [
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 1,403 | 2017-11-23T14:24:01.000Z | 2022-03-30T20:59:39.000Z | nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Variables/PackageVariable.py | lps1333/Nuitka | 02e8d59a275cd7fe482cbc8100e753ff5abe39d7 | [
"Apache-2.0"
] | 3,708 | 2017-11-27T13:47:12.000Z | 2022-03-29T17:21:17.000Z | nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Variables/PackageVariable.py | lps1333/Nuitka | 02e8d59a275cd7fe482cbc8100e753ff5abe39d7 | [
"Apache-2.0"
] | 281 | 2017-12-01T23:48:38.000Z | 2022-03-31T15:25:44.000Z | # MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Variable type for package Variables.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Given these options ::
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
Can be used as a replacement for autoconf's ``--with-xxx=yyy`` ::
opts = Variables()
opts.Add(
PackageVariable(
key='x11',
help='use X11 installed here (yes = search some places)',
default='yes'
)
)
...
if env['x11'] == True:
dir = ... # search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... # build with x11 ...
"""
from typing import Tuple, Callable
import SCons.Errors
__all__ = ['PackageVariable',]
ENABLE_STRINGS = ('1', 'yes', 'true', 'on', 'enable', 'search')
DISABLE_STRINGS = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
    """Convert a user-supplied value to True, False, or a path string."""
    lowered = val.lower()
    if lowered in ENABLE_STRINGS:
        return True
    if lowered in DISABLE_STRINGS:
        return False
    # Anything else is taken verbatim as a package path.
    return val
def _validator(key, val, env, searchfunc) -> None:
    """Validate the value of a package variable.

    When the variable was enabled (exactly True) and a *searchfunc* was
    supplied, the search result replaces the value in *env*.  Otherwise any
    truthy value must be an existing filesystem path, or UserError is raised.
    """
    # NB: searchfunc is currently undocumented and unsupported
    # TODO write validator, check for path
    import os
    if env[key] is True:
        if searchfunc:
            env[key] = searchfunc(key, val)
    elif env[key] and not os.path.exists(val):
        raise SCons.Errors.UserError(
            'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None) -> Tuple[str, str, str, Callable, Callable]:
    """Return a tuple describing a package-list SCons Variable.

    The returned 5-tuple (key, help, default, validator, converter) is usable
    as input to :meth:`Add`.  A package variable accepts 'yes'/'no'-style
    values or a pathname; that hint is appended to *help*.
    """
    # NB: searchfunc is currently undocumented and unsupported
    help = '\n    '.join(
        (help, '( yes | no | /path/to/%s )' % key))

    def validator(k, v, e):
        # Bind searchfunc into the validator expected by Variables.Add().
        return _validator(k, v, e, searchfunc)

    return (key, help, default, validator, _converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 32.740741 | 101 | 0.669683 |
58c98a83a2ec15c194e9ea24edc51018cb47644a | 8,351 | py | Python | tests/test_protocol.py | kwikiel/edgedb | dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd | [
"Apache-2.0"
] | null | null | null | tests/test_protocol.py | kwikiel/edgedb | dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd | [
"Apache-2.0"
] | null | null | null | tests/test_protocol.py | kwikiel/edgedb | dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import edgedb
from edb.server import compiler
from edb import protocol
from edb.testbase.protocol.test import ProtocolTestCase
| 33.138889 | 79 | 0.554305 |
58cc1d434d0ca910c890148d1eb3817d02e4f5af | 278 | py | Python | 210125/homework_re_3.py | shadowsmain/pyton-adv | 9562097b2d34c1b286c13cf0930fa06079532a67 | [
"MIT"
] | null | null | null | 210125/homework_re_3.py | shadowsmain/pyton-adv | 9562097b2d34c1b286c13cf0930fa06079532a67 | [
"MIT"
] | null | null | null | 210125/homework_re_3.py | shadowsmain/pyton-adv | 9562097b2d34c1b286c13cf0930fa06079532a67 | [
"MIT"
] | null | null | null | import re
RE_NUMBER_VALIDATOR = re.compile(r'^\d+[.,]\d+$')
assert number_is_valid('1.32')
assert number_is_valid('1,32')
assert not number_is_valid('asdasd1234')
assert not number_is_valid('22,a44') | 21.384615 | 49 | 0.755396 |
58cc767b16ca728bd586a1ff7e220380c8ce5e1a | 3,529 | py | Python | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | 4 | 2016-10-29T12:10:48.000Z | 2016-11-06T02:25:09.000Z | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | null | null | null | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | null | null | null | """
prec_reformat.py
Taking state data and having each line be a precinct's voting results and candidate
cf-scores (rather than each line be each candidate per precinct.
| prec_id | cf_score_0 | num_votes_0 | cf_score_1 | num_votes_1 |
"""
import math
import numpy as np
import pandas as pd
from prec_cd import prec_cd_main
from check_data import check_main
"""
data_clean()
Function to parse out certain types of data that are not useful in our
results.
# NOTE: overwrites the old file, since it is unnecessary
"""
"""
prec_reformat_main()
Function that does the bulk of the original main function and can be called
by the commandline.
@param: state, year
@return: location of new precline file
"""
if __name__ == "__main__":
main()
| 26.140741 | 83 | 0.616322 |
58ce3480a9b43387f9f12525806c69631b6a2afa | 1,668 | py | Python | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | from snakemake import shell
input, output, params, threads, w, config = snakemake.input, snakemake.output, snakemake.params, snakemake.threads, snakemake.wildcards, snakemake.config
genome = w.genome
params.hybrid = config['x'][genome]['hybrid']
opt = params.opt
shell("""
rm -rf {output.fna}* {output.fai}*
rm -rf {output.chrom_bed} {output.chrom_size} {output.gap}
mkdir -p {params.wdir}/{params.odir}
cd {params.wdir}/{params.odir}
rm -rf raw.fna.* renamed* map* raw.sizes
""")
merge_tag = '--merge_short' if w.genome != 'Mt_R108' else ''
if params.hybrid:
shell("""
cat {input} > {params.wdir}/{params.odir}/renamed.fna
cd {params.wdir}/{params.odir}
fasta.py size renamed.fna > renamed.sizes
touch mapf.chain mapb.chain
""")
else:
params.gap = int(config['x'][genome]['gap'])
params.prefix = config['x'][genome]['prefix']
shell("""
cd {params.wdir}/{params.odir}
ln -sf ../download/raw.fna raw.fna
fasta.py size raw.fna > raw.sizes
fasta.py rename raw.fna renamed.fna mapf.bed mapb.bed \
--opt {params.opt} {merge_tag} \
--gap {params.gap} --prefix_chr {params.prefix}
fasta.py size renamed.fna > renamed.sizes
chain.py fromBed mapf.bed raw.sizes renamed.sizes > mapf.chain
chainSwap mapf.chain mapb.chain
""")
shell("""
cd {params.wdir}
ln -sf {params.odir}/renamed.fna 10_genome.fna
cd ..
samtools faidx {output.fna}
fasta.py size --bed {output.fna} > {output.chrom_bed}
cut -f1,3 {output.chrom_bed} > {output.chrom_size}
fasta.py gaps {output.fna} > {output.gap}
""")
| 32.076923 | 153 | 0.631894 |
58ceaafc2d2819124d87eef37b783e73dcf0c336 | 2,358 | py | Python | HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py | mocne/PycharmProjects | b009e530f4f01e5b1826bbe2364d86b65bcd66e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import xlrd
import Register_aidaiwangApp
import LogOut_aidiawangApp
| 33.685714 | 98 | 0.636556 |
58cf4798257428d881df3e8a39b178b627d7c8dd | 389 | py | Python | Python/Learning/Language/arg_ex.py | prynix/learning-programming | 85aea40a61fb824a2b4e142331d9ac7971fef263 | [
"MIT"
] | 2 | 2017-03-14T16:02:08.000Z | 2017-05-02T13:48:18.000Z | Python/Learning/Language/arg_ex.py | CajetanP/learning-programming | 85aea40a61fb824a2b4e142331d9ac7971fef263 | [
"MIT"
] | 4 | 2021-05-20T21:10:13.000Z | 2022-02-26T09:50:19.000Z | Python/Learning/Language/arg_ex.py | CajetanP/learning-programming | 85aea40a61fb824a2b4e142331d9ac7971fef263 | [
"MIT"
] | 1 | 2021-06-18T01:31:24.000Z | 2021-06-18T01:31:24.000Z | from sys import argv
# Simple argv/input demo: expects exactly one command-line argument (the
# user's name) in addition to the script path; the unpacking below raises
# ValueError if the argument count differs.
script, user_name = argv
prompt = '>'
print(user_name, script)
print("Do you like me " + user_name + "?")
likes = input(prompt)
print("Where do you live " + user_name + "?")
lives = input(prompt)
# The {:s} placeholders are filled positionally from the two answers above.
print("""
So you said {:s} about liking me.
You live in {:s}.
""".format(likes, lives))
print("Script: ", script)
# int() raises ValueError for non-numeric input.
age = int(input("Age? "))
print("Age*2: ", age*2)
| 16.208333 | 45 | 0.62982 |
58cf698a07fcbc7df1f0d9ad44c0aa4f953a79ab | 2,565 | py | Python | tests/functional/test_uploads.py | jounile/nollanet | 7bea20934d3f5e09658a9d31c3b05c15416398a0 | [
"MIT"
] | 3 | 2019-10-13T08:37:13.000Z | 2020-02-16T12:24:11.000Z | tests/functional/test_uploads.py | jounile/nollanet | 7bea20934d3f5e09658a9d31c3b05c15416398a0 | [
"MIT"
] | 5 | 2019-11-13T15:56:52.000Z | 2021-04-30T20:58:19.000Z | tests/functional/test_uploads.py | jounile/nollanet | 7bea20934d3f5e09658a9d31c3b05c15416398a0 | [
"MIT"
] | 1 | 2020-04-08T21:09:52.000Z | 2020-04-08T21:09:52.000Z | import io
import pytest
from requests import get
from urllib.parse import urljoin
def test_my_uploads_page(wait_for_api, login_user):
    """
    GIVEN a logged-in user (login_user fixture)
    WHEN the '/my/uploads' page is requested via GET
    THEN the response is 200 and shows the "My uploads" heading
    """
    session, base_url = wait_for_api
    page = session.get(urljoin(base_url, '/my/uploads'))
    assert page.status_code == 200
    assert '<h1>My uploads</h1>' in page.text
def test_valid_new_upload_page(wait_for_api, login_user):
    """
    GIVEN a logged-in user (login_user fixture)
    WHEN the '/media/newupload' page is requested via GET
    THEN the response is 200 and shows the "New upload" heading
    """
    session, base_url = wait_for_api
    page = session.get(urljoin(base_url, '/media/newupload'))
    assert page.status_code == 200
    assert '<h1>New upload</h1>' in page.text
def test_invalid_new_upload_page(wait_for_api):
    """
    GIVEN no user is logged in
    WHEN the '/media/newupload' page is requested via GET
    THEN the response is 200 and flashes a "Please login first" message
    """
    session, base_url = wait_for_api
    page = session.get(urljoin(base_url, '/media/newupload'))
    assert page.status_code == 200
    assert '<div class="flash">Please login first</div>' in page.text
def test_new_upload(wait_for_api, login_user):
    """
    GIVEN a user has logged in (login_user)
    WHEN the '/media/newupload' page is posted an example image (POST)
    THEN check the response is valid and the page title is correct
    """
    request_session, api_url = wait_for_api
    # Use a context manager so the file handle is always closed — the
    # original opened the file and never closed it (leaked on every run).
    with open("./app/static/gfx/example.png", "rb") as example_file:
        files = {'file': example_file}
        response = request_session.post(
            urljoin(api_url, '/media/newupload'), files=files, allow_redirects=True)
    assert response.status_code == 200
    assert '<h1>My uploads</h1>' in response.text
#def test_remove_upload(wait_for_api, login_user):
# """
# GIVEN a user has logged in (login_user)
# WHEN the '/blob/delete' page is posted (POST)
# THEN check the response is valid and the user is logged in
# """
# valid_blob = dict(blob_path='images/*example.png', upload_id=2)
# request_session, api_url = wait_for_api
# response = request_session.post(urljoin(api_url, '/blob/delete'), data=valid_blob, allow_redirects=True)
# assert response.status_code == 200
# assert 'example.png was deleted successfully' in response.text
| 40.078125 | 109 | 0.71384 |
58cfe77be2b1a529ec5b49496f3549cf64c84e22 | 1,107 | py | Python | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | 3 | 2015-04-04T03:08:52.000Z | 2021-01-09T00:09:25.000Z | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | null | null | null | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | 4 | 2015-04-04T03:08:57.000Z | 2021-10-03T14:59:23.000Z | #!/usr/bin/env python
import sys
from shotgun import *
# Python 2 smoke test: queries up to 5 entities of each custom Shotgun
# entity type and prints their project code and "code" attribute.
try:
    # Optional first CLI argument selects the Shotgun server/connection.
    if len(sys.argv) > 1:
        sg = Shotgun(sys.argv[1])
    else:
        sg = Shotgun()
    #################################################################
    # Find CustomEntity01 entities
    #################################################################
    print "*" * 40, "findEntities - CustomEntity01", "*" * 40
    for entity in sg.findEntities("CustomEntity01", FilterBy(), 5):
        #print entity
        #print "-" * 40
        print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
    #################################################################
    # Find CustomEntity02 entities
    #################################################################
    print "*" * 40, "findEntities - CustomEntity02", "*" * 40
    for entity in sg.findEntities("CustomEntity02", FilterBy(), 5):
        #print entity
        #print "-" * 40
        print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
# Shotgun-specific errors first, then anything else (best-effort CLI tool).
except SgError, e:
    print "SgError:", e
except Exception, e:
    print "Error:", e
| 31.628571 | 79 | 0.443541 |
58d09df8656249313c3aee99521da7185757d936 | 24,955 | py | Python | word2vec_np/utils/data.py | mkserge/word2vec-np | 6b53b7e3defd7c8333dcb9d9098e16502a9ce20f | [
"MIT"
] | 1 | 2021-11-22T11:30:10.000Z | 2021-11-22T11:30:10.000Z | word2vec_np/utils/data.py | mkserge/word2vec-np | 6b53b7e3defd7c8333dcb9d9098e16502a9ce20f | [
"MIT"
] | null | null | null | word2vec_np/utils/data.py | mkserge/word2vec-np | 6b53b7e3defd7c8333dcb9d9098e16502a9ce20f | [
"MIT"
] | null | null | null | import word2vec_np.utils.checks as checks
import collections
import numpy as np
import math
import time
import logging
def get_sentences_from_file(train_file):
    """ Returns a list of sentences from an input file.

    The input is assumed to contain one sentence per line; trailing
    newlines are stripped.

    Args:
        train_file: A path to a file
    Returns:
        A list of sentences as they appear in the input.
    """
    # Use a context manager so the handle is closed deterministically —
    # the original left the file object open until garbage collection.
    with open(train_file) as fh:
        return [line.rstrip('\n') for line in fh]
def get_words_from_file(train_file):
    """ Returns a list of words from input sentences.

    Args:
        train_file: A path to a file
    Returns:
        A list of words as they appear in the input.
    """
    # Read the file directly instead of going through
    # get_sentences_from_file (which leaked its file handle), and flatten
    # in a single pass.  str.split() with no argument already discards the
    # trailing newline, so no rstrip is needed.
    with open(train_file) as fh:
        return [word for line in fh for word in line.split()]
def get_words_from_sentences(sentences):
    """ Flatten a list of sentence strings into their whitespace-separated words.

    Args:
        sentences: A list of sentences
    Returns:
        A flat list of words in their original order.
    """
    return [token for sentence in sentences for token in sentence.split()]
def save_word_counts(word_count, dict_file):
    """ Saves the vocabulary with counts to a file, one "word count" per line.

    The output ordering matches ``word_count``; the in-memory dictionary has
    the same ordering except for an extra 'PAD' symbol at index 0.

    Args:
        word_count: List of (word, count) tuples
        dict_file: Path to the output file.
    """
    # Context manager guarantees the handle is closed even if a write
    # raises — the original closed it manually and leaked on exceptions.
    with open(dict_file, 'w+') as out:
        for word, count in word_count:
            out.write(word + ' ' + str(count) + '\n')
def save_dictionary(word_count, dict_file):
    """Saves the vocabulary words to a file, one word per line.

    The output ordering matches ``word_count``; the in-memory dictionary has
    the same ordering except for an extra 'PAD' symbol at index 0.

    Args:
        word_count: List of (word, count) tuples
        dict_file: Path to the output file.
    """
    # Context manager guarantees the handle is closed even if a write
    # raises — the original closed it manually and leaked on exceptions.
    with open(dict_file, 'w+') as out:
        for word, _ in word_count:
            out.write(word + '\n')
def get_data(sentences, num_total_words, dictionaries, args):
    """ Gets data ready for training.

    Builds ([context], target, [negative samples]) training triples from the
    corpus (or loads pre-generated arrays when ``args.load_data`` is set),
    saves the arrays to ``args.x_file``/``args.y_file``/``args.yneg_file``
    and the word counts to ``args.dict_file``.

    Args:
        sentences: list of training sentences
        num_total_words: Total number of words in training corpus.
        dictionaries: Dictionary of dictionary (urgh) and word counts.
        args: Args passed to the script.
    Returns:
        Tuple of (x_train, y_train, y_neg) numpy arrays.
    """
    logger = logging.getLogger('main')
    logger.info('Building train data...')
    # Get the relevant dictionaries
    dictionary = dictionaries['dictionary']
    word_count = dictionaries['word_count']
    # If we want to use word2vec's dictionary swap here.
    # This is for debugging only, to compare with embeddings
    # generated from original word2vec.
    if args.use_w2v_weights:
        # NOTE(review): get_w2v_dictionaries is not defined in this part of
        # the module — presumably defined elsewhere in this file; confirm.
        dictionary_w2v, word_count_w2v = get_w2v_dictionaries(num_total_words, args)
        # Do some sanity checks
        checks.check_word_counts(word_count, word_count_w2v)
        checks.check_dictionaries(dictionary, dictionary_w2v)
        # Swap the dictionaries
        dictionary = dictionary_w2v
        # NOTE(review): reversed_dictionary is computed here but never read
        # again in this function.
        reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
        word_count = word_count_w2v
    # See if we want to load pre-generated data instead of building it.
    if args.load_data:
        return np.load(args.x_file + '.npy'), np.load(args.y_file + '.npy'), np.load(args.yneg_file + '.npy')
    # Get the probabilities of keeping the words during downsampling
    # NOTE(review): keep_prob is unused below — the sentence downsampler
    # reads 'keep_probs' out of `dictionaries` instead.
    keep_prob = get_keep_probs(word_count, num_total_words, args.ds_param)
    # Dump the dictionary into a file.
    save_word_counts(word_count, args.dict_file)
    # Get the training data. This returns a list of ([context], target, [negative samples]) tuples.
    train_data = get_train_data_with_sentence_downsampling(sentences, dictionaries, args)
    # Break training data into arrays of context words, targets and negative samples.
    x_train, y_train, y_neg = process_data(train_data, word_count, args)
    logger.info('Finished building train data...')
    # Persist the arrays so later runs can reuse them via args.load_data.
    np.save(args.x_file, x_train)
    np.save(args.y_file, y_train)
    np.save(args.yneg_file, y_neg)
    return x_train, y_train, y_neg
def get_dictionaries(words, args):
    """ Returns a dictionary of dictionaries used in training.

    Index layout convention used throughout this module: 'PAD' -> 0,
    'UNK' -> 1, real vocabulary words start at index 2 in frequency order.

    Args:
        words: A list of words from the training file.
        args: The arguments passed on to the script.
    Returns:
        A dictionary consisting of
          dictionary -- dictionary mapping words to indices.
          reversed_dictionary -- dictionary mapping indices to words.
          word_count -- list of (word, count) tuples, most frequent first.
          keep_probs -- probabilities of keeping words during down-sampling.
          ns_probs -- probabilities of getting sampled during NS.
    """
    logger = logging.getLogger('main')
    logger.info('Building dictionaries...')
    start_time = time.time()
    # List of (word, word_count) tuples; slot 0 is a placeholder for 'UNK'
    # whose true count is filled in after the vocabulary is fixed.
    word_count = [('UNK', 0)]
    # Total number of the words in the corpus
    num_total_words = len(words)
    # Sort the list of words by frequency and pick the top vocab_size ones
    if args.vocab_size == 0:
        # noinspection PyArgumentList
        # vocab_size = 0 implies we take the entire vocabulary available from the corpus
        word_count.extend(collections.Counter(words).most_common())
    else:
        # noinspection PyArgumentList
        word_count.extend(collections.Counter(words).most_common(args.vocab_size - 1))
    # Build the dictionary
    dictionary = dict()
    dictionary['PAD'] = 0
    # num_vocab_words stores the number of words in the corpus that exist in our dictionary.
    num_vocab_words = 0
    for word, count in word_count:
        num_vocab_words += count
        dictionary[word] = len(dictionary)
    # Update word count list: every corpus token outside the vocabulary
    # counts towards 'UNK'.
    word_count[0] = ('UNK', num_total_words - num_vocab_words)
    # Get the reversed dictionary.
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    # Get the negative sampling probabilities
    ns_probs = get_ns_probs(word_count, args.ns_param)
    # Get the probabilities of keeping the words during downsampling
    keep_probs = get_keep_probs(word_count, num_total_words, args.ds_param)
    dictionaries = {'dictionary': dictionary,
                    'reversed_dictionary': reversed_dictionary,
                    'word_count': word_count,
                    'ns_probs': ns_probs,
                    'keep_probs': keep_probs}
    elapsed_time = time.time() - start_time
    logger.info('Finished building dictionaries in %d seconds' % elapsed_time)
    return dictionaries
def downsample_sentence(sentence_in, dictionaries):
    """ Filter a sentence the way word2vec does before building contexts.

    Drops out-of-vocabulary words, the 'EOS' marker, and frequent words
    that lose the random keep-probability draw.

    Args:
        sentence_in: The input sentence that will be downsampled
        dictionaries: Dict holding 'dictionary' (word -> index, where
            'UNK' maps to 1) and 'keep_probs' (word -> keep probability).
    Returns:
        The downsampled sentence as a single space-joined string.
    """
    vocab = dictionaries['dictionary']
    keep = dictionaries['keep_probs']
    survivors = [
        token
        for token in sentence_in.split()
        if vocab.get(token, 1) != 1                 # drop OOV / 'UNK'
        and token != 'EOS'                          # drop end-of-sentence marker
        and keep.get(token) >= np.random.rand()     # random subsampling draw
    ]
    return ' '.join(survivors)
def get_train_data_with_sentence_downsampling(sentences, dictionaries, args):
    """ This is the new implementation of get_train_data where the downsampling is done before building the context on
    each sentence. The main differences with get_train_data_with_context_downsampling implementation are
    * Downsampling is done before building context on each sentence.
    * Context window size is downsized randomly for each word.
    Args:
        sentences: list of sentences in the training data
        dictionaries: a dict of dictionaries including
            dictionary: dictionary of the vocabulary words mapping words to indices
            reversed_dictionary: dictionary mapping indices to their corresponding words
            word_count: a list of (word, word_count) tuples
            ns_probs: negative sampling probabilities aligned with indices 2..len(dictionary)-1
            keep_probs: a dictionary mapping words to their probability of staying during downsampling
        args: input args
    Returns:
        train_data: A list of (context, target, neg_samples) tuples; target and
        neg_samples are shifted down by 2 (output matrix has no 'PAD'/'UNK').
    """
    logger = logging.getLogger('main')
    train_data = []
    # Get the required dictionaries
    ns_probs = dictionaries['ns_probs']
    dictionary = dictionaries['dictionary']
    reversed_dictionary = dictionaries['reversed_dictionary']
    num_processed_sentences = 0
    num_total_sentences = len(sentences)
    logger.info('Number of sentences: %d' % num_total_sentences)
    for sentence in sentences:
        # Note that the downsampled sentence will not contain 'UNK' or 'EOS' symbols.
        sentence = downsample_sentence(sentence, dictionaries)
        sentence_words = sentence.split()
        num_processed_words = 0
        # NOTE(review): progress below divides by the total word count but
        # only increments for words that produced a context — the reported
        # percentage can under-count.
        num_total_words = len(sentence_words)
        for ind, word in enumerate(sentence_words):
            # Get the dictionary index for the given word. This is our target
            # W2 matrix does not contain 'PAD' or 'UNK', so we shift the target index by two
            target_ind = dictionary.get(word) - 2
            # Build context for the current word in the sentence.
            # Shrink context window by a random number
            context_window = np.random.randint(1, args.context_window + 1)
            context = []
            for cont_ind in range(ind - context_window, ind + context_window + 1):
                if cont_ind < 0:
                    continue
                if cont_ind == ind:
                    continue
                if cont_ind >= len(sentence_words):
                    continue
                if dictionary.get(sentence_words[cont_ind], 1) == 1:
                    continue
                context.append(dictionary.get(sentence_words[cont_ind]))
            if len(context) != 0:
                # If we are doing negative sampling, build a set of negative samples
                neg_samples = []
                if args.ns_param != 0:
                    # Pick neg_samples of negative samples.
                    while len(neg_samples) < args.num_neg_samples:
                        # Pick a random word from the dictionary (ignoring 'PAD', 'UNK' and 'EOS')
                        # according to probabilities stored in ns_prob table.
                        neg_ind = np.random.choice(np.arange(2, len(dictionary)), p=ns_probs)
                        # Ignore if the random pick is the EOS symbol, or the target index
                        if reversed_dictionary.get(neg_ind) == 'EOS' \
                                or neg_ind == target_ind \
                                or neg_ind in neg_samples:
                            continue
                        # W2 matrix does not contain 'PAD' or 'UNK', so we shift the dictionary by two
                        neg_samples.append(neg_ind - 2)
                train_data.append((context, target_ind, neg_samples))
                num_processed_words += 1
                if num_processed_words % 1000 == 0:
                    logger.info('Processed words for sentence: %.3f%%' % (float(num_processed_words * 100) / num_total_words))
        num_processed_sentences += 1
        if num_processed_sentences % 1000 == 0:
            logger.info('Processed sentences: %.3f%%' % (float(num_processed_sentences * 100) / num_total_sentences))
    return train_data
def get_ns_probs(word_count, ns_param):
    """ Build the negative-sampling distribution over the vocabulary.

    Each word's weight is count ** ns_param; the weights are normalised to
    sum to 1.  The returned list is aligned with ``word_count`` minus its
    leading 'UNK' entry (which is never sampled).

    :param word_count: List of (word, count) tuples; entry 0 is 'UNK' and is skipped.
    :param ns_param: Exponent applied to raw counts when building the distribution.
    :return: A list of sampling probabilities, one per vocabulary word.
    """
    # Unnormalised weights, skipping the 'UNK' placeholder at index 0.
    weights = [np.power(count, ns_param) for _, count in word_count[1:]]
    total = sum(weights)
    return [weight / total for weight in weights]
def get_keep_probs(word_count, num_total_words, ds_param):
    """ Returns the probability of keeping each word during downsampling.

    Bug fix: the previous implementation returned a plain list even though
    this docstring promised a dictionary and the consumer
    (downsample_sentence) calls ``.get(word)`` on the result — a list has no
    ``get`` and would raise AttributeError.  Return a dict keyed by word.

    :param word_count: A list containing tuples of (word, word_count); entry 0
        is 'UNK' and is excluded.
    :param num_total_words: Total number of words in the corpus
    :param ds_param: The downsampling parameter, used in the distribution
    :return: A dictionary mapping each vocabulary word to its keep probability
    """
    keep_prob = {}
    for word, count in word_count[1:]:  # <- Ignore 'UNK'
        # Compute the fraction of the corpus tokens that are the current word.
        word_frac = float(count) / num_total_words
        # word2vec-style subsampling formula: (sqrt(f/t) + 1) * t / f.
        keep_prob[word] = (np.sqrt(word_frac / ds_param) + 1) * ds_param / word_frac
    return keep_prob
def get_mini_batches(X, Y, YNEG, batch_size=64, shuffled=True):
    """Split the data into minibatches of batch_size.

    :param X: array containing the context words at each row
    :param Y: array containing the target word at each row
    :param YNEG: array containing the negative samples at each row
    :param batch_size: size of the mini-batch
    :param shuffled: If true, rows are shuffled before batching
    :return: a list of (X, Y, YNEG) mini-batch tuples; the final batch may
        be smaller than batch_size.
    """
    logger = logging.getLogger('main')
    logger.info('Processing into mini-batches...')
    n_examples = X.shape[0]
    # Apply one shared random permutation so the three arrays stay aligned.
    if shuffled:
        order = np.random.permutation(n_examples)
        X = X[order, :]
        Y = Y[order, :]
        YNEG = YNEG[order, :]
    # Stepped slicing covers the trailing partial batch automatically.
    batches = []
    for start in range(0, n_examples, batch_size):
        stop = start + batch_size
        batches.append((X[start:stop, :], Y[start:stop, :], YNEG[start:stop, :]))
    logger.info('Finished processing mini-batches.')
    return batches
# Module-level cursor for sequential example generation: the index of the
# current sentence and of the current target word inside it.  Both are
# mutated by get_training_example() via `global`.
sentence_index = 0
word_index = 0


def get_training_example(sentences, dictionaries, args):
    """ Generates a single training example from the input sentences sequentially
    (a.k.a. we keep track of positioning on the sentence and the target word)
    :param sentences: A list of sentences, where each sentence is a list of word indices
    :param dictionaries: The dictionaries built from corpus
    :param args: Scripts arguments
    :return: A tuple of ([context], target, [negative samples])
    """
    logger = logging.getLogger('main')
    global sentence_index
    global word_index
    current_sentence = sentences[sentence_index]
    # Index shifted down by 2 — consistent with the rest of the module,
    # where the output matrix omits 'PAD' and 'UNK'.
    target = current_sentence[word_index] - 2
    # Shrink context window by random amount
    context_window = np.random.randint(1, args.context_window + 1)
    context = []
    # Clamp the window to the sentence boundaries.
    low = max(word_index - context_window, 0)
    high = min(word_index + context_window + 1, len(current_sentence))
    for cont_ind in range(low, high):
        # Target word cannot be part of context
        if cont_ind == word_index:
            continue
        # Do not use 'UNK' words as context
        # TODO: Remove this check if downsampling is applied
        # if current_sentence[cont_ind] == 1:
        #     continue
        context.append(current_sentence[cont_ind])
    # Pad context with zeros ('PAD' index) up to the fixed maximum width.
    while len(context) < 2 * args.context_window:
        context.append(0)
    # NOTE(review): get_negative_samples is not defined in this part of the
    # module — presumably defined elsewhere in this file; confirm.
    neg_samples = get_negative_samples(target, args.num_neg_samples, dictionaries)
    # Advance the word_index to the next word
    word_index += 1
    # If we reached the end of the sentence, advance to next sentence and reset word index
    if word_index >= len(current_sentence):
        sentence_index += 1
        word_index = 0
    # If we reached the end of the sentences, reset sentence_index back to the first one
    if sentence_index >= len(sentences):
        sentence_index = 0
        logger.info('Epoch completed.')
    return context, target, neg_samples
| 40.909836 | 123 | 0.640232 |
58d14d425be795bfa4409700edc4323d29494ae2 | 307 | py | Python | nicegui/elements/row.py | florianwittkamp/nicegui | 4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06 | [
"MIT"
] | 30 | 2021-06-16T15:46:45.000Z | 2022-03-27T03:14:18.000Z | nicegui/elements/row.py | florianwittkamp/nicegui | 4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06 | [
"MIT"
] | 11 | 2021-05-24T17:05:22.000Z | 2022-02-19T07:13:18.000Z | nicegui/elements/row.py | florianwittkamp/nicegui | 4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06 | [
"MIT"
] | 7 | 2021-07-22T05:51:04.000Z | 2022-01-31T19:39:37.000Z | import justpy as jp
from .group import Group
| 23.615385 | 86 | 0.628664 |
58d1b1562239fddc199cba78a4c7fd5ac432e0af | 102 | py | Python | src/mtvs/__init__.py | digsim/mtvs | d89d12d4cd65eafe732226e588a54874123db7f4 | [
"Apache-2.0"
] | 2 | 2017-11-19T05:51:31.000Z | 2020-01-22T08:12:53.000Z | src/mtvs/__init__.py | digsim/mtvs | d89d12d4cd65eafe732226e588a54874123db7f4 | [
"Apache-2.0"
] | 3 | 2015-12-03T00:34:46.000Z | 2016-01-04T15:49:14.000Z | src/mtvs/__init__.py | digsim/missingTvShows | f17660dc965c7a6eef1b0cfad9577d62087cba56 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pkg_resources
__version__ = pkg_resources.require("mtvs")[0].version
| 17 | 54 | 0.705882 |
58d37fec96977e11aa6010c2276ce5434c4fc6f8 | 452 | py | Python | tests/guinea-pigs/unittest/expected_failure.py | Tirzono/teamcity-messages | e7f7334e2956a9e707222e4c83de9ffeb15b8ac0 | [
"Apache-2.0"
] | 105 | 2015-06-24T15:40:41.000Z | 2022-02-04T10:30:34.000Z | tests/guinea-pigs/unittest/expected_failure.py | Tirzono/teamcity-messages | e7f7334e2956a9e707222e4c83de9ffeb15b8ac0 | [
"Apache-2.0"
] | 145 | 2015-06-24T15:26:28.000Z | 2022-03-22T20:04:19.000Z | tests/guinea-pigs/unittest/expected_failure.py | Tirzono/teamcity-messages | e7f7334e2956a9e707222e4c83de9ffeb15b8ac0 | [
"Apache-2.0"
] | 76 | 2015-07-20T08:18:21.000Z | 2022-03-18T20:03:53.000Z | # coding=utf-8
import sys
from teamcity.unittestpy import TeamcityTestRunner
if sys.version_info < (2, 7):
from unittest2 import main, TestCase, expectedFailure
else:
from unittest import main, TestCase, expectedFailure
main(testRunner=TeamcityTestRunner)
| 26.588235 | 66 | 0.783186 |
58d426717d52de8aec95bf782518c3dd7fa7dafe | 877 | py | Python | main.py | GabrielIFPB/wishlist-fastapi | 1d39bf4c65dcc4491d0836487a218e67dbb07a80 | [
"MIT"
] | null | null | null | main.py | GabrielIFPB/wishlist-fastapi | 1d39bf4c65dcc4491d0836487a218e67dbb07a80 | [
"MIT"
] | null | null | null | main.py | GabrielIFPB/wishlist-fastapi | 1d39bf4c65dcc4491d0836487a218e67dbb07a80 | [
"MIT"
] | null | null | null |
import uvicorn
from fastapi import FastAPI
from database import Base, engine
from routers.user import router as router_user
from routers.product import router as router_product
from routers.authentication import router as router_auth
# Create the application instance.  NOTE: title/description/version are
# user-facing OpenAPI metadata; the description is Portuguese copy and must
# not be altered here.
app = FastAPI(
    title="Wish List",
    description="Permita que seus clientes acompanhem seus produtos favoritos, adicionando-os a uma lista de desejos.",
    version="1.0.0",
)
# Create every table declared on the shared declarative Base (no-op for
# tables that already exist).
Base.metadata.create_all(engine)
# Mount the route groups: authentication, product and user endpoints.
app.include_router(router_auth)
app.include_router(router_product)
app.include_router(router_user)
# Development entry point: run the ASGI server directly with auto-reload.
if __name__ == "__main__":
    uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)
| 21.925 | 116 | 0.735462 |
58d441702771292f5be7e698cfa7a42a16e08886 | 1,605 | py | Python | libs/data_layers/transform.py | lsc25846/Wildlife-Recognition-System | 81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2 | [
"MIT"
] | null | null | null | libs/data_layers/transform.py | lsc25846/Wildlife-Recognition-System | 81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2 | [
"MIT"
] | null | null | null | libs/data_layers/transform.py | lsc25846/Wildlife-Recognition-System | 81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2 | [
"MIT"
] | null | null | null | # encoding: utf-8
import torch
import cv2
import numpy as np
import pdb
def detection_collate(batch):
    """Custom collate fn for batches whose samples carry a variable number
    of ground-truth annotations.

    Each sample is an (image_tensor, annotations) pair where ``annotations``
    is a numpy array of shape (num_gt, ...).  Shorter annotation arrays are
    zero-padded up to the largest num_gt in the batch so everything can be
    stacked into a single tensor.

    Arguments:
        batch: (tuple) A tuple of tensor images and lists of annotations
    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (tensor) [batch, num_gt, 5]
               batch of annotations stacked on their 0 dim, zero-padded
               along dim 1
    """
    images = [sample[0] for sample in batch]
    annos = [sample[1] for sample in batch]
    max_num_gt = max(anno.shape[0] for anno in annos)
    padded_targets = []
    for anno in annos:
        padded_shape = (max_num_gt,) + anno.shape[1:]
        padded = np.zeros(padded_shape, dtype=anno.dtype)
        padded[:anno.shape[0]] = anno
        # FloatTensor casts the (possibly non-float32) padded array to float32.
        padded_targets.append(torch.FloatTensor(padded))
    return torch.stack(images, 0), torch.stack(padded_targets, 0)
58d6ba044f8814b989985c1b13e416f82125fe24 | 2,273 | py | Python | rllab/torch/algos/base.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | rllab/torch/algos/base.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | rllab/torch/algos/base.py | NeurIPSPaperSubmission7934/code_submission | 713fce673e8e3ba30b559d4eebe6d3e4891069ed | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
import numpy as np
import torch
| 33.925373 | 88 | 0.661681 |
58d6f86c63a774052533d60d46858c6d938085a4 | 915 | py | Python | lab-4.py | PavelKovalets/python-reviewer-test | a4d489482f596570abd5d34677f7549e1b724c8e | [
"MIT"
] | null | null | null | lab-4.py | PavelKovalets/python-reviewer-test | a4d489482f596570abd5d34677f7549e1b724c8e | [
"MIT"
] | null | null | null | lab-4.py | PavelKovalets/python-reviewer-test | a4d489482f596570abd5d34677f7549e1b724c8e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import boto3
source_ddb = boto3.resource('dynamodb', 'us-east-1')
dest_ddb = boto3.client('dynamodb', 'us-west-2')
sync(source_ddb, dest_ddb) | 33.888889 | 81 | 0.619672 |
58d7f0d0c28563ede738e4f0ad2b23de85fe0aac | 2,492 | py | Python | core/reports/views.py | jilbertozamorasaa/panda-bigmon-core | 4e38411489e9ca538a000df43aed5280a72b4342 | [
"Apache-2.0"
] | 3 | 2017-07-19T18:29:23.000Z | 2021-11-18T04:57:18.000Z | core/reports/views.py | jilbertozamorasaa/panda-bigmon-core | 4e38411489e9ca538a000df43aed5280a72b4342 | [
"Apache-2.0"
] | 3 | 2021-09-01T15:20:21.000Z | 2022-03-12T01:02:51.000Z | core/reports/views.py | jilbertozamorasaa/panda-bigmon-core | 4e38411489e9ca538a000df43aed5280a72b4342 | [
"Apache-2.0"
] | 9 | 2015-11-17T15:52:23.000Z | 2021-09-07T12:23:16.000Z | """
"""
import json
from django.views.decorators.cache import never_cache
from django.http import HttpResponse
from django.shortcuts import render_to_response
from core.views import initRequest, DateEncoder
from core.reports import MC16aCPReport, ObsoletedTasksReport, TitanProgressReport
| 43.719298 | 270 | 0.72191 |
58d8fe58ae3d14e3614960efa20628276cc29e39 | 4,594 | py | Python | xain/fl/participant/participant.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | 1 | 2020-05-30T20:34:19.000Z | 2020-05-30T20:34:19.000Z | xain/fl/participant/participant.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | null | null | null | xain/fl/participant/participant.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List, Tuple
import numpy as np
import tensorflow as tf
from absl import logging
from xain.datasets import prep
from xain.types import History, Metrics, Partition, Theta, VolumeByClass
from .model_provider import ModelProvider
| 35.068702 | 88 | 0.653243 |
58db0a434e3091024b2614aa6f89111b6536e4cd | 1,380 | py | Python | client/animation/qr_code.py | Nurgak/IoT-RGB-LED-Matrix-Socket | e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c | [
"MIT"
] | 1 | 2022-01-26T09:01:44.000Z | 2022-01-26T09:01:44.000Z | client/animation/qr_code.py | Nurgak/IoT-RGB-LED-Matrix-Socket | e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c | [
"MIT"
] | 3 | 2021-12-28T10:29:02.000Z | 2022-01-06T03:01:08.000Z | client/animation/qr_code.py | Nurgak/IoT-RGB-LED-Matrix-Socket | e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""! QR code generator animation script."""
import numpy as np
import qrcode
from src.animate import Animate
| 30 | 87 | 0.602899 |
58db8c9e99f143cbab9455bc35570eeeb524d210 | 483 | py | Python | tests/test_xiaochengtu.py | lamzuzuzu/yxf_yixue_py | 90eb077f195b543f93a507f28b0a4c016cb0c92f | [
"MIT"
] | 20 | 2019-01-08T08:13:39.000Z | 2021-12-23T09:04:14.000Z | tests/test_xiaochengtu.py | lamzuzuzu/yxf_yixue_py | 90eb077f195b543f93a507f28b0a4c016cb0c92f | [
"MIT"
] | null | null | null | tests/test_xiaochengtu.py | lamzuzuzu/yxf_yixue_py | 90eb077f195b543f93a507f28b0a4c016cb0c92f | [
"MIT"
] | 13 | 2019-04-22T03:25:13.000Z | 2022-01-04T05:43:48.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #
sys.path.append(BASE_DIR)
import datetime
from yxf_yixue.xiaochengtu import XiaochengtuApi
if __name__ == '__main__':
    # NOTE(review): `string` is assigned but never used — presumably a
    # leftover example birth time; the chart below uses `obj` instead.
    string = '1996/02/29 23:16'
    # Chart moment: 2012-03-07 17:40 as a naive local datetime.
    obj = datetime.datetime(2012, 3, 7, 17, 40)
    a = XiaochengtuApi()
    # Cast ("paipan") the chart for the given datetime and show the result.
    res1 = a.paipan(obj)
    print(res1)
    # Print the chart layout to stdout.
    a.print_pan()
    # Fetch the traditional analysis text — TODO confirm the semantics of
    # get_chuantongfenxi (project-defined API, not visible here).
    res2 = a.get_chuantongfenxi()
    print(res2)
| 24.15 | 84 | 0.681159 |
58dc98f64796c7f6f0664ca055829713dcb9192e | 3,662 | py | Python | _Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 1,107 | 2016-09-21T02:18:36.000Z | 2022-03-29T02:52:12.000Z | _Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 18 | 2016-12-22T10:24:47.000Z | 2022-03-11T23:18:43.000Z | _Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 776 | 2016-12-21T12:08:08.000Z | 2022-03-21T06:12:08.000Z | import numpy as np
from sklearn.preprocessing import OneHotEncoder
| 29.296 | 96 | 0.560896 |
58df035c2ab9c1b7f4e6cbacccfa792d055318cf | 9,362 | py | Python | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/graph_debug_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/graph_debug_info.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\024GraphDebugInfoProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001'),
serialized_pb=_b('\n/tensorflow/core/protobuf/graph_debug_info.proto\x12\ntensorflow\"\xd5\x02\n\x0eGraphDebugInfo\x12\r\n\x05\x66iles\x18\x01 \x03(\t\x12\x36\n\x06traces\x18\x02 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.TracesEntry\x1aX\n\x0b\x46ileLineCol\x12\x12\n\nfile_index\x18\x01 \x01(\x05\x12\x0c\n\x04line\x18\x02 \x01(\x05\x12\x0b\n\x03\x63ol\x18\x03 \x01(\x05\x12\x0c\n\x04\x66unc\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\t\x1aL\n\nStackTrace\x12>\n\x0e\x66ile_line_cols\x18\x01 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.FileLineCol\x1aT\n\x0bTracesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.tensorflow.GraphDebugInfo.StackTrace:\x02\x38\x01\x42\x8c\x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3')
)
_GRAPHDEBUGINFO_FILELINECOL = _descriptor.Descriptor(
name='FileLineCol',
full_name='tensorflow.GraphDebugInfo.FileLineCol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_index', full_name='tensorflow.GraphDebugInfo.FileLineCol.file_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='line', full_name='tensorflow.GraphDebugInfo.FileLineCol.line', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='col', full_name='tensorflow.GraphDebugInfo.FileLineCol.col', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='func', full_name='tensorflow.GraphDebugInfo.FileLineCol.func', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='tensorflow.GraphDebugInfo.FileLineCol.code', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=241,
)
_GRAPHDEBUGINFO_STACKTRACE = _descriptor.Descriptor(
name='StackTrace',
full_name='tensorflow.GraphDebugInfo.StackTrace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_line_cols', full_name='tensorflow.GraphDebugInfo.StackTrace.file_line_cols', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=243,
serialized_end=319,
)
_GRAPHDEBUGINFO_TRACESENTRY = _descriptor.Descriptor(
name='TracesEntry',
full_name='tensorflow.GraphDebugInfo.TracesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.GraphDebugInfo.TracesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.GraphDebugInfo.TracesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=321,
serialized_end=405,
)
_GRAPHDEBUGINFO = _descriptor.Descriptor(
name='GraphDebugInfo',
full_name='tensorflow.GraphDebugInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='files', full_name='tensorflow.GraphDebugInfo.files', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='traces', full_name='tensorflow.GraphDebugInfo.traces', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GRAPHDEBUGINFO_FILELINECOL, _GRAPHDEBUGINFO_STACKTRACE, _GRAPHDEBUGINFO_TRACESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=405,
)
_GRAPHDEBUGINFO_FILELINECOL.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_STACKTRACE.fields_by_name['file_line_cols'].message_type = _GRAPHDEBUGINFO_FILELINECOL
_GRAPHDEBUGINFO_STACKTRACE.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_TRACESENTRY.fields_by_name['value'].message_type = _GRAPHDEBUGINFO_STACKTRACE
_GRAPHDEBUGINFO_TRACESENTRY.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO.fields_by_name['traces'].message_type = _GRAPHDEBUGINFO_TRACESENTRY
DESCRIPTOR.message_types_by_name['GraphDebugInfo'] = _GRAPHDEBUGINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GraphDebugInfo = _reflection.GeneratedProtocolMessageType('GraphDebugInfo', (_message.Message,), {
'FileLineCol' : _reflection.GeneratedProtocolMessageType('FileLineCol', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_FILELINECOL,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.FileLineCol)
})
,
'StackTrace' : _reflection.GeneratedProtocolMessageType('StackTrace', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_STACKTRACE,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.StackTrace)
})
,
'TracesEntry' : _reflection.GeneratedProtocolMessageType('TracesEntry', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_TRACESENTRY,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.TracesEntry)
})
,
'DESCRIPTOR' : _GRAPHDEBUGINFO,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo)
})
_sym_db.RegisterMessage(GraphDebugInfo)
_sym_db.RegisterMessage(GraphDebugInfo.FileLineCol)
_sym_db.RegisterMessage(GraphDebugInfo.StackTrace)
_sym_db.RegisterMessage(GraphDebugInfo.TracesEntry)
DESCRIPTOR._options = None
_GRAPHDEBUGINFO_TRACESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 39.838298 | 888 | 0.760735 |
58df52412971e5d196be467f42346c84563d779e | 1,376 | py | Python | tests/inferfaces_tests/test_people.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | null | null | null | tests/inferfaces_tests/test_people.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | 1 | 2019-04-13T10:15:48.000Z | 2019-04-13T10:15:48.000Z | tests/inferfaces_tests/test_people.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
] | null | null | null | import pytest
from tests.test_data.lists import LIST
from tests.test_data.people import MOVIE_CREDITS, PERSON, SHOW_CREDITS
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
from trakt.core.json_parser import parse_tree
from trakt.core.models import Person
| 32 | 83 | 0.721657 |
58e00459697805d8f1e7adbc2795e9616fc70667 | 3,717 | py | Python | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 106 | 2015-07-21T16:18:26.000Z | 2022-03-31T06:45:34.000Z | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 21 | 2015-07-11T03:48:28.000Z | 2022-01-18T12:57:30.000Z | batch_score.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 26 | 2015-07-22T22:38:21.000Z | 2022-03-14T10:11:56.000Z | #!/usr/bin/env python3
import argparse
import os
import sys
import traceback
from lib import core, utilities, run
from lib.attributes import Attributes
from lib.database import Database
def process_arguments():
"""
Uses the argparse module to parse commandline arguments.
Returns:
Dictionary of parsed commandline arguments.
"""
parser = argparse.ArgumentParser(
description='Calculate the scores of a set of repositories.'
)
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Delete cloned repositories from the disk when done.'
)
parser.add_argument(
'-c',
'--config',
type=argparse.FileType('r'),
default='config.json',
dest='config_file',
help='Path to the configuration file.'
)
parser.add_argument(
'-m',
'--manifest',
type=argparse.FileType('r'),
default='manifest.json',
dest='manifest_file',
help='Path to the manifest file.'
)
parser.add_argument(
'-r',
'--repositories-root',
dest='repositories_root',
help='Path to the root of downloaded repositories.'
)
parser.add_argument(
'-s',
'--repositories-sample',
type=argparse.FileType('r'),
dest='repositories_sample',
help='A file containing newline-separated GHTorrent project ids'
)
parser.add_argument(
'-k',
'--key-string',
type=str,
dest='key_string',
default=None,
required=False,
help='String of attribute initials. Uppercase to persist data'
)
parser.add_argument(
'-n',
'--num-processes',
type=int,
dest='num_processes',
default=1,
required=False,
help=(
'Number of processes to spawn when processing repositories'
' from the samples file.'
)
)
parser.add_argument(
'--goldenset',
action='store_true',
dest='goldenset',
help=(
'Indicate that the repositories sample file contains projects'
' from the Golden Set.'
)
)
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
"""
Main execution flow.
"""
try:
args = process_arguments()
config = utilities.read(args.config_file)
manifest = utilities.read(args.manifest_file)
# TODO: Refactor
core.config = config
utilities.TOKENIZER = core.Tokenizer()
database = Database(config['options']['datasource'])
globaloptions = {
'today': config['options']['today'],
'timeout': config['options']['timeout']
}
attributes = Attributes(
manifest['attributes'], database, args.cleanup, args.key_string,
**globaloptions
)
if not os.path.exists(args.repositories_root):
os.makedirs(args.repositories_root, exist_ok=True)
table = 'reaper_results'
if args.goldenset:
table = 'reaper_goldenset'
_run = run.Run(
args.repositories_root, attributes, database,
config['options']['threshold'], args.num_processes
)
_run.run([int(line) for line in args.repositories_sample], table)
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\rCaught interrupt, killing all children...')
| 26.361702 | 76 | 0.584073 |
58e423e71414f032f22c45a5bedf02c030da3667 | 8,423 | py | Python | behave/reporter/summary.py | fluendo/behave | eeffde083456dcf1a0ea9b6139b32091970118c0 | [
"BSD-2-Clause"
] | null | null | null | behave/reporter/summary.py | fluendo/behave | eeffde083456dcf1a0ea9b6139b32091970118c0 | [
"BSD-2-Clause"
] | 2 | 2020-03-21T22:37:54.000Z | 2021-10-04T17:14:14.000Z | behave/reporter/summary.py | fluendo/behave | eeffde083456dcf1a0ea9b6139b32091970118c0 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Provides a summary after each test run.
"""
from __future__ import absolute_import, division, print_function
import sys
from time import time as time_now
from behave.model import Rule, ScenarioOutline # MAYBE: Scenario
from behave.model_core import Status
from behave.reporter.base import Reporter
from behave.formatter.base import StreamOpener
# ---------------------------------------------------------------------------
# CONSTANTS:
# ---------------------------------------------------------------------------
# -- DISABLED: OPTIONAL_STEPS = ('untested', 'undefined')
OPTIONAL_STEPS = (Status.untested,) # MAYBE: Status.undefined
STATUS_ORDER = (Status.passed, Status.failed, Status.skipped,
Status.undefined, Status.untested)
# ---------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# ---------------------------------------------------------------------------
def compute_summary_sum(summary):
"""Compute sum of all summary counts (except: all)
:param summary: Summary counts (as dict).
:return: Sum of all counts (as integer).
"""
counts_sum = 0
for name, count in summary.items():
if name == "all":
continue # IGNORE IT.
counts_sum += count
return counts_sum
# -- PREPARED:
def format_summary2(statement_type, summary, end="\n"):
"""Format the summary line for one statement type.
.. code-block::
6 scenarios (passed: 5, failed: 1, skipped: 0, untested: 0)
:param statement_type:
:param summary:
:return:
"""
parts = []
for status in STATUS_ORDER:
if status.name not in summary:
continue
counts = summary[status.name]
if status in OPTIONAL_STEPS and counts == 0:
# -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
continue
parts.append((status.name, counts))
counts_sum = summary["all"]
statement = pluralize(statement_type, sum)
parts_text = ", ".join(["{0}: {1}".format(name, value)
for name, value in parts])
return "{count:4} {statement:<9} ({parts}){end}".format(
count=counts_sum, statement=statement, parts=parts_text, end=end)
# ---------------------------------------------------------------------------
# REPORTERS:
# ---------------------------------------------------------------------------
| 34.239837 | 141 | 0.581859 |
58e63151e272298d99abe2311270c00ae4f753a6 | 2,109 | py | Python | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z | from typing import Dict, Any
import pytest
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
| 40.557692 | 120 | 0.579896 |
58e6b8cbdb9f5deb8475e765553e3c1da2be8892 | 1,038 | py | Python | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | 1 | 2022-01-22T04:12:48.000Z | 2022-01-22T04:12:48.000Z | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | 4 | 2021-12-23T14:02:17.000Z | 2022-01-26T18:44:06.000Z | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
from cv2 import cv2
from trimap import generate_trimap
from trimap_output_utils import save_trimap_output
if __name__ == "__main__":
main()
| 29.657143 | 113 | 0.739884 |
58e6e3920ecb9bb8ae3ccc039500c2bfac35935a | 2,102 | py | Python | tests/test_views.py | Siecje/graphene-django-sentry | b82188f91717211896cc1dbfc1f0e86de3729734 | [
"MIT"
] | 20 | 2019-03-13T15:28:17.000Z | 2022-03-23T09:52:26.000Z | tests/test_views.py | Siecje/graphene-django-sentry | b82188f91717211896cc1dbfc1f0e86de3729734 | [
"MIT"
] | 5 | 2019-06-29T06:41:16.000Z | 2021-06-10T21:05:25.000Z | tests/test_views.py | Siecje/graphene-django-sentry | b82188f91717211896cc1dbfc1f0e86de3729734 | [
"MIT"
] | 2 | 2019-05-30T13:03:23.000Z | 2019-06-17T16:08:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from http.client import OK
from unittest.mock import MagicMock, patch
from urllib.parse import urlencode
import graphene_django.views as views
from django.urls import reverse
from graphql import GraphQLError
from graphql.error import GraphQLLocatedError
| 26.948718 | 65 | 0.706946 |
58e7d15456033fa62d2766b6d09f022fb1eb2ace | 3,137 | py | Python | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 12 | 2019-03-20T20:43:47.000Z | 2020-04-13T11:10:52.000Z | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 13 | 2018-06-05T11:54:40.000Z | 2019-07-02T11:33:14.000Z | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 1 | 2020-05-12T16:00:38.000Z | 2020-05-12T16:00:38.000Z | # coding: utf8
from __future__ import unicode_literals
# The original stop words list (added in f46ffe3) was taken from
# http://www.damienvanholten.com/downloads/dutch-stop-words.txt
# and consisted of about 100 tokens.
# In order to achieve parity with some of the better-supported
# languages, e.g., English, French, and German, this original list has been
# extended with 200 additional tokens. The main source of inspiration was
# https://raw.githubusercontent.com/stopwords-iso/stopwords-nl/master/stopwords-nl.txt.
# However, quite a bit of manual editing has taken place as well.
# Tokens whose status as a stop word is not entirely clear were admitted or
# rejected by deferring to their counterparts in the stop words lists for English
# and French. Similarly, those lists were used to identify and fill in gaps so
# that -- in principle -- each token contained in the English stop words list
# should have a Dutch counterpart here.
STOP_WORDS = set("""
aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna
afgelopen aldus alhoewel anderzijds
ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven
bovenal bovendien bovenstaand buiten
daar dan dat de der den deze die dit doch doen door dus daarheen daarin daarna daarnet daarom daarop des dezelfde dezen
dien dikwijls doet doorgaand doorgaans
een eens en er echter enige eerder eerst eerste eersten effe eigen elk elke enkel enkele enz erdoor etc even eveneens
evenwel
ff
ge geen geweest gauw gedurende gegeven gehad geheel gekund geleden gelijk gemogen geven geweest gewoon gewoonweg
geworden gij
haar had heb hebben heeft hem het hier hij hoe hun hadden hare hebt hele hen hierbeneden hierboven hierin hoewel hun
iemand iets ik in is idd ieder ikke ikzelf indien inmiddels inz inzake
ja je jou jouw jullie jezelf jij jijzelf jouwe juist
kan kon kunnen klaar konden krachtens kunnen kunt
lang later liet liever
maar me meer men met mij mijn moet mag mede meer meesten mezelf mijzelf min minder misschien mocht mochten moest moesten
moet moeten mogelijk mogen
na naar niet niets nog nu nabij nadat net nogal nooit nr nu
of om omdat ons ook op over omhoog omlaag omstreeks omtrent omver onder ondertussen ongeveer onszelf onze ooit opdat
opnieuw opzij over overigens
pas pp precies prof publ
reeds rond rondom
sedert sinds sindsdien slechts sommige spoedig steeds
t 't te tegen toch toen tot tamelijk ten tenzij ter terwijl thans tijdens toe totdat tussen
u uit uw uitgezonderd uwe uwen
van veel voor vaak vanaf vandaan vanuit vanwege veeleer verder verre vervolgens vgl volgens vooraf vooral vooralsnog
voorbij voordat voordien voorheen voorop voort voorts vooruit vrij vroeg
want waren was wat we wel werd wezen wie wij wil worden waar waarom wanneer want weer weg wegens weinig weinige weldra
welk welke welken werd werden wiens wier wilde wordt
zal ze zei zelf zich zij zijn zo zonder zou zeer zeker zekere zelfde zelfs zichzelf zijnde zijne zon zoals zodra zouden
zoveel zowat zulk zulke zulks zullen zult
""".split())
| 42.391892 | 120 | 0.808734 |
58e841d7116f44d86fb300ae823c11eb893353a1 | 1,858 | py | Python | liv_covid19/web/artic/opentrons_thread.py | neilswainston/liv-covid19 | 4842fccdca626caca50bd7c545e3f673660503d4 | [
"MIT"
] | 2 | 2020-03-31T12:59:13.000Z | 2021-02-08T21:40:20.000Z | liv_covid19/web/artic/opentrons_thread.py | neilswainston/liv-covid19 | 4842fccdca626caca50bd7c545e3f673660503d4 | [
"MIT"
] | null | null | null | liv_covid19/web/artic/opentrons_thread.py | neilswainston/liv-covid19 | 4842fccdca626caca50bd7c545e3f673660503d4 | [
"MIT"
] | 2 | 2020-06-23T16:49:20.000Z | 2020-06-25T14:59:32.000Z | '''
(c) University of Liverpool 2020
Licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>..
@author: neilswainston
'''
# pylint: disable=broad-except
import os.path
import tempfile
from liv_covid19.web.artic import opentrons
from liv_covid19.web.job import JobThread, save_export
| 30.459016 | 77 | 0.595264 |
58e8cb21bea9ec496741309cc75c724289559dd8 | 838 | py | Python | futuquant/common/ft_logger.py | hxhxhx88/futuquant | a1b4a875604f1de451ddde4bfa3e713452482b0a | [
"Apache-2.0"
] | null | null | null | futuquant/common/ft_logger.py | hxhxhx88/futuquant | a1b4a875604f1de451ddde4bfa3e713452482b0a | [
"Apache-2.0"
] | null | null | null | futuquant/common/ft_logger.py | hxhxhx88/futuquant | a1b4a875604f1de451ddde4bfa3e713452482b0a | [
"Apache-2.0"
] | null | null | null | import logging
from datetime import datetime
import os
logger = logging.getLogger('FT')
log_level = logging.INFO
is_file_log = True
# loggerlevelDEBUG
logger.setLevel(log_level)
# StreamHandler
hdr = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s [%(filename)s] %(funcName)s:%(lineno)d: %(message)s')
hdr.setFormatter(formatter)
# loggerhandler
logger.addHandler(hdr)
# handle
if is_file_log:
filename = 'ft_' + datetime.now().strftime('%Y%m%d') + '.log'
tempPath = os.path.join(os.getcwd(), 'log')
if not os.path.exists(tempPath):
os.makedirs(tempPath)
filepath = os.path.join(tempPath, filename)
fileHandler = logging.FileHandler(filepath)
fileHandler.setLevel(log_level)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
| 24.647059 | 70 | 0.731504 |
58ecb5c52b1dbb5c09611ce11efbdfb06b5edf51 | 3,946 | py | Python | image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py | poncos/deeplearning | 0874b22dab05117bcf39ccb4895d513ab6f87861 | [
"MIT"
] | null | null | null | image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py | poncos/deeplearning | 0874b22dab05117bcf39ccb4895d513ab6f87861 | [
"MIT"
] | null | null | null | image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py | poncos/deeplearning | 0874b22dab05117bcf39ccb4895d513ab6f87861 | [
"MIT"
] | null | null | null | # Copyright 2018 Esteban Collado.
#
# Licensed under the MIT License
import tensorflow as tf
DEFAULT_VARIABLE_NAMES = ['conv1', 'conv2', 'conv3', 'conv4', 'fc1', 'fc2', 'softmax_linear']
BATCH_SIZE = 200
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
IMAGE_DEPTH = 3
NUM_CLASSES = 10
INPUT_PLACEHOLDER = 'X_INPUT'
LABELS_PLACEHOLDER = 'Y_LABELS'
| 43.362637 | 118 | 0.640902 |
58eeedb6cd1adb5de820dbc349b434e1a3735952 | 425 | py | Python | wrappers/Python/sbmlsolver/__init__.py | gitter-badger/sbmlsolver | c92936832297ea1d2ad7f17223b68ada43c8f0b2 | [
"Apache-2.0"
] | null | null | null | wrappers/Python/sbmlsolver/__init__.py | gitter-badger/sbmlsolver | c92936832297ea1d2ad7f17223b68ada43c8f0b2 | [
"Apache-2.0"
] | null | null | null | wrappers/Python/sbmlsolver/__init__.py | gitter-badger/sbmlsolver | c92936832297ea1d2ad7f17223b68ada43c8f0b2 | [
"Apache-2.0"
] | null | null | null | """
The LibRoadRunner SBML Simulation Engine, (c) 2009-2014 Andy Somogyi and Herbert Sauro
LibRoadRunner is an SBML JIT compiler and simulation engine with a variety of analysis
functions. LibRoadRunner is a self contained library which is designed to be integrated
into existing simulation platforms or may be used a stand alone simulation and analysis
package.
"""
from sbmlsolver import *
__version__ = getVersionStr()
| 32.692308 | 87 | 0.807059 |
58f0ab77666277ac6d3ddc06e53dedb0c6d49f2b | 1,573 | py | Python | classification/tests/test_evidence_mixin.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | classification/tests/test_evidence_mixin.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | classification/tests/test_evidence_mixin.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from classification.models import EvidenceMixin
from classification.models.evidence_mixin import VCStore
# doesn't work without Transcripts loaded now
# class EvidenceMixinTest(TestCase):
#
# @override_settings(VARIANT_ANNOTATION_TRANSCRIPT_PREFERENCES=['refseq_transcript_accession'])
# def test_get_transcript(self):
# # if transcript version is in c.hgvs use it
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.REFSEQ_TRANSCRIPT_ID: "NM_020975",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_020975.5")
#
# # if transcript version is in c.hgvs but transcript doesn't match
# # value in transcript field, use the raw transcript value
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.REFSEQ_TRANSCRIPT_ID: "NM_033333",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_033333")
#
# # if there is no transcript field, use the contents of c.hgvs
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_020975.5")
| 37.452381 | 99 | 0.650985 |
58f0dabb24cb5744c956fc257b97c051c5d3142b | 674 | py | Python | scronsole/widgets/main_screen.py | bastianh/screeps_console_mod | e093cc1e071fae5bdf106674b97e71902fbbb6ff | [
"MIT"
] | 2 | 2017-10-08T19:39:27.000Z | 2017-10-08T19:51:18.000Z | scronsole/widgets/main_screen.py | bastianh/screeps_console_mod | e093cc1e071fae5bdf106674b97e71902fbbb6ff | [
"MIT"
] | null | null | null | scronsole/widgets/main_screen.py | bastianh/screeps_console_mod | e093cc1e071fae5bdf106674b97e71902fbbb6ff | [
"MIT"
] | null | null | null | import urwid
from scronsole.config_manager import ConfigManager
from scronsole.plugin_manager import PluginManager
from scronsole.widgets.main_menu import MainMenu
from scronsole.widgets.server_screen import ServerScreen
| 30.636364 | 62 | 0.746291 |
58f1e5bfcc6007b51ace335dfbea68c9b539583f | 436 | py | Python | sql/language.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | 2 | 2019-10-23T08:27:30.000Z | 2019-10-23T09:58:45.000Z | sql/language.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | null | null | null | sql/language.py | skylarkgit/sql2java | befd55180969b0ec68e242991c3260272d755cc9 | [
"MIT"
] | null | null | null | import re
from csv import reader
| 31.142857 | 88 | 0.53211 |
58f3c7c8febd7b51f53b623ee90e4c562e1d0bd1 | 659 | py | Python | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | 1 | 2020-11-04T15:30:18.000Z | 2020-11-04T15:30:18.000Z | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | null | null | null | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | null | null | null | import itertools
#################################################
# This function will see if there is any #
# possible combination of the numbers in #
# the array that will give the largest number #
#################################################
print ArrayAdditionI(raw_input())
| 26.36 | 54 | 0.53566 |
58f4750834ad708c962b1818098008a6819ef467 | 1,994 | py | Python | SVM.py | JAMJU/KernelMethod | e52f5a0cfaefa87073facd88220c311709e513e8 | [
"MIT"
] | null | null | null | SVM.py | JAMJU/KernelMethod | e52f5a0cfaefa87073facd88220c311709e513e8 | [
"MIT"
] | null | null | null | SVM.py | JAMJU/KernelMethod | e52f5a0cfaefa87073facd88220c311709e513e8 | [
"MIT"
] | null | null | null | import numpy as np
import quadprog
def quadprog_solve_qp(P, q, G=None, h=None, A=None, b=None):
""" Solve a QP of the form min 1/2xTPx + qTx st Gx < h st Ax=b"""
#qp_G = .5 * (P + P.T) # make sure P is symmetric
qp_G = P
qp_a = -q
if A is not None:
qp_C = -np.vstack([A, G]).T
qp_b = -np.hstack([b, h])
meq = A.shape[0]
else: # no equality constraint
qp_C = -G.T
qp_b = -h
meq = 0
return quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]
def evaluate_alpha(alpha, K, label):
""" Return success percent """
result = K.dot(alpha)
success = [float(result[i,0]*label[i] > 0) for i in range(len(label))]
return np.mean(success)*100
def svm_compute_label(data_in_kernel, alpha):
""" Compute the label for the data given (in the form data[i,j] = K(x, xj) with x a new data, xj in the data set"""
result = data_in_kernel.dot(alpha)
return [int(result[i,0] > 0.) for i in range(data_in_kernel.shape[0])]
""" Just an example of how quadprog works :
M = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
P = np.dot(M.T, M)
q = np.dot(np.array([3., 2., 3.]), M).reshape((3,))
G = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
h = np.array([3., 2., -2.]).reshape((3,))
al = quadprog_solve_qp(P, q, G, h)
print(al)""" | 33.79661 | 119 | 0.57322 |
58f7245b5e8f50a694e2a4405f7daff21e842618 | 1,547 | py | Python | util/MalShare.py | cclauss/ph0neutria | 04b6a569d4e707c3de652ba7ad15c1b5223bebcb | [
"Apache-2.0"
] | null | null | null | util/MalShare.py | cclauss/ph0neutria | 04b6a569d4e707c3de652ba7ad15c1b5223bebcb | [
"Apache-2.0"
] | null | null | null | util/MalShare.py | cclauss/ph0neutria | 04b6a569d4e707c3de652ba7ad15c1b5223bebcb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from ConfigUtils import getBaseConfig
from LogUtils import getModuleLogger
from StringUtils import isValidUrl, randomString
from urlparse import urlparse
import json
import os
import requests
import sys
cDir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.abspath(os.path.join(cDir, os.pardir))
baseConfig = getBaseConfig(rootDir)
logging = getModuleLogger(__name__)
| 29.75 | 129 | 0.661926 |
58f785be1e5480e5359e098bb9e3ecdf8b2d4246 | 705 | py | Python | attempt/runner.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | null | null | null | attempt/runner.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | null | null | null | attempt/runner.py | prstolpe/rrc_simulation | b430fe4e575641cdd64945cf57d0dd67a0eea17a | [
"BSD-3-Clause"
] | null | null | null |
from attempt.ddpg import HERDDPG, DDPG
import gym
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
if __name__ == "__main__":
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
env = gym.make('FetchReach-v1')
agent = HERDDPG(env)
for epoch in range(2):
for cycle in tqdm(range(10)):
agent.gather_cycle()
# target_agent.train()
agent.test_env(10)
env.close()
plt.plot(np.vstack(agent.rewards))
plt.title('Rewards')
plt.show()
plt.plot(np.vstack(agent.policy_losses))
plt.title('Policy Losses')
plt.show()
plt.plot(np.vstack(agent.value_losses))
plt.title('Value Losses')
plt.show() | 20.735294 | 44 | 0.64539 |
58f82c89b0b711b196471a3d0d54cc05fadd6ef6 | 8,187 | py | Python | src/shogun/base/class_list.cpp.py | srgnuclear/shogun | 33c04f77a642416376521b0cd1eed29b3256ac13 | [
"Ruby",
"MIT"
] | 1 | 2015-11-05T18:31:14.000Z | 2015-11-05T18:31:14.000Z | src/shogun/base/class_list.cpp.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | src/shogun/base/class_list.cpp.py | waderly/shogun | 9288b6fa38e001d63c32188f7f847dadea66e2ae | [
"Ruby",
"MIT"
] | null | null | null | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2008-2009 Soeren Sonnenburg
# Copyright (C) 2008-2009 Fraunhofer Institute FIRST and Max Planck Society
class_str='class'
types=["BOOL", "CHAR", "INT8", "UINT8", "INT16", "UINT16", "INT32", "UINT32",
"INT64", "UINT64", "FLOAT32", "FLOAT64", "FLOATMAX", "COMPLEX128"]
config_tests=["HAVE_HDF5", "HAVE_JSON", "HAVE_XML", "HAVE_LAPACK", "USE_CPLEX",
"USE_SVMLIGHT", "USE_GLPK", "USE_LZO", "USE_GZIP", "USE_BZIP2", "USE_LZMA",
"USE_MOSEK", "HAVE_EIGEN3", "HAVE_COLPACK", "HAVE_NLOPT", "HAVE_PROTOBUF",
"HAVE_VIENNACL"]
SHOGUN_TEMPLATE_CLASS = "SHOGUN_TEMPLATE_CLASS"
SHOGUN_BASIC_CLASS = "SHOGUN_BASIC_CLASS"
def extract_classes(HEADERS, template, blacklist, supports_complex):
"""
Search in headers for non-template/non-abstract class-names starting
with `C'.
Does not support local nor multiple classes and
drops classes with pure virtual functions
"""
classes=list()
for fname in HEADERS:
try:
lines=open(fname).readlines()
except: # python3 workaround
lines=open(fname, encoding='utf-8', errors='ignore').readlines()
line_nr=0
while line_nr<len(lines):
line=lines[line_nr]
if line.find('IGNORE_IN_CLASSLIST')!=-1:
line_nr+=1
continue
c=None
if template:
tp=line.find('template')
if tp!=-1:
line=line[tp:]
cp=line.find('>')
line=line[cp+1:]
cp=line.find(class_str)
if cp!=-1:
c=extract_class_name(lines, line_nr, line, blacklist)
else:
if line.find(class_str)!=-1:
c=extract_class_name(lines, line_nr, None, blacklist)
if c:
ok, line_nr=test_candidate(c, lines, line_nr, supports_complex)
if ok:
classes.append((c,template))
continue
line_nr+=1
return classes
if __name__=='__main__':
import sys
TEMPL_FILE=sys.argv[1]
HEADERS=None
if (sys.argv[2] == "-in"):
# read header file list from file
with open(sys.argv[3]) as f:
content = f.readlines()
HEADERS = [x.strip() for x in content]
else:
HEADERS=sys.argv[2:]
blacklist = get_blacklist()
classes = extract_classes(HEADERS, False, blacklist, False)
template_classes = extract_classes(HEADERS, True, blacklist, False)
complex_template_classes = extract_classes(HEADERS, True, blacklist, True)
includes = get_includes(classes+template_classes+complex_template_classes)
definitions = get_definitions(classes)
template_definitions = get_template_definitions(template_classes, False)
complex_template_definitions = get_template_definitions(complex_template_classes, True)
struct = get_struct(classes+template_classes+complex_template_classes)
substitutes = {'includes': includes,
'definitions' :definitions,
'template_definitions' : template_definitions,
'complex_template_definitions' : complex_template_definitions,
'struct' : struct
}
write_templated_file(TEMPL_FILE, substitutes)
| 27.109272 | 130 | 0.693416 |
58f8d01058e75992d07c8d9e6c624ed7a5775471 | 771 | py | Python | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | 2 | 2019-12-05T01:58:22.000Z | 2019-12-14T09:19:28.000Z | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | null | null | null | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# project = https://github.com/Xyntax/POC-T
# author = i@cdxy.me
"""
Apache Solr PoC
(iterate_path)
Usage
python POC-T.py -s solr-unauth -iF target.txt
python POC-T.py -s solr-unauth -aZ "solr country:cn"
"""
from lib.verify import verify
from lib.random_header import get_ua
import requests
vuln = ['solr']
| 24.09375 | 95 | 0.608301 |