hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81b112f3d2024fef2d743717dabaf4db53727b51 | 661 | py | Python | tests/functional_tests/test_camera.py | accessai/access-face-vision | 04469ebc03ac9644a44bbdb90951f1821dca0f6d | [
"Apache-2.0"
] | 3 | 2019-07-19T17:59:19.000Z | 2019-07-21T16:07:43.000Z | tests/functional_tests/test_camera.py | accessai/access-face-vision | 04469ebc03ac9644a44bbdb90951f1821dca0f6d | [
"Apache-2.0"
] | 9 | 2019-07-19T17:38:11.000Z | 2022-03-11T23:53:13.000Z | tests/functional_tests/test_camera.py | accessai/access-face-vision | 04469ebc03ac9644a44bbdb90951f1821dca0f6d | [
"Apache-2.0"
] | 1 | 2019-07-21T16:07:54.000Z | 2019-07-21T16:07:54.000Z | from multiprocessing import Queue, Value
from time import sleep
from access_face_vision.source.camera import Camera
from access_face_vision.utils import create_parser
from access_face_vision import access_logger
LOG_LEVEL = 'debug'
logger, log_que, que_listener = access_logger.set_main_process_logger(LOG_LEVEL)
if __name__ == '__main__':
test_camera()
| 26.44 | 90 | 0.754917 |
81b28caa54d539dfc14006299c0cf1e06133e78c | 1,537 | py | Python | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | 5 | 2020-09-30T16:58:59.000Z | 2021-11-30T22:34:10.000Z | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | utils/deserializer/__tests__/test_protobuf_deserializer.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | from utils.deserializer.protobuf_deserializer import ProtoLoader
from pathlib import Path
import pandas as pd
import pytest
PROTOFILES_DIR_PATH = Path(__file__).parent.joinpath("protofilesdir").absolute().__str__()
INVALID_PATH = "some/wrong/path"
| 33.413043 | 92 | 0.791802 |
81b2cfe5a1a59f76e8e712dc7fabc6c32050694c | 18,966 | py | Python | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 1 | 2020-01-22T17:48:30.000Z | 2020-01-22T17:48:30.000Z | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 17 | 2019-09-13T22:21:15.000Z | 2019-10-25T20:04:26.000Z | wisdem/assemblies/turbinese/turbine_se_seam.py | dzalkind/WISDEM | 724a7bf9c19bad3ca7e18c240628d1a75b07e3f0 | [
"Apache-2.0"
] | 2 | 2019-03-21T10:38:05.000Z | 2021-01-08T18:49:53.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
turbine.py
Created by Andrew Ning and Katherine Dykes on 2014-01-13.
Copyright (c) NREL. All rights reserved.
"""
from openmdao.main.api import Assembly, Component
from openmdao.main.datatypes.api import Float, Array, Enum, Bool, Int
from openmdao.lib.drivers.api import FixedPointIterator
import numpy as np
#from rotorse.rotor import RotorSE
#from towerse.tower import TowerSE
#from commonse.rna import RNAMass, RotorLoads
from drivewpact.drive import DriveWPACT
from drivewpact.hub import HubWPACT
from commonse.csystem import DirectionVector
from commonse.utilities import interp_with_deriv, hstack, vstack
from drivese.drive import Drive4pt, Drive3pt
from drivese.drivese_utils import blade_moment_transform, blade_force_transform
from drivese.hub import HubSE, Hub_System_Adder_drive
from SEAMLoads.SEAMLoads import SEAMLoads
from SEAMTower.SEAMTower import SEAMTower
from SEAMAero.SEAM_AEP import SEAM_PowerCurve
from SEAMRotor.SEAMRotor import SEAMBladeStructure
# from SEAMGeometry.SEAMGeometry import SEAMGeometry
def configure_turbine(assembly, with_new_nacelle=True, flexible_blade=False, with_3pt_drive=False):
"""a stand-alone configure method to allow for flatter assemblies
Parameters
----------
assembly : Assembly
an openmdao assembly to be configured
with_new_nacelle : bool
False uses the default implementation, True uses an experimental implementation designed
to smooth out discontinities making in amenable for gradient-based optimization
flexible_blade : bool
if True, internally solves the coupled aero/structural deflection using fixed point iteration.
Note that the coupling is currently only in the flapwise deflection, and is primarily
only important for highly flexible blades. If False, the aero loads are passed
to the structure but there is no further iteration.
"""
#SEAM variables ----------------------------------
#d2e = Float(0.73, iotype='in', desc='Dollars to Euro ratio'
assembly.add('rated_power',Float(3000., iotype='in', units='kW', desc='Turbine rated power', group='Global'))
assembly.add('hub_height', Float(100., iotype='in', units='m', desc='Hub height', group='Global'))
assembly.add('rotor_diameter', Float(110., iotype='in', units='m', desc='Rotor diameter', group='Global'))
# assembly.add('site_type',Enum('onshore', values=('onshore', 'offshore'), iotype='in', desc='Site type', group='Global'))
assembly.add('tower_bottom_diameter', Float(4., iotype='in', desc='Tower bottom diameter', group='Global'))
assembly.add('tower_top_diameter', Float(2., iotype='in', desc='Tower top diameter', group='Global'))
assembly.add('project_lifetime', Float(iotype = 'in', desc='Operating years', group='Global'))
assembly.add('rho_steel', Float(7.8e3, iotype='in', desc='density of steel', group='Tower'))
assembly.add('lifetime_cycles', Float(1.e7, iotype='in', desc='Equivalent lifetime cycles', group='Rotor'))
assembly.add('stress_limit_extreme_tower', Float(iotype='in', units='MPa', desc='Tower ultimate strength', group='Tower'))
assembly.add('stress_limit_fatigue_tower', Float(iotype='in', units='MPa', desc='Tower fatigue strength', group='Tower'))
assembly.add('safety_factor_tower', Float(iotype='in', desc='Tower loads safety factor', group='Tower'))
assembly.add('PMtarget_tower', Float(1., iotype='in', desc='', group='Tower'))
assembly.add('wohler_exponent_tower', Float(4., iotype='in', desc='Tower fatigue Wohler exponent', group='Tower'))
assembly.add('tower_z', Array(iotype='out', desc='Tower discretization'))
assembly.add('tower_wall_thickness', Array(iotype='out', units='m', desc='Tower wall thickness'))
assembly.add('tower_mass', Float(iotype='out', units='kg', desc='Tower mass'))
assembly.add('tsr', Float(iotype='in', units='m', desc='Design tip speed ratio', group='Aero'))
assembly.add('F', Float(iotype='in', desc='Rotor power loss factor', group='Aero'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Wohler Exponent blade flap', group='Rotor'))
assembly.add('nSigma4fatFlap', Float(iotype='in', desc='', group='Loads'))
assembly.add('nSigma4fatTower', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_flap', Float(iotype='in', desc='', group='Loads'))
assembly.add('dLoad_dU_factor_tower', Float(iotype='in', desc='', group='Loads'))
assembly.add('blade_edge_dynload_factor_ext', Float(iotype='in', desc='Extreme dynamic edgewise loads factor', group='Loads'))
assembly.add('blade_edge_dynload_factor_fat', Float(iotype='in', desc='Fatigue dynamic edgewise loads factor', group='Loads'))
assembly.add('PMtarget_blades', Float(1., iotype='in', desc='', group='Rotor'))
assembly.add('max_tipspeed', Float(iotype='in', desc='Maximum tip speed', group='Aero'))
assembly.add('n_wsp', Int(iotype='in', desc='Number of wind speed bins', group='Aero'))
assembly.add('min_wsp', Float(0.0, iotype = 'in', units = 'm/s', desc = 'min wind speed', group='Aero'))
assembly.add('max_wsp', Float(iotype = 'in', units = 'm/s', desc = 'max wind speed', group='Aero'))
assembly.add('turbulence_int', Float(iotype='in', desc='Reference turbulence intensity', group='Plant_AEP'))
# assembly.add('WeibullInput', Bool(True, iotype='in', desc='Flag for Weibull input', group='AEP'))
assembly.add('weibull_C', Float(iotype = 'in', units='m/s', desc = 'Weibull scale factor', group='AEP'))
assembly.add('weibull_k', Float(iotype = 'in', desc='Weibull shape or form factor', group='AEP'))
assembly.add('blade_sections', Int(iotype='in', desc='number of sections along blade', group='Rotor'))
assembly.add('wohler_exponent_blade_flap', Float(iotype='in', desc='Blade flap fatigue Wohler exponent', group='Rotor'))
assembly.add('MaxChordrR', Float(iotype='in', units='m', desc='Spanwise position of maximum chord', group='Rotor'))
assembly.add('tif_blade_root_flap_ext', Float(1., iotype='in', desc='Technology improvement factor flap extreme', group='Rotor'))
assembly.add('tif_blade_root_edge_ext', Float(1., iotype='in', desc='Technology improvement factor edge extreme', group='Rotor'))
assembly.add('tif_blade_root_flap_fat', Float(1., iotype='in', desc='Technology improvement factor flap LEQ', group='Rotor'))
assembly.add('sc_frac_flap', Float(iotype='in', desc='spar cap fraction of chord', group='Rotor'))
assembly.add('sc_frac_edge', Float(iotype='in', desc='spar cap fraction of thickness', group='Rotor'))
assembly.add('safety_factor_blade', Float(iotype='in', desc='Blade loads safety factor', group='Rotor'))
assembly.add('stress_limit_extreme_blade', Float(iotype='in', units='MPa', desc='Blade ultimate strength', group='Rotor'))
assembly.add('stress_limit_fatigue_blade', Float(iotype='in', units='MPa', desc='Blade fatigue strength', group='Rotor'))
assembly.add('AddWeightFactorBlade', Float(iotype='in', desc='Additional weight factor for blade shell', group='Rotor'))
assembly.add('blade_material_density', Float(iotype='in', units='kg/m**3', desc='Average density of blade materials', group='Rotor'))
assembly.add('blade_mass', Float(iotype = 'out', units = 'kg', desc = 'Blade mass'))
# assembly.add('mean_wsp', Float(iotype = 'in', units = 'm/s', desc = 'mean wind speed', group='Aero')) # [m/s]
assembly.add('air_density', Float(iotype = 'in', units = 'kg/m**3', desc = 'density of air', group='Plant_AEP')) # [kg / m^3]
assembly.add('max_Cp', Float(iotype = 'in', desc = 'max CP', group='Aero'))
assembly.add('gearloss_const', Float(iotype = 'in', desc = 'Gear loss constant', group='Drivetrain'))
assembly.add('gearloss_var', Float(iotype = 'in', desc = 'Gear loss variable', group='Drivetrain'))
assembly.add('genloss', Float(iotype = 'in', desc = 'Generator loss', group='Drivetrain'))
assembly.add('convloss', Float(iotype = 'in', desc = 'Converter loss', group='Drivetrain'))
# Outputs
assembly.add('rated_wind_speed', Float(units = 'm / s', iotype='out', desc='wind speed for rated power'))
assembly.add('ideal_power_curve', Array(iotype='out', units='kW', desc='total power before losses and turbulence'))
assembly.add('power_curve', Array(iotype='out', units='kW', desc='total power including losses and turbulence'))
assembly.add('wind_curve', Array(iotype='out', units='m/s', desc='wind curve associated with power curve'))
assembly.add('aep', Float(iotype = 'out', units='mW*h', desc='Annual energy production in mWh'))
assembly.add('total_aep', Float(iotype = 'out', units='mW*h', desc='AEP for total years of production'))
# END SEAM Variables ----------------------
# Add SEAM components and connections
assembly.add('loads', SEAMLoads())
assembly.add('tower_design', SEAMTower(21))
assembly.add('blade_design', SEAMBladeStructure())
assembly.add('aep_calc', SEAM_PowerCurve())
assembly.driver.workflow.add(['loads', 'tower_design', 'blade_design', 'aep_calc'])
assembly.connect('loads.tower_bottom_moment_max', 'tower_design.tower_bottom_moment_max')
assembly.connect('loads.tower_bottom_moment_leq', 'tower_design.tower_bottom_moment_leq')
assembly.connect('loads.blade_root_flap_max', 'blade_design.blade_root_flap_max')
assembly.connect('loads.blade_root_edge_max', 'blade_design.blade_root_edge_max')
assembly.connect('loads.blade_root_flap_leq', 'blade_design.blade_root_flap_leq')
assembly.connect('loads.blade_root_edge_leq', 'blade_design.blade_root_edge_leq')
connect_io(assembly, assembly.aep_calc)
connect_io(assembly, assembly.loads)
connect_io(assembly, assembly.tower_design)
connect_io(assembly, assembly.blade_design)
# End SEAM add components and connections -------------
if with_new_nacelle:
assembly.add('hub',HubSE())
assembly.add('hubSystem',Hub_System_Adder_drive())
if with_3pt_drive:
assembly.add('nacelle', Drive3pt())
else:
assembly.add('nacelle', Drive4pt())
else:
assembly.add('nacelle', DriveWPACT())
assembly.add('hub', HubWPACT())
assembly.driver.workflow.add(['hub', 'nacelle'])
if with_new_nacelle:
assembly.driver.workflow.add(['hubSystem'])
# connections to hub and hub system
assembly.connect('blade_design.blade_mass', 'hub.blade_mass')
assembly.connect('loads.blade_root_flap_max', 'hub.rotor_bending_moment')
assembly.connect('rotor_diameter', ['hub.rotor_diameter'])
assembly.connect('blade_design.blade_root_diameter', 'hub.blade_root_diameter')
assembly.add('blade_number',Int(3,iotype='in',desc='number of blades', group='Aero'))
assembly.connect('blade_number', 'hub.blade_number')
if with_new_nacelle:
assembly.connect('rated_power','hub.machine_rating')
assembly.connect('rotor_diameter', ['hubSystem.rotor_diameter'])
assembly.connect('nacelle.MB1_location','hubSystem.MB1_location') # TODO: bearing locations
assembly.connect('nacelle.L_rb','hubSystem.L_rb')
assembly.add('rotor_tilt', Float(5.0, iotype='in', desc='rotor tilt', group='Rotor'))
assembly.connect('rotor_tilt','hubSystem.shaft_angle')
assembly.connect('hub.hub_diameter','hubSystem.hub_diameter')
assembly.connect('hub.hub_thickness','hubSystem.hub_thickness')
assembly.connect('hub.hub_mass','hubSystem.hub_mass')
assembly.connect('hub.spinner_mass','hubSystem.spinner_mass')
assembly.connect('hub.pitch_system_mass','hubSystem.pitch_system_mass')
# connections to nacelle #TODO: fatigue option variables
assembly.connect('rotor_diameter', 'nacelle.rotor_diameter')
assembly.connect('1.5 * aep_calc.rated_torque', 'nacelle.rotor_torque')
assembly.connect('loads.max_thrust', 'nacelle.rotor_thrust')
assembly.connect('aep_calc.rated_speed', 'nacelle.rotor_speed')
assembly.connect('rated_power', 'nacelle.machine_rating')
assembly.add('generator_speed',Float(1173.7,iotype='in',units='rpm',desc='speed of generator', group='Drivetrain')) # - should be in nacelle
assembly.connect('generator_speed/aep_calc.rated_speed', 'nacelle.gear_ratio')
assembly.connect('tower_top_diameter', 'nacelle.tower_top_diameter')
assembly.connect('blade_number * blade_design.blade_mass + hub.hub_system_mass', 'nacelle.rotor_mass') # assuming not already in rotor force / moments
# variable connections for new nacelle
if with_new_nacelle:
assembly.connect('blade_number','nacelle.blade_number')
assembly.connect('rotor_tilt','nacelle.shaft_angle')
assembly.connect('333.3 * rated_power / 1000.0','nacelle.shrink_disc_mass')
assembly.connect('blade_design.blade_root_diameter','nacelle.blade_root_diameter')
#moments - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Mxyz_0','moments.b1')
#assembly.connect('rotor.Mxyz_120','moments.b2')
#assembly.connect('rotor.Mxyz_240','moments.b3')
#assembly.connect('rotor.Pitch','moments.pitch_angle')
#assembly.connect('rotor.TotalCone','moments.cone_angle')
assembly.connect('1.5 * aep_calc.rated_torque','nacelle.rotor_bending_moment_x') #accounted for in ratedConditions.Q
#assembly.connect('moments.My','nacelle.rotor_bending_moment_y')
#assembly.connect('moments.Mz','nacelle.rotor_bending_moment_z')
#forces - ignoring for now (nacelle will use internal defaults)
#assembly.connect('rotor.Fxyz_0','forces.b1')
#assembly.connect('rotor.Fxyz_120','forces.b2')
#assembly.connect('rotor.Fxyz_240','forces.b3')
#assembly.connect('rotor.Pitch','forces.pitch_angle')
#assembly.connect('rotor.TotalCone','forces.cone_angle')
assembly.connect('loads.max_thrust','nacelle.rotor_force_x')
#assembly.connect('forces.Fy','nacelle.rotor_force_y')
#assembly.connect('forces.Fz','nacelle.rotor_force_z')
if __name__ == '__main__':
turbine = Turbine_SE_SEAM()
#=========== SEAM inputs
turbine.AddWeightFactorBlade = 1.2
turbine.blade_material_density = 2100.0
turbine.tower_bottom_diameter = 6.
turbine.tower_top_diameter = 3.78
turbine.blade_edge_dynload_factor_ext = 2.5
turbine.blade_edge_dynload_factor_fat = 0.75
turbine.F = 0.777
turbine.MaxChordrR = 0.2
turbine.project_lifetime = 20.0
turbine.lifetime_cycles = 10000000.0
turbine.blade_sections = 21
turbine.PMtarget_tower = 1.0
turbine.PMtarget_blades = 1.0
turbine.safety_factor_blade = 1.1
turbine.safety_factor_tower = 1.5
turbine.stress_limit_extreme_tower = 235.0
turbine.stress_limit_fatigue_tower = 14.885
turbine.stress_limit_extreme_blade = 200.0
turbine.stress_limit_fatigue_blade = 27.0
turbine.tif_blade_root_flap_ext = 1.0
turbine.tif_blade_root_flap_fat = 1.0
turbine.tif_blade_root_edge_ext = 1.0
turbine.weibull_C = 11.0
turbine.weibull_k = 2.0
turbine.wohler_exponent_blade_flap = 10.0
turbine.wohler_exponent_tower = 4.0
turbine.dLoad_dU_factor_flap = 0.9
turbine.dLoad_dU_factor_tower = 0.8
turbine.hub_height = 90.0
turbine.max_tipspeed = 80.0
turbine.n_wsp = 26
turbine.min_wsp = 0.0
turbine.max_wsp = 25.0
turbine.nSigma4fatFlap = 1.2
turbine.nSigma4fatTower = 0.8
turbine.rated_power = 5000.0
turbine.rho_steel = 7800.0
turbine.rotor_diameter = 126.0
turbine.sc_frac_edge = 0.8
turbine.sc_frac_flap = 0.3
turbine.tsr = 8.0
turbine.air_density = 1.225
turbine.turbulence_int = 0.16
turbine.max_Cp = 0.49
turbine.gearloss_const = 0.01 # Fraction
turbine.gearloss_var = 0.014 # Fraction
turbine.genloss = 0.03 # Fraction
turbine.convloss = 0.03 # Fraction
#==============
# === nacelle ======
turbine.blade_number = 3 # turbine level that must be added for SEAM
turbine.rotor_tilt = 5.0 # turbine level that must be added for SEAM
turbine.generator_speed = 1173.7
turbine.nacelle.L_ms = 1.0 # (Float, m): main shaft length downwind of main bearing in low-speed shaft
turbine.nacelle.L_mb = 2.5 # (Float, m): main shaft length in low-speed shaft
turbine.nacelle.h0_front = 1.7 # (Float, m): height of Ibeam in bedplate front
turbine.nacelle.h0_rear = 1.35 # (Float, m): height of Ibeam in bedplate rear
turbine.nacelle.drivetrain_design = 'geared'
turbine.nacelle.crane = True # (Bool): flag for presence of crane
turbine.nacelle.bevel = 0 # (Int): Flag for the presence of a bevel stage - 1 if present, 0 if not
turbine.nacelle.gear_configuration = 'eep' # (Str): tring that represents the configuration of the gearbox (stage number and types)
turbine.nacelle.Np = [3, 3, 1] # (Array): number of planets in each stage
turbine.nacelle.ratio_type = 'optimal' # (Str): optimal or empirical stage ratios
turbine.nacelle.shaft_type = 'normal' # (Str): normal or short shaft length
#turbine.nacelle.shaft_angle = 5.0 # (Float, deg): Angle of the LSS inclindation with respect to the horizontal
turbine.nacelle.shaft_ratio = 0.10 # (Float): Ratio of inner diameter to outer diameter. Leave zero for solid LSS
turbine.nacelle.carrier_mass = 8000.0 # estimated for 5 MW
turbine.nacelle.mb1Type = 'CARB' # (Str): Main bearing type: CARB, TRB or SRB
turbine.nacelle.mb2Type = 'SRB' # (Str): Second bearing type: CARB, TRB or SRB
turbine.nacelle.yaw_motors_number = 8.0 # (Float): number of yaw motors
turbine.nacelle.uptower_transformer = True
turbine.nacelle.flange_length = 0.5 #m
turbine.nacelle.gearbox_cm = 0.1
turbine.nacelle.hss_length = 1.5
turbine.nacelle.overhang = 5.0 #TODO - should come from turbine configuration level
turbine.nacelle.check_fatigue = 0 #0 if no fatigue check, 1 if parameterized fatigue check, 2 if known loads inputs
# =================
# === run ===
turbine.run()
print 'mass rotor blades (kg) =', turbine.blade_number * turbine.blade_design.blade_mass
print 'mass hub system (kg) =', turbine.hubSystem.hub_system_mass
print 'mass nacelle (kg) =', turbine.nacelle.nacelle_mass
print 'mass tower (kg) =', turbine.tower_design.tower_mass
# =================
| 54.188571 | 154 | 0.703048 |
81b36615a4cceca74102543564ca1a7f49b62e92 | 2,880 | py | Python | src/triage/component/results_schema/alembic/versions/5dd2ba8222b1_add_run_type.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 160 | 2017-06-13T09:59:59.000Z | 2022-03-21T22:00:35.000Z | src/triage/component/results_schema/alembic/versions/5dd2ba8222b1_add_run_type.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 803 | 2016-10-21T19:44:02.000Z | 2022-03-29T00:02:33.000Z | src/triage/component/results_schema/alembic/versions/5dd2ba8222b1_add_run_type.py | josephbajor/triage_NN | cbaee6e5a06e597c91fec372717d89a2b5f34fa5 | [
"MIT"
] | 59 | 2017-01-31T22:10:22.000Z | 2022-03-19T12:35:03.000Z | """add run_type
Revision ID: 5dd2ba8222b1
Revises: 079a74c15e8b
Create Date: 2021-07-22 23:53:04.043651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '5dd2ba8222b1'
down_revision = '079a74c15e8b'
branch_labels = None
depends_on = None
| 48.813559 | 209 | 0.755556 |
81b43298bda18b704f77ed56a530bc20370af1bf | 126 | py | Python | projects/PanopticFCN_cityscapes/panopticfcn/__init__.py | fatihyildiz-cs/detectron2 | 700b1e6685ca95a60e27cb961f363a2ca7f30d3c | [
"Apache-2.0"
] | 166 | 2020-12-01T18:34:47.000Z | 2021-03-27T04:20:15.000Z | panopticfcn/__init__.py | ywcmaike/PanopticFCN | 9201b06d871df128547ce36b80f6caceb105465d | [
"Apache-2.0"
] | 28 | 2021-05-20T08:59:05.000Z | 2022-03-18T13:17:35.000Z | panopticfcn/__init__.py | ywcmaike/PanopticFCN | 9201b06d871df128547ce36b80f6caceb105465d | [
"Apache-2.0"
] | 33 | 2021-05-23T14:09:19.000Z | 2022-03-30T14:27:55.000Z | from .config import add_panopticfcn_config
from .panoptic_seg import PanopticFCN
from .build_solver import build_lr_scheduler
| 31.5 | 44 | 0.880952 |
81b626c300ff32d3e035f1c56e66bff3d7d1f4dc | 592 | py | Python | 03_lecture_Django/lecture3/hello/views.py | MoStgt/CS50 | 62bd6eb38bea745c6356e1a8f03adb6ab70e2a37 | [
"MIT"
] | null | null | null | 03_lecture_Django/lecture3/hello/views.py | MoStgt/CS50 | 62bd6eb38bea745c6356e1a8f03adb6ab70e2a37 | [
"MIT"
] | null | null | null | 03_lecture_Django/lecture3/hello/views.py | MoStgt/CS50 | 62bd6eb38bea745c6356e1a8f03adb6ab70e2a37 | [
"MIT"
] | null | null | null | from http.client import HTTPResponse
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
# def index(request):
# return HttpResponse("Hello World!")
# def greet(request, name):
# return HttpResponse(f"Hello, {name.capitalize()}!") | 24.666667 | 57 | 0.706081 |
81b65c798718e0eb1c455ac640017cba04a70ce8 | 19,426 | py | Python | txdav/common/datastore/upgrade/test/test_migrate.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 462 | 2016-08-14T17:43:24.000Z | 2022-03-17T07:38:16.000Z | txdav/common/datastore/upgrade/test/test_migrate.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 72 | 2016-09-01T23:19:35.000Z | 2020-02-05T02:09:26.000Z | txdav/common/datastore/upgrade/test/test_migrate.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 171 | 2016-08-16T03:50:30.000Z | 2022-03-26T11:49:55.000Z | ##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.upgrade.migrate}.
"""
from twext.enterprise.adbapi2 import Pickle
from twext.enterprise.dal.syntax import Delete
from twext.python.filepath import CachingFilePath
from txweb2.http_headers import MimeType
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from twisted.internet.protocol import Protocol
from twisted.protocols.amp import AMP, Command, String
from twisted.python.modules import getModule
from twisted.python.reflect import qual, namedAny
from twisted.trial.unittest import TestCase
from twistedcaldav import customxml, caldavxml
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.base.propertystore.base import PropertyName
from txdav.caldav.datastore.test.common import CommonTests
from txdav.carddav.datastore.test.common import CommonTests as ABCommonTests
from txdav.common.datastore.file import CommonDataStore
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.test.util import SQLStoreBuilder
from txdav.common.datastore.test.util import (
populateCalendarsFrom, StubNotifierFactory, resetCalendarMD5s,
populateAddressBooksFrom, resetAddressBookMD5s, deriveValue,
withSpecialValue, CommonCommonTests
)
from txdav.common.datastore.upgrade.migrate import UpgradeToDatabaseStep, \
StoreSpawnerService, swapAMP
from txdav.xml import element
import copy
| 37.357692 | 130 | 0.653197 |
81b69499f86483624239f156b1fed165ba08aee8 | 1,770 | py | Python | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/fcp/aliases_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
| 26.818182 | 95 | 0.565537 |
81b7304301ce82e40f227f18f20c21206d2e4a7b | 5,745 | py | Python | imagernn/generic_batch_generator.py | OnlyBelter/learn_neuralTalk | 53c78962960e142bbd149eb92405029b795845ed | [
"MIT"
] | 7 | 2019-03-18T10:19:11.000Z | 2021-11-10T02:10:55.000Z | imagernn/generic_batch_generator.py | AllenMas/learn_neuralTalk | 53c78962960e142bbd149eb92405029b795845ed | [
"MIT"
] | null | null | null | imagernn/generic_batch_generator.py | AllenMas/learn_neuralTalk | 53c78962960e142bbd149eb92405029b795845ed | [
"MIT"
] | 8 | 2017-11-17T08:54:51.000Z | 2021-05-29T15:08:34.000Z | import numpy as np
import code
from imagernn.utils import merge_init_structs, initw, accumNpDicts
from imagernn.lstm_generator import LSTMGenerator
from imagernn.rnn_generator import RNNGenerator
| 37.064516 | 131 | 0.673281 |
81b75b5619b0c3e8fbf77d44a083c297724d2365 | 6,110 | py | Python | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | [
"MIT"
] | null | null | null | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | [
"MIT"
] | null | null | null | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | [
"MIT"
] | null | null | null | # Update the working patch and champions list
from __future__ import print_function
import configparser
import json
import os
import urllib.request
from datetime import datetime
from slugify import slugify
from collections import OrderedDict
from InterfaceAPI import InterfaceAPI
if __name__ == '__main__':
run()
| 54.553571 | 147 | 0.592635 |
81b791765a1072aa2be9a40bf2e9fca71ca77b5d | 555 | py | Python | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | [
"MIT"
] | 1 | 2020-08-30T04:49:20.000Z | 2020-08-30T04:49:20.000Z | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | [
"MIT"
] | 44 | 2020-06-21T03:10:35.000Z | 2020-08-08T23:55:19.000Z | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | [
"MIT"
] | 2 | 2020-07-01T16:54:58.000Z | 2020-07-13T21:13:06.000Z | # Generated by Django 3.0.7 on 2020-07-09 22:12
from django.db import migrations, models
| 23.125 | 61 | 0.574775 |
81b8a377f7e00482ba8d3e94e5cc8f42cb23bfce | 28,078 | py | Python | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | null | null | null | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | 1 | 2020-05-18T11:06:28.000Z | 2020-05-18T11:06:28.000Z | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | [
"MIT"
] | null | null | null | import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
"""
Reads a datasets provided for the environmental contour benchmark.
Parameters
----------
path : string
Path to dataset including the file name, defaults to 'examples/datasets/A.txt'
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
Label of the environmantal variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
x_label = row[1][
1:] # Ignore first char (is a white space).
y_label = row[2][
1:] # Ignore first char (is a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
| 46.563847 | 121 | 0.561044 |
81b9e4775c9ff677415dc6ea782a4181f1639a50 | 22,100 | py | Python | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | [
"Apache-2.0"
] | 4 | 2016-08-11T05:35:26.000Z | 2021-11-15T11:27:28.000Z | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | [
"Apache-2.0"
] | 1 | 2018-06-12T13:55:16.000Z | 2018-06-12T15:27:59.000Z | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | [
"Apache-2.0"
] | 12 | 2018-05-15T10:14:00.000Z | 2019-11-10T07:03:16.000Z | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests ZAP against wavsep: http://code.google.com/p/wavsep/
# Note wavsep has to be installed somewhere - the above link is to the
# project not the test suite!
#
# To this script:
# * Install the ZAP Python API:
# Use 'pip install python-owasp-zap-v2' or
# download from https://github.com/zaproxy/zaproxy/wiki/Downloads
# * Start ZAP (as this is for testing purposes you might not want the
# 'standard' ZAP to be started)
# * Access wavsep via your browser, proxying through ZAP
# * Vist all of the wavsep top level URLs, eg
# http://localhost:8080/wavsep/index-active.jsp
# http://localhost:8080/wavsep/index-passive.jsp
# * Run the Spider against http://localhost:8080
# * Run the Active Scanner against http://localhost:8080/wavsep
# * Run this script
# * Open the report.html file generated in your browser
#
# Notes:
# This has been tested against wavsep 1.5
from zapv2 import ZAPv2
import datetime, sys, getopt
if __name__ == "__main__":
main(sys.argv[1:])
| 38.368056 | 170 | 0.611403 |
81bafa0175de3af83830a52504e9b10d4a89639b | 10,439 | py | Python | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
PocketSmith
The public PocketSmith API # noqa: E501
The version of the OpenAPI document: 2.0
Contact: api@pocketsmith.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pocketsmith.configuration import Configuration
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attachment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Attachment):
return True
return self.to_dict() != other.to_dict()
| 28.061828 | 227 | 0.590861 |
81bce2f74bd4337a65e512dbd85c7e158418982f | 16,476 | py | Python | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | [
"MIT"
] | null | null | null | """
high-level NSQ reader class built on top of a Tornado IOLoop supporting both sync and
async modes of operation.
supports various hooks to modify behavior when heartbeats are received, temporarily
disable the reader, and pre-process/validate messages.
when supplied a list of nsqlookupd addresses, a reader instance will periodically poll
the specified topic in order to discover new producers and reconnect to existing ones.
sync ex.
import nsq
def task1(message):
print message
return True
def task2(message):
print message
return True
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
async ex.
import nsq
buf = []
def process_message(message, finisher):
global buf
# cache both the message and the finisher callable for later processing
buf.append((message, finisher))
if len(buf) >= 3:
print '****'
for msg, finish_fxn in buf:
print msg
finish_fxn(True) # use finish_fxn to tell NSQ of success
print '****'
buf = []
else:
print 'deferring processing'
all_tasks = {"task1": process_message}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="async", async=True)
nsq.run()
"""
import logging
try:
import simplejson as json
except ImportError:
import json
import time
import signal
import socket
import functools
import urllib
import random
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
def get_conn_id(conn, task):
return str(conn) + ':' + task
def _handle_term_signal(sig_num, frame):
logging.info('TERM Signal handler called with signal %r' % sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
| 40.581281 | 113 | 0.624059 |
81bed88a93d034618c88d318a0da803628905ccb | 337 | py | Python | main.py | ygidtu/mountainClimber | 37a1b2934741a755c90000af8d2f9e8256f24ca6 | [
"Apache-2.0"
] | null | null | null | main.py | ygidtu/mountainClimber | 37a1b2934741a755c90000af8d2f9e8256f24ca6 | [
"Apache-2.0"
] | null | null | null | main.py | ygidtu/mountainClimber | 37a1b2934741a755c90000af8d2f9e8256f24ca6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.09.04 by Zhang Yiming
"""
import warnings
warnings.filterwarnings("ignore")
import click
from cli.climb import climb
from cli.diff import diff
main.add_command(climb)
main.add_command(diff)
if __name__ == '__main__':
main()
| 14.041667 | 37 | 0.700297 |
81bf3cce63eb0d81e1cb3c04efffcbc893d011ef | 2,023 | py | Python | app/fednlp/data/raw_data_loader/CNN_Dailymail/data_loader.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | app/fednlp/data/raw_data_loader/CNN_Dailymail/data_loader.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | app/fednlp/data/raw_data_loader/CNN_Dailymail/data_loader.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | import os
from data.raw_data_loader.base.base_raw_data_loader import Seq2SeqRawDataLoader
| 36.125 | 79 | 0.527435 |
81bf6ad4a1d9f400fda048a534023120e5946c0a | 4,098 | py | Python | packages/utils/propagate_license.py | justi/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 12 | 2015-03-11T22:07:17.000Z | 2016-01-29T21:24:29.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 213 | 2015-01-30T16:02:57.000Z | 2016-01-29T21:45:02.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 5 | 2015-02-04T13:58:12.000Z | 2016-01-29T21:24:46.000Z | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# propagate_license.py
# Created by Disa Mhembere on 2014-05-16.
# Email: disa@jhu.edu
__license_header__ = """
{} Copyright 2014 Open Connectome Project (http://openconnecto.me)
{}
{} Licensed under the Apache License, Version 2.0 (the "License");
{} you may not use this file except in compliance with the License.
{} You may obtain a copy of the License at
{}
{} http://www.apache.org/licenses/LICENSE-2.0
{}
{} Unless required by applicable law or agreed to in writing, software
{} distributed under the License is distributed on an "AS IS" BASIS,
{} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{} See the License for the specific language governing permissions and
{} limitations under the License.
{}
"""
COMM_COUNT = 14
comm = {".py":"#", ".pyx":"#", "": "#", ".html":"", ".sh":"#", ".r":"#", ".m":"%", ".c":"//",
".c++":"//", ".java":"//", ".js":"//"}
import argparse
import os
if __name__ == "__main__":
main()
| 35.327586 | 107 | 0.656418 |
81c0253c14333698b3786f1e30f1b538f9b67736 | 1,384 | py | Python | core/gf/test.py | zorrock/accelerated-text | 942bacf653fc4c901748b06eaba72da4aeaaec9e | [
"Apache-2.0"
] | 1 | 2021-05-05T01:37:51.000Z | 2021-05-05T01:37:51.000Z | core/gf/test.py | zorrock/accelerated-text | 942bacf653fc4c901748b06eaba72da4aeaaec9e | [
"Apache-2.0"
] | null | null | null | core/gf/test.py | zorrock/accelerated-text | 942bacf653fc4c901748b06eaba72da4aeaaec9e | [
"Apache-2.0"
] | null | null | null | import pytest
import server
| 32.186047 | 77 | 0.62211 |
81c086bf3828eec5887f2980268193fc09c2dd9d | 3,126 | py | Python | troposphere/validators/dynamodb.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/dynamodb.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/dynamodb.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .. import AWSHelperFn, If
def attribute_type_validator(x):
"""
Property: AttributeDefinition.AttributeType
"""
valid_types = ["S", "N", "B"]
if x not in valid_types:
raise ValueError("AttributeType must be one of: %s" % ", ".join(valid_types))
return x
def key_type_validator(x):
"""
Property: KeySchema.KeyType
"""
valid_types = ["HASH", "RANGE"]
if x not in valid_types:
raise ValueError("KeyType must be one of: %s" % ", ".join(valid_types))
return x
def projection_type_validator(x):
"""
Property: Projection.ProjectionType
"""
valid_types = ["KEYS_ONLY", "INCLUDE", "ALL"]
if x not in valid_types:
raise ValueError("ProjectionType must be one of: %s" % ", ".join(valid_types))
return x
def billing_mode_validator(x):
"""
Property: Table.BillingMode
"""
valid_modes = ["PROVISIONED", "PAY_PER_REQUEST"]
if x not in valid_modes:
raise ValueError(
"Table billing mode must be one of: %s" % ", ".join(valid_modes)
)
return x
def table_class_validator(x):
"""
Property: Table.TableClass
"""
valid_table_classes = ["STANDARD", "STANDARD_INFREQUENT_ACCESS"]
if x not in valid_table_classes:
raise ValueError(
"Table class must be one of: %s" % ", ".join(valid_table_classes)
)
return x
def validate_table(self):
"""
Class: Table
"""
billing_mode = self.properties.get("BillingMode", "PROVISIONED")
indexes = self.properties.get("GlobalSecondaryIndexes", [])
tput_props = [self.properties]
tput_props.extend([x.properties for x in indexes if not isinstance(x, AWSHelperFn)])
if isinstance(billing_mode, If):
if check_any("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is per-request. "
"ProvisionedThroughput property is mutually exclusive"
)
return
if billing_mode == "PROVISIONED":
if not check_if_all("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is provisioned. "
"ProvisionedThroughput required if available"
)
elif billing_mode == "PAY_PER_REQUEST":
if check_any("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is per-request. "
"ProvisionedThroughput property is mutually exclusive"
)
| 29.214953 | 88 | 0.619962 |
81c08bcad1b73822669737a9c7a8c3b7773030bc | 430 | py | Python | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | [
"MIT"
] | null | null | null | import time
from misty2py.robot import Misty
from misty2py.utils.env_loader import EnvLoader
from misty2py_skills.utils.utils import get_abs_path
env_loader = EnvLoader(get_abs_path(".env"))
m = Misty(env_loader.get_ip())
d = m.event("subscribe", type="BatteryCharge")
e_name = d.get("event_name")
time.sleep(1)
d = m.event("get_data", name=e_name)
# do something with the data here
d = m.event("unsubscribe", name=e_name)
| 21.5 | 52 | 0.755814 |
81c1b8a6fb449ff2c4c107dcaec453b46983daed | 2,302 | py | Python | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | [
"Apache-2.0"
] | null | null | null | #Map incorrect and abbreviated street names with correct/better ones
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = "albany.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# UPDATE THIS VARIABLE
mapping = {"rd": "Road",
"Rd": "Road",
"road": "Road",
"Ave": "Avenue",
"Ave.": "Avenue",
"AVE": "Avenue",
"way" : "Way",
"street": "Street",
"way":"Way",
"Dr.":"Drive",
"Blvd":"Boulevard",
"rt":"Route",
"Ext": "Extension",
"Jay":"Jay Street",
"Nott St E":"Nott Street East",
"Troy-Schenetady-Road":"Troy Schenectady Road",
"Troy-Schenetady Rd" :"Troy Schenectady Road",
"Delatour":"Delatour Road",
"Deltour": "Delatour Road",
"Sparrowbush": "Sparrowbush Road"
}
if __name__ == '__main__':
test() | 27.73494 | 68 | 0.541703 |
81c1bcd0f7ae70104609fcbc8c962b13a08d4c00 | 1,943 | py | Python | modules/week2/utils.py | tobias-z/4-sem-python | 35c0a73f0a2085f2dc539c8ec8761c26675aa078 | [
"MIT"
] | null | null | null | modules/week2/utils.py | tobias-z/4-sem-python | 35c0a73f0a2085f2dc539c8ec8761c26675aa078 | [
"MIT"
] | null | null | null | modules/week2/utils.py | tobias-z/4-sem-python | 35c0a73f0a2085f2dc539c8ec8761c26675aa078 | [
"MIT"
] | null | null | null | from io import TextIOWrapper
import os
from typing import List
OUTPUT = "files/output.csv"
FOLDER = "modules/week2/folders"
def get_file_names(folderpath, out=OUTPUT):
"""takes a path to a folder and writes all filenames in the folder to a specified output file"""
dir_list = os.listdir(folderpath)
with open(out, "w") as file:
for line in dir_list:
file.write(line + "\n")
def get_all_file_names(folderpath, out=OUTPUT):
"""takes a path to a folder and write all filenames recursively (files of all sub folders to)"""
with open(out, "w") as file:
write_dir_to_file(file, os.listdir(folderpath), folderpath)
def print_line_one(file_names: List[str]):
"""takes a list of filenames and print the first line of each"""
for file_name in file_names:
with open(file_name) as file:
print(file.readline())
def print_emails(file_names: List[str]):
"""takes a list of filenames and print each line that contains an email (just look for @)"""
for file_name in file_names:
with open(file_name) as file:
for line in file.readlines():
if "@" in line:
print(line)
def write_headlines(md_files: List[str], out=OUTPUT):
"""takes a list of md files and writes all headlines (lines starting with #) to a file"""
with open(out, "w") as output_file:
for md_file in md_files:
with open(md_file) as file:
for line in file.readlines():
if line.startswith("#"):
output_file.write(line)
| 34.696429 | 100 | 0.629439 |
81c234494317e86e4d284863eff810f848405889 | 4,480 | py | Python | src/api/providers.py | ismetacar/ertis-auth | 64727cc8201d5fcc955485e94262500d63ff4b17 | [
"MIT"
] | 17 | 2020-06-17T15:28:59.000Z | 2021-09-21T19:18:14.000Z | src/api/providers.py | ismetacar/Ertis-Auth | 5521eb8a0b11fca7c5ff2a4ecc6cc0b9af59aa8f | [
"MIT"
] | 5 | 2020-06-17T21:22:56.000Z | 2021-05-02T19:10:05.000Z | src/api/providers.py | ismetacar/Ertis-Auth | 5521eb8a0b11fca7c5ff2a4ecc6cc0b9af59aa8f | [
"MIT"
] | 2 | 2021-03-02T17:08:07.000Z | 2021-04-07T18:11:59.000Z | import json
from sanic import response
from sanic_openapi import doc
from src.plugins.authorization import authorized
from src.plugins.validator import validated
from src.request_models.providers import Provider
from src.request_models.query_model import Query
from src.resources.generic import ensure_membership_is_exists, QUERY_BODY_SCHEMA
from src.resources.providers.resource import CREATE_PROVIDER_SCHEMA
from src.utils import query_helpers
from src.utils.json_helpers import bson_to_json
| 43.076923 | 104 | 0.705134 |
81c238300e9927729e01076aa4674e5af0b62cf8 | 3,078 | py | Python | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Lista de Exerccios 08 (Pesquisa) - Questo 02
#Mayara Rysia
from time import time
from time import sleep
from random import randint
"""
2. Use as duas funes de busca binria apresentadas (iterativa e recursiva). Gere
uma lista de nmeros aleatrios, ordene-os e verifique o desempenho delas. Qual
os resultados?
"""
#Busca Binria - cdigo recursivo
#Busca Binria - cdigo iterativo
#ordena a lista
#cria a lista
if __name__ == '__main__':
l = criaLista()
lista = ordena(l)
qtd_br = qtd_bi = 0
#Testes
for i in range(5):
num = randint(0, 42)
print("<< Busca Recursiva >> \n")
tempo_gasto_br = Teste(lista, num)
print('\ttempo gasto: ', tempo_gasto_br)
print('\n\n')
sleep(2)
print("<< Busca Iterativa >> \n")
tempo_gasto_bi = Teste_it(lista, num)
print('\ttempo gasto: ', tempo_gasto_bi)
print('\n\n')
if tempo_gasto_br < tempo_gasto_bi:
qtd_br +=1
print('\n-> Busca Recursiva levou o menor tempo\n')
else:
qtd_bi +=1
print('\n-> Busca Iterativa levou o menor tempo\n')
print("------- ------- ------- ------- -------")
print("\nCONCLUSO\n\n ")
if qtd_br > qtd_bi:
print("Busca Binria Recursiva teve o melhor desempenho!")
else:
print("Busca Binria Iterativa teve o melhor desempenho!")
print("Quantidade Binria Recursiva: ", qtd_br)
print("Quantidade Binria Iterativa: ", qtd_bi)
| 20.938776 | 82 | 0.635153 |
81c3777bd3aa3fe5f25a3ee068f24e1720ba3426 | 3,290 | py | Python | ccvpn/views/__init__.py | CCrypto/ccvpn | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 81 | 2015-03-07T20:26:55.000Z | 2016-05-16T10:22:05.000Z | ccvpn/views/__init__.py | CCrypto/ccvpn2 | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 1 | 2017-09-21T15:56:31.000Z | 2017-11-30T15:10:56.000Z | ccvpn/views/__init__.py | CCrypto/ccvpn | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 20 | 2015-03-07T22:36:46.000Z | 2016-04-23T22:47:12.000Z | import codecs
import markdown
import os
import logging
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPOk, HTTPNotFound
from sqlalchemy import func
from mako.lookup import TemplateLookup
import mako.exceptions
logger = logging.getLogger(__name__)
from ccvpn.models import DBSession, User, IcingaError, IcingaQuery, Gateway, VPNSession
from ccvpn.views import account, admin, api, order # noqa
def format_bps(bits):
multiples = ((1e9, 'G'), (1e6, 'M'), (1e3, 'K'), (0, ''))
for d, m in multiples:
if bits < d:
continue
n = bits / (d or 1)
return '{:2g}{}bps'.format(n, m)
| 33.571429 | 87 | 0.619149 |
81c43cdcda51abd9a7c25faabe42afd1a69a3e45 | 3,142 | py | Python | rx/subjects/subject.py | MichaelSchneeberger/RxPY | 994f974d37783f63c5d9e018a316fa9b06ba9337 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/subjects/subject.py | MichaelSchneeberger/RxPY | 994f974d37783f63c5d9e018a316fa9b06ba9337 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | rx/subjects/subject.py | MichaelSchneeberger/RxPY | 994f974d37783f63c5d9e018a316fa9b06ba9337 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import threading
from typing import Any, List, Optional
from rx.disposable import Disposable
from rx.core.typing import Observer, Scheduler
from rx.core import Observable, typing
from rx.internal import DisposedException
from .anonymoussubject import AnonymousSubject
from .innersubscription import InnerSubscription
| 28.825688 | 100 | 0.590707 |
81c467ca6111d33d242e6a5ccd32ee27968ad970 | 931 | py | Python | scripts/uda.py | nng555/fairseq | c9730a125825a85f33042e1b9fd1959b8ca829e5 | [
"MIT"
] | 2 | 2020-10-05T08:52:01.000Z | 2021-03-03T15:26:35.000Z | scripts/uda.py | nng555/fairseq | c9730a125825a85f33042e1b9fd1959b8ca829e5 | [
"MIT"
] | null | null | null | scripts/uda.py | nng555/fairseq | c9730a125825a85f33042e1b9fd1959b8ca829e5 | [
"MIT"
] | null | null | null | import os
import hydra
import subprocess
import logging
from omegaconf import DictConfig
from hydra import slurm_utils
log = logging.getLogger(__name__)
if __name__ == "__main__":
launch()
| 29.09375 | 110 | 0.684211 |
81c5bce0c4d9254a207a213c3a227fa2fcf0908d | 2,062 | py | Python | 06_Business/application_iris/app.py | MaryMP11/The_Bridge_School_DataScience_PT | 8b4a24d0b79608061a470e806de542dbbcccf75d | [
"Apache-2.0"
] | null | null | null | 06_Business/application_iris/app.py | MaryMP11/The_Bridge_School_DataScience_PT | 8b4a24d0b79608061a470e806de542dbbcccf75d | [
"Apache-2.0"
] | null | null | null | 06_Business/application_iris/app.py | MaryMP11/The_Bridge_School_DataScience_PT | 8b4a24d0b79608061a470e806de542dbbcccf75d | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, jsonify, session, url_for, redirect, render_template
import joblib
from flower_form import FlowerForm
classifier_loaded = joblib.load("application_iris/saved_models/knn_iris_dataset.pkl")
encoder_loaded = joblib.load("application_iris/saved_models/iris_label_encoder.pkl")
# prediction function
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
# Read models
# classifier_loaded = joblib.load("saved_models/01.knn_with_iris_dataset.pkl")
# encoder_loaded = joblib.load("saved_models/02.iris_label_encoder.pkl")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080) | 32.21875 | 112 | 0.734724 |
81c63070aaf168ec47a744d51e1a20dd220ae56b | 8,522 | py | Python | test.py | EdwinChan/python-physical | 44383280acddd77b35adf8923b7d8cdb512553a0 | [
"MIT"
] | 2 | 2021-07-13T05:58:13.000Z | 2021-08-19T04:37:57.000Z | test.py | EdwinChan/python-physical | 44383280acddd77b35adf8923b7d8cdb512553a0 | [
"MIT"
] | null | null | null | test.py | EdwinChan/python-physical | 44383280acddd77b35adf8923b7d8cdb512553a0 | [
"MIT"
] | null | null | null | import math
import re
import unittest
import urllib.error
import urllib.request
from .core import Quantity
from .define import defined_systems
si = defined_systems['si']
esu = defined_systems['esu']
emu = defined_systems['emu']
gauss = defined_systems['gauss']
if __name__ == '__main__':
unittest.main()
| 40.198113 | 74 | 0.602558 |
81c77ad5e87d4cea69ce9a40ad25f9768714ae45 | 3,828 | py | Python | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | ucsd-ccbb/cirrus-ngs | 8f51450b3d971b03d4fd08a1aab11d5a076aa23e | [
"MIT"
] | 8 | 2017-01-20T00:00:45.000Z | 2022-02-11T00:20:45.000Z | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 3 | 2018-03-23T19:09:06.000Z | 2018-03-26T19:49:55.000Z | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 2 | 2018-03-29T06:24:31.000Z | 2019-04-01T18:34:53.000Z | import unittest
import sys
import os
sys.path.append(os.getcwd().replace("test", "src"))
import cirrus_ngs.cfnCluster.ConnectionManager as ConnectionManager
import paramiko
import tempfile
import re
##THIS TEST WILL NOT WORK##
if __name__ == "__main__":
unittest.main(module=__name__, buffer=True, exit=False)
| 35.775701 | 117 | 0.637931 |
81c7caa0739efb8823c259206d2c89fc45540cae | 3,166 | py | Python | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
] | null | null | null | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
] | null | null | null | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Commands for launching processes with or without OPUS interposition.
'''
from __future__ import absolute_import, division, print_function
import argparse
import os
import psutil
from .. import config, server_start, utils
def handle(cmd, **params):
if cmd == "launch":
handle_launch(**params)
elif cmd == "exclude":
handle_exclude(**params)
| 31.979798 | 82 | 0.609602 |
81c8df8164adec85e55f94629c0090d4dd609286 | 1,718 | py | Python | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
] | null | null | null | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
] | null | null | null | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
] | null | null | null | import pytest
from ansible_self_service.l2_infrastructure.app_collection_config_parser import AppCollectionConfigValidationException, \
YamlAppCollectionConfigParser
from ansible_self_service.l4_core.models import AppCategory, App
VALID_CATEGORY_NAME = 'Misc'
VALID_ITEM_NAME = 'Cowsay'
VALID_ITEM_DESCRIPTION = 'Let an ASCII cow say stuff in your terminal!'
VALID_CONFIG = f"""
categories:
{VALID_CATEGORY_NAME}: {{}}
items:
{VALID_ITEM_NAME}:
description: |
{VALID_ITEM_DESCRIPTION}
categories:
- {VALID_CATEGORY_NAME}
image_url: https://upload.wikimedia.org/wikipedia/commons/8/80/Cowsay_Typical_Output.png
playbook: playbooks/cowsay.yml
params:
ansible_become_password:
type: secret
mandatory: true
requirements: > # any expression that we could use for a tasks "when" clause; items are ANDed
- ansible_distribution == 'Ubuntu'
"""
INVALID_CONFIG = '''
this is not even YAML
'''
| 33.038462 | 121 | 0.760186 |
81ca35091868d035a8a09d9c9753adadf774b179 | 6,088 | py | Python | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
] | null | null | null | """
Run a simple HTTP server which provides API endpoint for SFTPPlus.
Usage:
server.py [options]
-h --help Show this help.
-p --port=8000 Listen to a specific port. [default: 8080]
-a --address=127.0.0.1 Listen on specific address. [default: 0.0.0.0]
-c --certificate=PATH Enable HTTPS by defining the path
to a file containing server key, certificate, and CA chain
all PEM format and stored in a single file.
-f --flaky Introduce random errors to test SFTPPlus API retry functionality.
The following API endpoints are provided:
* /auth-api - For the authentication API
* /event-api - For the event handler API
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ssl
from random import randint
from aiohttp import web
from docopt import docopt
# Command line handling part.
arguments = docopt(__doc__)
# Convert arguments to usable types.
port = int(arguments["--port"])
# Need to escape the address for ipv6.
address = arguments["--address"].replace(":", r"\:")
is_flaky = arguments["--flaky"]
certificate = arguments["--certificate"]
# Set to lower values to increase the probability of a failure.
_FLAKY_DEGREE = 3
# DB with accepted accounts.
# Each key is the name of an user.
# Each value contains the accepted password and/or SSH-key.
ACCOUNTS = {
# An account with some custom configuration.
# Configuration that is not explicitly defined here is extracted based on
# the SFTPPlus group.
"test-user": {
"password": "test-pass",
# Just the public key value, in OpenSSH format.
# Without hte key type or comments.
"ssh-public-key": "AAAAB3NzaC1yc2EAAAADAQABAAAAgQC4fV6tSakDSB6ZovygLsf1iC9P3tJHePTKAPkPAWzlu5BRHcmAu0uTjn7GhrpxbjjWMwDVN0Oxzw7teI0OEIVkpnlcyM6L5mGk+X6Lc4+lAfp1YxCR9o9+FXMWSJP32jRwI+4LhWYxnYUldvAO5LDz9QeR0yKimwcjRToF6/jpLw==",
"configuration": {
"home_folder_path": "/tmp",
# EXTRA_DATA is not yet supported.
# 'extra_data': {
# 'file_api_token': 'fav1_some_value',
# },
},
},
# An account with default configuration extracted from
# the default SFTPPlus group.
# SSH-Key authentication is disabled for this user.
"default-user": {
"password": "default-pass",
"ssh-public-key": "",
"configuration": {},
},
}
app = web.Application()
app.add_routes(
[
web.get("/", handle_root),
web.post("/auth-api", handle_auth),
web.post("/event-api", handle_event),
]
)
ssl_context = None
if certificate:
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_context.load_cert_chain(certificate, certificate)
if __name__ == "__main__":
web.run_app(app, host=address, port=port, ssl_context=ssl_context)
| 31.220513 | 233 | 0.655388 |
81ca610dec0f1e1d5519b0914515a58eb09c500b | 55 | py | Python | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
] | 2 | 2020-09-11T09:26:43.000Z | 2020-12-17T07:32:38.000Z | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
] | 4 | 2020-12-02T03:42:38.000Z | 2020-12-14T07:56:06.000Z | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
] | 2 | 2020-12-08T01:11:54.000Z | 2021-01-25T04:29:15.000Z | from .base import Flow
from .view_flow import ViewFlow
| 18.333333 | 31 | 0.818182 |
81cb6312561698f081c8ea3ba400b666e569a740 | 740 | py | Python | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
] | null | null | null | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
] | null | null | null | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
] | null | null | null | from config.constant import ExportItemConstant, ExportItemTypeConstant, EventConstant, TransactionConstant
from ethereumetl.service.eth_event_service import EthEvent
| 43.529412 | 106 | 0.731081 |
81cc09d97179b0455468e2dd08a57556c6ae600f | 5,934 | py | Python | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 47.472 | 195 | 0.635322 |
81cd30e6f9de401088d486d770c3328acf7f5008 | 5,033 | py | Python | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
] | null | null | null | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
] | null | null | null | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
] | null | null | null | import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy,sparse_categorical_crossentropy
from config import Configuration
cfg = Configuration()
| 44.9375 | 98 | 0.587522 |
81cd44adfb162f86e55541035a3d572728194cd3 | 2,235 | py | Python | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
] | null | null | null | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
] | 12 | 2019-08-10T00:37:58.000Z | 2019-08-14T22:47:26.000Z | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
] | 2 | 2019-08-10T00:31:54.000Z | 2019-08-10T00:33:10.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import json
from locust import HttpLocust, TaskSequence, task, seq_task
from util import ticket_generator, pool_generator, ATTRIBUTE_LIST
NUM_QUERY_ATTR = 20
| 35.47619 | 181 | 0.714541 |
81cdcd944d2ec3787c0800a16240ad15e52f16bd | 500 | py | Python | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 6 | 2017-11-20T08:12:46.000Z | 2021-03-11T15:32:36.000Z | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 4 | 2018-02-02T21:46:33.000Z | 2022-01-11T11:10:27.000Z | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
] | 12 | 2016-03-01T18:33:34.000Z | 2022-02-08T22:20:31.000Z | #! /usr/bin/env python3
import sys
import math
import glob
from mule_local.postprocessing.pickle_SphereDataSpectralDiff import *
from mule.exec_program import *
# Ugly hack!
#output, retval = exec_program('ls *benchref*/*prog_h* | sort | tail -n 1 | sed "s/.*prog_h//"')
#if retval != 0:
# print(output)
# raise Exception("Something went wrong")
#output = output.replace("\n", '')
#output = output.replace("\r", '')
#p = pickle_SphereDataSpectralDiff(output)
p = pickle_SphereDataSpectralDiff()
| 23.809524 | 96 | 0.72 |
81ceeac6fb9c99499e11e6ba24211d641629642f | 4,355 | py | Python | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | 3 | 2022-02-06T23:31:17.000Z | 2022-02-07T11:10:03.000Z | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | null | null | null | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
] | null | null | null | """This module contains a base runnable item."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List
# Imports for type checking.
if TYPE_CHECKING:
import pathlib
import houdini_package_runner.runners.base
# =============================================================================
# CLASSES
# =============================================================================
| 29.828767 | 83 | 0.461538 |
81cf7c347a9efbb6723692e303da22251c98208b | 71 | py | Python | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | 1 | 2015-06-16T06:53:52.000Z | 2015-06-16T06:53:52.000Z | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | null | null | null | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
] | null | null | null |
from graph.graph_server import GraphServer
__all__ = ['GraphServer']
| 14.2 | 42 | 0.788732 |
81cfb18746180392d2ab217e02dc844bfc9a910e | 4,485 | py | Python | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | 1 | 2021-10-20T00:11:16.000Z | 2021-10-20T00:17:51.000Z | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/djangoplicity-blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-15 16:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import djangoplicity.archives.base
import djangoplicity.archives.fields
| 48.75 | 151 | 0.599331 |
81d02a1b1d708206e4f47e1ceb3bcbc7a7b7f3aa | 4,386 | py | Python | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
] | null | null | null | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
] | null | null | null | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
] | null | null | null | '''
Created on 20.07.2015
@author: stefan
'''
import unittest
import pickle
import picklesize
import copy_reg
def tuple_reducer(obj):
return (NewStyle_Reducer, tuple())
copy_reg.pickle(NewStyle_Reducer, tuple_reducer) | 24.779661 | 110 | 0.564979 |
81d18ed12d722ad07b48dba575ac241b64bc31f0 | 1,691 | py | Python | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
] | null | null | null | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
] | null | null | null | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Setup process."""
from io import open
from os import path
from setuptools import find_packages, setup
with open(
path.join(path.abspath(path.dirname(__file__)), "README.md"), encoding="utf-8"
) as f:
long_description = f.read()
setup(
# Basic project information
name="ttctext",
version="0.0.1",
# Authorship and online reference
author="Satyajit Ghana",
author_email="satyajitghana7@gmail.com",
url="https://github.com/extensive-nlp/ttc_nlp",
# Detailled description
description="TTC NLP Module",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="sample setuptools development",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
# Package configuration
packages=find_packages(exclude=("tests",)),
include_package_data=True,
python_requires=">= 3.6",
install_requires=[
"torch>=1.9.0",
"torchtext>=0.10.0",
"torchmetrics>=0.4.1",
"omegaconf>=2.1.0",
"pytorch-lightning>=1.3.8",
"gdown>=3.13.0",
"spacy>=3.1.0",
"pandas~=1.1.0",
"seaborn>=0.11.1",
"matplotlib>=3.1.3",
"tqdm>=4.61.2",
"scikit-learn~=0.24.2",
],
# Licensing and copyright
license="Apache 2.0",
)
| 28.661017 | 82 | 0.607333 |
81d3e9a297bdf6007923e315c9b06917f0723c4c | 216 | py | Python | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 1 | 2019-05-08T14:53:27.000Z | 2019-05-08T14:53:27.000Z | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | 2 | 2020-08-26T09:16:47.000Z | 2020-10-30T16:47:03.000Z | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from .core import Fitness
from .kernel_based import GlobalMinimum
from .observation_based import ObservationBasedFitness, MultipleLinearRegression, SimplePolynomialRegression, MultipleLinearRegression | 43.2 | 134 | 0.87963 |
81d542769cfc331b3bf5ee9b379987289db08efb | 1,071 | py | Python | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | 4 | 2021-09-28T12:52:01.000Z | 2022-02-24T22:30:22.000Z | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | # Copyright (c) 2019-2020 steelpy
# Python stdlib imports
# package imports
#from steelpy.codes.aisc.aisc360 import AISC_360_16
#from steelpy.codes.aisc.aisc335 import AISC_335_89
#from steelpy.codes.iso.ISO19902 import ISOCodeCheck
from steelpy.codes.piping.pipeline import Pipeline_Assessment
#from steelpy.codes.api.wsd_22ed import APIwsd22ed
from steelpy.codes.dnv.pannel import CodeCheckPanel
#
#from steelpy.process.units.main import Units
#from steelpy.material.material import Material
#from steelpy.sections.tubular import Tubular
from steelpy.codes.api.main import API_design
| 22.3125 | 61 | 0.644258 |
81d63e7ca9da71f50fffa4b00a77a421574650e4 | 347 | py | Python | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
] | null | null | null | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
] | null | null | null | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
] | null | null | null | import os
os.system("pip install pytorch_transformers")
import nsml
print(nsml.DATASET_PATH)
os.system('python ./code/train.py --n-labeled 10 --data-path '+ nsml.DATASET_PATH + '/train/ --batch-size 4 --batch-size-u 8 --epochs 20 --val-iteration 1000 --lambda-u 1 --T 0.5 --alpha 16 --mix-layers-set 7 9 12 --lrmain 0.000005 --lrlast 0.00005'
)
| 38.555556 | 249 | 0.706052 |
81d742485fceccd1810f61f429cd089c6e0b112d | 1,126 | py | Python | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import math
from selenium.webdriver.support.ui import Select
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
link = "http://suninjuly.github.io/explicit_wait2.html"
opt = webdriver.ChromeOptions()
opt.add_experimental_option('w3c', False)
browser = webdriver.Chrome(chrome_options=opt)
browser.implicitly_wait(5, 0.5)
browser.get(link)
button = browser.find_element_by_id("book")
price = WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, "price"),"10000 RUR"))
button.click()
browser.find_element_by_class_name("btn-primary").click()
# new_window = browser.window_handles[1]
# browser.switch_to.window(new_window)
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
browser.find_element_by_id("answer").click()
browser.find_element_by_id("answer").send_keys(y)
browser.find_element_by_id("solve").click() | 31.277778 | 104 | 0.785968 |
81d761dcf0b173ad97a22e411c04701a33909ebc | 1,224 | py | Python | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | null | null | null | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | 4 | 2022-01-13T03:56:36.000Z | 2022-03-12T01:01:24.000Z | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-01 17:46
from django.db import migrations, models
import django.db.models.deletion
| 38.25 | 140 | 0.580882 |
81d9ca381791e7e9b4231c86815a9f9bf5fa4886 | 40,400 | py | Python | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
] | null | null | null | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
] | null | null | null | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import json
import glob
import uuid
import click
import logging
import unicodedata
from click import open_file
from bidi.algorithm import get_display
from typing import cast, Set, List, IO, Any
from kraken.lib import log
from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.exceptions import KrakenEncodeException
from kraken.lib.exceptions import KrakenInputException
APP_NAME = 'kraken'
logger = logging.getLogger('kraken')
if __name__ == '__main__':
cli()
| 47.251462 | 234 | 0.62146 |
81daacebc9755ed9fad67d0bb9146bb8f488fc5d | 2,728 | py | Python | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
] | 2 | 2020-07-17T15:09:47.000Z | 2020-11-20T13:52:48.000Z | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
] | null | null | null | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
] | null | null | null | from os.path import *
import glob
import json
import numpy as np
from util.plot_utils import plot_curves, plot_multi_loss_distribution
TMPJPG = expanduser("~/Pictures/")
if __name__ == '__main__':
exp_name = ["be", "be_768", "be_1024", "be_mid_layer_only", "origin"]
keys = ["train_loss_bbox", "train_loss_ce", "train_loss_giou", "test_coco_eval_bbox"]
eval_name = ["AP", "AP50", "AP75", "AP_small", "AP_mid", "AP_Big",
"AR", "AR50", "AR75", "AR_small", "AR_mid", "AR_Big"]
plot_multi_logs(exp_name, keys, save_name="loss", epoch=50, addition_len=eval_name[:6])
| 38.422535 | 101 | 0.612903 |
81dab8323f10c78c0bf2886a1ab5569f40f742ad | 12,616 | py | Python | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
] | 363 | 2015-01-14T17:48:34.000Z | 2022-01-29T06:37:04.000Z | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
] | 703 | 2015-01-06T17:17:20.000Z | 2020-09-16T15:54:17.000Z | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
] | 203 | 2015-01-18T22:38:23.000Z | 2022-01-28T19:19:05.000Z | # Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from getpass import getpass
from distutils.version import LooseVersion
import click
from tower_cli import models, get_resource, resources, exceptions as exc
from tower_cli.api import client
from tower_cli.cli import types
from tower_cli.utils import debug, parser
PROMPT_LIST = ['diff_mode', 'limit', 'tags', 'skip_tags', 'job_type', 'verbosity', 'inventory', 'credential']
| 46.212454 | 112 | 0.611367 |
81db0f62ce609e284136aef25e5f81fbdf1a0feb | 1,643 | py | Python | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
] | 38 | 2019-05-09T03:13:43.000Z | 2022-03-16T22:59:25.000Z | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
] | 938 | 2019-05-02T15:13:21.000Z | 2022-02-27T20:59:00.000Z | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
] | 65 | 2019-05-09T03:28:12.000Z | 2022-03-21T00:06:39.000Z | from dataclasses import dataclass
from typing import Any
from expungeservice.models.charge import ChargeType
from expungeservice.models.charge import ChargeUtil
from expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus
| 45.638889 | 181 | 0.677419 |
81dbffa128ea7c27541a642445edf3ebd5fd3197 | 8,918 | py | Python | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 35 | 2020-01-22T18:38:27.000Z | 2022-03-22T16:19:56.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 292 | 2019-12-09T11:15:26.000Z | 2022-03-31T14:37:52.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
] | 32 | 2019-12-09T11:09:44.000Z | 2022-03-24T01:13:31.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: import_workload_create_instance
short_description: Create NBD exports of OpenStack volumes
extends_documentation_fragment: openstack
version_added: "2.9.0"
author: "OpenStack tenant migration tools (@os-migrate)"
description:
- "Take an instance from an OS-Migrate YAML structure, and export its volumes over NBD."
options:
auth:
description:
- Dictionary with parameters for chosen auth type on the destination cloud.
required: true
type: dict
auth_type:
description:
- Auth type plugin for destination OpenStack cloud. Can be omitted if using password authentication.
required: false
type: str
region_name:
description:
- Destination OpenStack region name. Can be omitted if using default region.
required: false
type: str
availability_zone:
description:
- Availability zone.
required: false
type: str
cloud:
description:
- Ignored. Present for backwards compatibility.
required: false
type: raw
validate_certs:
description:
- Validate HTTPS certificates when logging in to OpenStack.
required: false
type: bool
data:
description:
- Data structure with server parameters as loaded from OS-Migrate workloads YAML file.
required: true
type: dict
block_device_mapping:
description:
- A block_device_mapping_v2 structure from the transfer_volumes module.
- Used to attach destination volumes to the new instance in the right order.
required: true
type: list
elements: dict
'''
EXAMPLES = '''
main.yml:
- name: validate loaded resources
os_migrate.os_migrate.validate_resource_files:
paths:
- "{{ os_migrate_data_dir }}/workloads.yml"
register: workloads_file_validation
when: import_workloads_validate_file
- name: read workloads resource file
os_migrate.os_migrate.read_resources:
path: "{{ os_migrate_data_dir }}/workloads.yml"
register: read_workloads
- name: get source conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
server_id: ce4dda96-5d8e-4b67-aee2-9845cdc943fe
register: os_src_conversion_host_info
- name: get destination conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
server_id: 2d2afe57-ace5-4187-8fca-5f10f9059ba1
register: os_dst_conversion_host_info
- name: import workloads
include_tasks: workload.yml
loop: "{{ read_workloads.resources }}"
workload.yml:
- block:
- name: preliminary setup for workload import
os_migrate.os_migrate.import_workload_prelim:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
validate_certs: False
src_conversion_host: "{{ os_src_conversion_host_info.openstack_conversion_host }}"
src_auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
src_validate_certs: False
data: "{{ item }}"
data_dir: "{{ os_migrate_data_dir }}"
register: prelim
- debug:
msg:
- "{{ prelim.server_name }} log file: {{ prelim.log_file }}"
- "{{ prelim.server_name }} progress file: {{ prelim.state_file }}"
when: prelim.changed
- name: expose source volumes
os_migrate.os_migrate.import_workload_export_volumes:
auth: "{{ os_migrate_src_auth }}"
auth_type: "{{ os_migrate_src_auth_type|default(omit) }}"
region_name: "{{ os_migrate_src_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_src_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_src_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_src_client_cert|default(omit) }}"
client_key: "{{ os_migrate_src_client_key|default(omit) }}"
conversion_host:
"{{ os_src_conversion_host_info.openstack_conversion_host }}"
data: "{{ item }}"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
register: exports
when: prelim.changed
- name: transfer volumes to destination
os_migrate.os_migrate.import_workload_transfer_volumes:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
conversion_host:
"{{ os_dst_conversion_host_info.openstack_conversion_host }}"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
transfer_uuid: "{{ exports.transfer_uuid }}"
src_conversion_host_address:
"{{ os_src_conversion_host_info.openstack_conversion_host.address }}"
volume_map: "{{ exports.volume_map }}"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
register: transfer
when: prelim.changed
- name: create destination instance
os_migrate.os_migrate.import_workload_create_instance:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
block_device_mapping: "{{ transfer.block_device_mapping }}"
register: os_migrate_destination_instance
when: prelim.changed
rescue:
- fail:
msg: "Failed to import {{ item.params.name }}!"
'''
RETURN = '''
server_id:
description: The ID of the newly created server.
returned: On successful creation of migrated server on destination cloud.
type: str
sample: 059635b7-451f-4a64-978a-7c2e9e4c15ff
'''
from ansible.module_utils.basic import AnsibleModule
# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+
try:
from ansible_collections.openstack.cloud.plugins.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
except ImportError:
# If this fails fall back to ansible < 3 imports
from ansible.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server
if __name__ == '__main__':
main()
| 33.152416 | 106 | 0.703185 |
81dc5fd1549c9df6ac26331817777ce1242a46e7 | 427 | py | Python | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | valores = []
while True:
num = int(input('Digite um valor: '))
valores.append(num)
cont = str(input('Quer continuar? [S/N] ')).upper()
if cont == 'N':
break
print(f'Voc digitou {len(valores)} elememtos.')
valores.sort(reverse=True)
print(f'Os valores em ordem decrescente so {valores}')
if 5 in valores:
print('O valor 5 faz parte da lista!')
else:
print('O valor 5 no faz parte da lista.') | 26.6875 | 55 | 0.641686 |
81dc7fd7e49eea8472a8c802075bd5a03af475ce | 10,670 | py | Python | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
] | 1 | 2021-09-06T00:09:11.000Z | 2021-09-06T00:09:11.000Z | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
] | null | null | null | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
] | null | null | null |
from huobi.utils.input_checker import *
| 36.541096 | 116 | 0.624649 |
81dd1a8439621b09316ab23b0da1c48479109ea1 | 2,297 | py | Python | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | 4 | 2017-04-30T17:08:42.000Z | 2019-11-15T04:44:09.000Z | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | 1 | 2016-02-12T07:51:30.000Z | 2016-02-12T07:51:30.000Z | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
] | null | null | null | import os
import option
import grapeGit as git
import grapeConfig
import utility
| 33.289855 | 134 | 0.573792 |
81ddc6f0c5c1c51183abe91817be444c4671d793 | 2,743 | py | Python | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
] | 1 | 2021-04-06T04:33:03.000Z | 2021-04-06T04:33:03.000Z | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
] | null | null | null | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
] | null | null | null | from abc import ABC
from typing import List, Optional, Union
import numpy as np
from allopy import OptData
from allopy.penalty import NoPenalty, Penalty
__all__ = ["AbstractObjectiveBuilder", "AbstractConstraintBuilder"]
def format_inputs(data: List[Union[OptData, np.ndarray]],
cvar_data: Optional[List[Union[OptData, np.ndarray]]],
time_unit: int):
data = [d if isinstance(data, OptData) else OptData(d, time_unit) for d in data]
if cvar_data is None:
return [d.cut_by_horizon(3) for d in data]
else:
cvar_data = [c if isinstance(c, OptData) else OptData(c, time_unit) for c in cvar_data]
return data, cvar_data
| 40.338235 | 115 | 0.682829 |
81de227f0a3f6458399634f490d77c2bd9c293a6 | 3,786 | py | Python | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
] | null | null | null | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
] | null | null | null | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import matplotlib.pyplot as plt
wine_df = pd.read_csv(filepath_or_buffer='~/class5-homework/wine.data',
sep=',',
header=None)
wine_df.columns = ['Class','Alcohol','Malic_Acid','Ash','Alcalinity_of_Ash','Magnesium',
'Total_Phenols','Flavanoids','Nonflavanoid_Phenols','Proanthocyanins',
'Color_Intensity','Hue','OD280_OD315_of_Diluted_Wines','Proline']
wine_B = wine_df.drop(['Class'], axis = 1)
os.makedirs('graphs', exist_ok=True)
#Ploting line for alcohol
plt.plot(wine_B['Alcohol'], color='g')
plt.title('Alcohol by Index')
plt.xlabel('Index')
plt.ylabel('Alcohol')
plt.savefig(f'graphs/Alcohol_by_index_plot.png', format='png')
plt.clf()
# Plot one line chart per wine feature and save each under graphs/.
# Replaces the previous copy-pasted per-column blocks (and the dead
# commented-out loop that followed them) with a single loop.
# 'Alcohol' is excluded because it is already plotted above this block.
line_plot_columns = [
    'Malic_Acid', 'Ash', 'Alcalinity_of_Ash', 'Magnesium', 'Total_Phenols',
    'Flavanoids', 'Nonflavanoid_Phenols', 'Proanthocyanins',
    'Color_Intensity', 'Hue', 'OD280_OD315_of_Diluted_Wines', 'Proline',
]
for col in line_plot_columns:
    plt.plot(wine_B[col], color='g')
    plt.title(f'{col} by Index')
    plt.xlabel('Index')
    plt.ylabel(col)
    plt.savefig(f'graphs/{col}_by_index_plot.png', format='png')
    plt.clf()  # clear the figure so the next column starts fresh
| 29.578125 | 88 | 0.744057 |
81e0b2153d543771f9ccb08bc53b840667a38757 | 7,451 | py | Python | ares/attack/bim.py | KuanKuanQAQ/ares | 40dbefc18f6438e1812021fe6d6c3195f22ca295 | [
"MIT"
] | 206 | 2020-12-31T09:43:11.000Z | 2022-03-30T07:02:41.000Z | ares/attack/bim.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
] | 7 | 2021-01-26T06:45:44.000Z | 2022-02-26T05:25:48.000Z | ares/attack/bim.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
] | 61 | 2020-12-29T14:02:41.000Z | 2022-03-26T14:21:10.000Z | import tensorflow as tf
import numpy as np
from ares.attack.base import BatchAttack
from ares.attack.utils import get_xs_ph, get_ys_ph, maybe_to_array, get_unit
| 53.992754 | 120 | 0.65414 |
81e170cb1bf7f677e6d97334533f66e198b1aa28 | 3,148 | py | Python | parasite/resolver.py | SGevorg/parasite | 574b3992abeef03406524a94b6a8a2d662ca13e7 | [
"MIT"
] | 9 | 2020-09-21T11:21:01.000Z | 2020-12-18T08:21:27.000Z | parasite/resolver.py | bittlingmayer/parasite | daac95eeaa19d5b05c0a3af076e364ca21708ff4 | [
"MIT"
] | 1 | 2020-11-21T09:45:45.000Z | 2020-11-21T09:45:45.000Z | parasite/resolver.py | bittlingmayer/parasite | daac95eeaa19d5b05c0a3af076e364ca21708ff4 | [
"MIT"
] | 4 | 2020-11-21T09:08:30.000Z | 2020-12-05T15:46:56.000Z | import numpy as np
from functools import lru_cache
from typing import Tuple
| 32.453608 | 80 | 0.55432 |
81e2d167ec9fa89c74b62f2bf234fc1429ff2619 | 3,864 | py | Python | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
] | null | null | null | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
] | null | null | null | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
] | null | null | null | import numpy as np
"""
Preprocessing code that derives additional information from MRI volumes and ground-truth segmentation maps (asegs),
e.g. weight masks for median-frequency class weighting, edge weighting, etc.
"""
def create_weight_mask(aseg):
    """Build the per-voxel loss-weight mask for a segmentation map.

    Currently the mask consists of the median-frequency class weighting
    alone; further terms (e.g. edge weights) can be summed onto
    ``weights`` below.

    Args:
        aseg (numpy.ndarray): Segmentation map of shape (h, w, d) or
            (c, h, w, d).

    Returns:
        numpy.ndarray: Weight mask of shape (h, w, d).
    """
    if len(aseg.shape) == 4:
        spatial_shape = aseg.shape[1:]
    elif len(aseg.shape) == 3:
        spatial_shape = aseg.shape
    weights = np.zeros(spatial_shape, dtype=float)
    weights += median_freq_class_weighing(aseg)
    # Additional weight terms (e.g. extra weight on region borders)
    # would be added here, e.g. weights += edge_weights(aseg)
    return weights
def median_freq_class_weighing(aseg):
    """Compute a median-frequency class-weight mask.

    Every voxel receives ``median(class_counts) / count(its class)``, so
    rare classes are up-weighted. The lookup is guarded against label
    values that are absent from this particular sample.

    Args:
        aseg (numpy.ndarray): Segmentation map of shape (h, w, d) or
            (c, h, w, d).

    Returns:
        numpy.ndarray: Median-frequency weight mask of shape (h, w, d).
    """
    labels, label_counts = np.unique(aseg, return_counts=True)
    if len(aseg.shape) == 4:
        spatial_shape = aseg.shape[1:]
    elif len(aseg.shape) == 3:
        spatial_shape = aseg.shape
    per_class_weight = np.median(label_counts) / label_counts
    aseg = aseg.astype(int)
    # Lookup table mapping a raw label value to its index in `labels`;
    # entries for label values absent from the sample stay at -1.
    lut = np.zeros(int(max(labels)) + 1, dtype=int) - 1
    for idx, label in enumerate(labels):
        lut[int(label)] = idx
    return per_class_weight[lut[aseg.ravel()]].reshape(spatial_shape)
# Label mapping functions (to aparc (eval) and to label (train))
def map_label2aparc_aseg(mapped_aseg):
    """Map a label-space segmentation back to aparc.DKTatlas+aseg space.

    :param np.ndarray mapped_aseg: label-space volume of shape (h, w, d)
        holding class indices 0..78
    :return: volume of the same shape holding FreeSurfer label IDs
    """
    # Position i holds the FreeSurfer label ID for class index i.
    lookup = np.array([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14,
                       15, 16, 17, 18, 24, 26, 28, 31, 41, 43, 44,
                       46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63,
                       77, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
                       1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
                       1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1034, 1035,
                       2002, 2005, 2010, 2012, 2013, 2014, 2016, 2017, 2021, 2022, 2023,
                       2024, 2025, 2028])
    h, w, d = mapped_aseg.shape
    return lookup[mapped_aseg.ravel()].reshape((h, w, d))
# if __name__ == "__main__":
# #a = np.random.randint(0, 5, size=(10,10,10))
# #b = np.random.randint(5, 10, size=(10000))
#
# #map_masks_into_5_classes(np.random.randint(0, 250, size=(256, 256, 256)))
#
# import nibabel as nib
# from data_utils.process_mgz_into_hdf5 import map_aparc_aseg2label, map_aseg2label
# path = r"abide_ii/sub-28675/mri/aparc.DKTatlas+aseg.mgz"
# aseg = nib.load(path).get_data()
# labels_full, _ = map_aparc_aseg2label(aseg) # only for 79 classes case
# # labels_full, _ = map_aseg2label(aseg) # only for 37 classes case
# aseg = labels_full
# # print(aseg.shape)
# median_freq_class_weighing(aseg)
# # print(edge_weighing(aseg, 1.5))
| 35.777778 | 120 | 0.646222 |
81e2ef476be7e9c25d158962fb2d60491bb44e8a | 4,381 | py | Python | test/test_oneview_hypervisor_cluster_profile_facts.py | nabhajit-ray/oneview-ansible | b31af8a696013bac7a1900748a2fa5ba491fe8e2 | [
"Apache-2.0"
] | 108 | 2016-06-28T18:14:08.000Z | 2022-02-21T09:16:06.000Z | test/test_oneview_hypervisor_cluster_profile_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
] | 248 | 2016-07-14T12:50:17.000Z | 2022-02-06T18:57:16.000Z | test/test_oneview_hypervisor_cluster_profile_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
] | 88 | 2016-06-29T15:52:44.000Z | 2022-03-10T12:34:41.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from copy import deepcopy
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import HypervisorClusterProfileFactsModule
PROFILE_URI = '/rest/hypervisor-cluster-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d'
PARAMS_GET_ALL = dict(
config='config.json'
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test Cluster Profile"
)
PARAMS_GET_BY_URI = dict(
config='config.json',
uri="/rest/test/123"
)
PARAMS_WITH_OPTIONS = dict(
config='config.json',
name="Test Cluster Profile",
options=[
'compliancePreview',
]
)
if __name__ == '__main__':
pytest.main([__file__])
| 35.048 | 115 | 0.71947 |
81e5c39849311e6837ffa50cd43accfe28aa75bf | 1,339 | py | Python | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 19 | 2018-06-08T05:33:47.000Z | 2021-04-26T16:19:32.000Z | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | null | null | null | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 13 | 2018-09-24T21:52:06.000Z | 2021-02-26T10:40:25.000Z | import os
import scipy
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
| 25.264151 | 70 | 0.630321 |
81e5f55f1de69308bb6ff205c3967683e8097ccc | 3,806 | py | Python | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.time import Time
from gammapy.data import FixedPointingInfo, PointingInfo
from gammapy.utils.testing import assert_time_allclose, requires_data
| 34.6 | 69 | 0.679453 |
81e620b1dfd869927a5135342a7294ba02276c08 | 1,183 | py | Python | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from src.commands import *
# import src.commands as command_modules
secret_token = None
db_user = None
db_pass = None
sessions = {}
try:
lines = [line.rstrip('\n') for line in open('.secret_token')]
secret_token = lines[0]
db_user = lines[1]
db_pass = lines[2]
client_id = lines[3]
client_secret = lines[4]
twitter_consumer_key = lines[5]
twitter_consumer_secret = lines[6]
twitter_access_token_key = lines[7]
twitter_access_token_secret = lines[8]
scraper_token = lines[9]
except Exception as e:
print(e)
print('error reading .secret_token, make it you aut')
session = get_session()
| 25.170213 | 94 | 0.674556 |
81e6447d74e137ba6ed7fb43a3550f34c92da3a7 | 2,876 | py | Python | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
] | null | null | null | import click
import logging
import matplotlib
import matplotlib.pyplot as plt
import joblib
import fact.io
from ..configuration import AICTConfig
from ..plotting import (
plot_regressor_confusion,
plot_bias_resolution,
plot_feature_importances,
)
if matplotlib.get_backend() == 'pgf':
from matplotlib.backends.backend_pgf import PdfPages
else:
from matplotlib.backends.backend_pdf import PdfPages
| 30.924731 | 83 | 0.691586 |
c48c8a45a8bc31ea98b3b0eb49ac12298185c634 | 2,426 | py | Python | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 435 | 2019-11-04T22:35:50.000Z | 2022-03-29T20:15:07.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 331 | 2021-11-02T00:30:56.000Z | 2022-03-08T16:48:13.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 66 | 2019-11-06T01:28:12.000Z | 2022-03-01T09:18:32.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
| 30.325 | 90 | 0.626958 |
c48caf2d700cbc3c512434c652a6ac5a08e2206b | 346 | py | Python | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | cont = 3
t1 = 0
t2 = 1
print('-----' * 12)
print('Sequncia de Fibonacci')
print('-----' * 12)
valor = int(input('Quantos termos voc quer mostrar ? '))
print('~~~~~' * 12)
print(f'{t1} {t2} ' , end=' ')
while cont <= valor:
t3 = t1 + t2
print(f' {t3}', end=' ')
t1 = t2
t2 = t3
t3 = t1
cont += 1
print(' F I M')
| 19.222222 | 57 | 0.482659 |
c48ce6625a976f83a24cccf09278351389aa811f | 3,991 | py | Python | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-08-04T22:46:38.000Z | 2019-08-04T22:46:38.000Z | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
Sra.py - Methods for dealing with short read archive files
==========================================================
Utility functions for dealing with :term:`SRA` formatted files from
the Short Read Archive.
Requirements:
* fastq-dump >= 2.1.7
Code
----
'''
import os
import glob
import tempfile
import shutil
import CGAT.Experiment as E
import CGAT.Fastq as Fastq
import CGAT.IOTools as IOTools
def peek(sra, outdir=None):
    """return the full file names for all files which will be extracted

    Runs ``fastq-dump`` on the first 1000 spots of `sra` to discover how
    many fastq files the archive yields and what quality/data format
    they use.

    Parameters
    ----------
    sra : path
        The :term:`SRA` archive to inspect.
    outdir : path
        perform extraction in outdir. If outdir is None, the extraction
        will take place in a temporary directory, which will be deleted
        afterwards.

    Returns
    -------
    files : list
        A list of fastq formatted files that are contained in the archive.
    format : string
        The quality score format in the :term:`fastq` formatted files.
    datatype : string
        The data type guessed from the first fastq file.
    """
    if outdir is None:
        workdir = tempfile.mkdtemp()
    else:
        workdir = outdir
    # --split-files creates files called prefix_#.fastq.gz,
    # where # is the read number.
    # If file contains paired end data:
    # output = prefix_1.fastq.gz, prefix_2.fastq.gz
    # *special case: unpaired reads in a paired end --> prefix.fastq.gz
    # *special case: if paired reads are stored in a single read,
    # fastq-dump will split. There might be a joining
    # sequence. The output would thus be:
    # prefix_1.fastq.gz, prefix_2.fastq.gz, prefix_3.fastq.gz
    # You want files 1 and 3.
    E.run("""fastq-dump --split-files --gzip -X 1000
    --outdir %(workdir)s %(sra)s""" % locals())
    f = sorted(glob.glob(os.path.join(workdir, "*.fastq.gz")))
    ff = [os.path.basename(x) for x in f]
    if len(f) == 1:
        # sra file contains one read: output = prefix.fastq.gz
        pass
    elif len(f) == 2:
        # sra file contains read pairs:
        # output = prefix_1.fastq.gz, prefix_2.fastq.gz
        assert ff[0].endswith(
            "_1.fastq.gz") and ff[1].endswith("_2.fastq.gz")
    elif len(f) == 3:
        # A joining sequence was split into its own file (*_2); keep the
        # two real mates (*_1 and *_3). The previous if/else here had two
        # byte-identical branches, so the condition was redundant.
        f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
    # check format of fastqs in .sra
    fastq_format = Fastq.guessFormat(IOTools.openFile(f[0], "r"), raises=False)
    fastq_datatype = Fastq.guessDataType(IOTools.openFile(f[0], "r"), raises=True)
    if outdir is None:
        shutil.rmtree(workdir)
    return f, fastq_format, fastq_datatype
def extract(sra, outdir, tool="fastq-dump"):
    """Return a shell statement that extracts the SRA file into `outdir`.

    Possible tools are fastq-dump and abi-dump; use abi-dump for
    colorspace data.
    """
    # fastq-dump needs --split-files so paired reads end up in separate files.
    command = tool + " --split-files" if tool == "fastq-dump" else tool
    return "%s --gzip --outdir %s %s" % (command, outdir, sra)
c48d9b9b2d55aa3083a0ad90f19c76032b967b27 | 11,902 | py | Python | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | null | null | null | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | null | null | null | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | [
"MIT"
] | null | null | null | # Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Represent a DataFrame to be processed with LipidFinder's workflow."""
import glob
import logging
import os
import pandas
| 50.008403 | 80 | 0.584272 |
c48f04379334e4d1150bc95e2f72b0aa259025e8 | 4,836 | py | Python | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_fused_embedding_ops
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up_grad
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_pre_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up_grad
from tensorflow.python.util.tf_export import tf_export
| 49.346939 | 99 | 0.709884 |
c48f102d83062572178277d6397d5fa6395d6e36 | 705 | py | Python | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | [
"BSD-3-Clause"
] | null | null | null | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import sphinx_rtd_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.autosummary',
# 'sphinx.ext.viewcode',
# 'jupyter_sphinx.embed_widgets',
]
templates_path = ['_templates']
master_doc = 'index'
source_suffix = '.rst'
# General information about the project.
project = 'ipycanvas'
author = 'Martin Renou'
exclude_patterns = []
highlight_language = 'python'
pygments_style = 'sphinx'
# Output file base name for HTML help builder.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
htmlhelp_basename = 'ipycanvasdoc'
autodoc_member_order = 'bysource'
| 21.363636 | 58 | 0.721986 |
c48f34eb08cb0b005af06224c4565e27b18c0cb3 | 672 | py | Python | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | [
"BSD-2-Clause"
] | null | null | null | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | [
"BSD-2-Clause"
] | 7 | 2020-04-20T17:25:45.000Z | 2021-06-13T21:05:14.000Z | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Sun May 09, 2021 at 02:52 AM +0200
import numpy as np
ARRAY_TYPE = 'np'
| 23.172414 | 72 | 0.699405 |
c4900758f320c35b4e8aaebac80f973e8645fdc4 | 483 | py | Python | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | [
"MIT"
] | null | null | null | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | [
"MIT"
] | null | null | null | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | [
"MIT"
] | null | null | null | from django import forms
from .models import Reminder
from clinnotes.users.models import EpisodeOfCare
| 34.5 | 94 | 0.693582 |
c491762e38ab524f8ee85b6bd6fa42008e8b092d | 3,583 | py | Python | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | [
"MIT"
] | null | null | null | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | [
"MIT"
] | null | null | null | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | [
"MIT"
] | null | null | null | import pygame as pg
from shapely.geometry import Point, Polygon
from time import perf_counter
# Vars
A = [(100, 600), (700, 600), (400, 80)]
triangles = [[(100, 600), (700, 600), (400, 80)]]
SQRT_3 = 3 ** (1 / 2)
WHITE = (255, 255, 255)
# Graphics part
pg.init()
screen = pg.display.set_mode((800, 800))
# Funcs
distance = lambda x, y: ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5
start = perf_counter()
# Call Func
generateSnowflake(A, 6)
print(len(A))
# Game Loop
while True:
screen.fill(WHITE)
A.append(A[0])
for i in range(len(A) - 1):
pg.draw.line(screen, (0, 0, 0), A[i], A[i + 1])
# exit code
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
quit(0)
# Updating
pg.display.update()
print(perf_counter() - start)
| 32.87156 | 86 | 0.487301 |
c492c9c90d1fe8546ec965192035975153cc63a2 | 39,839 | py | Python | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | [
"BSD-3-Clause"
] | null | null | null | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | [
"BSD-3-Clause"
] | null | null | null | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | [
"BSD-3-Clause"
] | null | null | null | from copy import deepcopy
from functools import partial
import sys
import types
# Global import of predefinedentities will cause an import loop
import instanceactions
from validator.constants import (BUGZILLA_BUG, DESCRIPTION_TYPES, FENNEC_GUID,
FIREFOX_GUID, MAX_STR_SIZE, MDN_DOC)
from validator.decorator import version_range
from jstypes import JSArray, JSContext, JSLiteral, JSObject, JSWrapper
NUMERIC_TYPES = (int, long, float, complex)
# None of these operations (or their augmented assignment counterparts) should
# be performed on non-numeric data. Any time we get non-numeric data for these
# guys, we just return window.NaN.
NUMERIC_OPERATORS = ('-', '*', '/', '%', '<<', '>>', '>>>', '|', '^', '&')
NUMERIC_OPERATORS += tuple('%s=' % op for op in NUMERIC_OPERATORS)
def _get_member_exp_property(traverser, node):
    """Return the string value of a member expression's property."""
    prop = node['property']
    # `x.y` with a plain identifier: the property name is known statically.
    if prop['type'] == 'Identifier' and not node.get('computed'):
        return unicode(prop['name'])
    # `x[y]`: evaluate the property expression and coerce it to a string.
    evaluated = traverser._traverse_node(prop)
    return _get_as_str(evaluated.get_literal_value())
def _expand_globals(traverser, node):
    """Expands a global object that has a lambda value.

    If `node` is a global whose 'value' entry is callable, call it with
    the traverser and wrap the result in a JSWrapper; otherwise return
    `node` unchanged.
    """
    if node.is_global and callable(node.value.get('value')):
        result = node.value['value'](traverser)
        # Normalize the lambda's result into a JSWrapper.
        if isinstance(result, dict):
            output = traverser._build_global('--', result)
        elif isinstance(result, JSWrapper):
            output = result
        else:
            output = JSWrapper(result, traverser)
        # Set the node context.
        if 'context' in node.value:
            traverser._debug('CONTEXT>>%s' % node.value['context'])
            output.context = node.value['context']
        else:
            # No explicit context on the global: inherit the node's.
            traverser._debug('CONTEXT>>INHERITED')
            output.context = node.context
        return output
    return node
def trace_member(traverser, node, instantiate=False):
    'Traces a MemberExpression and returns the appropriate object'
    # Recursively resolves `a.b.c` / `a[b]` chains to a JSWrapper. When
    # `instantiate` is set, undefined identifiers are created on the fly.
    traverser._debug('TESTING>>%s' % node['type'])
    if node['type'] == 'MemberExpression':
        # x.y or x[y]
        # x = base
        base = trace_member(traverser, node['object'], instantiate)
        base = _expand_globals(traverser, base)
        identifier = _get_member_exp_property(traverser, node)
        # Handle the various global entity properties.
        if base.is_global:
            # If we've got an XPCOM wildcard, return a copy of the entity.
            if 'xpcom_wildcard' in base.value:
                traverser._debug('MEMBER_EXP>>XPCOM_WILDCARD')
                from predefinedentities import CONTRACT_ENTITIES
                # Warn on known-dangerous XPCOM contract IDs.
                if identifier in CONTRACT_ENTITIES:
                    kw = dict(err_id=('js', 'actions', 'dangerous_contract'),
                              warning='Dangerous XPCOM contract ID')
                    kw.update(CONTRACT_ENTITIES[identifier])
                    traverser.warning(**kw)
                # Copy before mutating so the shared global stays intact.
                base.value = base.value.copy()
                del base.value['xpcom_wildcard']
                return base
        test_identifier(traverser, identifier)
        traverser._debug('MEMBER_EXP>>PROPERTY: %s' % identifier)
        output = base.get(
            traverser=traverser, instantiate=instantiate, name=identifier)
        output.context = base.context
        if base.is_global:
            # In the cases of XPCOM objects, methods generally
            # remain bound to their parent objects, even when called
            # indirectly.
            output.parent = base
        return output
    elif node['type'] == 'Identifier':
        traverser._debug('MEMBER_EXP>>ROOT:IDENTIFIER')
        test_identifier(traverser, node['name'])
        # If we're supposed to instantiate the object and it doesn't already
        # exist, instantiate the object.
        if instantiate and not traverser._is_defined(node['name']):
            output = JSWrapper(JSObject(), traverser=traverser)
            traverser.contexts[0].set(node['name'], output)
        else:
            output = traverser._seek_variable(node['name'])
        return _expand_globals(traverser, output)
    else:
        traverser._debug('MEMBER_EXP>>ROOT:EXPRESSION')
        # It's an expression, so just try your damndest.
        return traverser._traverse_node(node)
def test_identifier(traverser, name):
    """Emit a warning if `name` is a banned or deprecated identifier."""
    import predefinedentities
    banned = predefinedentities.BANNED_IDENTIFIERS
    if name not in banned:
        return
    traverser.err.warning(
        err_id=('js', 'actions', 'banned_identifier'),
        warning='Banned or deprecated JavaScript Identifier',
        description=banned[name],
        filename=traverser.filename,
        line=traverser.line,
        column=traverser.position,
        context=traverser.context)
def _function(traverser, node):
    'Prevents code duplication'
    # Shared handler for function declarations/expressions. `wrap` is
    # defined elsewhere in this module — presumably it traverses the
    # function body; TODO confirm.
    # Put the function off for traversal at the end of the current block scope.
    traverser.function_collection[-1].append(partial(wrap, traverser, node))
    return JSWrapper(traverser=traverser, callable=True, dirty=True)
def _func_expr(traverser, node):
    'Represents a lambda function'
    # Function expressions are handled identically to declarations.
    return _function(traverser, node)
def _define_with(traverser, node):
    'Handles `with` statements'
    object_ = traverser._traverse_node(node['object'])
    if isinstance(object_, JSWrapper) and isinstance(object_.value, JSObject):
        # Replace the innermost context with the `with` target's object so
        # its properties resolve as bare identifiers, then open a fresh
        # block context for the statement body.
        traverser.contexts[-1] = object_.value
        traverser.contexts.append(JSContext('block'))
    return
def _define_var(traverser, node):
    'Creates a local context variable'
    # Handles var/let/const declarations, including (single-level) array
    # destructuring and object-pattern destructuring.
    traverser._debug('VARIABLE_DECLARATION')
    traverser.debug_level += 1
    declarations = (node['declarations'] if 'declarations' in node
                    else node['head'])
    kind = node.get('kind', 'let')
    for declaration in declarations:
        # It could be deconstruction of variables :(
        if declaration['id']['type'] == 'ArrayPattern':
            vars = []
            for element in declaration['id']['elements']:
                # NOTE : Multi-level array destructuring sucks. Maybe implement
                # it someday if you're bored, but it's so rarely used and it's
                # so utterly complex, there's probably no need to ever code it
                # up.
                if element is None or element['type'] != 'Identifier':
                    vars.append(None)
                    continue
                vars.append(element['name'])
            # The variables are not initialized
            if declaration['init'] is None:
                # Simple instantiation; no initialization
                for var in vars:
                    if not var:
                        continue
                    traverser._declare_variable(var, None)
            # The variables are declared inline
            elif declaration['init']['type'] == 'ArrayPattern':
                # TODO : Test to make sure len(values) == len(vars)
                for value in declaration['init']['elements']:
                    if vars[0]:
                        traverser._declare_variable(
                            vars[0], JSWrapper(traverser._traverse_node(value),
                                               traverser=traverser))
                    vars = vars[1:]  # Pop off the first value
            # It's being assigned by a JSArray (presumably)
            elif declaration['init']['type'] == 'ArrayExpression':
                assigner = traverser._traverse_node(declaration['init'])
                for value in assigner.value.elements:
                    if vars[0]:
                        traverser._declare_variable(vars[0], value)
                    vars = vars[1:]
        elif declaration['id']['type'] == 'ObjectPattern':
            # Object destructuring; _proc_objpattern is defined elsewhere
            # in this module.
            init = traverser._traverse_node(declaration['init'])
            if init is not None:
                _proc_objpattern(init_obj=init,
                                 properties=declaration['id']['properties'])
        else:
            # Plain `var x = value` declaration.
            var_name = declaration['id']['name']
            traverser._debug('NAME>>%s' % var_name)
            var_value = traverser._traverse_node(declaration['init'])
            traverser._debug('VALUE>>%s' % (var_value.output()
                                            if var_value is not None
                                            else 'None'))
            # Ensure the value is wrapped and carries const-ness.
            if not isinstance(var_value, JSWrapper):
                var = JSWrapper(value=var_value,
                                const=kind == 'const',
                                traverser=traverser)
            else:
                var = var_value
                var.const = kind == 'const'
            traverser._declare_variable(var_name, var, type_=kind)
    # `let (x = 1) { ... }` style declarations carry a body to traverse.
    if 'body' in node:
        traverser._traverse_node(node['body'])
    traverser.debug_level -= 1
    # The "Declarations" branch contains custom elements.
    return True
def _define_obj(traverser, node):
    'Creates a local context object'
    # Builds a lazy JSObject from an ObjectExpression node, resolving each
    # property key (literal, identifier, or computed member expression).
    var = JSObject()
    for prop in node['properties']:
        if prop['type'] == 'PrototypeMutation':
            var_name = 'prototype'
        else:
            key = prop['key']
            if key['type'] == 'Literal':
                var_name = key['value']
            elif isinstance(key['name'], basestring):
                var_name = key['name']
            else:
                # Computed key: normalize into a member-expression shape so
                # _get_member_exp_property can evaluate it.
                if 'property' in key['name']:
                    name = key['name']
                else:
                    name = {'property': key['name']}
                var_name = _get_member_exp_property(traverser, name)
        var_value = traverser._traverse_node(prop['value'])
        var.set(var_name, var_value, traverser)
        # TODO: Observe "kind"
    # Return a lazy wrapper so properties are evaluated on demand.
    if not isinstance(var, JSWrapper):
        return JSWrapper(var, lazy=True, traverser=traverser)
    var.lazy = True
    return var
def _define_array(traverser, node):
    """Build a JSArray whose elements are the traversed parse-tree elements."""
    array_obj = JSArray()
    array_obj.elements = [traverser._traverse_node(element)
                          for element in node['elements']]
    return array_obj
def _define_template_strings(traverser, node):
    """Build the cooked template-string array, attaching the raw strings
    as its `raw` member (mirroring the tagged-template contract)."""
    cooked_parts = JSArray()
    cooked_parts.elements = [traverser._traverse_node(part)
                             for part in node['cooked']]
    raw_parts = JSArray()
    raw_parts.elements = [traverser._traverse_node(part)
                          for part in node['raw']]
    cooked_parts.set('raw', raw_parts, traverser)
    return cooked_parts
def _define_template(traverser, node):
    """Evaluate a template literal by '+'-folding its traversed parts."""
    concat = partial(_binary_op, '+', traverser=traverser)
    return reduce(concat, map(traverser._traverse_node, node['elements']))
def _define_literal(traverser, node):
    """Wrap a literal parse node in a JSWrapper and screen its value.

    A dict-valued literal cannot be reduced to a primitive, so it is
    wrapped as a dirty object and skips the string screening.
    """
    value = node['value']
    if isinstance(value, dict):
        return JSWrapper(JSObject(), traverser=traverser, dirty=True)

    if value is None:
        wrapper = JSWrapper(JSLiteral(None), traverser=traverser)
    else:
        wrapper = JSWrapper(value, traverser=traverser)
    test_literal(traverser, wrapper)
    return wrapper
def test_literal(traverser, wrapper):
    """Screen a literal wrapper against dangerous string patterns.

    Only string literals are checked; all other literal types pass
    through untouched.
    """
    literal = wrapper.get_literal_value()
    if not isinstance(literal, basestring):
        return
    # Local import to prevent import loop.
    from validator.testcases.regex import validate_string
    validate_string(literal, traverser, wrapper=wrapper)
def _call_settimeout(a, t, e):
"""
Handler for setTimeout and setInterval. Should determine whether a[0]
is a lambda function or a string. Strings are banned, lambda functions are
ok. Since we can't do reliable type testing on other variables, we flag
those, too.
"""
if not a:
return
if a[0]['type'] in ('FunctionExpression', 'ArrowFunctionExpression'):
return
if t(a[0]).callable:
return
return {'err_id': ('javascript', 'dangerous_global', 'eval'),
'description':
'In order to prevent vulnerabilities, the `setTimeout` '
'and `setInterval` functions should be called only with '
'function expressions as their first argument.',
'signing_help': (
'Please do not ever call `setTimeout` or `setInterval` with '
'string arguments. If you are passing a function which is '
'not being correctly detected as such, please consider '
'passing a closure or arrow function, which in turn calls '
'the original function.'),
'signing_severity': 'high'}
def _call_require(a, t, e):
"""
Tests for unsafe uses of `require()` in SDK add-ons.
"""
args, traverse, err = a, t, e
if not err.metadata.get('is_jetpack') and len(args):
return
module = traverse(args[0]).get_literal_value()
if not isinstance(module, basestring):
return
if module.startswith('sdk/'):
module = module[len('sdk/'):]
LOW_LEVEL = {
# Added from bugs 689340, 731109
'chrome', 'window-utils', 'observer-service',
# Added from bug 845492
'window/utils', 'sdk/window/utils', 'sdk/deprecated/window-utils',
'tab/utils', 'sdk/tab/utils',
'system/events', 'sdk/system/events',
}
if module in LOW_LEVEL:
err.metadata['requires_chrome'] = True
return {'warning': 'Usage of low-level or non-SDK interface',
'description': 'Your add-on uses an interface which bypasses '
'the high-level protections of the add-on SDK. '
'This interface should be avoided, and its use '
'may significantly complicate your review '
'process.'}
if module == 'widget':
return {'warning': 'Use of deprecated SDK module',
'description':
"The 'widget' module has been deprecated due to a number "
'of performance and usability issues, and has been '
'removed from the SDK as of Firefox 40. Please use the '
"'sdk/ui/button/action' or 'sdk/ui/button/toggle' module "
'instead. See '
'https://developer.mozilla.org/Add-ons/SDK/High-Level_APIs'
'/ui for more information.'}
def _call_create_pref(a, t, e):
    """
    Handler for pref() and user_pref() calls in defaults/preferences/*.js files
    to ensure that they don't touch preferences outside of the "extensions."
    branch.

    `a` is the argument node list and `t` is the (bound) traversal
    method; the result of `test_preference` (or None) is returned.
    """
    # We really need to clean up the arguments passed to these functions.
    # `t` is a Python 2 bound method; `im_self` recovers the traverser
    # instance it is bound to.
    traverser = t.im_self

    # Only applies inside default-preference files, and only with args.
    if not traverser.filename.startswith('defaults/preferences/') or not a:
        return

    # Register the preference write itself (value is irrelevant here,
    # hence the null literal wrapper).
    instanceactions.set_preference(JSWrapper(JSLiteral(None),
                                             traverser=traverser),
                                   a, traverser)

    # Screen the preference *name* (first argument) against the allowed
    # branch rules.
    value = _get_as_str(t(a[0]))
    return test_preference(value)
def _readonly_top(traverser, right, node_right):
    """Readonly callback for `window.top`: emit a compatibility notice
    about assigning to the reserved `top` global (Gecko 6+)."""
    affected_versions = {
        FIREFOX_GUID: version_range('firefox', '6.0a1', '7.0a1'),
        FENNEC_GUID: version_range('fennec', '6.0a1', '7.0a1'),
    }
    description = ('The `top` global variable is reserved and cannot be '
                   'assigned any values starting with Gecko 6. Review your '
                   'code for any uses of the `top` global, and refer to '
                   '%s for more information.' % BUGZILLA_BUG % 654137)
    traverser.notice(
        err_id=('testcases_javascript_actions', '_readonly_top'),
        notice='window.top is a reserved variable',
        description=description,
        for_appversions=affected_versions,
        compatibility_type='warning',
        tier=5)
def _expression(traverser, node):
"""
This is a helper method that allows node definitions to point at
`_traverse_node` without needing a reference to a traverser.
"""
return traverser._traverse_node(node['expression'])
def _get_this(traverser, node):
'Returns the `this` object'
if not traverser.this_stack:
from predefinedentities import GLOBAL_ENTITIES
return traverser._build_global('window', GLOBAL_ENTITIES[u'window'])
return traverser.this_stack[-1]
def _new(traverser, node):
    """Evaluate a `new` expression and return a fresh copy of the callee.

    The arguments are traversed only so dangerous content inside them is
    still inspected; their values do not feed into the result.
    """
    arguments = node['arguments']
    if isinstance(arguments, list):
        for argument in arguments:
            traverser._traverse_node(argument, source='arguments')
    else:
        traverser._traverse_node(arguments)

    result = traverser._traverse_node(node['callee'])
    if not isinstance(result, JSWrapper):
        result = JSWrapper(result, traverser=traverser)
    if result.is_global:
        # Deep-copy global entities so instances may be modified without
        # tainting the shared global definition.
        traverser._debug('Making overwritable')
        result.value = deepcopy(result.value)
        result.value['overwritable'] = True
    return result
def _ident(traverser, node):
    """Resolve an Identifier token to a variable in scope.

    Unknown names yield a dirty placeholder object instead of failing.
    """
    name = node['name']
    # Screen the identifier itself (e.g. banned names like "newThread").
    test_identifier(traverser, name)

    if not traverser._is_defined(name):
        return JSWrapper(JSObject(), traverser=traverser, dirty=True)
    return traverser._seek_variable(name)
def _expr_assignment(traverser, node):
    """Evaluate an AssignmentExpression node.

    Direct assignment (`=`) declares/updates the target and raises a
    "global overwrite" warning when a readonly global is clobbered.
    Augmented assignment (`+=`, `<<=`, ...) computes the new value from
    the literal values of both sides and stores it on the l-value.
    Returns the assigned wrapper (the r-value for `=`, the updated
    l-value otherwise).
    """
    traverser._debug('ASSIGNMENT_EXPRESSION')
    traverser.debug_level += 1
    traverser._debug('ASSIGNMENT>>PARSING RIGHT')
    right = traverser._traverse_node(node['right'])
    right = JSWrapper(right, traverser=traverser)
    # Treat direct assignment different than augmented assignment.
    if node['operator'] == '=':
        from predefinedentities import GLOBAL_ENTITIES, is_shared_scope
        global_overwrite = False
        readonly_value = is_shared_scope(traverser)
        node_left = node['left']
        traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])
        if node_left['type'] == 'Identifier':
            # Identifiers just need the ID name and a value to push.
            # Raise a global overwrite issue if the identifier is global.
            global_overwrite = traverser._is_global(node_left['name'])
            # Get the readonly attribute and store its value if is_global
            if global_overwrite:
                global_dict = GLOBAL_ENTITIES[node_left['name']]
                if 'readonly' in global_dict:
                    readonly_value = global_dict['readonly']
            traverser._declare_variable(node_left['name'], right, type_='glob')
        elif node_left['type'] == 'MemberExpression':
            member_object = trace_member(traverser, node_left['object'],
                                         instantiate=True)
            global_overwrite = (member_object.is_global and
                                not ('overwritable' in member_object.value and
                                     member_object.value['overwritable']))
            member_property = _get_member_exp_property(traverser, node_left)
            traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'
                             % member_property)
            traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)
            # Don't do the assignment if we're facing a global.
            if not member_object.is_global:
                if member_object.value is None:
                    member_object.value = JSObject()
                if not member_object.is_global:
                    member_object.value.set(member_property, right, traverser)
                else:
                    # It's probably better to do nothing.
                    pass
            elif 'value' in member_object.value:
                # The target is a global: expand it and, if the member
                # exists and is marked readonly, flag the overwrite.
                member_object_value = _expand_globals(traverser,
                                                      member_object).value
                if member_property in member_object_value['value']:
                    # If it's a global and the actual member exists, test
                    # whether it can be safely overwritten.
                    member = member_object_value['value'][member_property]
                    if 'readonly' in member:
                        global_overwrite = True
                        readonly_value = member['readonly']
        traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %
                         global_overwrite)
        traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %
                         readonly_value)
        # `readonly` may be a callback (e.g. _readonly_top) that decides
        # per-assignment whether the write is acceptable.
        if callable(readonly_value):
            readonly_value = readonly_value(traverser, right, node['right'])
        if readonly_value and global_overwrite:
            kwargs = dict(
                err_id=('testcases_javascript_actions',
                        '_expr_assignment',
                        'global_overwrite'),
                warning='Global variable overwrite',
                description='An attempt was made to overwrite a global '
                            'variable in some JavaScript code.')
            # The readonly value may carry a custom description or a full
            # set of warning keyword overrides.
            if isinstance(readonly_value, DESCRIPTION_TYPES):
                kwargs['description'] = readonly_value
            elif isinstance(readonly_value, dict):
                kwargs.update(readonly_value)
            traverser.warning(**kwargs)
        return right
    # --- Augmented assignment from here on. ---
    lit_right = right.get_literal_value()
    traverser._debug('ASSIGNMENT>>PARSING LEFT')
    left = traverser._traverse_node(node['left'])
    traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')
    traverser.debug_level -= 1
    if isinstance(left, JSWrapper):
        if left.dirty:
            return left
        lit_left = left.get_literal_value()
        token = node['operator']
        # Don't perform an operation on None. Python freaks out
        if lit_left is None:
            lit_left = 0
        if lit_right is None:
            lit_right = 0
        # Give them default values so we have them in scope.
        gleft, gright = 0, 0
        # All of the assignment operators
        # Note: '+=' deliberately uses the literal values (so string
        # concatenation works); the numeric operators use the coerced
        # gleft/gright assigned below. Any operator failure (e.g. a float
        # operand to a shift) is caught by the try/except further down.
        operators = {'=': lambda: right,
                     '+=': lambda: lit_left + lit_right,
                     '-=': lambda: gleft - gright,
                     '*=': lambda: gleft * gright,
                     '/=': lambda: 0 if gright == 0 else (gleft / gright),
                     '%=': lambda: 0 if gright == 0 else (gleft % gright),
                     '<<=': lambda: int(gleft) << int(gright),
                     '>>=': lambda: int(gleft) >> int(gright),
                     '>>>=': lambda: float(abs(int(gleft)) >> gright),
                     '|=': lambda: int(gleft) | int(gright),
                     '^=': lambda: int(gleft) ^ int(gright),
                     '&=': lambda: int(gleft) & int(gright)}
        # If we're modifying a non-numeric type with a numeric operator, return
        # NaN.
        if (not isinstance(lit_left, NUMERIC_TYPES) and
                token in NUMERIC_OPERATORS):
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        # If either side of the assignment operator is a string, both sides
        # need to be casted to strings first.
        if (isinstance(lit_left, types.StringTypes) or
                isinstance(lit_right, types.StringTypes)):
            lit_left = _get_as_str(lit_left)
            lit_right = _get_as_str(lit_right)
        gleft, gright = _get_as_num(left), _get_as_num(right)
        traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)
        if token not in operators:
            # We don't support that operator. (yet?)
            traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)
            return left
        elif token in ('<<=', '>>=', '>>>=') and gright < 0:
            # The user is doing weird bitshifting that will return 0 in JS but
            # not in Python.
            left.set_value(0, traverser=traverser)
            return left
        elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and
              (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            # Don't bother handling infinity for integer-converted operations.
            left.set_value(get_NaN(traverser), traverser=traverser)
            return left
        traverser._debug('ASSIGNMENT::L-value global? (%s)' %
                         ('Y' if left.is_global else 'N'), 1)
        try:
            new_value = operators[token]()
        except Exception:
            traverser.system_error(exc_info=sys.exc_info())
            new_value = None
        # Cap the length of analyzed strings.
        if (isinstance(new_value, types.StringTypes) and
                len(new_value) > MAX_STR_SIZE):
            new_value = new_value[:MAX_STR_SIZE]
        traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)
        left.set_value(new_value, traverser=traverser)
        return left
    # Though it would otherwise be a syntax error, we say that 4=5 should
    # evaluate out to 5.
    return right
def _expr_binary(traverser, node):
    """Evaluate a BinaryExpression node and return the wrapped result.

    Left-nested BinaryExpression chains are evaluated by direct
    recursion (with the result cached on the node as '__traversal') to
    keep the traversal recursion cap in line on long `a + b + c + ...`
    chains.

    NOTE(review): debug_level is incremented here with no matching
    decrement in this function — confirm whether the `_debug` context
    manager restores it.
    """
    traverser.debug_level += 1
    # Select the proper operator.
    operator = node['operator']
    traverser._debug('BIN_OPERATOR>>%s' % operator)
    # Traverse the left half of the binary expression.
    with traverser._debug('BIN_EXP>>l-value'):
        if (node['left']['type'] == 'BinaryExpression' and
                '__traversal' not in node['left']):
            # Process the left branch of the binary expression directly. This
            # keeps the recursion cap in line and speeds up processing of
            # large chains of binary expressions.
            left = _expr_binary(traverser, node['left'])
            node['left']['__traversal'] = left
        else:
            left = traverser._traverse_node(node['left'])
    # Traverse the right half of the binary expression.
    with traverser._debug('BIN_EXP>>r-value'):
        if (operator == 'instanceof' and
                node['right']['type'] == 'Identifier' and
                node['right']['name'] == 'Function'):
            # We make an exception for instanceof's r-value if it's a
            # dangerous global, specifically Function.
            return JSWrapper(True, traverser=traverser)
        else:
            right = traverser._traverse_node(node['right'])
            traverser._debug('Is dirty? %r' % right.dirty, 1)
    return _binary_op(operator, left, right, traverser)
def _binary_op(operator, left, right, traverser):
    """Perform a binary operation on two pre-traversed nodes.

    Operates on the literal values of the wrappers; dirty operands
    short-circuit the whole expression. String results are screened for
    dangerous patterns before being returned.
    """
    # Dirty l or r values mean we can skip the expression. A dirty value
    # indicates that a lazy operation took place that introduced some
    # nondeterminacy.
    # FIXME(Kris): We should process these as if they're strings anyway.
    if left.dirty:
        return left
    elif right.dirty:
        return right
    # Binary expressions are only executed on literals.
    left = left.get_literal_value()
    # Keep the wrapper around for the 'in' operator's containment check.
    right_wrap = right
    right = right.get_literal_value()
    # Coerce the literals to numbers for numeric operations.
    gleft = _get_as_num(left)
    gright = _get_as_num(right)
    operators = {
        '==': lambda: left == right or gleft == gright,
        '!=': lambda: left != right,
        '===': lambda: left == right, # Be flexible.
        '!==': lambda: type(left) != type(right) or left != right,
        '>': lambda: left > right,
        '<': lambda: left < right,
        '<=': lambda: left <= right,
        '>=': lambda: left >= right,
        '<<': lambda: int(gleft) << int(gright),
        '>>': lambda: int(gleft) >> int(gright),
        '>>>': lambda: float(abs(int(gleft)) >> int(gright)),
        '+': lambda: left + right,
        '-': lambda: gleft - gright,
        '*': lambda: gleft * gright,
        '/': lambda: 0 if gright == 0 else (gleft / gright),
        '%': lambda: 0 if gright == 0 else (gleft % gright),
        'in': lambda: right_wrap.contains(left),
        # TODO : implement instanceof
        # FIXME(Kris): Treat instanceof the same as `QueryInterface`
    }
    output = None
    # Shifts by a negative amount (or with missing operands) are 0/False
    # territory in JS but would raise in Python, so handle them up front.
    if (operator in ('>>', '<<', '>>>') and
            (left is None or right is None or gright < 0)):
        output = False
    elif operator in operators:
        # Concatenation can be silly, so always turn undefineds into empty
        # strings and if there are strings, make everything strings.
        if operator == '+':
            if left is None:
                left = ''
            if right is None:
                right = ''
            if isinstance(left, basestring) or isinstance(right, basestring):
                left = _get_as_str(left)
                right = _get_as_str(right)
        # Don't even bother handling infinity if it's a numeric computation.
        if (operator in ('<<', '>>', '>>>') and
                (abs(gleft) == float('inf') or abs(gright) == float('inf'))):
            return get_NaN(traverser)
        try:
            output = operators[operator]()
        except Exception:
            # Operator blew up on these operand types; log and fall back
            # to None rather than aborting the traversal.
            traverser.system_error(exc_info=sys.exc_info())
            output = None
        # Cap the length of analyzed strings.
        if (isinstance(output, types.StringTypes) and
                len(output) > MAX_STR_SIZE):
            output = output[:MAX_STR_SIZE]
        wrapper = JSWrapper(output, traverser=traverser)
        # Test the newly-created literal for dangerous values.
        # This may cause duplicate warnings for strings which
        # already match a dangerous value prior to concatenation.
        test_literal(traverser, wrapper)
        return wrapper
    # Unknown operator: return a wrapper around None.
    return JSWrapper(output, traverser=traverser)
def _expr_unary(traverser, node):
    """Evaluate a UnaryExpression node and return a wrapped result."""
    operand = traverser._traverse_node(node['argument'])
    operand_lit = operand.get_literal_value()
    operand_num = _get_as_num(operand_lit)

    dispatch = {
        '-': lambda: -1 * operand_num,
        '+': lambda: operand_num,
        '!': lambda: not operand_lit,
        '~': lambda: -1 * (operand_num + 1),
        'void': lambda: None,
        'typeof': lambda: _expr_unary_typeof(operand),
        # `delete` is treated as a no-op so the context stays intact.
        'delete': lambda: None,
    }
    handler = dispatch.get(node['operator'])
    result = handler() if handler is not None else None

    if isinstance(result, JSWrapper):
        return result
    return JSWrapper(result, traverser=traverser)
def _expr_unary_typeof(wrapper):
"""Evaluate the "typeof" value for a JSWrapper object."""
if (wrapper.callable or
(wrapper.is_global and 'return' in wrapper.value and
'value' not in wrapper.value)):
return 'function'
value = wrapper.value
if value is None:
return 'undefined'
elif isinstance(value, JSLiteral):
value = value.value
if isinstance(value, bool):
return 'boolean'
elif isinstance(value, (int, long, float)):
return 'number'
elif isinstance(value, types.StringTypes):
return 'string'
return 'object'
def _get_as_num(value):
    """Return the JS numeric equivalent for a value.

    Null-ish and unparseable values coerce to 0, loosely mirroring
    JavaScript's numeric conversion rules.
    """
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()

    if value is None:
        return 0

    try:
        if isinstance(value, types.StringTypes):
            # Hex strings are the only non-decimal form accepted.
            if value.startswith('0x'):
                return int(value, 16)
            return float(value)
        if isinstance(value, (int, float, long)):
            return value
        return int(value)
    except (ValueError, TypeError):
        return 0
def _get_as_str(value):
    """Return the JS string equivalent for a literal value."""
    if isinstance(value, JSWrapper):
        value = value.get_literal_value()

    if value is None:
        return ''
    if isinstance(value, bool):
        return u'true' if value else u'false'
    if isinstance(value, (int, float, long)):
        if value == float('inf'):
            return u'Infinity'
        if value == float('-inf'):
            return u'-Infinity'
        # Render whole numbers without a trailing ".0", as JS would.
        try:
            if int(value) == value:
                return unicode(int(value))
        except ValueError:
            pass
    return unicode(value)
| 37.407512 | 79 | 0.588192 |
c4930d25761ee9d797224e253c155e8643ca0fdb | 14,588 | py | Python | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
| 25.151724 | 75 | 0.466822 |
c493107b1fd8b943176b6566abf9ca29701a3c9c | 4,789 | py | Python | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
] | 117 | 2019-08-29T08:43:55.000Z | 2022-03-24T20:56:23.000Z | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
] | 9 | 2019-11-23T10:55:13.000Z | 2021-06-22T12:26:21.000Z | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
] | 33 | 2019-08-08T16:56:37.000Z | 2022-02-24T20:52:44.000Z | import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clipped(func):
    """
    wrapper to clip results of transform to image dtype value range
    """
    # NOTE(review): `wrapped_function` is not defined anywhere in this
    # snippet — the decorator body appears to have been stripped during
    # extraction. Restore the inner wrapper from the original source
    # before using this.
    return wrapped_function
def fix_shift_values(img, *args):
    """
    Shift values are normally specified for uint8 data; when the image is
    float32, remap them from [0, 255] into the float range by dividing
    by 255. Non-float images return the values unchanged.
    """
    if img.dtype != np.float32:
        return args
    return [value / 255 for value in args]
def rotate(img, angle):
    """
    Rotate an image about its center.

    :param angle: rotation angle in degrees
    """
    rows, cols = img.shape[0:2]
    rotation = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1.0)
    return cv2.warpAffine(img, rotation, (cols, rows),
                          flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_REFLECT_101)
def shift_scale_rotate(img, angle, scale, dx, dy):
    """
    Apply a combined rotation, scaling and translation to an image.

    :param angle: in degrees
    :param scale: relative scale
    :param dx: horizontal shift as a fraction of image width
    :param dy: vertical shift as a fraction of image height
    """
    height, width = img.shape[:2]

    # Rotation-plus-scale matrix built from the angle (degrees -> radians).
    cc = math.cos(angle/180*math.pi) * scale
    ss = math.sin(angle/180*math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    # Map the image's corner box through the transform: center it,
    # rotate/scale, then re-center with the (dx, dy) shift applied.
    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
    box1 = box0 - np.array([width/2, height/2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    # Derive the perspective transform from corner correspondences and warp.
    mat = cv2.getPerspectiveTransform(box0, box1)
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REFLECT_101)
    return img
def img_to_tensor(im, verbose=False):
    """Convert an HWC image to a float32 CHW tensor, scaling uint8
    inputs into [0, 1]."""
    scale = 255. if im.dtype == np.uint8 else 1
    tensor = np.moveaxis(im / scale, -1, 0).astype(np.float32)
    if verbose:
        print ("augmentations.functiona.py.img_to_tensor(): im_out.shape:", tensor.shape)
        print ("im_out.unique:", np.unique(tensor))
    return tensor
def mask_to_tensor(mask, num_classes, verbose=False):
    """Convert a mask to a float32 tensor: multi-class masks go through
    img_to_tensor (channels-first), binary masks gain a leading channel
    axis with uint8 values scaled into [0, 1]."""
    if num_classes > 1:
        tensor = img_to_tensor(mask)
    else:
        scale = 255. if mask.dtype == np.uint8 else 1
        tensor = np.expand_dims(mask / scale, 0).astype(np.float32)
    if verbose:
        print ("augmentations.functiona.py.img_to_tensor(): mask.shape:", tensor.shape)
        print ("mask.unique:", np.unique(tensor))
    return tensor
| 28.005848 | 102 | 0.595114 |
c493ccaae899498f57b59dd3ded561a78518f5a9 | 417 | py | Python | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 29 | 2018-04-24T17:06:19.000Z | 2021-11-21T05:17:28.000Z | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 13 | 2018-04-05T08:34:27.000Z | 2021-10-04T14:24:41.000Z | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
] | 17 | 2018-08-06T22:18:01.000Z | 2022-01-29T21:38:17.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 09:49:13 2017
@author: vmg
"""
import os
import buildingspy.development.regressiontest as r
rt = r.Tester(check_html=False)#,tool="dymola")
LibPath = os.path.join("TRANSFORM")
ResPath = LibPath
rt.showGUI(True)
rt.setLibraryRoot(LibPath, ResPath)
rt.setNumberOfThreads(1)
#rt.TestSinglePackage('Media.Solids.Examples.Hastelloy_N_Haynes', SinglePack=True)
rt.run()
| 23.166667 | 82 | 0.748201 |
c49410af0e45267e29dfed5b9b7fc2e415dd48a2 | 2,039 | py | Python | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
] | 1 | 2021-12-28T15:48:45.000Z | 2021-12-28T15:48:45.000Z | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
] | 2 | 2020-06-26T11:03:53.000Z | 2020-06-26T11:05:09.000Z | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Ngoc Huynh Bao"
__email__ = "ngoc.huynh.bao@nmbu.no"
from ..keras.layers import Activation
from ..keras.activations import deserialize
from ..utils import Singleton
def register_activation(key, activation):
    """
    Register a customized activation under a unique key name.

    Parameters
    ----------
    key: str
        The unique key-name of the activation
    activation: tensorflow.keras.activations.Activation
        The customized activation class

    Raises
    ------
    KeyError
        If the key name is already registered
    """
    # Activations is presumably a Singleton registry (see the Singleton
    # import above) — registration is global. TODO confirm.
    Activations().register(key, activation)
def unregister_activation(key):
    """
    Remove the registered activation with the given key-name.

    Parameters
    ----------
    key: str
        The key-name of the activation to be removed
    """
    Activations().unregister(key)
| 26.141026 | 77 | 0.647376 |
c4950cd78452abf9ab6be8b01808431c4aeef93d | 2,618 | py | Python | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
] | 79 | 2015-03-11T12:13:35.000Z | 2022-03-30T04:35:09.000Z | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
] | 72 | 2015-02-20T02:26:25.000Z | 2022-02-19T01:22:36.000Z | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
] | 36 | 2015-01-11T12:11:43.000Z | 2022-01-07T01:35:30.000Z | """ Module to take a water_level reading."""
# Raspi-sump, a sump pump monitoring system.
# Al Audet
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html
# Import the configparser module under both Python 2 and Python 3 names.
try:
    import ConfigParser as configparser  # Python2
except ImportError:
    import configparser  # Python3
from hcsr04sensor import sensor
from raspisump import log, alerts, heartbeat
# Read the user's configuration once at import time.
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")
# Required settings; these raise if missing from raspisump.conf.
configs = {
    "critical_water_level": config.getint("pit", "critical_water_level"),
    "pit_depth": config.getint("pit", "pit_depth"),
    "temperature": config.getint("pit", "temperature"),
    "trig_pin": config.getint("gpio_pins", "trig_pin"),
    "echo_pin": config.getint("gpio_pins", "echo_pin"),
    "unit": config.get("pit", "unit"),
}
# If item in raspisump.conf add to configs dict. If not provide defaults.
try:
    configs["alert_when"] = config.get("pit", "alert_when")
except configparser.NoOptionError:
    configs["alert_when"] = "high"
try:
    configs["heartbeat"] = config.getint("email", "heartbeat")
except configparser.NoOptionError:
    configs["heartbeat"] = 0
def initiate_heartbeat():
    """Run the heartbeat email check when enabled in raspisump.conf."""
    if configs["heartbeat"] == 1:
        heartbeat.determine_if_heartbeat()
def water_reading():
    """Take one ultrasonic water-level reading.

    Returns the water depth (in the configured unit) rounded to one
    decimal place. Exits the process if the sensor fails to respond.
    """
    pit_depth = configs["pit_depth"]
    trig_pin = configs["trig_pin"]
    echo_pin = configs["echo_pin"]
    temperature = configs["temperature"]
    unit = configs["unit"]

    value = sensor.Measurement(trig_pin, echo_pin, temperature, unit)
    try:
        raw_distance = value.raw_distance(sample_wait=0.3)
    except SystemError:
        # hcsr04sensor raises SystemError when no echo pulse is received
        # (cable or sensor fault); log it and terminate this cycle.
        log.log_errors(
            "**ERROR - Signal not received. Possible cable or sensor problem."
        )
        exit(0)
    return round(value.depth(raw_distance, pit_depth), 1)
def water_depth():
    """Measure and log the water depth, alert when the critical level is
    crossed in the configured direction, then run the heartbeat check."""
    critical = configs["critical_water_level"]
    # Clamp noise-induced negative readings to zero.
    depth = max(water_reading(), 0.0)
    log.log_reading(depth)

    alert_when = configs["alert_when"]
    if ((alert_when == "high" and depth > critical) or
            (alert_when == "low" and depth < critical)):
        alerts.determine_if_alert(depth)
    initiate_heartbeat()
| 29.088889 | 79 | 0.688312 |
c49a318a7b8ef2bfca67b0d3b643cbd37094de2d | 2,830 | py | Python | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
] | null | null | null | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
] | null | null | null | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
] | null | null | null | '''
Configuration generation for running Pancreas datasets
'''
import os, argparse
from pipelines import method_utils, dataloading_utils
from preprocess.process_train_test_data import *
if __name__ == "__main__":
    data_dir = "~/gpu/data"
    ## parse arguments
    import argparse
    parser = argparse.ArgumentParser(description="Celltyping pipeline.")
    parser.add_argument('data_source', help="Load which dataset",
            choices=[
                'pancreas', 'pancreas_seg_cond', 'pancreas_custom',
                'pancreas_seg_mix', 'pancreas_multi_to_multi'
                ])
    parser.add_argument('-m', '--method', help="Run which method",
            choices=['MLP', 'MLP_GO', 'MLP_CP', 'GEDFN', 'ItClust', 'SVM_RBF', 'SVM_linear', 'RF'], ## remove DFN
            required=True)
    parser.add_argument('--select_on', help="Feature selection on train or test, or None of them",
            choices=['train', 'test'])
    parser.add_argument('--select_method', help="Feature selection method, Seurat/FEAST or None",
            choices=['Seurat', 'FEAST', 'F-test'])
    parser.add_argument('--n_features', help="Number of features selected",
            default=1000, type=int)
    parser.add_argument('--train', help="Specify which as train", required=True)
    parser.add_argument('--test', help="Specify which as test", required=True)
    parser.add_argument('--sample_seed', help="Downsample seed in combined individual effect",
            default=0, type=int)
    args = parser.parse_args()
    # Result directory is keyed by data source and train/test pairing.
    pipeline_dir = "pipelines/result_Pancreas_collections"
    result_prefix = pipeline_dir+os.sep+"result_"+args.data_source+'_'+\
            args.train+'_to_'+args.test
    os.makedirs(result_prefix, exist_ok=True)
    ## create file directory
    # Subdirectory encodes the feature-selection configuration (or none).
    if args.select_on is None and args.select_method is None:
        result_dir = result_prefix+os.sep+"no_feature"
    else:
        result_dir = result_prefix+os.sep+args.select_method+'_'+\
                str(args.n_features)+'_on_'+args.select_on
    os.makedirs(result_dir, exist_ok=True)
    # Reuse cached AnnData objects when present; otherwise load fresh.
    load_ind, train_adata, test_adata = load_adata(result_dir)
    if not load_ind:
        train_adata, test_adata = dataloading_utils.load_Pancreas_adata(
                data_dir, result_dir, args=args)
    ## whether to purify reference dataset
    # Purification mode is inferred from the data_source name.
    purify_method = ""
    if "purify_dist" in args.data_source:
        purify_method = "distance"
    elif "purify_SVM" in args.data_source:
        purify_method = "SVM"
    train_adata, test_adata = dataloading_utils.process_loaded_data(
        train_adata, test_adata, result_dir, args=args, purify_method=purify_method)
    print("Train anndata: \n", train_adata)
    print("Test anndata: \n", test_adata)
    # Hand off to the chosen celltyping method.
    method_utils.run_pipeline(args, train_adata, test_adata, data_dir, result_dir)
| 41.617647 | 109 | 0.677032 |
c49b19da4180160b333ba71b17f93e31d23578f0 | 436 | py | Python | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from sklearn.preprocessing import StandardScaler
stand_demo()
| 25.647059 | 75 | 0.692661 |
c49cdbfcdbfea8b874ccc9f3311ef29e7510510a | 716 | py | Python | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
] | null | null | null | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
] | null | null | null | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
] | null | null | null | from prng.util.util import primitive_roots
import pytest
| 37.684211 | 112 | 0.544693 |
c49d07ec16361493f21d1cdf3590979db22f9935 | 383 | py | Python | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | 2 | 2021-09-06T22:13:12.000Z | 2021-11-22T08:50:04.000Z | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/xml-1-find-the-score/problem
import sys
import xml.etree.ElementTree as etree
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
| 23.9375 | 68 | 0.710183 |
c49d9514c95f15c6be6ba6695dcb54d27f071828 | 347 | py | Python | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 22 | 2020-01-03T17:32:00.000Z | 2021-11-07T09:31:44.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 10 | 2020-09-30T09:41:18.000Z | 2020-10-11T11:25:09.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
] | 25 | 2019-10-14T19:25:01.000Z | 2021-05-26T08:12:20.000Z | test = int(input())
while test > 0 :
n,k = map(int,input().split())
p = list(map(int,input().split()))
original = 0
later = 0
for i in p :
if i > k :
later += k
original += i
else :
later += i
original += i
print(original-later)
test -= 1 | 23.133333 | 39 | 0.414986 |
c49e67e8dbe87dd913b66006fd7f5daf6198c333 | 2,948 | py | Python | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | 1 | 2022-02-28T18:10:29.000Z | 2022-02-28T18:10:29.000Z | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | null | null | null | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 22:20:01 2014
@author: baki
"""
import shlex
from subprocess import Popen, PIPE
from .Log import Log
| 32.043478 | 119 | 0.557327 |
c49f689d742bf5ec9d22f74b83fe32c9c62c281f | 4,458 | py | Python | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
] | null | null | null | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
] | null | null | null | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
] | null | null | null | import os
import six
import copy
import pickle
import random
import logging
from scrapy.http import Request
from scrapy.exceptions import NotConfigured
from scrapy.commands.genspider import sanitize_module_name
from scrapy.spiders import CrawlSpider
from .utils import (
add_sample,
response_to_dict,
get_or_create_test_dir,
parse_request,
parse_object,
get_project_dir,
get_middlewares,
create_dir,
)
logger = logging.getLogger(__name__)
| 31.617021 | 77 | 0.593091 |
c4a06bb562106e2918ecce48527f9b40a6d8d42c | 2,735 | py | Python | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 1 | 2021-12-20T16:49:00.000Z | 2021-12-20T16:49:00.000Z | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 2 | 2020-11-20T04:51:16.000Z | 2021-06-16T17:02:35.000Z | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
] | 1 | 2020-11-20T04:46:17.000Z | 2020-11-20T04:46:17.000Z | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import os
import json
import requests
from collections import OrderedDict
from openpyxl import Workbook
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.styles import Color, PatternFill, Font, Border, Side
from openpyxl.styles import colors
from openpyxl.cell import Cell
from tqdm import tqdm
from glom import glom
| 35.064103 | 114 | 0.527971 |
c4a0dfed3531558a43bad867fbac20a6c63fe3e4 | 3,104 | py | Python | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
] | 1 | 2019-09-30T06:51:03.000Z | 2019-09-30T06:51:03.000Z | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
] | null | null | null | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
] | 1 | 2020-09-16T16:35:27.000Z | 2020-09-16T16:35:27.000Z | #!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
def lazify(obj):
"""
A function which ensures that `obj` is a LazyTensor.
If `obj` is a LazyTensor, this function does nothing.
If `obj` is a (normal) Tensor, this function wraps it with a `NonLazyTensor`.
"""
if torch.is_tensor(obj):
return NonLazyTensor(obj)
elif isinstance(obj, LazyTensor):
return obj
else:
raise TypeError("object of class {} cannot be made into a LazyTensor".format(obj.__class__.__name__))
__all__ = ["NonLazyTensor", "lazify"]
| 30.431373 | 111 | 0.634665 |
c4a0e5d601dd26ad6e285ee33bc0cea8cb5b622e | 1,956 | py | Python | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 20 | 2019-10-15T07:33:13.000Z | 2022-01-19T13:40:36.000Z | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 5 | 2019-02-01T23:31:27.000Z | 2021-12-03T06:55:58.000Z | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 8 | 2019-12-03T15:41:23.000Z | 2021-12-06T17:13:57.000Z | """
--- Day 19: Tractor Beam ---
https://adventofcode.com/2019/day/19
"""
from aocd import data
from aoc_wim.aoc2019 import IntComputer
from aoc_wim.zgrid import ZGrid
from aoc_wim.search import Bisect
import functools
if __name__ == "__main__":
print("populating 50x50 zgrid...")
grid = ZGrid()
x0 = 0
for y in range(50):
on = False
for x in range(x0, 50):
z = x + y * 1j
val = grid[z] = beam(z)
if not on and val:
on = True
x0 = x
if x0:
m = y / x0
if on and not val:
break
print("part a", sum(grid.values()))
grid.translate({0: ".", 1: "#"})
grid.draw()
print("initial gradient is approx -->", m)
print("refining gradient estimate -->", end=" ")
z = left_edge_of_beam(2000, gradient=m)
m = z.imag/z.real
print(m)
z = locate_square(beam, width=100, gradient_estimate=m)
print("part b", int(z.real)*10000 + int(z.imag))
| 24.759494 | 67 | 0.552147 |
c4a20cea738e338abf0c6eb0710a2bbf72908f18 | 378 | py | Python | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
] | null | null | null | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
] | 59 | 2020-02-05T03:09:43.000Z | 2020-04-23T19:29:58.000Z | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
] | null | null | null | import cv2
"""
checks if an image is blurry
returns True if blurry, False otherwise
""" | 19.894737 | 50 | 0.708995 |
c4a251a0e7e64524dd68b799ffbb2a257e20933b | 894 | py | Python | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main() | 21.804878 | 90 | 0.401566 |
c4a2a593c0a2d8ff337685d877ea3ffe9f0a0f35 | 2,689 | py | Python | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
] | 21 | 2020-04-24T01:14:30.000Z | 2021-11-26T09:44:00.000Z | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
] | null | null | null | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
] | 2 | 2020-05-18T11:43:17.000Z | 2020-06-19T13:13:14.000Z | """
Helper functions and utilities
"""
from datetime import datetime as dt
from mrcnn import visualize
import numpy as np
import os
import cv2
TIMESTAMP_FORMAT = "%d/%m/%Y %H:%M:%S"
def mask_to_rgb(mask):
"""
Converts a mask to RGB Format
"""
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])
return rgb_mask
def mask_to_outlined(mask):
"""
Converts a mask to RGB Format
"""
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])
# put edges over the top of the colours
for i in range(mask.shape[2]):
# Find the contour of the leaf
threshold = mask[:, :, i]
threshold[threshold != 0] = 255
_, contours, hierarchy = cv2.findContours(threshold.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Draw outline on mask
if len(contours) > 0:
cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
return rgb_mask | 25.855769 | 115 | 0.579026 |
c4a31e4a9faadb779ad5e3539b89e160045375e9 | 108 | py | Python | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 3 | 2021-07-19T09:46:01.000Z | 2022-03-07T13:51:25.000Z | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 43 | 2019-08-27T12:36:29.000Z | 2020-08-27T14:50:40.000Z | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
] | 7 | 2020-09-22T20:32:17.000Z | 2022-03-29T12:25:51.000Z | import abc
| 15.428571 | 38 | 0.712963 |
c4a3253a85318d51afd9e6db7f79225a1972648a | 630 | py | Python | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 318 | 2019-08-27T15:57:05.000Z | 2022-03-30T08:38:29.000Z | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 77 | 2019-09-17T11:48:38.000Z | 2022-03-13T17:26:56.000Z | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
] | 76 | 2019-08-30T20:29:40.000Z | 2022-03-30T09:14:36.000Z | import django.conf
settings = Settings()
| 27.391304 | 80 | 0.668254 |
c4a3547cec59bda0f54c29fe3708b9bf82715b42 | 544 | py | Python | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
] | 3 | 2020-12-27T20:32:14.000Z | 2021-09-02T08:59:34.000Z | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
] | 1 | 2021-05-09T21:44:42.000Z | 2022-03-01T22:36:53.000Z | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
] | 1 | 2021-05-10T22:55:41.000Z | 2021-05-10T22:55:41.000Z | import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
| 24.727273 | 62 | 0.693015 |
c4a3b7fd35e583f4df4df37c10b28021b5e84c76 | 184 | py | Python | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | 61.333333 | 63 | 0.923913 |