Dataset column schema (⌀ marks a nullable column):

| Column | Type | Range / length |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
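A minimal sketch of loading and inspecting rows with this schema, assuming the dump corresponds to a Hugging Face dataset; the dataset name below is a hypothetical placeholder, not taken from this file.

from datasets import load_dataset  # assumes the `datasets` package is available

# "example-org/python-code-dump" is a hypothetical placeholder identifier.
ds = load_dataset("example-org/python-code-dump", split="train", streaming=True)
for row in ds.take(2):
    # Each row pairs repository metadata with the raw file text in `content`.
    print(row["hexsha"], row["size"], row["max_stars_repo_path"], row["max_stars_count"])
    print(row["content"][:120])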
bc71cbe7f13c2f51663de7c1b18572924543ac36
| 26,868
|
py
|
Python
|
bkt/library/powerpoint/elements.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 12
|
2019-05-31T02:57:26.000Z
|
2022-03-26T09:40:50.000Z
|
bkt/library/powerpoint/elements.py
|
mrflory/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 27
|
2021-11-27T16:33:19.000Z
|
2022-03-27T17:47:26.000Z
|
bkt/library/powerpoint/elements.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 3
|
2019-06-12T10:59:20.000Z
|
2020-04-21T15:13:50.000Z
|
# -*- coding: utf-8 -*-
'''
Created on 02.11.2017
@author: fstallmann
'''
from __future__ import absolute_import
from collections import deque
import bkt
from bkt import dotnet
Drawing = dotnet.import_drawing()
from . import helpers as pplib
def button_get_label(self, index):
try:
return self.symbols[index][2]
except:
return "Zuletzt verwendet: Undefined"
def button_get_visible(self, index):
try:
return self.symbols[index] is not None
except:
return False
class LocpinGallery(bkt.ribbon.Gallery):
class PositionGallery(bkt.ribbon.Gallery):
# items: [label, position, reference]
# position: [left, top, width, height]
# values can be absolute or percentage
# reference: CONTENT / SLIDE / ABS
# values are converted according to reference
items = [
[u"Volle Flche", [ 0, 0, 1, 1], 'CONTENT'],
[u"2/3 Links", [ 0, 0, 2./3, 1], 'CONTENT'],
[u"2/3 Rechts", [1./3, 0, 2./3, 1], 'CONTENT'],
[u"1/2 Links", [ 0, 0, .5, 1], 'CONTENT'],
[u"1/2 Mitte", [.25, 0, .5, 1], 'CONTENT'],
[u"1/2 Rechts", [ .5, 0, .5, 1], 'CONTENT'],
[u"1/3 Links", [ 0, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Mitte", [1./3, 0, 1./3, 1], 'CONTENT'],
[u"1/3 Rechts", [2./3, 0, 1./3, 1], 'CONTENT'],
[u"1/6 Oben", [ 0, 0, 1, 1./6], 'CONTENT'],
[u"1/6 Unten", [ 0, 5./6, 1, 1./6], 'CONTENT']
]
def on_action_indexed(self, selected_item, index, context, **kwargs):
''' reposition shapes according to the settings of the clicked element '''
item = self.items[index]
position = item[1]
reference = item[2]
#self.change_position(selection, shapes, item[1])
# reference size
if reference == 'CONTENT':
ref_left,ref_top,ref_width,ref_height = pplib.slide_content_size(context.slide)
else: # SLIDE / ABS
page_setup = context.presentation.PageSetup
ref_left,ref_top = 0, 0
ref_width,ref_height = page_setup.SlideWidth, page_setup.SlideHeight
# target size
left,top,width,height = self.rect_from_definition(position, ref_frame=[ref_left,ref_top,ref_width, ref_height])
frame = pplib.BoundingFrame.from_rect(left, top, width, height)
if 'on_position_change' in self._callbacks:
if context:
return context.invoke_callback(self._callbacks['on_position_change'], target_frame=frame, **kwargs)
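# Illustrative sketch, not part of the original file: `rect_from_definition` is called
# above but not included in this excerpt. Based on the class comments (position values
# are either absolute or fractions of the reference frame), a minimal standalone version
# could look like this; the name and the fraction heuristic are the editor's assumptions.
def rect_from_definition_sketch(position, ref_frame):
    ref_left, ref_top, ref_width, ref_height = ref_frame
    def scale(value, size, offset=0):
        # values with magnitude <= 1 are read as fractions of the reference frame
        return offset + value * size if abs(value) <= 1 else value
    left = scale(position[0], ref_width, ref_left)
    top = scale(position[1], ref_height, ref_top)
    width = scale(position[2], ref_width)
    height = scale(position[3], ref_height)
    return left, top, width, height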
# def get_enabled(self, shapes):
# return True
# def get_item_label(self, index):
# item = self.items[index]
# return "%s" % getattr(NumberedShapes, 'label_' + item['label'])[index%self.columns]
def get_item_image(self, index, presentation):
''' creates an item image with target area according to settings in the specified item '''
# retrieve item-settings
item = self.items[index]
return self.create_image(item[1], item[2], presentation)
## userdefined area
| 42.512658
| 242
| 0.628703
|
bc73adc709a1a6dd422dd898ada82a431739cb7e
| 34,680
|
py
|
Python
|
sc2/unit.py
|
guliverza/AdditionalPylons
|
37336dcd1678c6cdfa22d881c2178ba65cb1fd61
|
[
"MIT"
] | null | null | null |
sc2/unit.py
|
guliverza/AdditionalPylons
|
37336dcd1678c6cdfa22d881c2178ba65cb1fd61
|
[
"MIT"
] | null | null | null |
sc2/unit.py
|
guliverza/AdditionalPylons
|
37336dcd1678c6cdfa22d881c2178ba65cb1fd61
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional, Set, Tuple, Union, TYPE_CHECKING
from .cache import property_immutable_cache, property_mutable_cache
from .constants import (
transforming,
IS_STRUCTURE,
IS_LIGHT,
IS_ARMORED,
IS_BIOLOGICAL,
IS_MECHANICAL,
IS_MASSIVE,
IS_PSIONIC,
UNIT_BATTLECRUISER,
UNIT_ORACLE,
TARGET_GROUND,
TARGET_AIR,
TARGET_BOTH,
IS_SNAPSHOT,
IS_VISIBLE,
IS_MINE,
IS_ENEMY,
IS_CLOAKED,
IS_REVEALED,
CAN_BE_ATTACKED,
IS_CARRYING_MINERALS,
IS_CARRYING_VESPENE,
IS_CARRYING_RESOURCES,
IS_ATTACKING,
IS_PATROLLING,
IS_GATHERING,
IS_RETURNING,
IS_COLLECTING,
IS_CONSTRUCTING_SCV,
IS_REPAIRING,
IS_DETECTOR,
UNIT_PHOTONCANNON,
UNIT_COLOSSUS,
)
from .data import Alliance, Attribute, CloakState, DisplayType, Race, TargetType, warpgate_abilities, TargetType, Target
from .ids.ability_id import AbilityId
from .ids.buff_id import BuffId
from .ids.upgrade_id import UpgradeId
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
from .unit_command import UnitCommand
warnings.simplefilter("once")
if TYPE_CHECKING:
from .bot_ai import BotAI
from .game_data import AbilityData
def target_in_range(self, target: Unit, bonus_distance: Union[int, float] = 0) -> bool:
""" Checks if the target is in range.
Includes the target's radius when calculating distance to target.
:param target:
:param bonus_distance: """
# TODO: Fix this because immovable units (sieged tank, planetary fortress etc.) have a little lower range than this formula
if self.can_attack_ground and not target.is_flying:
unit_attack_range = self.ground_range
elif self.can_attack_air and (target.is_flying or target.type_id == UNIT_COLOSSUS):
unit_attack_range = self.air_range
else:
return False
return (
self._bot_object._distance_squared_unit_to_unit(self, target)
<= (self.radius + target.radius + unit_attack_range + bonus_distance) ** 2
)
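# Illustrative example, not part of the original file: the check above compares the
# squared center-to-center distance against (attacker radius + target radius + weapon
# range + bonus) squared. With made-up numbers (not taken from the game data):
def _in_range_example():
    radius_attacker, radius_target, ground_range, bonus = 0.5, 0.5, 5.0, 0.0
    max_center_distance = radius_attacker + radius_target + ground_range + bonus  # 6.0
    distance_squared = 5.9 ** 2  # 34.81
    return distance_squared <= max_center_distance ** 2  # True, since 34.81 <= 36.0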
def in_ability_cast_range(
self, ability_id: AbilityId, target: Union[Unit, Point2], bonus_distance: float = 0
) -> bool:
""" Test if a unit is able to cast an ability on the target without checking ability cooldown (like stalker blink) or if ability is made available through research (like HT storm).
:param ability_id:
:param target:
:param bonus_distance: """
cast_range = self._bot_object._game_data.abilities[ability_id.value]._proto.cast_range
assert cast_range > 0, f"Checking for an ability ({ability_id}) that has no cast range"
ability_target_type = self._bot_object._game_data.abilities[ability_id.value]._proto.target
# For casting abilities that target other units, like transfuse, feedback, snipe, yamato
if ability_target_type in {Target.Unit.value, Target.PointOrUnit.value} and isinstance(target, Unit):
return (
self._bot_object._distance_squared_unit_to_unit(self, target)
<= (cast_range + self.radius + target.radius + bonus_distance) ** 2
)
# For casting abilities on the ground, like queen creep tumor, ravager bile, HT storm
if ability_target_type in {Target.Point.value, Target.PointOrUnit.value} and isinstance(
target, (Point2, tuple)
):
return (
self._bot_object._distance_pos_to_pos(self.position_tuple, target)
<= cast_range + self.radius + bonus_distance
)
return False
# TODO: a function that checks if this unit is facing another unit
def is_facing_unit(self, other_unit: Unit, angle_error: float = 1e-3) -> bool:
"""
Function not completed yet
:param other_unit:
:param angle_error:
"""
pass
# PROPERTIES BELOW THIS COMMENT ARE NOT POPULATED FOR SNAPSHOTS
# PROPERTIES BELOW THIS COMMENT ARE NOT POPULATED FOR ENEMIES
def is_using_ability(self, abilities: Union[AbilityId, Set[AbilityId]]) -> bool:
""" Check if the unit is using one of the given abilities.
Only works for own units. """
if not self.orders:
return False
if isinstance(abilities, AbilityId):
abilities = {abilities}
return self.orders[0].ability.id in abilities
# Unit functions
def has_buff(self, buff: BuffId) -> bool:
""" Checks if unit has buff 'buff'. """
assert isinstance(buff, BuffId), f"{buff} is no BuffId"
return buff in self.buffs
def train(self, unit: UnitTypeId, queue: bool = False) -> UnitCommand:
""" Orders unit to train another 'unit'.
Usage: self.actions.append(COMMANDCENTER.train(SCV))
:param unit:
:param queue: """
return self(self._bot_object._game_data.units[unit.value].creation_ability.id, queue=queue)
def build(self, unit: UnitTypeId, position: Union[Point2, Point3] = None, queue: bool = False) -> UnitCommand:
""" Orders unit to build another 'unit' at 'position'.
Usage: self.actions.append(SCV.build(COMMANDCENTER, position))
:param unit:
:param position:
:param queue:
"""
return self(self._bot_object._game_data.units[unit.value].creation_ability.id, target=position, queue=queue)
def research(self, upgrade: UpgradeId, queue: bool = False) -> UnitCommand:
""" Orders unit to research 'upgrade'.
Requires UpgradeId to be passed instead of AbilityId.
:param upgrade:
:param queue:
"""
return self(self._bot_object._game_data.upgrades[upgrade.value].research_ability.id, queue=queue)
def warp_in(self, unit: UnitTypeId, position: Union[Point2, Point3]) -> UnitCommand:
""" Orders Warpgate to warp in 'unit' at 'position'.
:param unit:
:param queue:
"""
normal_creation_ability = self._bot_object._game_data.units[unit.value].creation_ability.id
return self(warpgate_abilities[normal_creation_ability], target=position)
def attack(self, target: Union[Unit, Point2, Point3], queue: bool = False) -> UnitCommand:
""" Orders unit to attack. Target can be a Unit or Point2.
Attacking a position will make the unit move there and attack everything on its way.
:param target:
:param queue:
"""
return self(AbilityId.ATTACK, target=target, queue=queue)
def gather(self, target: Unit, queue: bool = False) -> UnitCommand:
""" Orders a unit to gather minerals or gas.
'Target' must be a mineral patch or a gas extraction building.
:param target:
:param queue:
"""
return self(AbilityId.HARVEST_GATHER, target=target, queue=queue)
def return_resource(self, target: Unit = None, queue: bool = False) -> UnitCommand:
""" Orders the unit to return resource. Does not need a 'target'.
:param target:
:param queue:
"""
return self(AbilityId.HARVEST_RETURN, target=target, queue=queue)
def move(self, position: Union[Point2, Point3], queue: bool = False) -> UnitCommand:
""" Orders the unit to move to 'position'.
Target can be a Unit (to follow that unit) or Point2.
:param position:
:param queue:
"""
return self(AbilityId.MOVE_MOVE, target=position, queue=queue)
def scan_move(self, *args, **kwargs) -> UnitCommand:
""" Deprecated: This ability redirects to 'AbilityId.ATTACK' """
return self(AbilityId.SCAN_MOVE, *args, **kwargs)
def hold_position(self, queue: bool = False) -> UnitCommand:
""" Orders a unit to stop moving. It will not move until it gets new orders.
:param queue:
"""
return self(AbilityId.HOLDPOSITION, queue=queue)
def stop(self, queue: bool = False) -> UnitCommand:
""" Orders a unit to stop, but can start to move on its own
if it is attacked, enemy unit is in range or other friendly
units need the space.
:param queue:
"""
return self(AbilityId.STOP, queue=queue)
def patrol(self, position: Union[Point2, Point3], queue: bool = False) -> UnitCommand:
""" Orders a unit to patrol between position it has when the command starts and the target position.
Can be queued up to seven patrol points. If the last point is the same as the starting
point, the unit will patrol in a circle.
:param position:
:param queue:
"""
return self(AbilityId.PATROL, target=position, queue=queue)
def repair(self, repair_target: Unit, queue: bool = False) -> UnitCommand:
""" Order an SCV or MULE to repair.
:param repair_target:
:param queue:
"""
return self(AbilityId.EFFECT_REPAIR, target=repair_target, queue=queue)
def __hash__(self):
return self.tag
def __eq__(self, other):
try:
return self.tag == other.tag
except:
return False
def __call__(self, ability, target=None, queue: bool = False):
return UnitCommand(ability, self, target=target, queue=queue)
| 37.37069
| 188
| 0.647953
|
bc73bb29476960582e88da68ad24bf687cb2dd0e
| 65
|
py
|
Python
|
healthy_candies/load/__init__.py
|
striantafyllouEPFL/healthy-candies
|
fc7d9e05d54ba207e15d997acea44ff0bf9edb13
|
[
"BSD-2-Clause"
] | 1
|
2018-11-04T21:46:29.000Z
|
2018-11-04T21:46:29.000Z
|
healthy_candies/load/__init__.py
|
striantafyllouEPFL/healthy-candies
|
fc7d9e05d54ba207e15d997acea44ff0bf9edb13
|
[
"BSD-2-Clause"
] | null | null | null |
healthy_candies/load/__init__.py
|
striantafyllouEPFL/healthy-candies
|
fc7d9e05d54ba207e15d997acea44ff0bf9edb13
|
[
"BSD-2-Clause"
] | null | null | null |
from .load import load_data, NUTRI_COLS, load_clean_rel_to_nutri
| 32.5
| 64
| 0.861538
|
bc74af5f799cda766e2f5e64ed34c0e410d241a2
| 1,402
|
py
|
Python
|
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
simglucose/sensor/cgm.py
|
mia-jingyi/simglucose
|
a90bd8750fce362be91668ed839b3b252bc0d58d
|
[
"MIT"
] | null | null | null |
# from .noise_gen import CGMNoiseGenerator
from .noise_gen import CGMNoise
import pandas as pd
import logging
logger = logging.getLogger(__name__)
if __name__ == '__main__':
pass
| 26.961538
| 72
| 0.624108
|
bc7580cf9a167be668d4a125b31c0b817e88571f
| 5,731
|
py
|
Python
|
var/spack/repos/builtin/packages/thepeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-10-20T08:57:12.000Z
|
2020-10-20T08:57:12.000Z
|
var/spack/repos/builtin/packages/thepeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2022-03-09T09:15:39.000Z
|
2022-03-09T09:15:42.000Z
|
var/spack/repos/builtin/packages/thepeg/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-01-05T20:00:52.000Z
|
2021-01-05T20:00:52.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
| 55.105769
| 97
| 0.716105
|
bc7679c1660ef9ceb329970fcb693da9107e9ae5
| 9,258
|
py
|
Python
|
vmca/python/get_cert.py
|
wfu8/lightwave
|
cf6a7417cd9807bfcf9bcd99c43c5b2eecf2d298
|
[
"Apache-2.0"
] | 357
|
2015-04-20T00:16:30.000Z
|
2022-03-17T05:34:09.000Z
|
vmca/python/get_cert.py
|
wfu8/lightwave
|
cf6a7417cd9807bfcf9bcd99c43c5b2eecf2d298
|
[
"Apache-2.0"
] | 38
|
2015-11-19T05:20:53.000Z
|
2022-03-31T07:21:59.000Z
|
vmca/python/get_cert.py
|
wfu8/lightwave
|
cf6a7417cd9807bfcf9bcd99c43c5b2eecf2d298
|
[
"Apache-2.0"
] | 135
|
2015-04-21T15:23:21.000Z
|
2022-03-30T11:46:36.000Z
|
#!/usr/bin/env python
#
# Copyright 2012-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, without
# warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Helper function that gets certificates from VMWare Certificate Authority
# More details. If this module can be used as a main program, include usage information.
""" certool.py : This is the standard library function for
cloudVM/vcenterwindows first boot to integrate with
VMCA Certificate Generation.
if not running under a cloudVM, then it is assumed that
the OS.Environment has the following defined.
VMWARE_SKIP_VISL = True
system.urlhostname
vmdir.ldu-guid
system.hostname.type
vmca.cert.password
vmca.cert.dir
"""
__copyright__ = "Copyright 2012, VMware Inc."
__version__ = 0.1
__author__ = "VMware, Inc."
import logging
import os
import subprocess
def main():
""" Example Code Usage """
testComponent = 'sso'
VmcaCertool = CerTool()
VmcaCertool.GenCert(testComponent)
print 'Generated a pfx file : %s' % VmcaCertool.GetPfxFileName()
print 'Using Password : %s' % VmcaCertool.GetPassword()
if __name__ == "__main__":
main()
| 34.935849
| 89
| 0.625081
|
bc783a7352b8476e222dafa470f894420847e079
| 22,670
|
py
|
Python
|
sdk/python/pulumi_gcp/securitycenter/notification_config.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/securitycenter/notification_config.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/securitycenter/notification_config.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['NotificationConfigArgs', 'NotificationConfig']
class NotificationConfig(pulumi.CustomResource):
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
organization: Optional[pulumi.Input[str]] = None,
pubsub_topic: Optional[pulumi.Input[str]] = None,
streaming_config: Optional[pulumi.Input[pulumi.InputType['NotificationConfigStreamingConfigArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationConfigArgs.__new__(NotificationConfigArgs)
if config_id is None and not opts.urn:
raise TypeError("Missing required property 'config_id'")
__props__.__dict__["config_id"] = config_id
__props__.__dict__["description"] = description
if organization is None and not opts.urn:
raise TypeError("Missing required property 'organization'")
__props__.__dict__["organization"] = organization
if pubsub_topic is None and not opts.urn:
raise TypeError("Missing required property 'pubsub_topic'")
__props__.__dict__["pubsub_topic"] = pubsub_topic
if streaming_config is None and not opts.urn:
raise TypeError("Missing required property 'streaming_config'")
__props__.__dict__["streaming_config"] = streaming_config
__props__.__dict__["name"] = None
__props__.__dict__["service_account"] = None
super(NotificationConfig, __self__).__init__(
'gcp:securitycenter/notificationConfig:NotificationConfig',
resource_name,
__props__,
opts)
| 44.714004
| 161
| 0.661226
|
bc7927d63159c18366a26d654024afa30b73946a
| 1,893
|
py
|
Python
|
malib/agents/tabular/q_learning/base_tabular_agent.py
|
wwxFromTju/malib
|
7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2
|
[
"MIT"
] | 6
|
2021-05-19T10:25:36.000Z
|
2021-12-27T03:30:33.000Z
|
malib/agents/tabular/q_learning/base_tabular_agent.py
|
wwxFromTju/malib
|
7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2
|
[
"MIT"
] | 1
|
2021-05-29T04:51:37.000Z
|
2021-05-30T06:18:10.000Z
|
malib/agents/tabular/q_learning/base_tabular_agent.py
|
wwxFromTju/malib
|
7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2
|
[
"MIT"
] | 1
|
2021-06-30T10:53:03.000Z
|
2021-06-30T10:53:03.000Z
|
from abc import ABCMeta, abstractmethod
import numpy as np
| 27.434783
| 88
| 0.590597
|
bc796051d35cf6cd654ce6528d4ed35ac535ec1b
| 1,523
|
py
|
Python
|
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
290.word-pattern.py
|
Lonitch/hackerRank
|
84991b8340e725422bc47eec664532cc84a3447e
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=290 lang=python3
#
# [290] Word Pattern
#
# https://leetcode.com/problems/word-pattern/description/
#
# algorithms
# Easy (35.86%)
# Likes: 825
# Dislikes: 113
# Total Accepted: 164K
# Total Submissions: 455.9K
# Testcase Example: '"abba"\n"dog cat cat dog"'
#
# Given a pattern and a string str, find if str follows the same pattern.
#
# Here follow means a full match, such that there is a bijection between a
# letter in pattern and a non-empty word in str.
#
# Example 1:
#
#
# Input: pattern = "abba", str = "dog cat cat dog"
# Output: true
#
# Example 2:
#
#
# Input:pattern = "abba", str = "dog cat cat fish"
# Output: false
#
# Example 3:
#
#
# Input: pattern = "aaaa", str = "dog cat cat dog"
# Output: false
#
# Example 4:
#
#
# Input: pattern = "abba", str = "dog dog dog dog"
# Output: false
#
# Notes:
# You may assume pattern contains only lowercase letters, and str contains
# lowercase letters that may be separated by a single space.
#
#
# @lc code=start
from collections import defaultdict
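# Illustrative sketch, not part of the original file: the solution body is not included
# in this excerpt. A standard two-way mapping (bijection) check, not necessarily the
# author's approach, could look like this:
def word_pattern_sketch(pattern: str, s: str) -> bool:
    words = s.split()
    if len(pattern) != len(words):
        return False
    letter_to_word, word_to_letter = {}, {}
    for letter, word in zip(pattern, words):
        if letter_to_word.setdefault(letter, word) != word:
            return False
        if word_to_letter.setdefault(word, letter) != letter:
            return False
    return True
# word_pattern_sketch("abba", "dog cat cat dog")  -> True
# word_pattern_sketch("abba", "dog cat cat fish") -> False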
# @lc code=end
| 22.397059
| 74
| 0.61392
|
bc79d0b1cabca396208cd2aeb132525a435758f4
| 705
|
py
|
Python
|
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
s1_getting_started/exercise_files/final_exercise/model.py
|
jaschn/dtu_mlops
|
59f404cffc756739433b5ccebb46ef6bfd467436
|
[
"Apache-2.0"
] | null | null | null |
from torch import nn
| 32.045455
| 97
| 0.438298
|
bc7aed95070ea2718e44219b9db81ddfb927929e
| 5,036
|
py
|
Python
|
musket_core/tests/coders_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | 16
|
2019-09-25T14:58:45.000Z
|
2020-04-04T22:03:27.000Z
|
musket_core/tests/coders_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | 17
|
2019-06-28T06:46:31.000Z
|
2020-01-23T10:01:12.000Z
|
musket_core/tests/coders_test.py
|
dreamflyer/musket_core
|
1bdf1b4715a3b5c63bf687799d7b977fdf49053f
|
[
"MIT"
] | 2
|
2019-11-22T15:09:18.000Z
|
2019-12-17T03:17:25.000Z
|
import unittest
from musket_core import coders
import numpy as np
import pandas as pd
import os
import math
fl=__file__
fl=os.path.dirname(fl)
| 37.029412
| 86
| 0.538721
|
bc7b31007719919e0de3183e896e2da210eb63a7
| 1,706
|
py
|
Python
|
manage.py
|
isijara/zulip
|
403f4dafcc71369f3b1143b9f7073cd5d76bf357
|
[
"Apache-2.0"
] | 1
|
2019-04-14T20:31:55.000Z
|
2019-04-14T20:31:55.000Z
|
manage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 7
|
2020-09-06T14:54:30.000Z
|
2022-02-10T18:51:14.000Z
|
manage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 9
|
2019-11-04T18:59:29.000Z
|
2022-03-22T17:46:37.000Z
|
#!/usr/bin/env python3
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
if __name__ == "__main__":
if 'posix' in os.name and os.geteuid() == 0:
print("manage.py should not be run as root. Use `su zulip` to drop root.")
sys.exit(1)
if (os.access('/etc/zulip/zulip.conf', os.R_OK) and not
os.access('/etc/zulip/zulip-secrets.conf', os.R_OK)):
# The best way to detect running manage.py as another user in
# production before importing anything that would require that
# access is to check for access to /etc/zulip/zulip.conf (in
# which case it's a production server, not a dev environment)
# and lack of access for /etc/zulip/zulip-secrets.conf (which
# should be only readable by root and zulip)
print("Error accessing Zulip secrets; manage.py in production must be run as the zulip user.")
sys.exit(1)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from scripts.lib.zulip_tools import log_management_command
log_management_command(" ".join(sys.argv), settings.MANAGEMENT_LOG_PATH)
os.environ.setdefault("PYTHONSTARTUP", os.path.join(BASE_DIR, "scripts/lib/pythonrc.py"))
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
try:
execute_from_command_line(sys.argv)
except CommandError as e:
print(e, file=sys.stderr)
sys.exit(1)
| 42.65
| 102
| 0.694607
|
bc7b521791f08dc13fece1c31003d055797c5819
| 2,385
|
py
|
Python
|
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
core/scripts/fetch_instructions_specs.py
|
merwaaan/mr.system
|
0b3ff1b1fd726c6fd525a3f03f361dcac678344a
|
[
"MIT"
] | null | null | null |
import json, requests
from bs4 import BeautifulSoup
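# Illustrative sketch, not part of the original file: `fetch` and `parse_tables` are
# referenced below but not included in this excerpt. The URL below is a hypothetical
# placeholder; a minimal fetch built on the imports above could look like this:
OPCODE_PAGE_URL = 'https://example.org/z80-opcodes.html'  # placeholder, not the real URL

def fetch():
    """Download the opcode page and return parsed HTML, or None on failure."""
    response = requests.get(OPCODE_PAGE_URL, timeout=30)
    if response.status_code != 200:
        return None
    return BeautifulSoup(response.text, 'html.parser')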
if __name__ == '__main__':
"""
This script fetches the contents of a webpage that contains
nicely formatted data about the Z80 opcodes and outputs it
to JSON.
"""
page = fetch()
if page is not None:
opcodes = parse_tables(page)
with open('opcodes.json', 'w') as output:
json.dump(opcodes, output, indent=2)
| 24.84375
| 107
| 0.607547
|
bc7c42367a8432fba7810ae50ee93f6f9fc12d32
| 2,516
|
py
|
Python
|
unittests/tools/test_intsights_parser.py
|
M-Rod101/django-DefectDojo
|
7b09a00b1a526abaf40455c2ddec16aaa06b16e2
|
[
"BSD-3-Clause"
] | 249
|
2016-09-06T21:04:40.000Z
|
2018-01-19T15:59:44.000Z
|
unittests/tools/test_intsights_parser.py
|
OWASP/django-DefectDojo
|
c101e47b294863877cd68a82d0cc60f8017b45b1
|
[
"BSD-3-Clause"
] | 255
|
2016-09-06T21:36:37.000Z
|
2018-01-19T19:57:57.000Z
|
unittests/tools/test_intsights_parser.py
|
M-Rod101/django-DefectDojo
|
7b09a00b1a526abaf40455c2ddec16aaa06b16e2
|
[
"BSD-3-Clause"
] | 152
|
2016-09-06T21:04:54.000Z
|
2018-01-18T08:52:24.000Z
|
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.intsights.parser import IntSightsParser
| 38.121212
| 102
| 0.677663
|
bc7fdbab2e2a6960b77a8cd250963e5c2c2a372b
| 5,046
|
py
|
Python
|
tools/testutils.py
|
sktollman/p4c
|
380830f6c26135d1d65e1312e3ba2da628c18145
|
[
"Apache-2.0"
] | 1
|
2019-01-01T21:46:03.000Z
|
2019-01-01T21:46:03.000Z
|
tools/testutils.py
|
cslev/p4c
|
008f01ebc4bc0fcada4e674e9916b156427512ca
|
[
"Apache-2.0"
] | null | null | null |
tools/testutils.py
|
cslev/p4c
|
008f01ebc4bc0fcada4e674e9916b156427512ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
# Copyright 2018 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Defines helper functions for a general testing framework. Used by multiple
Python testing scripts in the backends folder."""
from __future__ import print_function
import subprocess
from subprocess import Popen
from threading import Timer
import sys
import os
TIMEOUT = 10 * 60
SUCCESS = 0
FAILURE = 1
SKIPPED = 2 # used occasionally to indicate that a test was not executed
def is_err(p4filename):
""" True if the filename represents a p4 program that should fail. """
return "_errors" in p4filename
def report_err(file, *message):
""" Write message to given file, report to stderr if verbose """
print("***", file=sys.stderr, *message)
if (file and file != sys.stderr):
err_file = open(file, "a+")
print("***", file=err_file, *message)
err_file.close()
def report_output(file, verbose, *message):
""" Write message to given file, report to stdout if verbose """
if (verbose):
print(file=sys.stdout, *message)
if (file and file != sys.stdout):
out_file = open(file, "a+")
print("", file=out_file, *message)
out_file.close()
def byte_to_hex(byteStr):
""" Convert byte sequences to a hex string. """
return ''.join(["%02X " % ord(x) for x in byteStr]).strip()
def hex_to_byte(hexStr):
""" Convert hex strings to bytes. """
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return ''.join(bytes)
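# Quick round-trip example for the two helpers above (Python 2 byte-string semantics):
#   byte_to_hex("\x0a\xff") -> "0A FF"
#   hex_to_byte("0A FF")    -> "\n\xff"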
def compare_pkt(outputs, expected, received):
""" Compare two given byte sequences and check if they are the same.
Report errors if this is not the case. """
received = ''.join(byte_to_hex(str(received)).split()).upper()
expected = ''.join(expected.split()).upper()
if len(received) < len(expected):
report_err(outputs["stderr"], "Received packet too short",
len(received), "vs", len(expected))
return FAILURE
for i in range(0, len(expected)):
if expected[i] == "*":
continue
if expected[i] != received[i]:
report_err(outputs["stderr"], "Received packet ", received)
report_err(outputs["stderr"], "Packet different at position", i,
": expected", expected[i], ", received", received[i])
report_err(outputs["stderr"], "Expected packet ", expected)
return FAILURE
return SUCCESS
def open_process(verbose, args, outputs):
""" Run the given arguments as a subprocess. Time out after TIMEOUT
seconds and report failures or stdout. """
report_output(outputs["stdout"],
verbose, "Writing", args)
proc = None
if outputs["stderr"] is not None:
try:
proc = Popen(args, stdout=subprocess.PIPE, shell=True,
stdin=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
except OSError as e:
report_err(outputs["stderr"], "Failed executing: ", e)
if proc is None:
# Never even started
report_err(outputs["stderr"], "Process failed to start")
return proc
def check_root():
""" This function returns False if the user does not have root privileges.
Caution: Only works on Unix systems """
return (os.getuid() == 0)
| 33.865772
| 78
| 0.630202
|
bc8027a9a53c2f0f832850a598757b1b43c5255c
| 6,237
|
py
|
Python
|
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | null | null | null |
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | null | null | null |
AlgorithmsAndDataStructures/mod2/Heap.py
|
BootyAss/bmstu
|
bea202cbdff159d3840335b2a2a5c3bd632a7393
|
[
"FSFAP"
] | 1
|
2021-09-15T18:39:33.000Z
|
2021-09-15T18:39:33.000Z
|
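# Illustrative sketch, not part of the original file: the Heap class used below is not
# included in this excerpt. A minimal dict-backed stand-in that satisfies the
# add/set/delete/search/min/max/extract/print commands could look like this; it is not
# a real binary heap and not the author's implementation.
class Heap:
    def __init__(self):
        self.data = {}
    def add(self, key, value):
        if key in self.data:
            raise Exception('key already exists')
        self.data[key] = value
    def set(self, key, value):
        if key not in self.data:
            raise Exception('no such key')
        self.data[key] = value
    def delete(self, key):
        if key not in self.data:
            raise Exception('no such key')
        del self.data[key]
    def search(self, key):
        print('found' if key in self.data else 'not found')
    def min(self):
        key = min(self.data)  # raises on an empty heap, reported as 'error' below
        print(key, self.data[key])
    def max(self):
        key = max(self.data)
        print(key, self.data[key])
    def extract(self):
        key = min(self.data)
        print(key, self.data.pop(key))
    def print(self):
        print(sorted(self.data.items()))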
cycle = True
heap = Heap()
while cycle:
try:
line = input()
cmd = line.split(' ', 2)
try:
if len(cmd) == 1 and cmd[0] == '':
continue
if len(cmd) == 2 and cmd[0] == '' and cmd[1] == '':
continue
if cmd[0] == 'add':
heap.add(int(cmd[1]), cmd[2])
elif cmd[0] == 'set':
heap.set(int(cmd[1]), cmd[2])
elif cmd[0] == 'delete':
heap.delete(int(cmd[1]))
elif cmd[0] == 'search':
heap.search(int(cmd[1]))
elif cmd[0] == 'min':
heap.min()
elif cmd[0] == 'max':
heap.max()
elif cmd[0] == 'extract':
heap.extract()
elif cmd[0] == 'print':
heap.print()
else:
raise(Exception)
except Exception:
print('error')
continue
except Exception:
cycle = False
| 25.048193
| 99
| 0.467372
|
bc807e3864743112b7b85584b7afbab826c8463a
| 2,332
|
py
|
Python
|
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | 1
|
2021-01-27T03:20:45.000Z
|
2021-01-27T03:20:45.000Z
|
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | null | null | null |
django_comments_xtd/tests/test_api_views.py
|
Boondockers-Welcome/django-comments-xtd
|
8edd68350803bfc351345820ccc4289077918e91
|
[
"BSD-2-Clause"
] | 1
|
2020-03-24T21:28:31.000Z
|
2020-03-24T21:28:31.000Z
|
from __future__ import unicode_literals
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIRequestFactory, force_authenticate
from django_comments_xtd import django_comments
from django_comments_xtd.api.views import CommentCreate
from django_comments_xtd.tests.models import Article, Diary
request_factory = APIRequestFactory()
| 37.612903
| 77
| 0.677959
|
bc82ef8de803f7a119ffe50ddde0e017fafeacd2
| 16,041
|
py
|
Python
|
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
momentumopt/python/momentumopt/kinoptpy/momentum_kinematics_optimizer.py
|
ferdinand-wood/kino_dynamic_opt
|
ba6bef170819c55d1d26e40af835a744d1ae663f
|
[
"BSD-3-Clause"
] | null | null | null |
'''
@file momentum_kinematics_optimizer.py
@package momentumopt
@author Brahayam Ponton (brahayam.ponton@tuebingen.mpg.de)
@license License BSD-3-Clause
@copyright Copyright (c) 2019, New York University and Max Planck Gesellschaft.
@date 2019-10-08
'''
import os
import numpy as np
from momentumopt.kinoptpy.qp import QpSolver
from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics
from pinocchio import RobotWrapper
import pinocchio as se3
from pinocchio.utils import zero
from pymomentum import *
from momentumopt.quadruped.quadruped_wrapper import QuadrupedWrapper
from momentumopt.kinoptpy.min_jerk_traj import *
from pymomentum import \
PlannerVectorParam_KinematicDefaultJointPositions, \
PlannerIntParam_NumTimesteps, \
PlannerDoubleParam_TimeStep
| 41.342784
| 130
| 0.625335
|
bc831b7e95388ec378c7efd07e50c5540c59f285
| 435
|
py
|
Python
|
gullveig/web/__init__.py
|
Addvilz/gullveig
|
6ac5e66062c1b5ea8ad7c66f69be9e3d99ac0825
|
[
"Apache-2.0"
] | 8
|
2020-08-24T14:53:14.000Z
|
2021-03-16T03:58:01.000Z
|
gullveig/web/__init__.py
|
Addvilz/gullveig
|
6ac5e66062c1b5ea8ad7c66f69be9e3d99ac0825
|
[
"Apache-2.0"
] | 6
|
2020-08-25T13:19:02.000Z
|
2021-02-21T21:55:34.000Z
|
gullveig/web/__init__.py
|
Addvilz/gullveig
|
6ac5e66062c1b5ea8ad7c66f69be9e3d99ac0825
|
[
"Apache-2.0"
] | null | null | null |
import logging
from gullveig import bootstrap_default_logger
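# Illustrative sketch, not part of the original file: `_configure_default_web_logger` is
# called below but its definition is not included in this excerpt. The exact signature of
# bootstrap_default_logger is an assumption here, not confirmed by this source.
def _configure_default_web_logger():
    bootstrap_default_logger(logging.getLogger('gullveig.web'))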
# Configure default logging
_configure_default_web_logger()
| 22.894737
| 52
| 0.795402
|
bc84db3b22d112c3d8e47827ed44b0cdb57ad39d
| 1,482
|
py
|
Python
|
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
jupyterhub_http_authenticator/httpauthenticator.py
|
clockfly/jupterhub_http_authenticator
|
88185e4677836129cd1bd15af368b7070103b1bf
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import urllib
import os
import jupyterhub
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from traitlets import Unicode
from jupyterhub.auth import Authenticator
from tornado import gen
| 23.52381
| 68
| 0.524966
|
bc85621d3dca3de545ceeff3a1f12920ad9784b4
| 9,912
|
py
|
Python
|
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | 12
|
2019-03-30T16:43:53.000Z
|
2022-03-21T19:49:12.000Z
|
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | null | null | null |
src/lr_find.py
|
KushajveerSingh/fastai_without_fastai
|
9a7c71b92c49be1e05858dc0e7ce63901c3c1bd2
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# NOT -> ParameterModule
# NOT -> children_and_parameters
# NOT -> flatten_model
# NOT -> lr_range
# NOT -> scheduling functions
# NOT -> SmoothenValue
# YES -> lr_find
# NOT -> plot_lr_find
# NOT TO BE MODIFIED
# NOT TO BE MODIFIED
# To be used to flatten_model
def children_and_parameters(m:nn.Module):
"Return the children of `m` and its direct parameters not registered in modules."
children = list(m.children())
children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
for p in m.parameters():
if id(p) not in children_p: children.append(ParameterModule(p))
return children
# NOT TO BE MODIFIED
flatten_model = lambda m: sum(map(flatten_model,children_and_parameters(m)),[]) if len(list(m.children())) else [m]
# NOT TO BE MODIFIED
def lr_range(model, lr):
"""
Build differential learning rates from lr, giving one learning rate per layer group.
Arguments:
model :- torch.nn.Module
lr :- float or slice
Returns:
Depending upon lr
"""
if not isinstance(lr, slice):
return lr
num_layer = len([nn.Sequential(*flatten_model(model))])
if lr.start:
mult = lr.stop / lr.start
step = mult**(1/(num_layer-1))
res = np.array([lr.start*(step**i) for i in range(num_layer)])
else:
res = [lr.stop/10.]*(num_layer-1) + [lr.stop]
return np.array(res)
# NOT TO BE MODIFIED
# These are the functions that give us the values of lr. For example, for a linearly
# increasing lr we would use annealing_linear.
# You can add your own custom function for producing lr.
# By default annealing_exp is used for both lr and momentum.
def annealing_no(start, end, pct:float):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct:float):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct:float):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct:float):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
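# Worked example (editor's note, not part of the original file): values of the schedules
# above for start=0.1, end=0.01 at pct = 0.0, 0.5, 1.0 (rounded):
#   annealing_linear -> 0.1, 0.055,  0.01
#   annealing_exp    -> 0.1, 0.0316, 0.01
#   annealing_cos    -> 0.1, 0.055,  0.01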
# NOT TO BE MODIFIED
# NOT TO BE MODIFIED
# TO BE MODIFIED IN SOME CASES
def lr_find(data_loader, model, loss_fn, opt, wd:int=0, start_lr:float=1e-7, end_lr:float=10,
num_it:int=100, stop_div:bool=True, smooth_beta:float=0.98, use_gpu:bool=True,
device=torch.device('cuda'), anneal_func=annealing_exp):
"""
The main function that you will call to plot the learning-rate vs. loss graph. It is
the only function from lr_find.py that you will call. By default it will use the GPU. It
assumes your model is already on the GPU if you set use_gpu.
Arguments:-
data_loader :- torch.utils.data.DataLoader
model :- torch.nn.Module
loss_fn :- torch.nn.LossFunction
opt :- torch.optim.Optimizer
wd :- weight decay (default=0).
start_lr :- The learning rate from where to start in lr_find (default=1e-7)
end_lr :- The learning rate at which to end lr_find (default=10)
num_it :- Number of iterations for lr_find (default=100)
stop_div :- If the loss diverges, then stop early (default=True)
smooth_beta :- The beta value to smoothen the running avergae of the loss function (default=0.98)
use_gpu :- True (train on GPU) else CPU
anneal_func :- The step function you want to use (default exp)
device :- Torch device to use for training model (default GPU)
Returns:
losses :- list of smoothened version of losses
lrs :- list of all lrs that we test
"""
model.train()
stop = False
flag = False
best_loss = 0.
iteration = 0
losses = []
lrs = []
lrs.append(start_lr)
start_lr = lr_range(model, start_lr)
start_lr = np.array(start_lr) if isinstance(start_lr, (tuple, list)) else start_lr
end_lr = lr_range(model, end_lr)
end_lr = np.array(end_lr) if isinstance(end_lr, (tuple, list)) else end_lr
sched = Stepper((start_lr, end_lr), num_it, anneal_func)
smoothener = SmoothenValue(smooth_beta)
epochs = int(np.ceil(num_it/len(data_loader)))
# save model_dict
model_state = model.state_dict()
opt_state = opt.state_dict()
# Set optimizer learning_rate = start_lr
for group in opt.param_groups:
group['lr'] = sched.start
for i in range(epochs):
for data in data_loader:
opt.zero_grad()
################### TO BE MODIFIED ###################
# Depending on your model, you will have to modify your
# data pipeline and how you give inputs to your model.
inputs, labels = data
if use_gpu:
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
loss = loss_fn(outputs, labels)
#####################################################
if use_gpu:
smoothener.add_value(loss.detach().cpu())
else:
smoothener.add_value(loss.detach())
smooth_loss = smoothener.smooth
losses.append(smooth_loss)
loss.backward()
################### TO BE MODIFIED ###################
# For AdamW. If you want to use Adam, comment these lines
for group in opt.param_groups:
for param in group['params']:
param.data = param.data.add(-wd * group['lr'], param.data)
#####################################################
opt.step()
# Change lr
new_lr = sched.step()
lrs.append(new_lr)
for group in opt.param_groups:
group['lr'] = new_lr
################### TO BE MODIFIED ###################
# You necessarily don't want to change it. But in cases
# when you are maximizing the loss, then you will have
# to change it.
if iteration == 0 or smooth_loss < best_loss:
best_loss = smooth_loss
iteration += 1
if sched.is_done or (stop_div and (smooth_loss > 4*best_loss or torch.isnan(loss))):
flag = True
break
#####################################################
if iteration%10 == 0:
print(f'Iteration: {iteration}')
if flag:
break
# Load state dict
model.load_state_dict(model_state)
opt.load_state_dict(opt_state)
lrs.pop()
print(f'LR Finder is complete.')
return losses, lrs
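# Illustrative call pattern, not part of the original file; the model and data loader
# below are hypothetical placeholders:
#   model = nn.Sequential(nn.Flatten(), nn.Linear(784, 10)).to(torch.device('cuda'))
#   opt = torch.optim.Adam(model.parameters(), lr=1e-7)
#   loss_fn = nn.CrossEntropyLoss()
#   losses, lrs = lr_find(train_loader, model, loss_fn, opt)
#   plot_lr_find(losses, lrs, suggestion=True)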
# NOT TO BE MODIFIED
def plot_lr_find(losses, lrs, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None):
"""
It will take the losses and lrs returned by lr_find as input.
Arguments:-
skip_start -> It will skip skip_start lrs from the start
skip_end -> It will skip skip_end lrs from the end
suggestion -> If you want to see the point where the gradient changes most
return_fig -> True then get the fig in the return statement
"""
lrs = lrs[skip_start:-skip_end] if skip_end > 0 else lrs[skip_start:]
losses = losses[skip_start:-skip_end] if skip_end > 0 else losses[skip_start:]
losses = [x.item() for x in losses]
fig, ax = plt.subplots(1, 1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try:
mg = (np.gradient(np.array(losses))).argmin()
except:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg], losses[mg], markersize=10, marker='o', color='red')
if return_fig is not None:
return fig
| 36.307692
| 115
| 0.601392
|
bc85ba5181e5203592287503621708b994737b25
| 3,905
|
py
|
Python
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
Polidea/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lang/cpp/class_types/TestClassTypesDisassembly.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
"""
Test the lldb disassemble command on each call frame when stopped on C's ctor.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
| 38.663366
| 92
| 0.56338
|
bc87838b315ca1f64fa986f62a70ee610e20d306
| 1,116
|
py
|
Python
|
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | 1
|
2021-04-25T22:53:36.000Z
|
2021-04-25T22:53:36.000Z
|
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | null | null | null |
reservedwords.py
|
irinaid/MAlice
|
02740d661020866c3927b9ee7ee4523aaaafcb7e
|
[
"MIT"
] | null | null | null |
'''
All the reserved, individual words used in MAlice.
'''
A = "a"
ALICE = "Alice"
AND = "and"
ATE = "ate"
BECAME = "became"
BECAUSE = "because"
BUT = "but"
CLOSED = "closed"
COMMA = ","
CONTAINED = "contained"
DOT = "."
DRANK = "drank"
EITHER = "either"
ENOUGH = "enough"
EVENTUALLY = "eventually"
FOUND = "found"
HAD = "had"
HATTA = "hatta"
LETTER = "letter"
LOOKING_GLASS = "looking-glass"
LPAR = "("
MAYBE = "maybe"
NUMBER = "number"
OF = "of"
OPENED = "opened"
OR = "or"
PERHAPS = "perhaps"
PIECE = "piece"
QUESTION = "?"
ROOM = "room"
RPAR = ")"
S = "'s"
SAID = "said"
SENTENCE = "sentence"
SO = "so"
SPIDER = "spider"
SPOKE = "spoke"
THE = "The"
THEN = "then"
TIMES = "times"
TOO = "too"
UNDERSCORE = "_"
UNSURE = "unsure"
WAS = "was"
WHAT = "what"
WHICH = "which"
RESTRICTED = [ A, ALICE, AND, ATE, BECAME ,BECAUSE ,BUT ,CLOSED ,COMMA ,CONTAINED ,DOT ,DRANK ,EITHER ,ENOUGH ,EVENTUALLY ,FOUND ,HAD ,HATTA ,LETTER ,LOOKING_GLASS ,LPAR ,MAYBE ,NUMBER ,OF ,OPENED ,OR ,PERHAPS ,PIECE ,QUESTION ,ROOM ,RPAR ,S ,SAID, SENTENCE ,SO ,SPIDER ,SPOKE ,THE ,THEN ,TIMES ,TOO ,UNDERSCORE ,UNSURE ,WAS ,WHAT ,WHICH]
| 21.056604
| 338
| 0.638889
|
bc88724609a6f077241f73613153365855b09321
| 853
|
py
|
Python
|
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0057_Insert_Interval/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# Author: C.K
# Email: theck17@163.com
# DateTime:2021-04-12 18:35:15
# Description:
import os
import sys
if __name__ == "__main__":
pass
| 24.371429
| 80
| 0.534584
|
bc889aea13c53b5ac47e25b4727f37433f19b834
| 322
|
py
|
Python
|
src/pymortests/benchmarks.py
|
TiKeil/pymor
|
5c6b3b6e1714b5ede11ce7cf03399780ab29d252
|
[
"Unlicense"
] | 1
|
2021-08-17T15:55:12.000Z
|
2021-08-17T15:55:12.000Z
|
src/pymortests/benchmarks.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | 4
|
2022-03-17T10:07:38.000Z
|
2022-03-30T12:41:06.000Z
|
src/pymortests/benchmarks.py
|
TreeerT/pymor
|
e8b18d2d4c4b5998f0bd84f6728e365e0693b753
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymortests.base import runmodule
if __name__ == "__main__":
runmodule(filename=__file__)
| 32.2
| 77
| 0.76087
|
bc89b98301ba0f533627b829ae7b31f9ab29c245
| 756
|
py
|
Python
|
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
storage3/_sync/client.py
|
anand2312/storage-py
|
75c9c43ea373cb58970255b8e7438c2ec67e7f25
|
[
"MIT"
] | null | null | null |
from ..utils import SyncClient, __version__
from .bucket import SyncStorageBucketAPI
from .file_api import SyncBucketProxy
__all__ = [
"SyncStorageClient",
]
| 26.068966
| 78
| 0.613757
|
bc8a0406013c9abeb99153a42725a7e4225fc35e
| 1,755
|
py
|
Python
|
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 3
|
2020-03-03T13:21:44.000Z
|
2021-07-21T09:53:31.000Z
|
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/script.module.liveresolver/lib/js2py/translators/__init__.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
__all__ = ['PyJsParser', 'Node', 'WrappingNode', 'node_to_dict', 'parse', 'translate_js', 'translate', 'syntax_tree_translate',
'DEFAULT_HEADER']
__author__ = 'Piotr Dabkowski'
__version__ = '2.2.0'
from pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict
from translator import translate_js, trasnlate, syntax_tree_translate, DEFAULT_HEADER
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
Same as PyJsParser().parse For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code)
| 45
| 127
| 0.764672
|
bc8acb8ede34bacdf376a2fc95f5b2c7c78ede61
| 141,721
|
py
|
Python
|
src/test/python/test_scc_pacs.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 66
|
2020-09-25T11:38:28.000Z
|
2022-03-23T15:15:34.000Z
|
src/test/python/test_scc_pacs.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 217
|
2020-09-22T22:45:22.000Z
|
2022-03-31T23:02:15.000Z
|
src/test/python/test_scc_pacs.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 5
|
2020-09-25T09:24:17.000Z
|
2021-08-08T20:52:26.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020, Sandflow Consulting LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the SCC PACs"""
# pylint: disable=R0201,C0115,C0116
import unittest
from ttconv.scc.codes.preambles_address_codes import SccPreambleAddressCode
from ttconv.style_properties import TextDecorationType, NamedColors, FontStyleType
if __name__ == '__main__':
unittest.main()
| 87.374229
| 129
| 0.689333
|
bc8adf2af330cf7308b0b0e25463ed5a44b45099
| 1,484
|
py
|
Python
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/custom_image_properties_custom.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 35.333333
| 76
| 0.6031
|
bc8c55932d28aa8c9253fefe76b11ab1d6dbc13a
| 1,886
|
py
|
Python
|
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
distance_torch_no_compile/chamfer.py
|
nicolalandro/softpool
|
ca77161ab70e5fe6c6505dc40f448bd8e1d78a48
|
[
"Apache-2.0"
] | null | null | null |
import torch
def expanded_pairwise_distances(x, y):
'''
Input: x is a bxNxd matrix
y is a bxMxd matrix
Output: dist is a bxNxM matrix where dist[i,j] is the squared norm between x[i,:] and y[j,:],
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
differences = x.unsqueeze(2) - y.unsqueeze(1)
distances = torch.sum(differences * differences, -1)
return distances
def chamfer_distance(x, y):
'''
input x and y are bxNxM matrices, b: batch, N: number of points, M: point dim (e.g. 2 for 2D or 3 for 3D)
output is a bx1 matrix with the value of the chamfer distance for each sample of the batch
'''
dist_vec = expanded_pairwise_distances(x, y)
min_distances = torch.topk(dist_vec, k=1, dim=2, largest=False).values
chamfer = torch.sum(min_distances, dim=1) / torch.tensor(x.shape[1])
return chamfer
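# --- Editor's illustration (not part of the original file) ---
# As written, chamfer_distance is one-directional: for each of the N points in x
# it takes the squared distance to its nearest neighbour in y (topk with
# largest=False along dim=2) and averages over N. With x = [[[0., 0., 0.]]] and
# y = [[[0., 1., 0.]]] the nearest-neighbour squared distance is 1.0, so the
# result is tensor([[1.]]).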
if __name__ == "__main__":
x = torch.tensor([
[
[0., 0., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
y = torch.tensor([
[
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
],
[
[1., 1., 0.],
[1., 2., 0.],
[0., 1., 0.],
]
])
    chamfer = chamfer_distance  # ChamferLoss is not defined in this snippet; use the function above
print('chamfer loss torch (cpu):', chamfer(x, y))
print('chamfer loss torch (cuda):', chamfer(x.cuda(), y.cuda()))
# import sys
# sys.path.append("../distance/chamfer/")
# import dist_chamfer as cd
# CD = cd.chamferDist()
# dist1, dist2, _, _= CD(x, y)
# print('orig', dist1)
| 27.735294
| 104
| 0.507423
|
bc8c6ccfc24c9f2c6b892349f506c390ec4d676f
| 8,400
|
py
|
Python
|
isiscb/curation/authority_views/relation_views.py
|
crispzips/IsisCB
|
72f5ad47bbc2c615f995df148f5b86550835efdb
|
[
"MIT"
] | 4
|
2016-01-25T20:35:33.000Z
|
2020-04-07T15:39:52.000Z
|
isiscb/curation/authority_views/relation_views.py
|
crispzips/IsisCB
|
72f5ad47bbc2c615f995df148f5b86550835efdb
|
[
"MIT"
] | 41
|
2015-08-19T17:34:41.000Z
|
2022-03-11T23:19:01.000Z
|
isiscb/curation/authority_views/relation_views.py
|
crispzips/IsisCB
|
72f5ad47bbc2c615f995df148f5b86550835efdb
|
[
"MIT"
] | 2
|
2020-11-25T20:18:18.000Z
|
2021-06-24T15:15:41.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, QueryDict #, HttpResponseForbidden, Http404, , JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.contrib.admin.views.decorators import staff_member_required, user_passes_test
from rules.contrib.views import permission_required, objectgetter
from isisdata.models import *
from isisdata.utils import strip_punctuation, normalize
from isisdata import operations
from isisdata.filters import *
from isisdata import tasks as data_tasks
from curation import p3_port_utils
from curation.forms import *
from curation.contrib.views import check_rules
| 39.810427
| 133
| 0.68619
|
bc8efe8d75934b61443e05664bf142fdc9790c04
| 6,351
|
py
|
Python
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 94
|
2016-03-04T17:25:53.000Z
|
2022-03-18T18:05:23.000Z
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 2,841
|
2016-01-21T09:06:49.000Z
|
2022-03-18T14:53:56.000Z
|
run_tests.py
|
silx-kit/silx
|
360f890a617676a92f0bed6a28b718d09e70ec03
|
[
"CC0-1.0",
"MIT"
] | 71
|
2015-09-30T08:35:35.000Z
|
2022-03-16T07:16:28.000Z
|
#!/usr/bin/env python3
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Run the tests of the project.
This script expects a suite function in <project_package>.test,
which returns a unittest.TestSuite.
Test coverage dependencies: coverage, lxml.
"""
__authors__ = ["Jrme Kieffer", "Thomas Vincent"]
__date__ = "30/09/2020"
__license__ = "MIT"
import distutils.util
import logging
import os
import subprocess
import sys
import importlib
# Capture all default warnings
logging.captureWarnings(True)
import warnings
warnings.simplefilter('default')
logger = logging.getLogger("run_tests")
logger.setLevel(logging.WARNING)
logger.info("Python %s %s", sys.version, tuple.__itemsize__ * 8)
try:
import numpy
except Exception as error:
logger.warning("Numpy missing: %s", error)
else:
logger.info("Numpy %s", numpy.version.version)
try:
import h5py
except Exception as error:
logger.warning("h5py missing: %s", error)
else:
logger.info("h5py %s", h5py.version.version)
def get_project_name(root_dir):
"""Retrieve project name by running python setup.py --name in root_dir.
:param str root_dir: Directory where to run the command.
:return: The name of the project stored in root_dir
"""
logger.debug("Getting project name in %s", root_dir)
p = subprocess.Popen([sys.executable, "setup.py", "--name"],
shell=False, cwd=root_dir, stdout=subprocess.PIPE)
name, _stderr_data = p.communicate()
logger.debug("subprocess ended with rc= %s", p.returncode)
return name.split()[-1].decode('ascii')
def is_debug_python():
"""Returns true if the Python interpreter is in debug mode."""
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
if sysconfig.get_config_var("Py_DEBUG"):
return True
return hasattr(sys, "gettotalrefcount")
def build_project(name, root_dir):
"""Run python setup.py build for the project.
Build directory can be modified by environment variables.
:param str name: Name of the project.
:param str root_dir: Root directory of the project
    :return: The path to the directory where the build was performed
"""
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform,
sys.version_info[0], sys.version_info[1])
if is_debug_python():
architecture += "-pydebug"
if os.environ.get("PYBUILD_NAME") == name:
# we are in the debian packaging way
home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
elif os.environ.get("BUILDPYTHONPATH"):
home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
else:
home = os.path.join(root_dir, "build", architecture)
logger.warning("Building %s to %s", name, home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=root_dir)
logger.debug("subprocess ended with rc= %s", p.wait())
if os.path.isdir(home):
return home
alt_home = os.path.join(os.path.dirname(home), "lib")
if os.path.isdir(alt_home):
return alt_home
def import_project_module(project_name, project_dir):
"""Import project module, from the system of from the project directory"""
if "--installed" in sys.argv:
try:
module = importlib.import_module(project_name)
except Exception:
logger.error("Cannot run tests on installed version: %s not installed or raising error.",
project_name)
raise
else: # Use built source
build_dir = build_project(project_name, project_dir)
if build_dir is None:
logging.error("Built project is not available !!! investigate")
sys.path.insert(0, build_dir)
logger.warning("Patched sys.path, added: '%s'", build_dir)
module = importlib.import_module(project_name)
return module
if __name__ == "__main__": # Needed for multiprocessing support on Windows
import pytest
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = get_project_name(PROJECT_DIR)
logger.info("Project name: %s", PROJECT_NAME)
project_module = import_project_module(PROJECT_NAME, PROJECT_DIR)
PROJECT_VERSION = getattr(project_module, 'version', '')
PROJECT_PATH = project_module.__path__[0]
args = [normalize_option(p) for p in sys.argv[1:] if p != "--installed"]
# Run test on PROJECT_PATH if nothing is specified
without_options = [a for a in args if not a.startswith("-")]
if len(without_options) == 0:
args += [PROJECT_PATH]
argv = ["--rootdir", PROJECT_PATH] + args
sys.exit(pytest.main(argv))
| 34.895604
| 101
| 0.668714
|
bc8faa6c50d7d1921cb25f63e39e57127594a8e6
| 7,072
|
py
|
Python
|
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-03-10T11:10:20.000Z
|
2018-03-10T11:10:20.000Z
|
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/utils/error.py
|
vprashanth777/Selenium
|
b3c48b75e73322891bb697f251b32a9a9d8b4dbe
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON, RERAISED_EXCEPTIONS
from .unic import unic
EXCLUDE_ROBOT_TRACES = not os.getenv('ROBOT_INTERNAL_TRACES')
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
    This method also handles exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces=exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exc_info=None, exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = exc_info or sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
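# --- Editor's illustration (not part of the original file) ---
# A minimal usage sketch of the factory above, inside an except block:
#
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       details = ErrorDetails()
#       details.message    # e.g. "ZeroDivisionError: division by zero"
#       details.traceback  # formatted traceback text
#       details.error      # the original exception instance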
| 34.330097
| 78
| 0.662472
|
bc90147c820b8957a1562bdb12623216308ec658
| 308
|
py
|
Python
|
dedupe/_init.py
|
neozhangthe1/dedupe
|
aff99e6bd027291eecfb78eae08aa73877f4fff0
|
[
"MIT"
] | null | null | null |
dedupe/_init.py
|
neozhangthe1/dedupe
|
aff99e6bd027291eecfb78eae08aa73877f4fff0
|
[
"MIT"
] | null | null | null |
dedupe/_init.py
|
neozhangthe1/dedupe
|
aff99e6bd027291eecfb78eae08aa73877f4fff0
|
[
"MIT"
] | null | null | null |
from dedupe.api import StaticDedupe, Dedupe
from dedupe.api import StaticRecordLink, RecordLink
from dedupe.api import StaticGazetteer, Gazetteer
from dedupe.core import randomPairs, randomPairsMatch, frozendict
from dedupe.convenience import consoleLabel, trainingDataDedupe, trainingDataLink, canonicalize
| 51.333333
| 95
| 0.866883
|
bc926bb3d7c2f20a37f4cae0b86f7455ebdb913c
| 1,430
|
py
|
Python
|
scalability/tests/test_misc.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | 941
|
2021-05-10T08:14:14.000Z
|
2022-03-31T11:40:24.000Z
|
scalability/tests/test_misc.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | 3
|
2022-02-16T12:24:20.000Z
|
2022-03-23T12:05:41.000Z
|
scalability/tests/test_misc.py
|
ggreif/ic
|
ac56ec91f077c00d59eea3f73f51e14a1b3ea882
|
[
"Apache-2.0"
] | 122
|
2021-05-10T08:21:23.000Z
|
2022-03-25T20:34:12.000Z
|
import unittest
from unittest import TestCase
from misc import verify
if __name__ == "__main__":
unittest.main()
| 43.333333
| 110
| 0.735664
|
bc92877f15ab8c1e45e6b3f0628b4c9b556c0100
| 4,271
|
py
|
Python
|
mars/tensor/fft/ifft.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/tensor/fft/ifft.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/fft/ifft.py
|
tomzhang/mars-1
|
6f1d85e37eb1b383251314cb0ba13e06288af03d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorComplexFFTMixin, validate_fft, TensorStandardFFT
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `mt.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e.,
* ``a[0]`` should contain the zero frequency term,
* ``a[1:n//2]`` should contain the positive-frequency terms,
* ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
increasing order starting from the most negative frequency.
For an even number of input points, ``A[n//2]`` represents the sum of
the values at the positive and negative Nyquist frequencies, as the two
are aliased together. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input tensor, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex Tensor
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
mt.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> import mars.tensor as mt
>>> mt.fft.ifft([0, 4, 0, 0]).execute()
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = mt.arange(400)
>>> n = mt.zeros((400,), dtype=complex)
>>> n[40:60] = mt.exp(1j*mt.random.uniform(0, 2*mt.pi, (20,)))
>>> s = mt.fft.ifft(n)
>>> plt.plot(t.execute(), s.real.execute(), 'b-', t.execute(), s.imag.execute(), 'r--')
...
>>> plt.legend(('real', 'imaginary'))
...
>>> plt.show()
"""
a = astensor(a)
validate_fft(a, axis, norm)
op = TensorIFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.complex_))
return op(a)
| 35.890756
| 91
| 0.657926
|
bc936c9856eecc335b0cca94f1df34512def1882
| 754
|
py
|
Python
|
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME29/magAverageEMFinCoil.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
extraNumber = 4 * math.pi * pow(10,-7)  # vacuum permeability mu_0 = 4*pi*1e-7 T*m/A
avgEMF()
| 29
| 69
| 0.624668
|
bc93ed322f15833ada38ade26d0df82b04900ca0
| 1,908
|
py
|
Python
|
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
bench_cupy.py
|
zhouxzh/Jetson_nano_stft_benchmark
|
ffa97984f95b9862ac2a10b8459bb7ef241c6c72
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Computes the spectrogram of a test signal using cupy and cuFFT.
Author: Jan Schlüter
"""
import sys
import os
import timeit
import numpy as np
import cupy as cp
INPUT_ON_GPU = True
OUTPUT_ON_GPU = True
from testfile import make_test_signal
def spectrogram(signal, sample_rate=22050, frame_len=1024, fps=70):
"""
Computes a magnitude spectrogram at a given sample rate (in Hz), frame
length (in samples) and frame rate (in Hz), on CUDA using cupy.
"""
if not INPUT_ON_GPU:
signal = cp.array(signal.astype(np.float32)) # already blown up to a list of frames
win = cp.hanning(frame_len).astype(cp.float32)
# apply window function
#signal *= win # this doesn't work correctly for some reason.
signal = signal * win
# perform FFT
spect = cp.fft.rfft(signal)
# convert into magnitude spectrogram
spect = cp.abs(spect)
# return
if OUTPUT_ON_GPU:
cp.cuda.get_current_stream().synchronize()
else:
return spect.get()
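# --- Editor's illustration (not part of the original file) ---
# Assumed input layout (per the "already blown up to a list of frames" comment
# above): `signal` is framed as (n_frames, frame_len), so the Hann window of
# length frame_len broadcasts across frames and cp.fft.rfft yields a
# (n_frames, frame_len // 2 + 1) magnitude spectrogram.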
if __name__=="__main__":
main()
| 26.5
| 92
| 0.649371
|
bc945bd064cef2e58f31004f5a02ca29c75f9558
| 1,597
|
py
|
Python
|
dataxHWSp2021/HW3-4_NeuralNet/student/tests/q2b3.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 28
|
2020-06-15T23:53:36.000Z
|
2022-03-19T09:27:02.000Z
|
dataxHWSp2021/HW3-4_NeuralNet/student/tests/q2b3.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 4
|
2020-06-24T22:20:31.000Z
|
2022-02-28T01:37:36.000Z
|
dataxHWSp2021/HW3-4_NeuralNet/student/tests/q2b3.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 78
|
2020-06-19T09:41:01.000Z
|
2022-02-05T00:13:29.000Z
|
test = { 'name': 'q2b3',
'points': 5,
'suites': [ { 'cases': [ { 'code': '>>> '
'histories_2b[2].model.count_params()\n'
'119260',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'histories_2b[2].model.layers[1].activation.__name__\n'
"'sigmoid'",
'hidden': False,
'locked': False},
{ 'code': '>>> '
'histories_2b[2].model.layers[1].units\n'
'150',
'hidden': False,
'locked': False},
{ 'code': '>>> '
"histories_2b[2].history['loss'][4] "
'<= '
"histories_2b[1].history['loss'][4]\n"
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 55.068966
| 102
| 0.21603
|
bc96f541ad15eaf540c8e8c9a48800ebc1c0301c
| 1,427
|
py
|
Python
|
geotrek/appconfig.py
|
Cynthia-Borot-PNE/Geotrek-admin
|
abd9ca8569a7e35ef7473f5b52731b1c78668754
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/appconfig.py
|
Cynthia-Borot-PNE/Geotrek-admin
|
abd9ca8569a7e35ef7473f5b52731b1c78668754
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/appconfig.py
|
Cynthia-Borot-PNE/Geotrek-admin
|
abd9ca8569a7e35ef7473f5b52731b1c78668754
|
[
"BSD-2-Clause"
] | null | null | null |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
from django.contrib.auth.apps import AuthConfig
from django.contrib.contenttypes.apps import ContentTypesConfig
from django.contrib.sessions.apps import SessionsConfig
from django.db.models.signals import post_migrate
from django_celery_results.apps import CeleryResultConfig
from geotrek.common.utils.signals import check_srid_has_meter_unit, pm_callback
| 27.442308
| 95
| 0.775753
|
bc96fd29e9d6cb6eb71dd73f5f39dcfd2bcd44f8
| 11,604
|
py
|
Python
|
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | 1
|
2021-08-02T02:42:58.000Z
|
2021-08-02T02:42:58.000Z
|
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | null | null | null |
dtr_code/shared/run_torch_trial.py
|
merrymercy/dtr-prototype
|
bf40e182453a7d8d23581ea68f32a9d7d2037d62
|
[
"Linux-OpenIB"
] | 1
|
2021-08-05T08:58:53.000Z
|
2021-08-05T08:58:53.000Z
|
"""
To avoid any issues of memory hanging around between inputs,
we run each input as a separate process.
A little ugly but effective
"""
import gc
import glob
import json
import os
import random
import time
import numpy as np
import torch
from common import invoke_main, read_json, write_json, prepare_out_file, check_file_exists
from validate_config import validate_trials_config
from pt_trial_util import create_csv_writer
from tqdm import tqdm
import model_util
def save_trial_log(dest_dir, sim_conf_filename, model_name, specific_params, is_baseline=False):
"""
Find the last DTR log produced in the trial (if any exist)
and move it to the directory
"""
all_logs = glob.glob(os.path.join(os.getcwd(), '*.log'))
if not all_logs:
return
# if we delete all logs in advance, there should be at most one log
assert len(all_logs) == 1
most_recent = all_logs[0]
# rename and move
# (new name just appends info to the old one)
batch_size = specific_params['batch_size']
budget = specific_params['memory_budget']
if budget < 0:
budget = 'inf'
new_name = '{}-{}-{}-{}'.format(model_name, batch_size, budget,
os.path.basename(most_recent))
filename = prepare_out_file(dest_dir, new_name)
os.rename(most_recent, filename)
if is_baseline and sim_conf_filename is not None:
extend_simrd_config(dest_dir, sim_conf_filename, model_name, specific_params, filename)
def run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params, use_dtr, use_profiling):
"""
This function initializes a model and performs
a single measurement of the model on the given input.
While it might seem most reasonable to initialize
the model outside of the loop, DTR's logs have shown
that certain constants in the model persist between loop iterations;
performing these actions in a separate *function scope* turned out to be the only
way to prevent having those constants hang around.
Returns a dict of measurements
"""
torch.cuda.reset_max_memory_allocated()
# resetting means the count should be reset to
# only what's in scope, meaning only the input
input_mem = torch.cuda.max_memory_allocated()
model = produce_model(extra_params=extra_params)
params = []
for m in model:
if hasattr(m, 'parameters'):
params.extend(m.parameters())
model_mem = torch.cuda.max_memory_allocated()
optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# start timing
torch.cuda.synchronize()
start_time = time.time()
if use_dtr:
torch.reset_profile()
start.record()
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
run_model(criterion, *model, *inp, optimizer=optimizer)
end.record()
start_sync = time.time()
torch.cuda.synchronize()
end_sync = time.time()
end_time = time.time()
# end timing
if use_dtr:
# operators-only time, tracked by DTR
cuda_time = torch.compute_time()
base_compute_time = -1
remat_compute_time = -1
search_time = -1
cost_time = -1
if use_profiling:
base_compute_time = torch.base_compute_time()
remat_compute_time = torch.remat_compute_time()
search_time = torch.search_time()
cost_time = torch.cost_time()
torch.reset_profile()
total_mem = torch.cuda.max_memory_allocated()
teardown(*model)
torch.cuda.reset_max_memory_allocated()
del model
if use_dtr:
torch.toggle_log(False)
del params
batch_size = len(inp[0])
ips = batch_size / (end_time - start_time)
result = {
'time': end_time - start_time,
'sync_time': end_sync - start_sync,
'gpu_time': start.elapsed_time(end),
'input_mem': input_mem,
'model_mem': model_mem,
'total_mem': total_mem,
'base_compute_time': base_compute_time,
'remat_compute_time': remat_compute_time,
'search_time': search_time,
'cost_time': cost_time,
'batch_size': batch_size,
'ips': ips
}
if use_dtr:
result['cuda_time'] = cuda_time
else:
result['cuda_time'] = -1.0
return result
if __name__ == '__main__':
invoke_main(main, 'config_dir', 'experiment_mode',
'model_name', 'input_idx', 'params_file',
'out_file', 'trial_run', 'trial_run_outfile')
| 35.057402
| 170
| 0.635384
|
bc986ff7e618db67d5b1902a0fdfeecd1595ea88
| 1,482
|
py
|
Python
|
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | 5
|
2020-01-21T21:11:49.000Z
|
2022-02-06T19:55:28.000Z
|
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | null | null | null |
pythonTools/downloadPDBsInList.py
|
rsanchezgarc/BIPSPI
|
e155fee0836084ea02bc9919c58817d26a4a13e5
|
[
"Apache-2.0"
] | 3
|
2018-05-25T14:57:36.000Z
|
2022-01-27T12:53:41.000Z
|
import sys, os
from subprocess import call
try:
from downloadPdb import downloadPDB
except ImportError:
from .downloadPdb import downloadPDB
pdbListFile="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/117_dimers_list.tsv"
outPath="/home/rsanchez/Tesis/rriPredMethod/data/joanDimers/pdbFiles/rawPDBs"
USE_BIO_UNIT=False
##def downloadPDB(pdbId, pdbOutPath, useBioUnit):
#### download pdb: wget ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz  or, already decompressed,
#### wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/1i1q.pdb2.gz |zcat > 1i1q.pdb
## outName= os.path.join(pdbOutPath,pdbId+'.pdb')
## if not os.path.isfile(outName):
## if useBioUnit:
## cmd= 'wget -qO- ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/all/%s.pdb1.gz |zcat > %s'%(pdbId.lower(), outName)
## else:
## cmd= 'wget -qO- http://www.pdb.org/pdb/files/%s.pdb | cat > %s'%(pdbId.upper(), outName)
## print(cmd)
## call(cmd, shell= True)
if __name__=="__main__":
if len(sys.argv)==3:
pdbListFile= os.path.abspath(os.path.expanduser(sys.argv[1]))
outPath= os.path.abspath(os.path.expanduser(sys.argv[2]))
print( pdbListFile, outPath)
downloadInFile(pdbListFile, outPath, USE_BIO_UNIT)
| 36.146341
| 129
| 0.702429
|
bc98a22d0cd11d65a7d45c78d01ce4ed45420116
| 1,935
|
py
|
Python
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 47
|
2015-01-20T15:38:41.000Z
|
2022-02-15T21:03:50.000Z
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 16
|
2015-06-09T16:12:50.000Z
|
2020-02-05T06:40:18.000Z
|
code/python3/search_facets.py
|
hsethi2709/xapian-docsprint
|
a872c83fef6fde13efce67fd5563d43514c7444a
|
[
"MIT"
] | 56
|
2015-01-20T15:38:44.000Z
|
2022-03-03T18:13:39.000Z
|
#!/usr/bin/env python
import json
import sys
import xapian
import support
### End of example code.
if len(sys.argv) < 3:
print("Usage: %s DBPATH QUERYTERM..." % sys.argv[0])
sys.exit(1)
search(dbpath = sys.argv[1], querystring = " ".join(sys.argv[2:]))
| 31.209677
| 69
| 0.649612
|
bc98ed1d916dea38c19eaadce5f09692d5d10eeb
| 1,272
|
py
|
Python
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 83
|
2015-01-02T04:50:43.000Z
|
2021-06-06T03:26:55.000Z
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 2
|
2015-01-04T11:25:20.000Z
|
2015-01-05T11:13:37.000Z
|
iconcollections/serializers.py
|
plrthink/myicons
|
62475e118e2c7404d88146ea5d67961418d7f8ab
|
[
"BSD-2-Clause"
] | 20
|
2015-01-15T10:00:09.000Z
|
2019-11-06T07:25:59.000Z
|
import re
from rest_framework import serializers
from .models import Collection, CollectionIcon
| 27.06383
| 81
| 0.616352
|
bc99e84c9e8d7aa99d673f47ef51acfd45692fba
| 1,738
|
py
|
Python
|
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
Python/partition-to-k-equal-sum-subsets.py
|
sm2774us/leetcode_interview_prep_2021
|
33b41bea66c266b733372d9a8b9d2965cd88bf8c
|
[
"Fair"
] | null | null | null |
# Time: O(n*2^n)
# Space: O(2^n)
# Time: O(k^(n-k) * k!)
# Space: O(n)
# DFS solution with pruning.
| 30.491228
| 87
| 0.468354
|
bc9aea616fee38b1a73a79e690091369c909ef06
| 737
|
py
|
Python
|
var/spack/repos/builtin/packages/aspell/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/aspell/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/aspell/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
# See also: AspellDictPackage
| 32.043478
| 98
| 0.738128
|
bc9b38aa93978a9c5a2ff6d24ac4f1e6be8b4faa
| 1,888
|
py
|
Python
|
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | 9
|
2019-04-23T01:46:12.000Z
|
2021-08-16T07:07:12.000Z
|
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | null | null | null |
third_party_package/RDKit_2015_03_1/rdkit/ML/Descriptors/UnitTestParser.py
|
Ivy286/cluster_basedfps
|
7fc216537f570436f008ea567c137d03ba2b6d81
|
[
"WTFPL"
] | 5
|
2016-09-21T03:47:48.000Z
|
2019-07-30T22:17:35.000Z
|
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for compound descriptors
"""
from __future__ import print_function
import unittest
import Parser
from rdkit.six.moves import xrange
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
| 35.622642
| 101
| 0.64036
|
bc9c8f24e080e4c64950de33e4962b6b2e44ede2
| 1,575
|
py
|
Python
|
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | 8
|
2021-05-03T04:06:15.000Z
|
2022-01-15T16:27:42.000Z
|
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | null | null | null |
setup.py
|
maciek3000/data_dashboard
|
1b573b674d37f57ae7e8bbfb1e83c801b488dfd6
|
[
"MIT"
] | 3
|
2021-05-19T17:31:18.000Z
|
2021-06-19T12:24:01.000Z
|
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "readme.md").read_text(encoding="utf-8")
setup(
name="data_dashboard",
version="0.1.1",
description="Dashboard to explore the data and to create baseline Machine Learning model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/maciek3000/data_dashboard",
author="Maciej Dowgird",
author_email="dowgird.maciej@gmail.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Visualization"
],
package_dir={"data_dashboard": "data_dashboard"},
packages=find_packages(),
python_requires=">=3.7",
install_requires=[
"pandas>=1.2.3",
"numpy>=1.19.5",
"scipy>=1.6.1",
"beautifulsoup4>=4.9.3",
"scikit-learn>=0.24.1",
"seaborn>=0.11.1",
"bokeh>=2.3.0",
"Jinja2>=2.11.3",
"xgboost>=1.3.3",
"lightgbm>=3.2.0"
],
package_data={
"data_dashboard": ["static/*", "templates/*", "examples/*"]
},
project_urls={
"Github": "https://github.com/maciek3000/data_dashboard",
},
)
| 32.142857
| 95
| 0.615238
|
bc9d746d95215d78b546409456d7b42ad25142a0
| 5,577
|
py
|
Python
|
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/tests/test_instance.py
|
acornett21/ack-ec2-controller
|
aa747d981239e41ae4254a9b31ee0f20ac882c85
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for Instance API.
"""
import datetime
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
RESOURCE_PLURAL = "instances"
# highly available instance type for deterministic testing
INSTANCE_TYPE = "m4.large"
INSTANCE_AMI = "Amazon Linux 2 Kernel"
INSTANCE_TAG_KEY = "owner"
INSTANCE_TAG_VAL = "ack-controller"
CREATE_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
TIMEOUT_SECONDS = 300
| 33.8
| 95
| 0.684777
|
bc9f42407dc824808c93da43b669882c77d6d9f4
| 9,461
|
py
|
Python
|
web/app/forms.py
|
Devidence7/Break
|
f961b1b46977c86739ff651fe81a1d9fff98a8e1
|
[
"MIT"
] | null | null | null |
web/app/forms.py
|
Devidence7/Break
|
f961b1b46977c86739ff651fe81a1d9fff98a8e1
|
[
"MIT"
] | null | null | null |
web/app/forms.py
|
Devidence7/Break
|
f961b1b46977c86739ff651fe81a1d9fff98a8e1
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import Form, StringField, PasswordField, BooleanField, SubmitField, IntegerField, validators, FileField, \
MultipleFileField, SelectField, RadioField, HiddenField, DecimalField, TextAreaField
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired
# Structure of the Login form
# Structure of the Register form
# Structure of the Login form
# Structure of the Subir Anuncio form
| 48.025381
| 323
| 0.649931
|
bc9fd661a260bba8109c66590275e9d7c9b1094c
| 2,774
|
py
|
Python
|
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
hello.py
|
LMiceOrg/postdoc-voting
|
091fd6caa120f7c5aae600c0a492a185ec10e9d6
|
[
"CC0-1.0",
"MIT"
] | null | null | null |
#coding: utf-8
import sys
import os
import asyncio
import websockets
import json
import socket
import xlrd
#global vars
phd_data = None
pro_data = None
#json
port = 5678
if len(sys.argv) >=2:
port = sys.argv[1]
ws_server = websockets.serve(main_logic, '0.0.0.0', port)
asyncio.get_event_loop().run_until_complete(ws_server)
asyncio.get_event_loop().run_forever()
| 20.248175
| 60
| 0.519466
|
bca0727b76dc54909be0bf60b6d636ec8f539927
| 2,518
|
py
|
Python
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 11,811
|
2015-01-01T02:40:39.000Z
|
2022-03-31T16:11:19.000Z
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 2,364
|
2015-01-01T00:29:19.000Z
|
2022-03-31T21:26:34.000Z
|
runtime/Python3/src/antlr4/dfa/DFASerializer.py
|
maximmenshikov/antlr4
|
5ad8c150ae6b9a34a92df1f59606516fe58cb65f
|
[
"BSD-3-Clause"
] | 3,240
|
2015-01-05T02:34:15.000Z
|
2022-03-30T18:26:29.000Z
|
#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A DFA walker that knows how to dump them to serialized strings.#/
from io import StringIO
from antlr4 import DFA
from antlr4.Utils import str_list
from antlr4.dfa.DFAState import DFAState
| 34.027027
| 113
| 0.548451
|
bca253db9d9aae8a5131355cc2fd801c42bb88f2
| 13,242
|
py
|
Python
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 13
|
2020-09-17T19:54:30.000Z
|
2022-03-01T00:25:11.000Z
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 27
|
2020-09-21T23:51:50.000Z
|
2022-03-25T19:45:16.000Z
|
sw/calibrate.py
|
microsoft/moabian
|
db95844103faedb3788abb5f37d0f37a771a9455
|
[
"MIT"
] | 13
|
2020-11-30T19:01:38.000Z
|
2021-11-10T11:28:36.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Calibration Controller
Performs calibration for hue, center of camera position, and servo offsets
"""
import os
import cv2
import time
import json
import argparse
import datetime
import numpy as np
import logging as log
from env import MoabEnv
from typing import Tuple
from common import Vector2
from detector import hsv_detector
from controllers import pid_controller
from dataclasses import dataclass, astuple
from hardware import plate_angles_to_servo_positions
def ball_close_enough(x, y, radius, max_ball_dist=0.045, min_ball_dist=0.01):
# reject balls which are too far from the center and too small
return (
np.abs(x) < max_ball_dist
and np.abs(y) < max_ball_dist
and radius > min_ball_dist
)
def calibrate_hue(camera_fn, detector_fn, is_menu_down_fn):
hue_low = 0
hue_high = 360
hue_steps = 41 # Is 41 instead of 40 so that the steps are even
img_frame, elapsed_time = camera_fn()
hue_options = list(np.linspace(hue_low, hue_high, hue_steps))
detected_hues = []
for hue in hue_options:
if is_menu_down_fn():
return CalibHue(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue, debug=True)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
log.info(
f"hue={hue:0.3f}, ball_detected={ball_detected}, "
f"(x, y)={x:0.3f} {y:0.3f}, radius={radius:0.3f}"
)
detected_hues.append(hue)
if len(detected_hues) > 0:
# https://en.wikipedia.org/wiki/Mean_of_circular_quantities
detected_hues_rad = np.radians(detected_hues)
sines, cosines = np.sin(detected_hues_rad), np.cos(detected_hues_rad)
sin_mean, cos_mean = np.mean(sines), np.mean(cosines)
avg_hue_rad = np.arctan2(sin_mean, cos_mean)
avg_hue = np.degrees(avg_hue_rad) % 360 # Convert back to [0, 360]
print(f"Hues are: {detected_hues}")
print(f"Hue calibrated: {avg_hue:0.2f}")
print(f"Avg hue: {avg_hue:0.2f}")
return CalibHue(hue=int(avg_hue), success=True)
else:
log.warning(f"Hue calibration failed.")
return CalibHue()
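# --- Editor's illustration (not part of the original file) ---
# Why the circular mean is used above: a plain average of hues 350 and 10
# (both near red) would give 180, whereas the circular mean lands near 0:
#   rad = np.radians([350, 10])
#   np.degrees(np.arctan2(np.mean(np.sin(rad)), np.mean(np.cos(rad)))) % 360
#   # -> approximately 0.0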
def calibrate_pos(camera_fn, detector_fn, hue, is_menu_down_fn):
for i in range(10): # Try and detect for 10 frames before giving up
if is_menu_down_fn():
return CalibPos(early_quit=True)
img_frame, elapsed_time = camera_fn()
ball_detected, ((x, y), radius) = detector_fn(img_frame, hue=hue)
# If we found a ball roughly in the center that is large enough
if ball_detected and ball_close_enough(x, y, radius):
x_offset = round(x, 3)
y_offset = round(y, 3)
log.info(f"Offset calibrated: [{x_offset:.3f}, {y_offset:.3f}]")
return CalibPos(position=(x_offset, y_offset), success=True)
log.warning(f"Offset calibration failed.")
return CalibPos()
def calibrate_servo_offsets(pid_fn, env, stationary_vel=0.005, time_limit=20):
start_time = time.time()
action = Vector2(0, 0)
# Initial high vel_history (to use the vel_hist[-100:] later)
vel_x_hist = [1.0 for _ in range(100)]
vel_y_hist = [1.0 for _ in range(100)]
# Run until the ball has stabilized or the time limit was reached
while time.time() < start_time + time_limit:
state = env.step(action)
action, info = pid_fn(state)
(x, y, vel_x, vel_y, sum_x, sum_y), ball_detected, buttons = state
# Quit on menu down
if buttons.menu_button:
return CalibServos(early_quit=True)
if ball_detected:
vel_x_hist.append(vel_x)
vel_y_hist.append(vel_y)
prev_100_x = np.mean(np.abs(vel_x_hist[-100:]))
prev_100_y = np.mean(np.abs(vel_y_hist[-100:]))
print("Prev 100: ", (prev_100_x, prev_100_y))
# If the average velocity for the last 100 timesteps is under the limit
if (prev_100_x < stationary_vel) and (prev_100_y < stationary_vel):
# Calculate offsets by calculating servo positions at the
# current stable position and subtracting the `default` zeroed
# position of the servos.
servos = np.array(plate_angles_to_servo_positions(*action))
servos_zeroed = np.array(plate_angles_to_servo_positions(0, 0))
servo_offsets = list(servos - servos_zeroed)
return CalibServos(servos=servo_offsets, success=True)
    # If the plate could not be stabilized within time_limit seconds, give up
log.warning(f"Servo calibration failed.")
return CalibServos()
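# --- Editor's illustration (not part of the original file) ---
# The offsets above are simply "servo positions that hold the plate still in
# practice" minus "servo positions the model considers level". For a
# hypothetical stabilising action of plate angles (0.01, -0.02):
#   servos = np.array(plate_angles_to_servo_positions(0.01, -0.02))
#   zeroed = np.array(plate_angles_to_servo_positions(0, 0))
#   servo_offsets = list(servos - zeroed)  # one correction per servo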
def write_calibration(calibration_dict, calibration_file="bot.json"):
log.info("Writing calibration.")
# write out stuff
with open(calibration_file, "w+") as outfile:
log.info(f"Creating calibration file {calibration_file}")
json.dump(calibration_dict, outfile, indent=4, sort_keys=True)
def read_calibration(calibration_file="bot.json"):
log.info("Reading previous calibration.")
if os.path.isfile(calibration_file):
with open(calibration_file, "r") as f:
calibration_dict = json.load(f)
else: # Use defaults
calibration_dict = {
"ball_hue": 44,
"plate_offsets": (0.0, 0.0),
"servo_offsets": (0.0, 0.0, 0.0),
}
return calibration_dict
def wait_for_joystick_or_menu(hardware, sleep_time=1 / 30):
"""Waits for either the joystick or the menu. Returns the buttons"""
while True:
buttons = hardware.get_buttons()
if buttons.menu_button or buttons.joy_button:
return buttons
time.sleep(sleep_time)
if __name__ == "__main__": # Parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-f", "--file", default="bot.json", type=str)
args, _ = parser.parse_known_args()
main(args.file, debug=args.debug)
| 32.696296
| 105
| 0.655264
|
bca450dae1b4675ac1d585a61880a16b6a3d235e
| 3,739
|
py
|
Python
|
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | 1
|
2021-03-01T12:07:10.000Z
|
2021-03-01T12:07:10.000Z
|
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | null | null | null |
marketing/tests_celery_tasks.py
|
renzyndrome/lits-crm
|
32daea8c76f91780b8cc8c3f107d04df606c0ec8
|
[
"MIT"
] | 1
|
2021-12-09T09:38:50.000Z
|
2021-12-09T09:38:50.000Z
|
from datetime import datetime, timedelta
from django.test import TestCase
from django.test.utils import override_settings
from marketing.tasks import (
delete_multiple_contacts_tasks,
list_all_bounces_unsubscribes,
run_all_campaigns,
run_campaign,
send_campaign_email_to_admin_contact,
send_scheduled_campaigns,
upload_csv_file,
)
from marketing.tests import TestMarketingModel
| 29.912
| 58
| 0.502006
|
bca568d5e71e781c0b945807208117a83879f72f
| 263
|
py
|
Python
|
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
doc's/3-labels_and_titles.py
|
andreluispy/py2html
|
227f3225632b467c95131b841d6ffab4c5202e44
|
[
"MIT"
] | null | null | null |
from py2html.main import *
page = web()
page.create()
# Header Parameters
# text = header text
# n = title level
page.header(text='My Site', n=1)
# Label Parameters
# text = label text
# color = label color
page.label(text='', color='')
page.compile()
| 16.4375
| 32
| 0.657795
|
bca56f1f07a7efd89750413292d60e6212055e4a
| 1,022
|
py
|
Python
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T11:05:03.000Z
|
2020-07-22T11:05:03.000Z
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 2
|
2019-06-07T11:53:48.000Z
|
2019-06-24T08:20:25.000Z
|
JorGpi/tests/test_pickup.py
|
adujovic/JorG
|
15062984e837a938819e548c83f6f5414fa47103
|
[
"BSD-3-Clause"
] | 3
|
2019-07-01T12:38:06.000Z
|
2022-02-01T21:38:12.000Z
|
import unittest
from JorGpi.pickup.pickup import SmartPickUp,Reference,CommandLineOptions
| 37.851852
| 92
| 0.682975
|
bca5cc541dfab73d45981c6f120eb783e0579f49
| 131
|
py
|
Python
|
jwt_auth/admin.py
|
alaraayan/todo-backend
|
37e46b6789012c2d64a39f6d2429b1ae893dba37
|
[
"CC-BY-3.0"
] | null | null | null |
jwt_auth/admin.py
|
alaraayan/todo-backend
|
37e46b6789012c2d64a39f6d2429b1ae893dba37
|
[
"CC-BY-3.0"
] | null | null | null |
jwt_auth/admin.py
|
alaraayan/todo-backend
|
37e46b6789012c2d64a39f6d2429b1ae893dba37
|
[
"CC-BY-3.0"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth import get_user_model
User = get_user_model()
admin.site.register(User)
| 21.833333
| 46
| 0.824427
|
bca685ecc21d97e8e24f16ef46f6aaabe24a9d13
| 21,795
|
py
|
Python
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 5
|
2015-03-12T00:36:33.000Z
|
2022-02-24T16:41:25.000Z
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 133
|
2016-02-03T23:54:45.000Z
|
2022-03-30T21:33:58.000Z
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 6
|
2015-01-07T23:21:15.000Z
|
2017-12-07T08:26:33.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot
import simplejson as json
from decimal import *
from django.test.utils import override_settings
from mock import patch
from spotseeker_server import models
| 31.864035
| 79
| 0.501491
|
bca72e22e6e8fe1739abb6f05b2ae3bdddd3d4b0
| 1,807
|
py
|
Python
|
get_active_LSPs.py
|
JNPRAutomate/northstar_SDN_controller_automation
|
09fb5b84eaa1cf939268b542239c9923520d99d3
|
[
"MIT"
] | 3
|
2019-03-18T17:27:11.000Z
|
2020-01-22T15:39:18.000Z
|
get_active_LSPs.py
|
ksator/northstar_SDN_controller_automation
|
b78b304194bb64bc14a9c96235ae0792c974f1af
|
[
"MIT"
] | null | null | null |
get_active_LSPs.py
|
ksator/northstar_SDN_controller_automation
|
b78b304194bb64bc14a9c96235ae0792c974f1af
|
[
"MIT"
] | 2
|
2018-03-12T21:13:52.000Z
|
2020-11-20T23:16:31.000Z
|
# this python script makes a rest call to Juniper Northstar to get active LSPs
# usage: python get_active_LSPs.py
import json
import requests
from requests.auth import HTTPBasicAuth
from pprint import pprint
import yaml
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
my_variables_in_yaml=import_variables_from_file()
authuser = my_variables_in_yaml['northstar']['username']
authpwd = my_variables_in_yaml['northstar']['password']
url_base = 'http://' + my_variables_in_yaml['northstar']['ip'] + ':8091/NorthStar/API/v2/tenant/'
url = url_base + '1/topology/1/te-lsps'
headers = { 'Accept': 'application/json' }
headers = { 'Content-type': 'application/json' }
# r = requests.get(url, headers=headers, auth=(authuser, authpwd))
get_token()
headers = {'Authorization':get_token(), 'Accept' : 'application/json', 'Content-Type' : 'application/json'}
r = requests.get(url, headers=headers, verify=False)
# type(r.json())
# pprint(r.json())
# This gives the names of all the LSPs that are active
for item in r.json():
if item['operationalStatus'] == 'Active':
print "This LSP is active: " + item['name']
| 37.645833
| 116
| 0.753735
|
bca98a1ce3fff11966f586aae11d75f7d4194f73
| 859
|
py
|
Python
|
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/tests/cdef_types.py
|
mewbak/dragonffi
|
2a205dbe4dd980d5dd53026c871514795573a7fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Adrien Guinet <adrien@guinet.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# RUN: "%python" "%s"
#
import pydffi
import sys
F = pydffi.FFI()
CU = F.cdef('''
#include <stdint.h>
typedef int32_t MyInt;
typedef struct {
int a;
int b;
} A;
''')
assert(CU.types.MyInt == F.Int32Ty)
assert(isinstance(CU.types.A, pydffi.StructType))
| 26.030303
| 74
| 0.726426
|
bcaa2cc3091f251f513b10ec7f23dc034c71de01
| 6,031
|
py
|
Python
|
hierarchical_foresight/env/environment.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
hierarchical_foresight/env/environment.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
hierarchical_foresight/env/environment.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrapper around the maze navigation environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from . import simple_maze
import cv2
import numpy as np
| 37.228395
| 80
| 0.636213
|
bcaa324b8d6cf63921fcf9763740cc9027d44173
| 855
|
py
|
Python
|
trips/migrations/0004_invoice.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
trips/migrations/0004_invoice.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
trips/migrations/0004_invoice.py
|
chorna/taxi24
|
09e174a0cb3b9543ca4987e60cd0d37ecda6ac3c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-11 23:51
from django.db import migrations, models
import django.db.models.deletion
| 32.884615
| 130
| 0.592982
|
bcaa742ec3f2633707689915b345db35e3f84a87
| 25,875
|
py
|
Python
|
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | null | null | null |
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | 22
|
2022-03-08T19:30:45.000Z
|
2022-03-28T21:14:15.000Z
|
src/general_harvester.py
|
Badger-Finance/python-keepers
|
b5b2b0b083a237dceecd161d81754512959822b1
|
[
"MIT"
] | null | null | null |
import logging
import os
from decimal import Decimal
from time import sleep
import requests
from hexbytes import HexBytes
from web3 import Web3
from web3 import contract
from web3.contract import Contract
from config.constants import BASE_CURRENCIES
from config.constants import GAS_LIMITS
from config.constants import MULTICHAIN_CONFIG
from config.enums import Network
from src.harvester import IHarvester
from src.misc_utils import hours
from src.misc_utils import seconds_to_blocks
from src.tx_utils import get_effective_gas_price
from src.tx_utils import get_gas_price_of_tx
from src.tx_utils import get_priority_fee
from src.web3_utils import confirm_transaction
from src.utils import get_abi
from src.discord_utils import get_hash_from_failed_tx_error
from src.web3_utils import get_last_harvest_times
from src.token_utils import get_token_price
from src.discord_utils import send_error_to_discord
from src.discord_utils import send_success_to_discord
logging.basicConfig(level=logging.INFO)
MAX_TIME_BETWEEN_HARVESTS = hours(120)
HARVEST_THRESHOLD = 0.0005 # min ratio of want to total vault AUM required to harvest
NUM_FLASHBOTS_BUNDLES = 6
| 37.939883
| 100
| 0.602705
|
bcaab7186ea62c16403a777679f43c5651b2eeea
| 1,170
|
py
|
Python
|
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
clickhouse_plantuml/column.py
|
yonesko/clickhouse-plantuml
|
6db26788fe86854967f627f28fd8a403ccbf7ffb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# License: Apache-2.0
# Copyright (C) 2020 Mikhail f. Shiryaev
| 25.434783
| 56
| 0.622222
|
bcaae8938e310a72ba14496462496246c713e82d
| 577
|
py
|
Python
|
contrib/micronet/scripts/file2buf.py
|
pmalhaire/WireHub
|
588a372e678b49557deed6ba88a896596222fb2d
|
[
"Apache-2.0"
] | 337
|
2018-12-21T22:13:57.000Z
|
2019-11-01T18:35:10.000Z
|
contrib/micronet/scripts/file2buf.py
|
nask0/WireHub
|
588a372e678b49557deed6ba88a896596222fb2d
|
[
"Apache-2.0"
] | 8
|
2018-12-24T20:16:40.000Z
|
2019-09-02T11:54:48.000Z
|
contrib/micronet/scripts/file2buf.py
|
nask0/WireHub
|
588a372e678b49557deed6ba88a896596222fb2d
|
[
"Apache-2.0"
] | 18
|
2018-12-24T02:49:38.000Z
|
2019-07-31T20:00:47.000Z
|
#!/usr/bin/env python3
import os
import sys
MAX = 8
fpath = sys.argv[1]
name = sys.argv[2]
with open(fpath, "rb") as fh:
sys.stdout.write("char %s[] = {" % (name,) )
i = 0
while True:
if i > 0:
sys.stdout.write(", ")
if i % MAX == 0:
sys.stdout.write("\n\t")
c = fh.read(1)
if not c:
sys.stdout.write("\n")
break
sys.stdout.write("0x%.2x" % (ord(c), ))
i = i + 1
print("};")
print("")
print("unsigned int %s_sz = %s;" % (name, i))
print("")
| 15.594595
| 49
| 0.443674
|
bcab57fb16b16bdb97e645f2dba9e5a2f1d7fa1f
| 10,293
|
py
|
Python
|
tests/test_observable/test_skip.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
tests/test_observable/test_skip.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
tests/test_observable/test_skip.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
import unittest
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
if __name__ == "__main__":
unittest.main()
| 28.2
| 57
| 0.469445
|
bcab6a237bb88828d13a4bacbf608684ac108e0d
| 468
|
py
|
Python
|
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
CF#691/python/A.py
|
chaitanya1243/CP
|
a0e5e34daf6f7c22c9a91212b65338ef0c46d163
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
T = int(input())
for t in range(T):
n = int(input())
red = input()
blue = input()
solve(n, red, blue)
| 24.631579
| 79
| 0.5
|
bcab784b7b6a5cec70ca79e8907334431af7152b
| 2,505
|
py
|
Python
|
shudder/__main__.py
|
fitpay/shudder
|
3bd3d7d712f60b7c7db1d259c024dde3eaeed26c
|
[
"Apache-2.0"
] | null | null | null |
shudder/__main__.py
|
fitpay/shudder
|
3bd3d7d712f60b7c7db1d259c024dde3eaeed26c
|
[
"Apache-2.0"
] | null | null | null |
shudder/__main__.py
|
fitpay/shudder
|
3bd3d7d712f60b7c7db1d259c024dde3eaeed26c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Scopely, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Start polling of SQS and metadata."""
import shudder.queue as queue
import shudder.metadata as metadata
from shudder.config import CONFIG
import time
import os
import requests
import signal
import subprocess
import sys
if __name__ == '__main__':
sqs_connection, sqs_queue = queue.create_queue()
sns_connection, subscription_arn = queue.subscribe_sns(sqs_queue)
uncatchable = ['SIG_DFL','SIGSTOP','SIGKILL']
for i in [x for x in dir(signal) if x.startswith("SIG")]:
if not i in uncatchable:
signum = getattr(signal,i)
signal.signal(signum, receive_signal)
while True:
message = queue.poll_queue(sqs_connection, sqs_queue)
if message or metadata.poll_instance_metadata():
queue.clean_up_sns(sns_connection, subscription_arn, sqs_queue)
if 'endpoint' in CONFIG:
requests.get(CONFIG["endpoint"])
if 'endpoints' in CONFIG:
for endpoint in CONFIG["endpoints"]:
requests.get(endpoint)
if 'commands' in CONFIG:
for command in CONFIG["commands"]:
print 'Running command: %s' % command
process = subprocess.Popen(command)
while process.poll() is None:
time.sleep(30)
"""Send a heart beat to aws"""
queue.record_lifecycle_action_heartbeat(message)
"""Send a complete lifecycle action"""
queue.complete_lifecycle_action(message)
sys.exit(0)
time.sleep(5)
| 37.38806
| 75
| 0.637126
|
bcac6e3cd42df76570409abe5d700496d0e0e054
| 1,688
|
py
|
Python
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 4
|
2019-05-15T13:09:23.000Z
|
2021-03-28T09:10:11.000Z
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 14
|
2019-04-23T15:05:07.000Z
|
2019-08-14T13:21:07.000Z
|
example/hydrogen.py
|
NLESC-JCER/pyCHAMP
|
97523237b3521a426d664b6e2972257045ff8f5e
|
[
"Apache-2.0"
] | 1
|
2019-09-30T22:55:53.000Z
|
2019-09-30T22:55:53.000Z
|
import autograd.numpy as np
from pyCHAMP.wavefunction.wf_base import WF
from pyCHAMP.optimizer.minimize import Minimize
from pyCHAMP.sampler.metropolis import Metropolis
from pyCHAMP.sampler.hamiltonian import Hamiltonian
from pyCHAMP.solver.vmc import VMC
if __name__ == "__main__":
wf = Hydrogen(nelec=1, ndim=3)
sampler = Metropolis(nwalkers=1000, nstep=1000, step_size=3,
nelec=1, ndim=3, domain={'min': -5, 'max': 5})
sampler = Hamiltonian(nwalkers=1000, nstep=1000,
step_size=3, nelec=1, ndim=3)
optimizer = Minimize(method='bfgs', maxiter=25, tol=1E-4)
    # VMC solver
vmc = VMC(wf=wf, sampler=sampler, optimizer=optimizer)
# single point
opt_param = [1.]
pos, e, s = vmc.single_point(opt_param)
print('Energy : ', e)
print('Variance : ', s)
vmc.plot_density(pos)
# optimization
init_param = [0.5]
vmc.optimize(init_param)
vmc.plot_history()
| 26.375
| 71
| 0.603081
|
bcac914db6f36c54dadd0991d6bb9fbf2492dbe9
| 585
|
py
|
Python
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 182
|
2015-01-09T05:26:46.000Z
|
2022-03-16T14:10:06.000Z
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 95
|
2015-02-24T23:29:56.000Z
|
2022-03-13T03:27:58.000Z
|
braintree/account_updater_daily_report.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 93
|
2015-02-19T17:59:06.000Z
|
2022-03-19T17:01:25.000Z
|
from braintree.configuration import Configuration
from braintree.resource import Resource
| 36.5625
| 75
| 0.71453
|
bcae93da2c9dcb0c8765b93504dcb020462aad8e
| 1,696
|
py
|
Python
|
game/ball.py
|
geoncic/PyBlock
|
69c8220e38a21b7e1c6dd2196752173f9e78981f
|
[
"MIT"
] | null | null | null |
game/ball.py
|
geoncic/PyBlock
|
69c8220e38a21b7e1c6dd2196752173f9e78981f
|
[
"MIT"
] | null | null | null |
game/ball.py
|
geoncic/PyBlock
|
69c8220e38a21b7e1c6dd2196752173f9e78981f
|
[
"MIT"
] | null | null | null |
import pygame
import pygame.gfxdraw
from constants import Constants
| 30.836364
| 102
| 0.625
|
bcaebabdfa8553517a45b393cf40eff654bc096f
| 36,597
|
py
|
Python
|
program/eggUI.py
|
otills/embryocv
|
d501f057bada15ff5dc753d3dae5a883b5c9e244
|
[
"MIT"
] | 1
|
2020-08-05T02:47:12.000Z
|
2020-08-05T02:47:12.000Z
|
program/eggUI.py
|
otills/embryocv
|
d501f057bada15ff5dc753d3dae5a883b5c9e244
|
[
"MIT"
] | null | null | null |
program/eggUI.py
|
otills/embryocv
|
d501f057bada15ff5dc753d3dae5a883b5c9e244
|
[
"MIT"
] | 1
|
2020-08-05T02:47:16.000Z
|
2020-08-05T02:47:16.000Z
|
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from scipy.spatial import distance as dist
import glob
import re
import os
from PyQt5 import QtGui
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
import cv2
import pandas as pd
from PyQt5.Qt import *
import pyqtgraph as pg
#from PyQt4.Qt import *
#%%
#==============================================================================
#
#==============================================================================
#==============================================================================
# Close button - not implemented (hidden)
#==============================================================================
#==============================================================================
# def closeEvent(self, event):
#
# quit_msg = "Are you sure you want to exit the program?"
# reply = QtGui.QMessageBox.question(self, 'Message',
# quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
#
# if reply == QtGui.QMessageBox.Yes:
# #event.accept()
# app.quit()
# else:
# event.ignore()
#
#==============================================================================
#==============================================================================
# #self.originalEggRotBBox = eggRotBBox.copy()
# #self.originalEggBoxPoints = eggBoxPoints.copy()
# #self.currROI_eggRotBBox = self.eggRotBBox[self.intDivVal,self.withinSeqVal]
# #self.currROI_eggBoxPoints = self.eggBoxPoints[self.intDivVal,self.withinSeqVal]
#
# # Modified version of updateOpenCVEggROICurrEmbryo
# # Remove previous
# self.diag.imv.removeItem(self.roi)
# # Get relevant video position and ROI.
# self.getSeqValsAndCurrROI()
# # Get rotated bounding box points
# ySorted = self.originalEggBoxPoints[np.argsort(self.originalEggBoxPoints[:, 1]), :]
# # Get bottom most, and top most sorted corner points
# bottomMost = ySorted[:2, :]
# topMost = ySorted[2:, :]
# # Get bottom most
# bottomMost = bottomMost[np.argsort(bottomMost[:, 1]), :]
# (bl, br) = bottomMost
# # Use bottom-left coordinate as anchor to calculate the Euclidean distance from it to each of the top-most points;
# # The point with the largest distance will be our bottom-right point
# D = dist.cdist(bl[np.newaxis], topMost, "euclidean")[0]
# (tl, tr) = topMost[np.argsort(D)[::-1], :]
# # Make ROI - note non 0,or 90 degree angles, require different of the X size
# # Rectangular ROI used to enable easier handling of corner handles for tracking user changes.
# if (self.originalEggRotBBox[4] == -90.0) | (self.originalEggRotBBox[4] == -0.0)| (self.originalEggRotBBox[4] == 0.0):
# self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [self.originalEggRotBBox[2], self.originalEggRotBBox[3]])
# # roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# else:
# # Random angle ROIs
# self.roi = pg.ROI([bottomMost[0][0], bottomMost[0][1]], [-self.originalEggRotBBox[2], self.originalEggRotBBox[3]])
# self.roi.setAngle(self.originalEggRotBBox[4], update=True)
# # roi = pg.EllipseROI([bottomMost[0][0], bottomMost[0][1]], [-eggRotBBox[vidTime][2], eggRotBBox[vidTime][3]])
# # Add handles
# self.roi.addRotateHandle([1, 0],[0.5,0.5])
# self.roi.addRotateHandle([0, 1], [0.5,0.5])
# self.roi.addScaleHandle([1, 1], [0, 0])
# self.roi.addScaleHandle([0, 0], [1, 1])
# self.roi.setPen('y',width=3)
# self.roi.removable
# self.roi.invertible = 'True'
# # Make var for dealing with modifications to roi
# self.updatedEggROI=[]
# ### Still to do...
# self.diag.imv.addItem(self.roi)
# self.roi.sigRegionChangeFinished.connect(self.updateROI)
#==============================================================================
#===============
| 54.540984
| 329
| 0.545099
|
bcaf2fd4c9457e78084e56f1b1fab3aa1985e417
| 394
|
py
|
Python
|
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
Curso em Vídeo/Mundo 2 Estruturas de Controle/Desafios/desafio053.py
|
henriqueumeda/-Estudo-python
|
28e93a377afa4732037a29eb74d4bc7c9e24b62f
|
[
"MIT"
] | null | null | null |
frase = input('Digite uma frase: ').upper().strip().replace(' ', '')
tamanho = int(len(frase))
inverso = ''
#Opção mais simples:
# inverso = frase[::-1]
for contador in range(tamanho-1, -1, -1):
inverso += frase[contador]
print('O inverso de {} é {}'.format(frase, inverso))
if frase == inverso:
    print('Temos um palíndromo!')
else:
    print('A frase digitada não é um palíndromo!')
| 24.625
| 68
| 0.639594
|
bcaf69ed2a6fded7e4b539b423940b33563b6d40
| 540
|
py
|
Python
|
tests/unittest/options/pricing/test_binomial_trees.py
|
yiluzhu/quant
|
784c5cc20eeded2ff684b464eec4744f000d9638
|
[
"MIT"
] | 1
|
2020-10-14T12:56:14.000Z
|
2020-10-14T12:56:14.000Z
|
tests/unittest/options/pricing/test_binomial_trees.py
|
yiluzhu/quant
|
784c5cc20eeded2ff684b464eec4744f000d9638
|
[
"MIT"
] | null | null | null |
tests/unittest/options/pricing/test_binomial_trees.py
|
yiluzhu/quant
|
784c5cc20eeded2ff684b464eec4744f000d9638
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from options.pricing.binomial_trees import BinomialTreePricer
from options.option import OptionType, Option
| 30
| 86
| 0.698148
|
bcaf71bd0c6cd2298c1b67ea7ef95ddacb0851be
| 16,589
|
py
|
Python
|
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
mctimer.py
|
Sharpieman20/MCtimer
|
5d4609f3697778de090816b8a768b82bbe217294
|
[
"Beerware"
] | null | null | null |
import atexit
import os
import sys
import platform
import json
import glob
import datetime
import time
import threading
import tkinter as tk
from pynput import mouse
from pathlib import Path
from playsound import playsound
from enum import Enum
import copy
#"THE BEER-WARE LICENSE" (Revision 42):
#bleach86 wrote this file. As long as you retain this notice you can do whatever you want with this stuff.
#If we meet some day, and you think this stuff is worth it, you can buy me a beer in return
input_fil = Path("/Users/sharpieman20/MCtimer/MCtimer") / "input.txt"
# continuously read from input file every 10ms
# when you get a "reset timer" message, reset the timer
#
# class Category:
# def __init__():
# self.actions = []
# self.attempts = []
# # convert actions to attempts
# def read():
# def write():
# class Actions(Enum):
# CREATE_WORLD = 0
# START = 1
# class Attempt:
stage = 0
ind = 0
time_count = 0
rsg = [
("World Created", True),
([
"Savannah",
"Desert",
"Plains",
"Other"
], False),
([
"0-15",
"15-30",
"30-45",
"45-60",
"60-75",
"75+"
], False),
([
"Iron",
"Logs",
"Feathers",
"Wool",
"Gravel"
], True),
("Enter Nether", True),
("Find Fortress", True),
("Find Spawner", True),
("Exit Spawner", True),
("Exit Nether", True),
("Tower Build Start", True),
("Tower Build Finished", True),
("Tower Leave", True),
("Enter Stronghold", True),
("Enter End", True),
("Finish", True)
]
cur_stages = {}
json_file = 'mct_config.json'
with open(json_file) as json_file:
data2 = json.load(json_file)
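# Editor's note (illustrative, not from the original project): judging from the
# keys read in this file, mct_config.json looks roughly like this hypothetical
# example:
#   {
#     "borderless": "true", "width": 200, "height": 100,
#     "linux_saves": "~/.minecraft/saves",
#     "mac_saves": "~/Library/Application Support/minecraft/saves",
#     "windows_saves": "~/AppData/Roaming/.minecraft/saves",
#     "auto_start": "false", "base_update": 10, "rta_update": 5,
#     "metronome_beats": 4, "metronome_bpm": 120
#   }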
if data2['borderless'] == 'true':
    data2['borderless'] = True  # convert the config string into a real boolean
else:
    data2['borderless'] = False
running_path = Path.cwd()
NUM_CHARS = 11
system_type = platform.system()
if system_type == 'Linux':
directory = os.path.expanduser(data2['linux_saves'])
elif system_type == 'Darwin':
directory = os.path.expanduser(data2['mac_saves'])
elif system_type == 'Windows':
directory = os.path.expanduser(data2['windows_saves'])
amount2 = 0
last_amount = 0
window = tk.Tk()
# bg = BindGlobal(widget=window)
window.text = tk.StringVar()
window.text2 = tk.StringVar()
window.text3 = tk.StringVar()
window.text4 = tk.StringVar()
window.geometry("{}x{}".format(data2["width"], data2["height"]))
window.configure(bg='black')
rt = time.time()
old_version = False
did_change = False
count = 0
ig = 0
base = 0
program_time = 0
metronome_armed = False
metronome_running = False
metronome_active = False
metronome_beats = int(data2['metronome_beats'])
listener = None
metronome_time = 0
base_update = int(data2['base_update'])
rta_update = int(data2['rta_update']) * base_update
metronome_bpm = int(data2['metronome_bpm'])
metronome_interval = 0
if data2['auto_start'] == 'true':
click1 = 1
click2 = 1
else:
click1 = 0
click2 = 0
cur_fil = None
world_base_time = 0
# def update_split()
''' METRONOME CODE '''
''' Metronome mouse listener '''
atexit.register(exit_handler)
''' Sound playing code '''
''' Metronome functions '''
main()
| 24.833832
| 119
| 0.578335
|
bcb0236709da62fc588329e551c92b5fc621ffd9
| 2,927
|
py
|
Python
|
kafka/structs.py
|
informatique-cdc/kafka-python
|
d73bd6fc2f8825e2fddb7c4f091af7b266e37aea
|
[
"Apache-2.0"
] | 4,389
|
2015-06-12T06:00:10.000Z
|
2022-03-31T20:41:42.000Z
|
kafka/structs.py
|
informatique-cdc/kafka-python
|
d73bd6fc2f8825e2fddb7c4f091af7b266e37aea
|
[
"Apache-2.0"
] | 1,595
|
2015-12-02T20:58:22.000Z
|
2022-03-27T07:28:03.000Z
|
kafka/structs.py
|
informatique-cdc/kafka-python
|
d73bd6fc2f8825e2fddb7c4f091af7b266e37aea
|
[
"Apache-2.0"
] | 1,115
|
2015-12-02T23:17:52.000Z
|
2022-03-30T03:34:29.000Z
|
""" Other useful structs """
from __future__ import absolute_import
from collections import namedtuple
"""A topic and partition tuple
Keyword Arguments:
topic (str): A topic name
partition (int): A partition id
"""
TopicPartition = namedtuple("TopicPartition",
["topic", "partition"])
"""A Kafka broker metadata used by admin tools.
Keyword Arguments:
nodeID (int): The Kafka broker id.
host (str): The Kafka broker hostname.
port (int): The Kafka broker port.
rack (str): The rack of the broker, which is used to in rack aware
partition assignment for fault tolerance.
Examples: `RACK1`, `us-east-1d`. Default: None
"""
BrokerMetadata = namedtuple("BrokerMetadata",
["nodeId", "host", "port", "rack"])
"""A topic partition metadata describing the state in the MetadataResponse.
Keyword Arguments:
topic (str): The topic name of the partition this metadata relates to.
partition (int): The id of the partition this metadata relates to.
leader (int): The id of the broker that is the leader for the partition.
replicas (List[int]): The ids of all brokers that contain replicas of the
partition.
isr (List[int]): The ids of all brokers that contain in-sync replicas of
the partition.
error (KafkaError): A KafkaError object associated with the request for
this partition metadata.
"""
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader", "replicas", "isr", "error"])
"""The Kafka offset commit API
The Kafka offset commit API allows users to provide additional metadata
(in the form of a string) when an offset is committed. This can be useful
(for example) to store information about which node made the commit,
what time the commit was made, etc.
Keyword Arguments:
offset (int): The offset to be committed
metadata (str): Non-null metadata
"""
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
# TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
["offset", "metadata"])
"""An offset and timestamp tuple
Keyword Arguments:
offset (int): An offset
timestamp (int): The timestamp associated to the offset
"""
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
["offset", "timestamp"])
MemberInformation = namedtuple("MemberInformation",
["member_id", "client_id", "client_host", "member_metadata", "member_assignment"])
GroupInformation = namedtuple("GroupInformation",
["error_code", "group", "state", "protocol_type", "protocol", "members", "authorized_operations"])
"""Define retry policy for async producer
Keyword Arguments:
Limit (int): Number of retries. limit >= 0, 0 means no retries
backoff_ms (int): Milliseconds to backoff.
retry_on_timeouts:
"""
RetryOptions = namedtuple("RetryOptions",
["limit", "backoff_ms", "retry_on_timeouts"])
| 33.261364
| 102
| 0.702767
|
bcb1c97f3222308944fcb2351152a564408ff396
| 7,357
|
py
|
Python
|
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
Vehicle_Counting_colab.py
|
manolosolalinde/Vehicle-Counting
|
898e1993613ea5a6803078cc5026d2d690c12322
|
[
"MIT"
] | null | null | null |
import cv2
from trackers.tracker import create_blob, add_new_blobs, remove_duplicates
import numpy as np
from collections import OrderedDict
from detectors.detector import get_bounding_boxes
import uuid
import os
import contextlib
from datetime import datetime
import argparse
from utils.detection_roi import get_roi_frame, draw_roi
from counter import get_counting_line, is_passed_counting_line
# parse CLI arguments
parser = argparse.ArgumentParser()
parser.add_argument('video', help='relative/absolute path to video or camera input of traffic scene')
parser.add_argument('--iscam', action='store_true', help='specify if video capture is from a camera')
parser.add_argument('--droi', help='specify a detection region of interest (ROI) \
i.e a set of vertices that represent the area (polygon) \
where you want detections to be made (format: 1,2|3,4|5,6|7,8|9,10 \
default: 0,0|frame_width,0|frame_width,frame_height|0,frame_height \
[i.e the whole video frame])')
parser.add_argument('--showdroi', action='store_true', help='display/overlay the detection roi on the video')
parser.add_argument('--mctf', type=int, help='maximum consecutive tracking failures \
i.e number of tracking failures before the tracker concludes \
the tracked object has left the frame')
parser.add_argument('--di', type=int, help='detection interval i.e number of frames \
before detection is carried out again (in order to find new vehicles \
and update the trackers of old ones)')
parser.add_argument('--detector', help='select a model/algorithm to use for vehicle detection \
(options: yolo, haarc, bgsub, ssd | default: yolo)')
parser.add_argument('--tracker', help='select a model/algorithm to use for vehicle tracking \
(options: csrt, kcf, camshift | default: kcf)')
parser.add_argument('--record', action='store_true', help='record video and vehicle count logs')
parser.add_argument('--clposition', help='position of counting line (options: top, bottom, \
left, right | default: bottom)')
parser.add_argument('--hideimage', action='store_true', help='hide resulting image')
args = parser.parse_args()
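# Editor's note (illustrative, not from the original script; the video file
# name is hypothetical): a typical run with an explicit detection ROI would be
#   python Vehicle_Counting_colab.py traffic.mp4 --detector yolo --tracker kcf \
#       --droi "0,0|858,0|858,480|0,480" --clposition bottom --record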
# capture traffic scene video
video = int(args.video) if args.iscam else args.video
cap = cv2.VideoCapture(video)
_, frame = cap.read()
# configs
blobs = OrderedDict()
blob_id = 1
frame_counter = 0
DETECTION_INTERVAL = 10 if args.di is None else args.di
MAX_CONSECUTIVE_TRACKING_FAILURES = 3 if args.mctf is None else args.mctf
detector = 'yolo' if args.detector is None else args.detector
tracker = 'kcf' if args.tracker is None else args.tracker
f_height, f_width, _ = frame.shape
# init video object and log file to record counting
if args.record:
output_video = cv2.VideoWriter('./videos/output.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (f_width, f_height))
log_file_name = 'log.txt'
with contextlib.suppress(FileNotFoundError):
os.remove(log_file_name)
log_file = open(log_file_name, 'a')
log_file.write('vehicle_id, count, datetime\n')
log_file.flush()
# set counting line
clposition = 'bottom' if args.clposition is None else args.clposition
counting_line = get_counting_line(clposition, f_width, f_height)
vehicle_count = 0
# create detection ROI
droi = [(0, 0), (f_width, 0), (f_width, f_height), (0, f_height)]
if args.droi:
droi = []
points = args.droi.replace(' ', '').split('|')
for point_str in points:
point = tuple(map(int, point_str.split(',')))
droi.append(point)
# initialize trackers and create new blobs
droi_frame = get_roi_frame(frame, droi)
initial_bboxes = get_bounding_boxes(droi_frame, detector)
for box in initial_bboxes:
_blob = create_blob(box, frame, tracker)
blobs[blob_id] = _blob
blob_id += 1
while True:
k = cv2.waitKey(1)
if args.iscam or cap.get(cv2.CAP_PROP_POS_FRAMES) + 1 < cap.get(cv2.CAP_PROP_FRAME_COUNT):
_, frame = cap.read()
nframes = cap.get(cv2.CAP_PROP_POS_FRAMES)
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if nframes % 10 == 0 or nframes == 1:
print("Processing {} of {} frames".format(nframes,frame_count))
for _id, blob in list(blobs.items()):
# update trackers
success, box = blob.tracker.update(frame)
if success:
blob.num_consecutive_tracking_failures = 0
blob.update(box)
else:
blob.num_consecutive_tracking_failures += 1
# delete untracked blobs
if blob.num_consecutive_tracking_failures >= MAX_CONSECUTIVE_TRACKING_FAILURES:
del blobs[_id]
# count vehicles
if is_passed_counting_line(blob.centroid, counting_line, clposition) and not blob.counted:
blob.counted = True
vehicle_count += 1
# log count data to a file (vehicle_id, count, datetime)
if args.record:
_row = '{0}, {1}, {2}\n'.format('v_' + str(_id), vehicle_count, datetime.now())
log_file.write(_row)
log_file.flush()
if frame_counter >= DETECTION_INTERVAL:
# rerun detection
droi_frame = get_roi_frame(frame, droi)
boxes = get_bounding_boxes(droi_frame, detector)
blobs, current_blob_id = add_new_blobs(boxes, blobs, frame, tracker, blob_id, counting_line, clposition)
blob_id = current_blob_id
blobs = remove_duplicates(blobs)
frame_counter = 0
# draw and label blob bounding boxes
for _id, blob in blobs.items():
(x, y, w, h) = [int(v) for v in blob.bounding_box]
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame, 'v_' + str(_id), (x, y - 2), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
# draw counting line
cv2.line(frame, counting_line[0], counting_line[1], (0, 255, 0), 3)
# display vehicle count
cv2.putText(frame, 'Count: ' + str(vehicle_count), (20, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
# show detection roi
if args.showdroi:
frame = draw_roi(frame, droi)
# save frame in video output
if args.record:
output_video.write(frame)
# visualize vehicle counting
if not args.hideimage:
resized_frame = cv2.resize(frame, (858, 480))
cv2.imshow('tracking', resized_frame)
frame_counter += 1
# save frame if 's' key is pressed
if k & 0xFF == ord('s'):
cv2.imwrite(os.path.join('screenshots', 'ss_' + uuid.uuid4().hex + '.png'), frame)
print('Screenshot taken.')
else:
print('End of video.')
# end video loop if on the last frame
break
# end video loop if 'q' key is pressed
if k & 0xFF == ord('q'):
print('Video exited.')
break
# end capture, close window, close log file and video objects if any
cap.release()
if not args.hideimage:
cv2.destroyAllWindows()
if args.record:
log_file.close()
output_video.release()
| 40.646409
| 125
| 0.644964
|
bcb21686e2484863628d877e956c259a49e6e1be
| 2,542
|
py
|
Python
|
app/resources/magic_castle_api.py
|
ComputeCanada/mc-hub
|
92b4c212ba8f7b5b1c8b8700f981275605a07067
|
[
"BSD-3-Clause"
] | 5
|
2020-09-04T16:34:36.000Z
|
2020-09-25T19:14:59.000Z
|
app/resources/magic_castle_api.py
|
ComputeCanada/mc-hub
|
92b4c212ba8f7b5b1c8b8700f981275605a07067
|
[
"BSD-3-Clause"
] | 39
|
2020-09-12T17:37:14.000Z
|
2022-03-10T17:49:57.000Z
|
app/resources/magic_castle_api.py
|
ComputeCanada/mc-hub
|
92b4c212ba8f7b5b1c8b8700f981275605a07067
|
[
"BSD-3-Clause"
] | 1
|
2021-03-29T15:42:13.000Z
|
2021-03-29T15:42:13.000Z
|
from flask import request
from resources.api_view import ApiView
from exceptions.invalid_usage_exception import InvalidUsageException
from models.user.user import User
from models.user.authenticated_user import AuthenticatedUser
| 39.71875
| 77
| 0.5893
|
bcb26248e648d4d9b19d0a9ff813f2b53c5baabf
| 2,615
|
py
|
Python
|
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
tests/rbac/api/role/propose_member_test.py
|
kthblmfld/sawtooth-next-directory
|
57291f1a7e6ce1dfc11a9c5e2930e8c5ebd31707
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
""" Propose Role Add Member Test """
# pylint: disable=invalid-name
import time
import requests
import pytest
from rbac.common.logs import get_logger
from tests.rbac import helper
from tests.rbac.api.assertions import assert_api_error
from tests.rbac.api.assertions import assert_api_success
from tests.rbac.api.assertions import assert_api_post_requires_auth
LOGGER = get_logger(__name__)
| 36.830986
| 79
| 0.702868
|
bcb2a48f3534ac05974fe4c223430ebf965fdf0b
| 881
|
py
|
Python
|
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
f2v.py
|
ClimberY/video_super_resolution_toolbox
|
e03fd34f60bf1104bd78ac0738a2648cee2eae46
|
[
"MIT"
] | null | null | null |
import cv2
import os
import numpy as np
from PIL import Image
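# Editor's sketch: the original frame2video definition is not included in this
# excerpt, so the version below is a plausible reconstruction rather than the
# author's code. It stitches the image files found in im_dir into a video at
# the requested fps using OpenCV.
def frame2video(im_dir, video_dir, fps):
    names = sorted(n for n in os.listdir(im_dir)
                   if n.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp')))
    if not names:
        raise ValueError('no image frames found in %s' % im_dir)
    first = cv2.imread(os.path.join(im_dir, names[0]))
    height, width = first.shape[:2]
    writer = cv2.VideoWriter(video_dir, cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (width, height))
    for name in names:
        frame = cv2.imread(os.path.join(im_dir, name))
        if frame is not None:
            writer.write(frame)
    writer.release()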
if __name__ == '__main__':
    im_dir = '/media/hy/Seagate Expansion Drive/Results/merge_dir/'  # directory containing the input frame images
    video_dir = '/media/hy/Seagate Expansion Drive/Results/sandy.mp4'  # path of the video file to write
    fps = 15  # frames per second of the output video
frame2video(im_dir, video_dir, fps)
| 33.884615
| 82
| 0.682179
|
bcb332026597a8538e3390f5b54de4be3aa00f42
| 11,103
|
py
|
Python
|
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | 1
|
2020-12-17T03:09:20.000Z
|
2020-12-17T03:09:20.000Z
|
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | null | null | null |
mflops/model_info.py
|
shuncyu/mflops
|
81fddf9407bcbdca02b9c57f6b03640b3fb94101
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 17:38:48 2020
@author: luke
"""
import sys
from functools import partial
import torch
import torch.nn as nn
import prettytable as pt
from .basic_hook import MODULES_MAPPING
def compute_average_compute_cost(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Returns current mean flops/mac consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
mac_sum = 0
params_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
mac_sum += module.__mac__
params_sum = get_model_parameters_number(self)
return flops_sum / batches_count, mac_sum / batches_count, params_sum
def start_compute(self, **kwargs):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Activates the computation of mean flops/mac consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
seen_types = set()
self.apply(partial(add_compute_hook_function, **kwargs))
def stop_compute(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_compute_hook_function)
def reset_compute(self):
"""
A method that will be available after add_computing_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_compute_variable_or_reset)
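# Editor's sketch (not part of the original module): the docstrings above refer
# to an add_computing_methods() helper that is not shown in this excerpt; under
# that assumption, a measurement pass would look roughly like:
#   net = add_computing_methods(MyModel())      # assumed entry point
#   net.start_compute()
#   net(torch.randn(1, 3, 224, 224))            # one forward pass
#   flops, mac, params = net.compute_average_compute_cost()
#   net.stop_compute()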
| 35.359873
| 107
| 0.631721
|
bcb33dd64b91d776f626dc908c114e472e82874d
| 2,301
|
py
|
Python
|
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | 1
|
2020-06-18T17:51:13.000Z
|
2020-06-18T17:51:13.000Z
|
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | null | null | null |
dosagelib/plugins/derideal.py
|
Church-/dosage
|
7ef18a2a2e9f77aa1e64a44906731506a00fac45
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
| 40.368421
| 94
| 0.614081
|
bcb3b617387a63312fcb662d0698c65cf437acee
| 3,340
|
py
|
Python
|
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | 1
|
2022-01-14T18:03:42.000Z
|
2022-01-14T18:03:42.000Z
|
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
LearnFunction/learnfunction01.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
"""
Functions are subprograms which are used to compute a value or perform a task.
Types of Functions:-
    Built-in Functions:
        print(), upper()
    User-defined functions
Advantages of Functions
1. Write once and use it as many times as you need. This provides code reusability
2. Functions facilitate ease of code maintenance
3. Dividing a large task into many small tasks helps you debug the code
4. You can remove or add new features to a function anytime.
"""
"""
We can define a function using the def keyword followed by the function name and parentheses. This is also called
creating a function, writing a function, or defining a function.
Syntax:-
def function_name():
    Local Variable
    block of statements
    return(variable or expression)
def function_name(param1, param2, param3, ...):
    Local Variable
    Block of statements
    return (variable or expression)
Note - Need to maintain proper indentation
"""
# creating a list
if __name__ == '__main__':
add()
print()
# another method
if __name__ == '__main__':
sum_list()
print()
if __name__ == '__main__':
multiplylist()
# Method 2: Using numpy.prod() - install the numpy package
import numpy
product_total()
print()
findingminmax(22, 26, 30)
print()
print("Another Method to find maximum")
x = int(input("Enter your first Number: "))
y = int(input("Enter your second Number: "))
z = int(input("Enter your third Number: "))
print("Maximum number is ::>", findingmaximum(x, y, z))
"""Python program to print the even numbers from a given list"""
find_even()
print()
"""
Python program to find prime numbers in a given list
The function should return True if the number is prime; else False
"""
number = int(input("Enter the number you would like to check whether it is prime or not: \n"))
if isPrime(number):
print(number, "is a Prime Number")
else:
print(number, "is not a Prime number")
"""
Another Method to find prime number
"""
| 18.870056
| 106
| 0.645808
|
bcb3deb24bc63c8049391df8c67ec2a72c8f437a
| 945
|
py
|
Python
|
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | 9
|
2017-04-23T23:54:56.000Z
|
2021-12-26T02:21:28.000Z
|
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | null | null | null |
trackr/cli.py
|
rpedigoni/trackr
|
ab5cf0cc661d003c6bd2ffa5516babf2e931de78
|
[
"MIT"
] | 3
|
2017-04-23T23:55:13.000Z
|
2017-05-03T01:20:23.000Z
|
# coding: utf-8
import click
if __name__ == "__main__":
main()
| 25.540541
| 83
| 0.582011
|
bcb3f4ba8d64955ba6c3c16193d7d7869a8725dd
| 3,043
|
py
|
Python
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 28
|
2020-11-24T08:02:58.000Z
|
2022-02-27T18:37:33.000Z
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 263
|
2020-11-10T14:35:10.000Z
|
2022-03-31T12:35:13.000Z
|
pitop/common/notifications.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 1
|
2022-01-31T22:48:35.000Z
|
2022-01-31T22:48:35.000Z
|
from enum import Enum, auto
from subprocess import CalledProcessError, run
from pitop.common.command_runner import run_command
from pitop.common.logger import PTLogger
def send_notification(
title: str,
text: str,
icon_name: str = "",
timeout: int = 0,
app_name: str = "",
notification_id: int = -1,
actions_manager: NotificationActionManager = None,
urgency_level: NotificationUrgencyLevel = None,
capture_notification_id: bool = True,
) -> str:
# Check that `notify-send-ng` is available, as it's not a hard dependency of the package
try:
run(["dpkg-query", "-l", "notify-send-ng"], capture_output=True, check=True)
except CalledProcessError:
raise Exception("notify-send-ng not installed")
cmd = "/usr/bin/notify-send "
cmd += "--print-id "
cmd += "--expire-time=" + str(timeout) + " "
if icon_name:
cmd += "--icon=" + icon_name + " "
if notification_id >= 0:
cmd += "--replace=" + str(notification_id) + " "
if actions_manager is not None:
for action in actions_manager.actions:
cmd += (
'--action="'
+ action.call_to_action_text
+ ":"
+ action.command_str
+ '" '
)
if actions_manager.default_action is not None:
cmd += (
"--default-action=" + actions_manager.default_action.command_str + " "
)
if actions_manager.close_action is not None:
cmd += "--close-action=" + actions_manager.close_action.command_str + " "
if app_name:
cmd += "--app-name=" + app_name + " "
if urgency_level is not None:
cmd += "--urgency=" + urgency_level.name + " "
cmd += ' "' + title + '" '
cmd += '"' + text + '"'
PTLogger.info("notify-send command: {}".format(cmd))
try:
resp_stdout = run_command(cmd, 2000, capture_output=capture_notification_id)
except Exception as e:
PTLogger.warning("Failed to show message: {}".format(e))
raise
return resp_stdout
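# Editor's sketch (not part of the original module): a minimal call with
# hypothetical title/text values, using only parameters defined above. Guarded
# so it only runs when this file is executed directly and notify-send-ng is
# installed.
if __name__ == "__main__":
    print(
        send_notification(
            title="Update available",
            text="A new SDK release is ready to install",
            icon_name="dialog-information",
            timeout=5000,
        )
    )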
| 29.833333
| 92
| 0.621755
|
bcb4c4328d404e8eec9df91c64d171e98d7a2415
| 5,778
|
py
|
Python
|
src/Gismo_XY To Location.py
|
AntonelloDN/gismo
|
3ffbabaf8405efd3572701c9e0b7497211dfc248
|
[
"Apache-2.0"
] | 57
|
2017-01-31T11:55:22.000Z
|
2022-03-26T16:00:40.000Z
|
src/Gismo_XY To Location.py
|
AntonelloDN/gismo
|
3ffbabaf8405efd3572701c9e0b7497211dfc248
|
[
"Apache-2.0"
] | 11
|
2017-02-22T16:45:11.000Z
|
2020-05-06T17:00:07.000Z
|
src/Gismo_XY To Location.py
|
AntonelloDN/gismo
|
3ffbabaf8405efd3572701c9e0b7497211dfc248
|
[
"Apache-2.0"
] | 19
|
2017-01-29T18:02:58.000Z
|
2021-08-25T10:56:57.000Z
|
# xy to location
#
# Gismo is a plugin for GIS environmental analysis (GPL) started by Djordje Spasic.
#
# This file is part of Gismo.
#
# Copyright (c) 2019, Djordje Spasic <djordjedspasic@gmail.com>
# Gismo is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# Gismo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.
#
# The GPL-3.0+ license <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate latitude and longitude coordinates of the _point in the Rhino scene.
For example: you created some building shapes with the Gismo "OSM Shapes" component, and now you would like to check the latitude and longitude coordinates of a particular part of the building.
-
Provided by Gismo 0.0.3
input:
_point: A point for which we would like to calculate its latitude and longitude coordinates
anchorLocation_: Represents latitude,longitude coordinates which correspond to anchorOrigin_ in Rhino scene.
-
If nothing added to this input, anchorLocation_ with both latitude and longitude set to "0" will be used as a default.
anchorOrigin_: A point in Rhino scene which corresponds to anchorLocation_.
-
If nothing added to this input, anchorOrigin will be set to: 0,0,0.
output:
readMe!: ...
location: Location (latitude, longitude coordinates) of the _point input.
"""
ghenv.Component.Name = "Gismo_XY To Location"
ghenv.Component.NickName = "XYtoLocation"
ghenv.Component.Message = "VER 0.0.3\nJAN_29_2019"
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Gismo"
ghenv.Component.SubCategory = "1 | Gismo"
#compatibleGismoVersion = VER 0.0.3\nJAN_29_2019
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import scriptcontext as sc
import Grasshopper
import Rhino
level = Grasshopper.Kernel.GH_RuntimeMessageLevel.Warning
if sc.sticky.has_key("gismoGismo_released"):
validVersionDate, printMsg = sc.sticky["gismo_check"].versionDate(ghenv.Component)
if validVersionDate:
gismo_preparation = sc.sticky["gismo_Preparation"]()
gismo_gis = sc.sticky["gismo_GIS"]()
location, validInputData, printMsg = main(_point, anchorLocation_, anchorOrigin_)
if not validInputData:
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
printMsg = "First please run the Gismo Gismo component."
print printMsg
ghenv.Component.AddRuntimeMessage(level, printMsg)
| 47.360656
| 246
| 0.72776
|
bcb5f8a3494a7c1dd73bdaa2595e97b680531db5
| 256
|
py
|
Python
|
Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py
|
ytognder/Azure-Sentinel
|
7345560f178e731d7ba5a5541fd3383bca285311
|
[
"MIT"
] | 266
|
2019-10-18T00:41:39.000Z
|
2022-03-18T05:44:01.000Z
|
Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py
|
ytognder/Azure-Sentinel
|
7345560f178e731d7ba5a5541fd3383bca285311
|
[
"MIT"
] | 113
|
2020-03-10T16:56:10.000Z
|
2022-03-28T21:54:26.000Z
|
Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py
|
ytognder/Azure-Sentinel
|
7345560f178e731d7ba5a5541fd3383bca285311
|
[
"MIT"
] | 93
|
2020-01-07T20:28:43.000Z
|
2022-03-23T04:09:39.000Z
|
# pylint: disable-msg=C0103
"""
SentinelAnomalyLookup: This package is developed for Azure Sentinel Anomaly lookup
"""
# __init__.py
from .anomaly_lookup_view_helper import AnomalyLookupViewHelper
from .anomaly_finder import AnomalyQueries, AnomalyFinder
| 28.444444
| 82
| 0.832031
|
bcb80d7d2c6e6e1e230619095dac5498b39b51c1
| 3,989
|
py
|
Python
|
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | null | null | null |
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | null | null | null |
items/coins.py
|
leerichoang/Legend-Of-Peach
|
bef98ba7afdbddc497c45f8adedfb60e81176bfb
|
[
"MIT"
] | 2
|
2019-10-15T23:22:16.000Z
|
2019-10-29T04:38:02.000Z
|
import pygame
from pygame.sprite import Sprite
| 34.387931
| 93
| 0.560792
|
bcb9144fdddbbf32bc78ac12f77acb144b544d93
| 142
|
py
|
Python
|
python/package/geo/test/__init__.py
|
fiomenankiti/playground
|
7c3139ffe5db4b18cf042b8027c9f670860371e0
|
[
"MIT"
] | null | null | null |
python/package/geo/test/__init__.py
|
fiomenankiti/playground
|
7c3139ffe5db4b18cf042b8027c9f670860371e0
|
[
"MIT"
] | null | null | null |
python/package/geo/test/__init__.py
|
fiomenankiti/playground
|
7c3139ffe5db4b18cf042b8027c9f670860371e0
|
[
"MIT"
] | null | null | null |
from geo.calc import Calc
from geo.calc import Distance
from geo.geosp import Wt
from geo.geosp import Gh
from geo.files.csv_file import check
| 28.4
| 36
| 0.823944
|
bcbacb893f1fc24efc7e31b69bae2dad2d6081f7
| 293
|
py
|
Python
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 7
|
2022-01-13T09:54:39.000Z
|
2022-02-08T23:34:47.000Z
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 62
|
2021-12-14T10:24:13.000Z
|
2022-02-09T00:00:12.000Z
|
tests/test_clean.py
|
tcapelle/nb_helpers
|
432b1f014f1b780b5a4d3722d44f237387db2330
|
[
"MIT"
] | 2
|
2022-01-20T10:41:51.000Z
|
2022-02-04T11:26:41.000Z
|
from pathlib import Path
from nb_helpers.clean import clean_all, clean_one
from tests import TEST_PATH
TEST_PATH
TEST_NB = Path("test_nb.py")
def test_clean_one():
"clean just one nb"
clean_one(TEST_NB)
def test_clean_all():
"clean all test nbs"
clean_all(path=TEST_PATH)
| 17.235294
| 49
| 0.744027
|
bcbc7df90a025f59202f5950277107bf1a366441
| 5,746
|
py
|
Python
|
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | null | null | null |
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | null | null | null |
apps/technical_analysis.py
|
KiloSat/FirstNivesh
|
0fe200e08bb9f7d89de91f59eb14448fa7b972b9
|
[
"MIT"
] | 2
|
2021-04-03T16:39:23.000Z
|
2021-08-15T08:09:21.000Z
|
import streamlit as st
| 34.202381
| 197
| 0.603376
|
bcbd7d0edc16eccd95b307b889e7f1a174b4d31c
| 4,642
|
py
|
Python
|
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/mediators/sentry_apps/test_creator.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from mock import patch
from django.db import IntegrityError
from sentry.mediators.sentry_apps import Creator
from sentry.models import (
AuditLogEntry,
AuditLogEntryEvent,
ApiApplication,
IntegrationFeature,
SentryApp,
SentryAppComponent,
User,
)
from sentry.testutils import TestCase
| 32.013793
| 98
| 0.640888
|
bcbdf778d11c4a8378ce0f01967703c04ca3e0b9
| 17,869
|
py
|
Python
|
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | 1
|
2019-10-21T08:25:46.000Z
|
2019-10-21T08:25:46.000Z
|
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
python/Model_Files/LFV_3/parameters.py
|
ZAKI1905/HEP-Phen
|
bc06fecb2aa6bf108b59f76794e63c29eb37a35a
|
[
"MIT"
] | null | null | null |
# This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:48:39
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymdo = Parameter(name = 'ymdo',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{ymdo}',
lhablock = 'YUKAWA',
lhacode = [ 1 ])
ymup = Parameter(name = 'ymup',
nature = 'external',
type = 'real',
value = 0.00255,
texname = '\\text{ymup}',
lhablock = 'YUKAWA',
lhacode = [ 2 ])
yms = Parameter(name = 'yms',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{yms}',
lhablock = 'YUKAWA',
lhacode = [ 3 ])
ymc = Parameter(name = 'ymc',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{ymc}',
lhablock = 'YUKAWA',
lhacode = [ 4 ])
ymb = Parameter(name = 'ymb',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{ymb}',
lhablock = 'YUKAWA',
lhacode = [ 5 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
yme = Parameter(name = 'yme',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{yme}',
lhablock = 'YUKAWA',
lhacode = [ 11 ])
ymm = Parameter(name = 'ymm',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{ymm}',
lhablock = 'YUKAWA',
lhacode = [ 13 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
kq = Parameter(name = 'kq',
nature = 'external',
type = 'real',
value = 0.001,
texname = 'k_q',
lhablock = 'FRBlock',
lhacode = [ 1 ])
lamf = Parameter(name = 'lamf',
nature = 'external',
type = 'real',
value = 0.1,
texname = 'l_{\\text{fi}}',
lhablock = 'FRBlock',
lhacode = [ 2 ])
yf1x1 = Parameter(name = 'yf1x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x1}',
lhablock = 'FRBlock6',
lhacode = [ 1, 1 ])
yf1x2 = Parameter(name = 'yf1x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x2}',
lhablock = 'FRBlock6',
lhacode = [ 1, 2 ])
yf1x3 = Parameter(name = 'yf1x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf1x3}',
lhablock = 'FRBlock6',
lhacode = [ 1, 3 ])
yf2x1 = Parameter(name = 'yf2x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x1}',
lhablock = 'FRBlock6',
lhacode = [ 2, 1 ])
yf2x2 = Parameter(name = 'yf2x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf2x2}',
lhablock = 'FRBlock6',
lhacode = [ 2, 2 ])
yf2x3 = Parameter(name = 'yf2x3',
nature = 'external',
type = 'complex',
value = 1.e-6,
texname = '\\text{yf2x3}',
lhablock = 'FRBlock6',
lhacode = [ 2, 3 ])
yf3x1 = Parameter(name = 'yf3x1',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x1}',
lhablock = 'FRBlock6',
lhacode = [ 3, 1 ])
yf3x2 = Parameter(name = 'yf3x2',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x2}',
lhablock = 'FRBlock6',
lhacode = [ 3, 2 ])
yf3x3 = Parameter(name = 'yf3x3',
nature = 'external',
type = 'complex',
value = 0,
texname = '\\text{yf3x3}',
lhablock = 'FRBlock6',
lhacode = [ 3, 3 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
Me = Parameter(name = 'Me',
nature = 'external',
type = 'real',
value = 0.000511,
texname = '\\text{Me}',
lhablock = 'MASS',
lhacode = [ 11 ])
MMU = Parameter(name = 'MMU',
nature = 'external',
type = 'real',
value = 0.10566,
texname = '\\text{MMU}',
lhablock = 'MASS',
lhacode = [ 13 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MU = Parameter(name = 'MU',
nature = 'external',
type = 'real',
value = 0.00255,
texname = 'M',
lhablock = 'MASS',
lhacode = [ 2 ])
MC = Parameter(name = 'MC',
nature = 'external',
type = 'real',
value = 1.27,
texname = '\\text{MC}',
lhablock = 'MASS',
lhacode = [ 4 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MD = Parameter(name = 'MD',
nature = 'external',
type = 'real',
value = 0.00504,
texname = '\\text{MD}',
lhablock = 'MASS',
lhacode = [ 1 ])
MS = Parameter(name = 'MS',
nature = 'external',
type = 'real',
value = 0.101,
texname = '\\text{MS}',
lhablock = 'MASS',
lhacode = [ 3 ])
MB = Parameter(name = 'MB',
nature = 'external',
type = 'real',
value = 4.7,
texname = '\\text{MB}',
lhablock = 'MASS',
lhacode = [ 5 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MP = Parameter(name = 'MP',
nature = 'external',
type = 'real',
value = 120,
texname = '\\text{MP}',
lhablock = 'MASS',
lhacode = [ 9000005 ])
Mfi = Parameter(name = 'Mfi',
nature = 'external',
type = 'real',
value = 10,
texname = '\\text{Mfi}',
lhablock = 'MASS',
lhacode = [ 9000006 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00589569,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
WH1 = Parameter(name = 'WH1',
nature = 'external',
type = 'real',
value = 0.00575308848,
texname = '\\text{WH1}',
lhablock = 'DECAY',
lhacode = [ 9000005 ])
Wfi = Parameter(name = 'Wfi',
nature = 'external',
type = 'real',
value = 6.03044e-9,
texname = '\\text{Wfi}',
lhablock = 'DECAY',
lhacode = [ 9000006 ])
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM1x3 = Parameter(name = 'CKM1x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM1x3}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
CKM2x3 = Parameter(name = 'CKM2x3',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM2x3}')
CKM3x1 = Parameter(name = 'CKM3x1',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x1}')
CKM3x2 = Parameter(name = 'CKM3x2',
nature = 'internal',
type = 'complex',
value = '0',
texname = '\\text{CKM3x2}')
CKM3x3 = Parameter(name = 'CKM3x3',
nature = 'internal',
type = 'complex',
value = '1',
texname = '\\text{CKM3x3}')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
mfi = Parameter(name = 'mfi',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(100 - (kq*vev**2)/2.)',
texname = 'M_{\\text{fi}}')
AH = Parameter(name = 'AH',
nature = 'internal',
type = 'real',
value = '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)',
texname = 'A_H')
GH = Parameter(name = 'GH',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)',
texname = 'G_H')
Gphi = Parameter(name = 'Gphi',
nature = 'internal',
type = 'real',
value = '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)',
texname = 'G_h')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yb = Parameter(name = 'yb',
nature = 'internal',
type = 'real',
value = '(ymb*cmath.sqrt(2))/vev',
texname = '\\text{yb}')
yc = Parameter(name = 'yc',
nature = 'internal',
type = 'real',
value = '(ymc*cmath.sqrt(2))/vev',
texname = '\\text{yc}')
ydo = Parameter(name = 'ydo',
nature = 'internal',
type = 'real',
value = '(ymdo*cmath.sqrt(2))/vev',
texname = '\\text{ydo}')
ye = Parameter(name = 'ye',
nature = 'internal',
type = 'real',
value = '(yme*cmath.sqrt(2))/vev',
texname = '\\text{ye}')
ym = Parameter(name = 'ym',
nature = 'internal',
type = 'real',
value = '(ymm*cmath.sqrt(2))/vev',
texname = '\\text{ym}')
ys = Parameter(name = 'ys',
nature = 'internal',
type = 'real',
value = '(yms*cmath.sqrt(2))/vev',
texname = '\\text{ys}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
yup = Parameter(name = 'yup',
nature = 'internal',
type = 'real',
value = '(ymup*cmath.sqrt(2))/vev',
texname = '\\text{yup}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
| 31.459507 | 288 | 0.38245 |
bcbef8c15ce4fa1656c062f45eb901b87f935220 | 1,828 | py | Python | musicLrc.py | xiangxing98/Rhythm-Enlightment | d6302321e858d07480b18e94c59de87f91c39202 | ["MIT"] | null | null | null | musicLrc.py | xiangxing98/Rhythm-Enlightment | d6302321e858d07480b18e94c59de87f91c39202 | ["MIT"] | null | null | null | musicLrc.py | xiangxing98/Rhythm-Enlightment | d6302321e858d07480b18e94c59de87f91c39202 | ["MIT"] | null | null | null |
import time
musicLrc = """
[00:03.50]
[00:19.10]
[00:20.60]
[00:26.60]
[04:40.75][02:39.90][00:36.25]
[04:49.00]
[02:47.44][00:43.69]
[02:54.83][00:51.24]
[03:02.32][00:58.75]
[03:08.15][01:04.30]
[03:09.35][01:05.50]
[03:16.90][01:13.13]
[03:24.42][01:20.92]
[03:31.85][01:28.44]
[03:38.67][01:35.05]
[04:09.96][03:39.87][01:36.25]
[04:16.37][03:46.38][01:42.47]
[04:24.82][03:54.83][01:51.18]
[04:31.38][04:01.40][01:57.43]
[04:39.55][04:09.00][02:07.85]
"""
lrcDict = {}
musicLrcList = musicLrc.splitlines()
#print(musicLrcList)
for lrcLine in musicLrcList:
    #[04:40.75][02:39.90][00:36.25]
    #[04:40.75 [02:39.90 [00:36.25
    #[00:20.60]
    lrcLineList = lrcLine.split("]")
    for index in range(len(lrcLineList) - 1):
        timeStr = lrcLineList[index][1:]
        #print(timeStr)
        #00:03.50
        timeList = timeStr.split(":")
        timelrc = float(timeList[0]) * 60 + float(timeList[1])
        #print(timelrc)
        lrcDict[timelrc] = lrcLineList[-1]
print(lrcDict)
allTimeList = []
for t in lrcDict:
allTimeList.append(t)
allTimeList.sort()
#print(allTimeList)
'''
while 1:
getTime = float(input(""))
for n in range(len(allTimeList)):
tempTime = allTimeList[n]
if getTime < tempTime:
break
if n == 0:
print("")
else:
print(lrcDict[allTimeList[n - 1]])
'''
getTime = 0
while 1:
    for n in range(len(allTimeList)):
        tempTime = allTimeList[n]
        if getTime < tempTime:
            break
    if n == 0:
        lrc = None  # before the first timestamp there is no lyric to show yet
    else:
        lrc = lrcDict.get(allTimeList[n - 1])
    if lrc is not None:
        print(lrc)
    time.sleep(1)
    getTime += 1
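# A small alternative lookup sketch (not part of musicLrc.py as written): the linear scan
# in the loop above can be done with bisect on the sorted timestamp list. Assumes the
# allTimeList and lrcDict structures built earlier in this file.
import bisect

def lrc_at(seconds, times, lyrics):
    """Return the lyric active at `seconds`, or None before the first timestamp."""
    i = bisect.bisect_right(times, seconds)
    if i == 0:
        return None
    return lyrics.get(times[i - 1])

# Example: print(lrc_at(65.0, allTimeList, lrcDict))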
| 22.292683 | 62 | 0.605033 |
bcbfd5dadc46bd5eab08a4b4f4c45a601d0075b6 | 16,826 | py | Python | octoprint_octopod/__init__.py | mnebelung/OctoPrint-OctoPod | 3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a | ["Apache-2.0"] | 52 | 2019-05-28T03:41:20.000Z | 2022-01-29T00:32:57.000Z | octoprint_octopod/__init__.py | mnebelung/OctoPrint-OctoPod | 3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a | ["Apache-2.0"] | 111 | 2019-05-28T14:50:01.000Z | 2022-03-21T22:12:05.000Z | octoprint_octopod/__init__.py | mnebelung/OctoPrint-OctoPod | 3af1a2e1ad7c6f73ed05d9c1ff029fb645f3115a | ["Apache-2.0"] | 11 | 2019-07-20T15:36:21.000Z | 2021-12-30T16:53:56.000Z |
# coding=utf-8
from __future__ import absolute_import
import datetime
import logging
import sys
import flask
import octoprint.plugin
from octoprint.events import eventManager, Events
from octoprint.server import user_permission
from octoprint.util import RepeatedTimer
from .bed_notifications import BedNotifications
from .custom_notifications import CustomNotifications
from .ifttt_notifications import IFTTTAlerts
from .job_notifications import JobNotifications
from .layer_notifications import LayerNotifications
from .libs.sbc import SBCFactory, SBC, RPi
from .mmu import MMUAssistance
from .palette2 import Palette2Notifications
from .paused_for_user import PausedForUser
from .soc_temp_notifications import SocTempNotifications
from .thermal_protection_notifications import ThermalProtectionNotifications
from .tools_notifications import ToolsNotifications
# Plugin that stores APNS tokens reported from iOS devices to know which iOS devices to alert
# when print is done or other relevant events
debug_soc_temp = False
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "OctoPod Plugin"
__plugin_pythoncompat__ = ">=2.7,<4"
| 39.683962
| 141
| 0.757934
|
bcc019e1e7277f852d55bb225dc74bb333185aa3
| 660
|
py
|
Python
|
tests/test_buffers.py
|
romanchyla/CSPatterns
|
d9627297aabce1ab648f4a4cdbe9882527add138
|
[
"MIT"
] | null | null | null |
tests/test_buffers.py
|
romanchyla/CSPatterns
|
d9627297aabce1ab648f4a4cdbe9882527add138
|
[
"MIT"
] | null | null | null |
tests/test_buffers.py
|
romanchyla/CSPatterns
|
d9627297aabce1ab648f4a4cdbe9882527add138
|
[
"MIT"
] | null | null | null |
from cspatterns.datastructures import buffer
| 25.384615 | 66 | 0.551515 |
bcc146cfb565fd0140a85d638082d12ef6686650 | 1,214 | py | Python | plugins/redacted/migrations/0001_initial.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | ["Apache-2.0"] | 9 | 2019-03-26T14:50:00.000Z | 2020-11-10T16:44:08.000Z | plugins/redacted/migrations/0001_initial.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | ["Apache-2.0"] | 22 | 2019-03-02T23:16:13.000Z | 2022-02-27T10:36:36.000Z | plugins/redacted/migrations/0001_initial.py | 2600box/harvest | 57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd | ["Apache-2.0"] | 5 | 2019-04-24T00:51:30.000Z | 2020-11-06T18:31:49.000Z |
# Generated by Django 2.1.7 on 2019-02-17 14:50
from django.db import migrations, models
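# Generic shape of a Django initial migration (illustrative only; the concrete operations
# of this 0001_initial.py are not shown in this excerpt).
class Migration(migrations.Migration):

    initial = True

    dependencies = []

    operations = [
        # migrations.CreateModel(...) entries would go here
    ]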
| 31.947368 | 114 | 0.538715 |
bcc1d17c27a82c381571bf91c586033e374ec7d9 | 1,741 | py | Python | code_examples/plotting_data/hexbin.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | ["CC0-1.0"] | 99 | 2015-01-14T21:20:48.000Z | 2022-01-25T10:38:37.000Z | code_examples/plotting_data/hexbin.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | ["CC0-1.0"] | 1 | 2017-08-31T07:02:20.000Z | 2017-08-31T07:02:20.000Z | code_examples/plotting_data/hexbin.py | ezcitron/BasemapTutorial | 0db9248b430d39518bdfdb25d713145be4eb966a | ["CC0-1.0"] | 68 | 2015-01-14T21:21:01.000Z | 2022-01-29T14:53:38.000Z |
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import array
from numpy import max
map = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,
resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)
map.readshapefile('../sample_files/lightnings', 'lightnings')
x = []
y = []
c = []
for info, lightning in zip(map.lightnings_info, map.lightnings):
    x.append(lightning[0])
    y.append(lightning[1])
    # store the stroke amplitude as a positive magnitude
    if float(info['amplitude']) < 0:
        c.append(-1 * float(info['amplitude']))
    else:
        c.append(float(info['amplitude']))
plt.figure(0)  # default hexbin: raw point counts per cell
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y))
map.colorbar(location='bottom')
plt.figure(1)  # logarithmic count bins via bins='log'
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', bins='log')
map.colorbar(location='bottom', format='%.1f', label='log(# lightnings)')
plt.figure(2)  # logarithmic colour scale via colors.LogNorm, with explicit colorbar ticks
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
cb = map.colorbar(location='bottom', format='%d', label='# lightnings')
cb.set_ticks([1, 5, 10, 15, 20, 25, 30])
cb.set_ticklabels([1, 5, 10, 15, 20, 25, 30])
plt.figure(3)  # colour cells by lightning amplitude using C values and reduce_C_function
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), C = array(c), reduce_C_function = max, gridsize=20, mincnt=1, cmap='YlOrBr', linewidths=0.5, edgecolors='k')
map.colorbar(location='bottom', label='Mean amplitude (kA)')
plt.show()
| 23.527027 | 139 | 0.687536 |
bcc231c6648af0cd64b843faf63ad79a79b6853b | 895 | py | Python | src/plugins/sjsy.py | 2443391447/nonebot2 | c9fa0c44c130b8a1425b2b71105fa909232c37b0 | ["MIT"] | 1 | 2021-08-24T03:18:23.000Z | 2021-08-24T03:18:23.000Z | src/plugins/sjsy.py | 2443391447/nonebot2 | c9fa0c44c130b8a1425b2b71105fa909232c37b0 | ["MIT"] | null | null | null | src/plugins/sjsy.py | 2443391447/nonebot2 | c9fa0c44c130b8a1425b2b71105fa909232c37b0 | ["MIT"] | 1 | 2021-09-01T07:50:03.000Z | 2021-09-01T07:50:03.000Z |
from nonebot import on_keyword, on_command
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Message, Bot, Event #
from nonebot.adapters.cqhttp.message import MessageSegment
import requests
from nonebot.permission import *
from nonebot.rule import to_me
from aiocqhttp.exceptions import Error as CQHttpError
sheying = on_keyword({''})
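# A minimal handler sketch for the matcher above (the original file is truncated here);
# the reply text is a placeholder and the empty keyword set is left exactly as found.
@sheying.handle()
async def _(bot: Bot, event: Event, state: T_State):
    # placeholder response; the real plugin presumably builds a Message/MessageSegment reply
    await sheying.finish(Message("..."))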
| 28.870968 | 68 | 0.689385 |
bcc3dcd13da8bfacff9f3f45c797b5dd285e8744 | 4,031 | py | Python | src/extractors/emojiextractor.py | chmduquesne/rofimoji | 9abdc0a8db1b166bb30da994c4aadb7baf91df2d | ["MIT"] | 574 | 2017-10-29T18:04:31.000Z | 2022-03-30T23:34:34.000Z | src/extractors/emojiextractor.py | chmduquesne/rofimoji | 9abdc0a8db1b166bb30da994c4aadb7baf91df2d | ["MIT"] | 104 | 2017-11-02T08:24:29.000Z | 2022-03-29T02:39:58.000Z | src/extractors/emojiextractor.py | chmduquesne/rofimoji | 9abdc0a8db1b166bb30da994c4aadb7baf91df2d | ["MIT"] | 53 | 2017-11-01T22:38:02.000Z | 2022-02-14T09:20:36.000Z |
import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
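# A small illustration (not the extractor's actual logic, which is truncated here) of the
# Emoji namedtuple and the parsing tools imported above; the markup in the example is a
# made-up placeholder.
def parse_emoji_list(markup: str) -> List[Emoji]:
    soup = BeautifulSoup(markup, 'lxml')
    return [Emoji(char=li['data-char'], name=html.unescape(li.get_text(strip=True)))
            for li in soup.find_all('li')]

# Example: parse_emoji_list('<ul><li data-char="A">Latin capital letter a</li></ul>')
# -> [Emoji(char='A', name='Latin capital letter a')]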
| 37.324074 | 159 | 0.607045 |