hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1d196f613c2a1139ba07be80fda44073fa5c141 | 1,602 | py | Python | clase_caballo.py | DorianAlbertoIbanezNanguelu/concurrencia-caballos | 91b7f4818505183bd38923bc5b744fc04e83c2f3 | [
"MIT"
] | null | null | null | clase_caballo.py | DorianAlbertoIbanezNanguelu/concurrencia-caballos | 91b7f4818505183bd38923bc5b744fc04e83c2f3 | [
"MIT"
] | null | null | null | clase_caballo.py | DorianAlbertoIbanezNanguelu/concurrencia-caballos | 91b7f4818505183bd38923bc5b744fc04e83c2f3 | [
"MIT"
] | null | null | null | import threading
import time
import random
from multiprocessing.pool import ThreadPool
from PyQt5 import QtCore, QtGui, QtWidgets
# Module-level shared state for the horse-race threads (the rest of the
# script is truncated in this view, so usage cannot be confirmed here).
# NOTE(review): presumably a finish flag set by the first thread to win - confirm against usage.
bandera = False
# NOTE(review): presumably holds the winning horse's name - confirm against usage.
val1 = ""
# Winner announcement template (Spanish: "Winning horse is: {}").
msg = 'Caballo ganador es: {}'
# Caballo (horse) class follows in the original file.
| 22.25 | 101 | 0.558052 |
e1d33fe58f921e97b404a9c643f4793d56cc9818 | 10,353 | py | Python | vwo/api/track.py | wingify/vwo-python-sdk | 8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5 | [
"Apache-2.0"
] | 14 | 2019-08-06T06:57:46.000Z | 2022-01-05T13:27:50.000Z | vwo/api/track.py | wingify/vwo-python-sdk | 8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5 | [
"Apache-2.0"
] | 3 | 2019-08-19T10:29:17.000Z | 2021-09-16T15:59:38.000Z | vwo/api/track.py | wingify/vwo-python-sdk | 8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5 | [
"Apache-2.0"
] | 10 | 2019-08-08T12:38:50.000Z | 2021-09-14T11:35:00.000Z | # Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..helpers import impression_util
from ..constants import constants
from ..constants.constants import API_METHODS
from ..helpers import campaign_util, validate_util
from ..enums.log_message_enum import LogMessageEnum
from ..enums.file_name_enum import FileNameEnum
from ..enums.log_level_enum import LogLevelEnum
FILE = FileNameEnum.Api.Track
def _track(vwo_instance, campaign_specifier, user_id, goal_identifier, **kwargs):
    """
    This API method: Marks the conversion of the campaign(s) for a particular goal
    1. validates the arguments being passed
    2. retrieves the campaigns having the same global goal
    3. calls track_campaign_goal for all the goals
    Args:
        campaign_specifier (None, list, string): Campaign key(s), it can be None in case
        of all campaigns, list in case of given campaigns and string in case of particular
        campaign to be tracked.
        user_id (string): ID assigned to a user
        goal_identifier (string): campaign(s)'s unique goal identifier
    Keyword Args:
        revenue_value (int|float|string): Provide it through **kwargs.
        It is the revenue generated on triggering the goal
        custom_variables (dict): Custom variables required for segmentation
        variation_targeting_variables (dict): Whitelisting variables to target users
    Returns:
        dict|None: None if called for single campaign and no goal tracked or
        called for all campaigns and no goal tracked.
        Dict otherwise of campaign_key with True/False showing whether the goal
        has been tracked for the campaign or not
    """
    vwo_instance.logger.set_api(API_METHODS.TRACK)
    # Retrieve revenue value and custom_variables
    revenue_value = kwargs.get("revenue_value")
    custom_variables = kwargs.get("custom_variables")
    variation_targeting_variables = kwargs.get("variation_targeting_variables")
    valid_params = True
    # Check for valid args
    if (
        not validate_util.is_valid_string(user_id)
        or not validate_util.is_valid_string(goal_identifier)
        or (custom_variables is not None and not validate_util.is_valid_dict(custom_variables))
        or (
            variation_targeting_variables is not None and not validate_util.is_valid_dict(variation_targeting_variables)
        )
        or (revenue_value is not None and not validate_util.is_valid_basic_data_type(revenue_value))
    ):
        valid_params = False
    # Fall back to the instance-wide default goal type when the caller
    # did not pass one explicitly.
    goal_type_to_track = kwargs.get("goal_type_to_track")
    if goal_type_to_track is None:
        goal_type_to_track = vwo_instance.goal_type_to_track
    elif not validate_util.is_valid_goal_type(goal_type_to_track):
        valid_params = False
    if not valid_params:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE)
        )
        return None
    campaigns_without_goal = []
    no_campaign_found = False
    # Resolve campaign_specifier into a list of (campaign, goal) pairs:
    # a single key (str), an explicit list of keys, or None for all campaigns.
    if type(campaign_specifier) is str:
        campaign = campaign_util.get_campaign(vwo_instance.settings_file, campaign_specifier)
        goal = campaign_util.get_campaign_goal(campaign, goal_identifier)
        if not goal:
            no_campaign_found = True
        else:
            campaign_goal_list = [(campaign, goal)]
    elif type(campaign_specifier) is list:
        campaigns = campaign_util.get_campaigns(vwo_instance.settings_file, campaign_specifier).values()
        (campaign_goal_list, campaigns_without_goal) = campaign_util.get_campaigns_with_goal_id(
            campaigns, goal_identifier
        )
        # Log (but do not abort for) every requested campaign missing the goal.
        for campaign in campaigns_without_goal:
            vwo_instance.logger.log(
                LogLevelEnum.ERROR,
                LogMessageEnum.ERROR_MESSAGES.TRACK_API_GOAL_NOT_FOUND.format(
                    file=FILE, goal_identifier=goal_identifier, user_id=user_id, campaign_key=campaign.get("key")
                ),
            )
    elif campaign_specifier is None:
        campaigns = vwo_instance.settings_file.get("campaigns")
        campaign_goal_list = campaign_util.get_campaigns_with_goal_id(campaigns, goal_identifier)[0]
        if not campaign_goal_list:
            no_campaign_found = True
    else:
        vwo_instance.logger.log(
            # Specific log for campaign_specifier type
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE),
        )
        return None
    if no_campaign_found:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.NO_CAMPAIGN_FOUND.format(file=FILE, goal_identifier=goal_identifier),
        )
        return None
    ret_value = {}
    # Accumulator filled by track_campaign_goal when the event architecture
    # path defers dispatch (see the combined dispatch below).
    campaign_goal_revenue_prop_list = []
    for campaign, goal in campaign_goal_list:
        result = track_campaign_goal(
            vwo_instance,
            campaign,
            user_id,
            goal,
            revenue_value,
            custom_variables,
            variation_targeting_variables,
            goal_type_to_track,
            campaign_goal_revenue_prop_list,
        )
        ret_value[campaign.get("key")] = result
    # Campaigns that lack the goal are reported as not tracked.
    for campaign in campaigns_without_goal:
        ret_value[campaign.get("key")] = False
    # Event architecture enabled and batching disabled: send one combined
    # event covering every goal that was deferred above.
    if len(campaign_goal_revenue_prop_list) != 0 and (
        not vwo_instance.is_event_batching_enabled and vwo_instance.is_event_arch_enabled is True
    ):
        params = impression_util.get_events_params(vwo_instance.settings_file, goal_identifier)
        impression = impression_util.create_track_goal_events_impression(
            vwo_instance.settings_file, user_id, goal_identifier, campaign_goal_revenue_prop_list, revenue=revenue_value
        )
        vwo_instance.event_dispatcher.dispatch_events(params=params, impression=impression)
    return ret_value
def track_campaign_goal(
    vwo_instance,
    campaign,
    user_id,
    goal,
    revenue_value,
    custom_variables,
    variation_targeting_variables,
    goal_type_to_track,
    campaign_goal_revenue_prop_list,
):
    """
    It marks the conversion of given goal for the given campaign
    1. Checks if user is eligible to get bucketed into the campaign,
    2. Gets the assigned deterministic variation to the
    user(based on userId), if user becomes part of campaign
    3. Sends an impression call to VWO server to track goal data if event arch
    is not enabled
    Args:
        campaign (dict): Campaign object
        user_id (string): ID assigned to a user
        goal (dict): Goal object
        revenue_value (int|float|string): It is the revenue generated on triggering the goal
        custom_variables (dict): Custom variables required for segmentation
        variation_targeting_variables (dict): Whitelisting variables to target users
        goal_type_to_track (vwo.GOAL_TYPES): Goal type that should be tracked in case of mixed
        global goal identifier
        campaign_goal_revenue_prop_list (list): list of campaign_id, goal_id & goal's revenueProp
        (if revenue goal else None) to build event arch impression
    Returns:
        bool: True if goal successfully tracked else False
    """
    # Goals cannot be tracked on feature-rollout campaigns.
    campaign_type = campaign.get("type")
    if campaign_type == constants.CAMPAIGN_TYPES.FEATURE_ROLLOUT:
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.INVALID_API.format(
                file=FILE, user_id=user_id, campaign_key=campaign.get("key"), campaign_type=campaign_type
            ),
        )
        return False
    # Skip goals whose type does not match the requested goal type filter.
    goal_type = goal.get("type")
    if (goal_type_to_track == constants.GOAL_TYPES.CUSTOM and goal_type == constants.GOAL_TYPES.REVENUE) or (
        goal_type_to_track == constants.GOAL_TYPES.REVENUE and goal_type == constants.GOAL_TYPES.CUSTOM
    ):
        # We can log goal type didn't match in debug mode
        return False
    # A revenue goal without a revenue value cannot be tracked.
    if goal_type == constants.GOAL_TYPES.REVENUE and not validate_util.is_valid_value(revenue_value):
        vwo_instance.logger.log(
            LogLevelEnum.ERROR,
            LogMessageEnum.ERROR_MESSAGES.TRACK_API_REVENUE_NOT_PASSED_FOR_REVENUE_GOAL.format(
                file=FILE, user_id=user_id, goal_identifier=goal.get("identifier"), campaign_key=campaign.get("key")
            ),
        )
        return False
    # Revenue only applies to revenue goals; drop it for custom goals.
    if goal_type == constants.GOAL_TYPES.CUSTOM:
        revenue_value = None
    variation, _ = vwo_instance.variation_decider.get_variation(
        user_id,
        campaign,
        custom_variables=custom_variables,
        variation_targeting_variables=variation_targeting_variables,
        goal_data={"identifier": goal.get("identifier")},
        api_method=constants.API_METHODS.TRACK,
    )
    if variation:
        # Legacy or batched path: dispatch one impression per goal right away.
        if not vwo_instance.is_event_arch_enabled or vwo_instance.is_event_batching_enabled is True:
            impression = impression_util.create_impression(
                vwo_instance.settings_file,
                campaign.get("id"),
                variation.get("id"),
                user_id,
                goal.get("id"),
                revenue_value,
            )
            vwo_instance.event_dispatcher.dispatch(impression)
            vwo_instance.logger.log(
                LogLevelEnum.INFO,
                LogMessageEnum.INFO_MESSAGES.MAIN_KEYS_FOR_IMPRESSION.format(
                    file=FILE,
                    campaign_id=impression.get("experiment_id"),
                    account_id=impression.get("account_id"),
                    variation_id=impression.get("combination"),
                ),
            )
        else:
            # Event-arch path: defer; the caller builds one combined payload
            # from these (campaign_id, goal_id, revenueProp) tuples.
            campaign_goal_revenue_prop_list.append((campaign.get("id"), goal.get("id"), goal.get("revenueProp")))
        return True
    return False
| 40.127907 | 120 | 0.691297 |
e1d379ffe45c72193de30757e4bad02874d4385a | 2,687 | py | Python | iMessSpam.py | fabiopigi/iMessageSpam | 4d1984f5286f5cf0229d414470a4dc60e5ba12d2 | [
"MIT"
] | null | null | null | iMessSpam.py | fabiopigi/iMessageSpam | 4d1984f5286f5cf0229d414470a4dc60e5ba12d2 | [
"MIT"
] | null | null | null | iMessSpam.py | fabiopigi/iMessageSpam | 4d1984f5286f5cf0229d414470a4dc60e5ba12d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#import some dope
import sys
import os
import re
import time
from random import randrange
from itertools import repeat
numbers = {
'adam' :"+41111111111",
'bob' :"+41222222222",
'chris' :"+41333333333",
'dave' :"+41444444444",
}
print "Gespeicherte Empfnger: "
for name in numbers:
print "%10s - %s"%(name,numbers[name])
number = ""
while number == "":
numberID = raw_input("\nEmpfnger eingeben: ")
if numberID in numbers:
number = numbers[numberID]
pause = int(raw_input("\nIntervall in Sekunden: "))
print """
Verfgbare Optionen:
[1] Zeitansagen im Format 'Es ist 17:34:22'
[2] Zufllige 'Chuck Norris' Jokes
[3] Satz fr Satz aus einem Buch (Twilight)
[4] Fifty Shades of HEX
[5] Frhliches Flaggen raten
"""
option = int(raw_input("Option auswhlen: "))
if option == 1:
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = 0
elif option == 2:
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = 0
replaceName = raw_input("\n'Chuck Norris' durch Namen ersetzen: ")
if replaceName == "":
replaceName = "Chuck Norris"
elif option == 3:
p = open('content/twilight.txt')
book = p.read()
pat = re.compile(r'([A-Z][^\.!?]*[\.!?])', re.M)
sentences = pat.findall(book)
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = int(raw_input("\nBei n. Satz anfangen: "))-1
anzahl = anzahl + (start)
elif option == 4:
anzahl = 50
start = 0
elif option == 5:
anzahl = 50
start = 0
import Countries
else:
anzahl = 0
start = 0
print "\n\nSenden beginnt...\n\n"
#tunay bei 207
for i in range(start,anzahl,1):
if option == 1:
cmdCode = "date +'%H:%M:%S'"
message = "Es ist jetzt " + os.popen(cmdCode).read()
elif option == 2:
curlCode = "curl 'http://api.icndb.com/jokes/random' -s | sed -e 's/.*joke\\\": \\\"//' -e 's/\\\", \\\".*//' -e 's/Chuck Norris/" + replaceName + "/g' -e 's/"/\"/g'"
message = os.popen(curlCode).read()
elif option == 3:
message = sentences[i]
elif option == 4:
message = "#%s" % "".join(list(repeat(hex(randrange(16, 255))[2:],3))).upper()
elif option == 5:
flags = os.listdir("content/flags")
country = Countries.iso[flags[randrange(1,len(flags))][:2]]
message = "Dies ist die Flagge von '%s'."%(country["Name"])
filePath = os.path.abspath("content/flags/%s.png"%country["ISO"])
osaCode = "osascript sendImage.scpt \"%s\" \"%s\""%(number,filePath)
osaReturn = os.popen(osaCode).read()
print message
message = message.replace('"', r'\"')
osaCode = "osascript sendText.scpt \"%s\" \"%s\""%(number,message)
print "%3d > %s"%((i+1),message)
osaReturn = os.popen(osaCode).read()
time.sleep(pause)
| 23.163793 | 175 | 0.628582 |
e1d4132df41823b278230500d5a9366ca4662b08 | 2,582 | py | Python | mesh_to_tet.py | NVlabs/deformable_object_grasping | c39147c6ce525e90512f54c3c5386903a0e7f401 | [
"MIT"
] | 30 | 2020-12-18T22:05:10.000Z | 2021-09-27T23:45:18.000Z | mesh_to_tet.py | NVlabs/DefGraspSim | e6c1a9760ded188e6986cc49d0298a2c8803830d | [
"MIT"
] | 2 | 2021-12-09T18:05:22.000Z | 2022-03-20T08:26:04.000Z | mesh_to_tet.py | NVlabs/deformable_object_grasping | c39147c6ce525e90512f54c3c5386903a0e7f401 | [
"MIT"
] | 7 | 2021-01-16T06:23:02.000Z | 2021-09-02T16:32:19.000Z | # Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Convert a .mesh file (fTetWild format) to .tet (IsaacGym format)."""
def convert_mesh_to_tet(mesh_file_path, tet_output_path):
    """Convert an fTetWild ``.mesh`` (MEDIT ASCII) file to a ``.tet`` file.

    Fix: both file handles were previously opened and never closed; they are
    now managed with context managers.

    Args:
        mesh_file_path: Path of the input ``.mesh`` file.
        tet_output_path: Path where the ``.tet`` file is written.
    """
    # Read the whole mesh, stripping trailing newlines so the section
    # keywords can be located with list.index().
    with open(mesh_file_path, "r") as mesh_file:
        mesh_lines = [line.strip('\n') for line in mesh_file]

    # Each section is: keyword line, count line, then one line per element.
    vertices_start = mesh_lines.index('Vertices')
    num_vertices = mesh_lines[vertices_start + 1]
    vertices = mesh_lines[vertices_start + 2:vertices_start + 2
                          + int(num_vertices)]
    tetrahedra_start = mesh_lines.index('Tetrahedra')
    num_tetrahedra = mesh_lines[tetrahedra_start + 1]
    tetrahedra = mesh_lines[tetrahedra_start + 2:tetrahedra_start + 2
                            + int(num_tetrahedra)]
    print("# Vertices, # Tetrahedra:", num_vertices, num_tetrahedra)

    # Write to tet output
    with open(tet_output_path, "w") as tet_output:
        tet_output.write("# Tetrahedral mesh generated using\n\n")
        tet_output.write("# " + num_vertices + " vertices\n")
        for v in vertices:
            tet_output.write("v " + v + "\n")
        tet_output.write("\n")
        tet_output.write("# " + num_tetrahedra + " tetrahedra\n")
        for t in tetrahedra:
            # Drop the trailing region marker (assumed " 0", as written by
            # fTetWild) and convert 1-based MEDIT indices to 0-based.
            line = t.split(' 0')[0]
            line = line.split(" ")
            line = [str(int(k) - 1) for k in line]
            l_text = ' '.join(line)
            tet_output.write("t " + l_text + "\n")
if __name__ == "__main__":
    # Placeholder paths - replace with the real .mesh input and .tet output
    # locations before running this module as a script.
    convert_mesh_to_tet(
        "path/to/mesh",
        "path/to/tet")
| 40.984127 | 78 | 0.690937 |
e1d45c9d42dd76322a265a56bb903e40fa748ffe | 3,601 | py | Python | tests/policies_tests/test_deterministic_policy.py | xinyuewang1/chainerrl | 49425d09cb0749968f4e364e281670e752a46791 | [
"MIT"
] | 2 | 2020-05-20T06:15:20.000Z | 2020-05-20T06:15:27.000Z | tests/policies_tests/test_deterministic_policy.py | WhenTheyCry96/chainerrl | 0f32aae2855dbb6288ae628be6271739ced6c42c | [
"MIT"
] | null | null | null | tests/policies_tests/test_deterministic_policy.py | WhenTheyCry96/chainerrl | 0f32aae2855dbb6288ae628be6271739ced6c42c | [
"MIT"
] | 1 | 2019-08-08T19:13:53.000Z | 2019-08-08T19:13:53.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import unittest
import chainer
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
import numpy as np
import chainerrl
| 33.971698 | 75 | 0.608442 |
e1d5174f8289f91757ffb47b8ef0788990d1f6b1 | 33,946 | py | Python | freshmaker/handlers/botas/botas_shipped_advisory.py | mulaievaRH/freshmaker | 809b435d7cab1907eb74ecd898693835a92db9d8 | [
"MIT"
] | 5 | 2020-06-17T11:29:16.000Z | 2022-03-24T07:20:16.000Z | freshmaker/handlers/botas/botas_shipped_advisory.py | mulaievaRH/freshmaker | 809b435d7cab1907eb74ecd898693835a92db9d8 | [
"MIT"
] | 96 | 2020-06-29T15:01:23.000Z | 2022-03-30T08:07:06.000Z | freshmaker/handlers/botas/botas_shipped_advisory.py | mulaievaRH/freshmaker | 809b435d7cab1907eb74ecd898693835a92db9d8 | [
"MIT"
] | 20 | 2020-06-16T01:30:08.000Z | 2022-02-19T15:34:55.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from datetime import datetime
import re
import koji
from kobo.rpmlib import parse_nvr
import semver
from freshmaker import db, conf, log
from freshmaker.handlers import ContainerBuildHandler
from freshmaker.events import BotasErrataShippedEvent, ManualBundleRebuild
from freshmaker.lightblue import ContainerImage
from freshmaker.models import ArtifactBuild, ArtifactType, Event
from freshmaker.types import EventState, ArtifactBuildState, RebuildReason
from freshmaker.pyxis import Pyxis
from freshmaker.kojiservice import KojiService
from freshmaker.errata import Errata
def image_has_auto_rebuild_tag(self, image):
""" Check if image has a tag enabled for auto rebuild.
:param dict image: Dict representation of an image entity in Pyxis.
:rtype: bool
:return: True if image has a tag enabled for auto rebuild in repository, otherwise False.
"""
for repo in image['repositories']:
# Skip unpublished repository
if not repo['published']:
continue
auto_rebuild_tags = self._pyxis.get_auto_rebuild_tags(
repo['registry'], repo['repository']
)
tags = [t['name'] for t in repo.get('tags', [])]
if set(auto_rebuild_tags) & set(tags):
return True
# It'd be more efficient to do this check first, but the exceptions are edge cases
# (e.g. testing) and it's best to not use it unless absolutely necessary
nvr = image['brew']['build']
parsed_nvr = parse_nvr(nvr)
nv = f'{parsed_nvr["name"]}-{parsed_nvr["version"]}'
if nv in conf.bundle_autorebuild_tag_exceptions:
self.log_info(
'The bundle %r has an exception for being tagged with an auto-rebuild tag', nvr
)
return True
return False
    def _create_original_to_rebuilt_nvrs_map(self):
        """
        Creates mapping of original operator build NVRs to rebuilt NVRs in advisory.
        Including NVRs of the builds from the blocking advisories
        :rtype: dict
        :return: map of the original NVRs as keys and rebuilt NVRs as values
        """
        nvrs_mapping = {}
        # Get builds from all blocking advisories
        blocking_advisories_builds = \
            Errata().get_blocking_advisories_builds(self.event.advisory.errata_id)
        # Get builds NVRs from the advisory attached to the message/event and
        # then get original NVR for every build
        for product_info in self.event.advisory.builds.values():
            for build in product_info['builds']:
                # Each build is a one key/value pair, and key is the build NVR
                build_nvr = next(iter(build))
                # Search for the first build that triggered the chain of rebuilds
                # for every shipped NVR to get original NVR from it
                original_nvr = self.get_published_original_nvr(build_nvr)
                # No published original found: this build cannot be mapped.
                if original_nvr is None:
                    continue
                nvrs_mapping[original_nvr] = build_nvr
                parsed_build_nvr = parse_nvr(build_nvr)
                # Check builds from blocking advisories and add to the mapping
                # all of them, that have overlapping package names
                # (same name AND same version; release is intentionally ignored).
                for block_build in blocking_advisories_builds:
                    block_build_nvr = parse_nvr(block_build)
                    if (block_build_nvr['name'] == parsed_build_nvr['name']
                            and block_build_nvr['version'] == parsed_build_nvr['version']):  # noqa: W503
                        nvrs_mapping[block_build] = build_nvr
        return nvrs_mapping
    def _prepare_builds(self, db_event, to_rebuild_bundles):
        """
        Prepare models.ArtifactBuild instance for every bundle that will be
        rebuilt
        :param models.Event db_event: database event that will contain builds
        :param list to_rebuild_bundles: bundles to rebuild
        :return: builds that already in database and ready to be submitted to brew
        :rtype: list
        """
        builds = []
        # Per-build endpoint serving the operator CSV pullspec overrides;
        # filled in with the DB build id below.
        csv_mod_url = conf.freshmaker_root_url + "/api/2/pullspec_overrides/{}"
        for bundle in to_rebuild_bundles:
            # Reset context to db_event for each iteration before
            # the ArtifactBuild is created.
            self.set_context(db_event)
            rebuild_reason = RebuildReason.DIRECTLY_AFFECTED.value
            bundle_name = koji.parse_NVR(bundle["nvr"])["name"]
            build = self.record_build(
                db_event, bundle_name, ArtifactType.IMAGE,
                state=ArtifactBuildState.PLANNED.value,
                original_nvr=bundle["nvr"],
                rebuild_reason=rebuild_reason)
            # Set context to particular build so logging shows this build
            # in case of error.
            self.set_context(build)
            build.transition(ArtifactBuildState.PLANNED.value, "")
            # Koji metadata of the original bundle build drives the rebuild args.
            additional_data = ContainerImage.get_additional_data_from_koji(bundle["nvr"])
            build.build_args = json.dumps({
                "repository": additional_data["repository"],
                "commit": additional_data["commit"],
                "target": additional_data["target"],
                "branch": additional_data["git_branch"],
                "arches": additional_data["arches"],
                # The build system always enforces that bundle images build from
                # "scratch", so there is no parent image. See:
                # https://osbs.readthedocs.io/en/latest/users.html?#operator-manifest-bundle-builds
                "original_parent": None,
                "operator_csv_modifications_url": csv_mod_url.format(build.id),
            })
            build.bundle_pullspec_overrides = {
                "pullspec_replacements": bundle["pullspec_replacements"],
                "update": bundle["update"],
            }
            # Commit per bundle. NOTE(review): presumably so already-prepared
            # builds persist if a later bundle fails - confirm intent.
            db.session.commit()
            builds.append(build)
        return builds
| 44.607096 | 116 | 0.604018 |
e1d6a7a8f00c138e84b26623fa12570b059d6d57 | 244 | py | Python | src/masonite/contracts/AuthContract.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | 95 | 2018-02-22T23:54:00.000Z | 2021-04-17T03:39:21.000Z | src/masonite/contracts/AuthContract.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | 840 | 2018-01-27T04:26:20.000Z | 2021-01-24T12:28:58.000Z | src/masonite/contracts/AuthContract.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | 100 | 2018-02-23T00:19:55.000Z | 2020-08-28T07:59:31.000Z | from abc import ABC as Contract, abstractmethod
| 14.352941 | 47 | 0.631148 |
e1d7080d35e6bb09847310ecab242b0c030ed469 | 2,202 | py | Python | netblow/bin/netblow_cli.py | viniciusarcanjo/netblow | 01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba | [
"Apache-2.0"
] | 8 | 2018-10-07T17:44:46.000Z | 2022-03-24T21:40:57.000Z | netblow/bin/netblow_cli.py | viniciusarcanjo/netblow | 01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba | [
"Apache-2.0"
] | 8 | 2018-04-29T20:47:28.000Z | 2018-05-01T18:51:58.000Z | netblow/bin/netblow_cli.py | viniciusarcanjo/netblow | 01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba | [
"Apache-2.0"
] | 1 | 2019-04-27T08:48:50.000Z | 2019-04-27T08:48:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""netblow_cli module."""
import argparse
from netblow.netblow import NetBlow
from netblow.version import __version__
def main():
    """Entry function: parse CLI arguments and run the netblow framework."""
    parser = argparse.ArgumentParser(
        description="netblow. Vendor agnostic network testing framework to stress network failures." # noqa
    )
    # to add required args.
    # Pop the default "optional arguments" group and re-append it after the
    # "required arguments" group so required args are listed first in --help.
    # NOTE(review): relies on the private argparse attribute _action_groups.
    optional = parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    # --dryrun / --concheck / --once are mutually exclusive run modes.
    m_group = optional.add_mutually_exclusive_group()
    m_group.add_argument(
        '-d',
        '--dryrun',
        default=False,
        action='store_true',
        help="show tests calls, won't connect to any devices")
    m_group.add_argument(
        '-c',
        '--concheck',
        default=False,
        action='store_true',
        help='check connectivity with all devices in the topology')
    m_group.add_argument(
        '-1',
        '--once',
        default=False,
        action='store_true',
        help="iterates only once and perfom napalm diffs")
    parser.add_argument(
        '-l',
        '--level',
        choices=['info', 'debug'],
        default='info',
        help='logging verbosity level (default: info)')
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version='{}'.format(__version__),
        help='show version')
    required.add_argument(
        '-f', '--topology', help='topology yml file')
    required.add_argument(
        '-t', '--tests', help='tests yml file')
    parser._action_groups.append(optional)
    args = parser.parse_args()
    if not args.topology:
        parser.error('You have to specify the topology yml file with -f')
    if not args.tests:
        # `and` binds tighter than `or`: tests are required when --once is
        # set, or when neither --dryrun nor --concheck is set.
        if args.once or not args.dryrun and not args.concheck:
            parser.error('You have to specify the tests yml file with -t')
    # Instantiate the framework with the parsed options; auto_open/auto_test
    # presumably start the run immediately - see NetBlow for details.
    NetBlow(
        topo_file=args.topology,
        test_file=args.tests,
        dry_run=args.dryrun,
        enable_salt=False,
        iter_once=args.once,
        auto_open=True,
        auto_test=True,
        con_check=args.concheck,
        level=args.level)
if __name__ == "__main__":
    main()
| 29.36 | 108 | 0.609446 |
e1d83fca2e1bb93962f5e57c6f7075495edf9d91 | 8,688 | py | Python | src/06_tool/regular_expression.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | 1 | 2022-03-06T13:03:56.000Z | 2022-03-06T13:03:56.000Z | src/06_tool/regular_expression.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | src/06_tool/regular_expression.py | edgardeng/python-advance-interview | 59fd7bee8e871acdc7fdfecf2a110db840c47ebb | [
"Apache-2.0"
] | null | null | null | '''
' Python Regular Expression
'
'''
import re
#
#
#
#
#
#
##
if __name__ == '__main__':
# test_match()
# test_match_character()
# test_match_phone()
# test_match_qualifier()
# escape_character()
# boundary()
# test_search()
# test_multi_character()
# test_group()
# test_sub()
# test_compile()
# test_findall()
# test_split()
# greedy_mode()
# <.+><.+>.+</.+></.+>
s = '<link href="../assets/css/app.css?t=20112455" type="text/css" rel="stylesheet">'
mathched = re.findall(r'\S+assets/css/\S+.css\S+"', s)
for m in mathched:
print(m, m.index('.css'))
s = s.replace(m, m[:m.index('.css')] + '.css?t=00000"')
print(s)
| 30.484211 | 111 | 0.536027 |
e1da747be2e0ff514420a41a6547dfb4970c7ba6 | 166 | py | Python | dot_dotfiles/mail/dot_offlineimap.py | TheRealOne78/dots | 52c59dae1fccb7392ceeb16ac564f6a18ee4a159 | [
"MIT"
] | 758 | 2016-11-19T22:52:34.000Z | 2022-03-29T00:43:57.000Z | dot_dotfiles/mail/dot_offlineimap.py | TheRealOne78/dots | 52c59dae1fccb7392ceeb16ac564f6a18ee4a159 | [
"MIT"
] | 27 | 2017-02-09T23:28:58.000Z | 2022-03-22T21:35:24.000Z | dot_dotfiles/mail/dot_offlineimap.py | TheRealOne78/dots | 52c59dae1fccb7392ceeb16ac564f6a18ee4a159 | [
"MIT"
] | 82 | 2016-12-23T04:42:00.000Z | 2022-03-29T19:25:16.000Z | #! /usr/bin/env python2
# -*- coding: utf8 -*-
from subprocess import check_output
| 20.75 | 64 | 0.680723 |
e1dd3f3740e16e48cf7fbe8dce94d776bef908fd | 1,139 | py | Python | tests/encoding-utils/test_big_endian_integer.py | carver/ethereum-utils | 7ec2495b25107776cb4e0e4a79af8a8c64f622c4 | [
"MIT"
] | null | null | null | tests/encoding-utils/test_big_endian_integer.py | carver/ethereum-utils | 7ec2495b25107776cb4e0e4a79af8a8c64f622c4 | [
"MIT"
] | null | null | null | tests/encoding-utils/test_big_endian_integer.py | carver/ethereum-utils | 7ec2495b25107776cb4e0e4a79af8a8c64f622c4 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import pytest
from hypothesis import (
strategies as st,
given,
)
from eth_utils.encoding import (
int_to_big_endian,
big_endian_to_int,
)
| 22.78 | 60 | 0.665496 |
e1dd62f3dbffbbc08c5996a09c39db0640f82f31 | 1,086 | py | Python | src/data/normalization.py | poly-ai/fluid-surface-estimation | b2e310f38c3cce3c13fbf0b8277ee4eb00755d36 | [
"MIT"
] | 2 | 2022-02-15T21:41:06.000Z | 2022-02-16T04:54:51.000Z | src/data/normalization.py | poly-ai/fluid-surface-estimation | b2e310f38c3cce3c13fbf0b8277ee4eb00755d36 | [
"MIT"
] | null | null | null | src/data/normalization.py | poly-ai/fluid-surface-estimation | b2e310f38c3cce3c13fbf0b8277ee4eb00755d36 | [
"MIT"
] | null | null | null | import numpy as np
# Normalize dataset such that all sequences have min value 0.0, max value 1.0
# Normalize only the sequences in the data that have value outside range [0, 1)
# Normalizes these sequences to have min value 0.0, max value 1.0
| 32.909091 | 79 | 0.669429 |
e1de48b63ed82ddff16804877e556e037ff413c0 | 1,487 | py | Python | setup.py | fwitte/PyPSA | fa2ca201a4f0b3b5f8705a5927475ebb021dbee5 | [
"MIT"
] | null | null | null | setup.py | fwitte/PyPSA | fa2ca201a4f0b3b5f8705a5927475ebb021dbee5 | [
"MIT"
] | null | null | null | setup.py | fwitte/PyPSA | fa2ca201a4f0b3b5f8705a5927475ebb021dbee5 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pypsa',
version='0.19.1',
author='PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html',
author_email='t.brown@tu-berlin.de',
description='Python for Power Systems Analysis',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/PyPSA/PyPSA',
license='MIT',
packages=find_packages(exclude=['doc', 'test']),
include_package_data=True,
python_requires='>=3.6',
install_requires=[
'numpy',
'scipy',
'pandas>=0.24.0',
'xarray',
'netcdf4',
'tables',
'pyomo>=5.7',
'matplotlib',
'networkx>=1.10',
'deprecation'
],
extras_require = {
"dev": ["pytest", "pypower", "pandapower", "scikit-learn"],
"cartopy": ['cartopy>=0.16'],
"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "nbsphinx", "nbsphinx-link", "black"],
'gurobipy':['gurobipy']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
])
| 29.156863 | 97 | 0.604573 |
e1de6c9ea1e78727fc2d5bc8690e68e41338f516 | 556 | py | Python | quiz/urls.py | Hysham/Quiz-Hoster | 19067e3d584cb97562e73d332fdfe74eb49524cc | [
"MIT"
] | 1 | 2020-03-22T13:36:27.000Z | 2020-03-22T13:36:27.000Z | quiz/urls.py | Hysham/Quiz-Hoster | 19067e3d584cb97562e73d332fdfe74eb49524cc | [
"MIT"
] | null | null | null | quiz/urls.py | Hysham/Quiz-Hoster | 19067e3d584cb97562e73d332fdfe74eb49524cc | [
"MIT"
] | 1 | 2020-04-02T15:32:12.000Z | 2020-04-02T15:32:12.000Z |
from django.urls import path
from . import views
urlpatterns = [
path('', views.quiz_home, name='quiz-home'),
path('page/<int:page_no>/', views.quiz_page, name='quiz-page' ),
path('about/', views.quiz_about, name='quiz-about'),
path('submit/', views.quiz_submit, name='quiz-submit'),
## after quiz end
path('view_result/<int:page_no>/', views.quiz_view_result, name='quiz-view_result'),
path('leaderboard/', views.quiz_leaderboard, name='quiz-leaderboard'),
path('feedback/', views.quiz_feedback, name='quiz-feedback'),
] | 37.066667 | 88 | 0.681655 |
e1dfa37abe08ed294d2a701673731176a4e461e5 | 3,500 | py | Python | jamf/setconfig.py | pythoninthegrass/python-jamf | f71a44f4565fc2824ce6daf536359d563ab75ea3 | [
"MIT"
] | 25 | 2020-11-02T18:16:22.000Z | 2022-03-07T04:36:14.000Z | jamf/setconfig.py | pythoninthegrass/python-jamf | f71a44f4565fc2824ce6daf536359d563ab75ea3 | [
"MIT"
] | 17 | 2020-12-22T19:24:05.000Z | 2022-03-02T22:39:04.000Z | jamf/setconfig.py | pythoninthegrass/python-jamf | f71a44f4565fc2824ce6daf536359d563ab75ea3 | [
"MIT"
] | 12 | 2020-10-28T19:03:29.000Z | 2022-03-01T08:29:52.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Jamf Config
"""
__author__ = "Sam Forester"
__email__ = "sam.forester@utah.edu"
__copyright__ = "Copyright (c) 2020 University of Utah, Marriott Library"
__license__ = "MIT"
__version__ = "1.0.4"
import argparse
import getpass
import jamf
import logging
import platform
import pprint
import sys
from os import path
if __name__ == "__main__":
main()
| 28.225806 | 86 | 0.575429 |
e1e00ce354ffc24242ad31b4a0c1c5120baf617a | 979 | py | Python | src/menuResponse/migrations/0001_initial.py | miguelaav/dev | 5ade9d0b393f48c9cc3b160b6ede4a03c29addea | [
"bzip2-1.0.6"
] | null | null | null | src/menuResponse/migrations/0001_initial.py | miguelaav/dev | 5ade9d0b393f48c9cc3b160b6ede4a03c29addea | [
"bzip2-1.0.6"
] | 6 | 2020-06-05T20:02:33.000Z | 2022-03-11T23:43:11.000Z | src/menuResponse/migrations/0001_initial.py | miguelaav/dev | 5ade9d0b393f48c9cc3b160b6ede4a03c29addea | [
"bzip2-1.0.6"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-12 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 32.633333 | 124 | 0.622063 |
e1e2d0a67c83cc0cf6dbbc60b3dc2efff897636e | 10,889 | py | Python | datacube/drivers/s3/storage/s3aio/s3aio.py | Zac-HD/datacube-core | ebc2025b6fb9d22fb406cdf5f79eba6d144c57e3 | [
"Apache-2.0"
] | 2 | 2018-12-02T11:33:50.000Z | 2021-04-24T11:42:42.000Z | datacube/drivers/s3/storage/s3aio/s3aio.py | Zac-HD/datacube-core | ebc2025b6fb9d22fb406cdf5f79eba6d144c57e3 | [
"Apache-2.0"
] | 103 | 2018-03-21T15:00:05.000Z | 2020-06-04T05:40:25.000Z | datacube/drivers/s3/storage/s3aio/s3aio.py | roarmstrong/datacube-core | 5e38638dabd9e5112e92b503fae6a83c8dcc4902 | [
"Apache-2.0"
] | null | null | null | """
S3AIO Class
Array access to a single S3 object
"""
from __future__ import absolute_import
import SharedArray as sa
import zstd
from itertools import repeat, product
import numpy as np
from pathos.multiprocessing import ProcessingPool
from six.moves import zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .s3io import S3IO, generate_array_name
| 39.452899 | 113 | 0.612545 |
e1e4a6c549324fabd37261ecd95b7fc5b7e7bd39 | 5,447 | py | Python | make_snapshot.py | trquinn/ICgen | 0d7f05187a955be7fa3dee2f638cfcb074ebadcd | [
"MIT"
] | 1 | 2021-09-14T12:03:03.000Z | 2021-09-14T12:03:03.000Z | make_snapshot.py | trquinn/ICgen | 0d7f05187a955be7fa3dee2f638cfcb074ebadcd | [
"MIT"
] | null | null | null | make_snapshot.py | trquinn/ICgen | 0d7f05187a955be7fa3dee2f638cfcb074ebadcd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 21 15:11:31 2014
@author: ibackus
"""
__version__ = "$Revision: 1 $"
# $Source$
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import gc
import os
import isaac
import calc_velocity
import ICgen_utils
import ICglobal_settings
global_settings = ICglobal_settings.global_settings
def snapshot_gen(ICobj):
"""
Generates a tipsy snapshot from the initial conditions object ICobj.
Returns snapshot, param
snapshot: tipsy snapshot
param: dictionary containing info for a .param file
"""
print 'Generating snapshot...'
# Constants
G = SimArray(1.0,'G')
# ------------------------------------
# Load in things from ICobj
# ------------------------------------
print 'Accessing data from ICs'
settings = ICobj.settings
# filenames
snapshotName = settings.filenames.snapshotName
paramName = settings.filenames.paramName
# particle positions
r = ICobj.pos.r
xyz = ICobj.pos.xyz
# Number of particles
nParticles = ICobj.pos.nParticles
# molecular mass
m = settings.physical.m
# star mass
m_star = settings.physical.M.copy()
# disk mass
m_disk = ICobj.sigma.m_disk.copy()
m_disk = isaac.match_units(m_disk, m_star)[0]
# mass of the gas particles
m_particles = m_disk / float(nParticles)
# re-scale the particles (allows making of lo-mass disk)
m_particles *= settings.snapshot.mScale
# -------------------------------------------------
# Assign output
# -------------------------------------------------
print 'Assigning data to snapshot'
# Get units all set up
m_unit = m_star.units
pos_unit = r.units
if xyz.units != r.units:
xyz.convert_units(pos_unit)
# time units are sqrt(L^3/GM)
t_unit = np.sqrt((pos_unit**3)*np.power((G*m_unit), -1)).units
# velocity units are L/t
v_unit = (pos_unit/t_unit).ratio('km s**-1')
# Make it a unit
v_unit = pynbody.units.Unit('{0} km s**-1'.format(v_unit))
# Other settings
metals = settings.snapshot.metals
star_metals = metals
# -------------------------------------------------
# Initialize snapshot
# -------------------------------------------------
# Note that empty pos, vel, and mass arrays are created in the snapshot
snapshot = pynbody.new(star=1,gas=nParticles)
snapshot['vel'].units = v_unit
snapshot['eps'] = 0.01*SimArray(np.ones(nParticles+1, dtype=np.float32), pos_unit)
snapshot['metals'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
snapshot['rho'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
snapshot.gas['pos'] = xyz
snapshot.gas['temp'] = ICobj.T(r)
snapshot.gas['mass'] = m_particles
snapshot.gas['metals'] = metals
snapshot.star['pos'] = SimArray([[ 0., 0., 0.]],pos_unit)
snapshot.star['vel'] = SimArray([[ 0., 0., 0.]], v_unit)
snapshot.star['mass'] = m_star
snapshot.star['metals'] = SimArray(star_metals)
# Estimate the star's softening length as the closest particle distance
snapshot.star['eps'] = r.min()
# Make param file
param = isaac.make_param(snapshot, snapshotName)
param['dMeanMolWeight'] = m
gc.collect()
# -------------------------------------------------
# CALCULATE VELOCITY USING calc_velocity.py. This also estimates the
# gravitational softening length eps
# -------------------------------------------------
print 'Calculating circular velocity'
preset = settings.changa_run.preset
max_particles = global_settings['misc']['max_particles']
calc_velocity.v_xy(snapshot, param, changa_preset=preset, max_particles=max_particles)
gc.collect()
# -------------------------------------------------
# Estimate time step for changa to use
# -------------------------------------------------
# Save param file
isaac.configsave(param, paramName, 'param')
# Save snapshot
snapshot.write(filename=snapshotName, fmt=pynbody.tipsy.TipsySnap)
# est dDelta
dDelta = ICgen_utils.est_time_step(paramName, preset)
param['dDelta'] = dDelta
# -------------------------------------------------
# Create director file
# -------------------------------------------------
# largest radius to plot
r_director = float(0.9 * r.max())
# Maximum surface density
sigma_min = float(ICobj.sigma(r_director))
# surface density at largest radius
sigma_max = float(ICobj.sigma.input_dict['sigma'].max())
# Create director dict
director = isaac.make_director(sigma_min, sigma_max, r_director, filename=param['achOutName'])
## Save .director file
#isaac.configsave(director, directorName, 'director')
# -------------------------------------------------
# Wrap up
# -------------------------------------------------
print 'Wrapping up'
# Now set the star particle's tform to a negative number. This allows
# UW ChaNGa treat it as a sink particle.
snapshot.star['tform'] = -1.0
# Update params
r_sink = isaac.strip_units(r.min())
param['dSinkBoundOrbitRadius'] = r_sink
param['dSinkRadius'] = r_sink
param['dSinkMassMin'] = 0.9 * isaac.strip_units(m_star)
param['bDoSinks'] = 1
return snapshot, param, director | 33.213415 | 98 | 0.572242 |
e1e5f2d6ad3305b63d32e9bc867c960c34b149c1 | 8,243 | py | Python | diag_rank_update.py | IPA-HD/ldaf_classification | e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5 | [
"MIT"
] | null | null | null | diag_rank_update.py | IPA-HD/ldaf_classification | e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5 | [
"MIT"
] | null | null | null | diag_rank_update.py | IPA-HD/ldaf_classification | e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5 | [
"MIT"
] | 1 | 2022-02-23T16:13:04.000Z | 2022-02-23T16:13:04.000Z | """
Diagonal Matrix with rank-1 updates.
"""
import itertools
import torch
from torch.functional import Tensor
def batchDot(self, v):
"""
Batched multiplication self @ v
with batch of matrices v (batch_size, n, k)
"""
assert v.ndim == 3
assert v.shape[1] == self.rankUpdates.shape[2]
n = v.shape[1]
diag_bmm = self.diag.reshape((1, n, 1))*v
inner_prod = torch.matmul(self.rankUpdates[:,1,:].unsqueeze(0), v)
# inner_prod now has shape (batch_size, n_updates, k)
outer_prod = torch.matmul(
self.rankUpdates[:,0,:].t().unsqueeze(0),
inner_prod
)
# outer_prod now has shape (batch_size, n, k)
return diag_bmm + outer_prod
def batchDotTransposed(self, v):
"""
Batched multiplication self.t() @ v
with batch of matrices v (batch_size, n, k)
"""
assert v.ndim == 3
assert v.shape[1] == self.rankUpdates.shape[2]
n = v.shape[1]
diag_bmm = self.diag.reshape((1, n, 1))*v
inner_prod = torch.matmul(self.rankUpdates[:,0,:].unsqueeze(0), v)
# inner_prod now has shape (batch_size, n_updates, k)
outer_prod = torch.matmul(
self.rankUpdates[:,1,:].t().unsqueeze(0),
inner_prod
)
# outer_prod now has shape (batch_size, n, k)
return diag_bmm + outer_prod
def dotRight(self, other):
"""
Multiply self @ other
"""
return self.diag * other + torch.matmul(
torch.matmul( self.rankUpdates[:,1,:] , other ),
self.rankUpdates[:,0,:]
)
def dotLeft(self, other):
"""
Multiply other @ self
"""
return self.diag * other + torch.matmul(
torch.matmul( self.rankUpdates[:,0,:] , other ),
self.rankUpdates[:,1,:]
)
def dotBoth(self, v, w):
"""
Let A be self and v, w . Then `dotBoth` computes
v A w
"""
return (self.diag * v * w).sum() + torch.dot(
torch.matmul(self.rankUpdates[:, 0, :], v),
torch.matmul(self.rankUpdates[:, 1, :], w)
)
| 28.922807 | 105 | 0.474463 |
e1e66cd3308883f2371baad138a10eed2eac4eff | 4,074 | py | Python | tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py | XavierJingfeng/starter | 274566e491d5c7157f3c8deff136c56838022349 | [
"MIT"
] | null | null | null | import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
| 35.736842 | 76 | 0.59352 |
e1e7670f03c464a40b12de227929a84b71ca6496 | 3,015 | py | Python | cloudify_gcp/monitoring/stackdriver_uptimecheck.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 4 | 2016-10-24T17:42:07.000Z | 2020-05-31T00:34:07.000Z | cloudify_gcp/monitoring/stackdriver_uptimecheck.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 35 | 2015-04-30T20:14:01.000Z | 2022-02-03T21:35:54.000Z | cloudify_gcp/monitoring/stackdriver_uptimecheck.py | cloudify-cosmo/cloudify-gcp-plugin | c70faee0555070f7fc67f0001395eaafb681b23c | [
"Apache-2.0"
] | 13 | 2015-04-17T16:42:03.000Z | 2021-06-24T04:12:14.000Z | # #######
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
from cloudify_gcp.gcp import check_response
from .. import utils
from .. import constants
from ..monitoring import MonitoringBase
| 33.5 | 78 | 0.725373 |
e1e7fd1d9bbf595b4d131e3b6ac6e686c46e866f | 2,041 | py | Python | tests/test_database.py | penggan666/index_selection_evaluation | b6daf1f30c24a0675f4e3acfbd17304e5d91cfd6 | [
"MIT"
] | 37 | 2020-03-03T10:59:06.000Z | 2022-03-29T11:51:37.000Z | tests/test_database.py | Jiachen-Shi/index_selection_evaluation | fb22b929cbab22377e90a12ae23ea4002d8eab7b | [
"MIT"
] | 19 | 2020-03-10T14:55:56.000Z | 2021-05-20T09:54:32.000Z | tests/test_database.py | Jiachen-Shi/index_selection_evaluation | fb22b929cbab22377e90a12ae23ea4002d8eab7b | [
"MIT"
] | 14 | 2020-08-10T03:12:40.000Z | 2022-02-28T06:08:16.000Z | import unittest
from selection.dbms.postgres_dbms import PostgresDatabaseConnector
from selection.index import Index
from selection.table_generator import TableGenerator
from selection.workload import Column, Query, Table
if __name__ == "__main__":
unittest.main()
| 34.016667 | 85 | 0.707496 |
e1e8c509d815e0208974db78a033ef909fdca7d8 | 2,012 | py | Python | aljson/__init__.py | hrzp/aljson | 83cab23f9466c8ca5803dba7d5ec998646ff0436 | [
"MIT"
] | 1 | 2020-02-02T11:33:29.000Z | 2020-02-02T11:33:29.000Z | aljson/__init__.py | hrzp/aljson | 83cab23f9466c8ca5803dba7d5ec998646ff0436 | [
"MIT"
] | null | null | null | aljson/__init__.py | hrzp/aljson | 83cab23f9466c8ca5803dba7d5ec998646ff0436 | [
"MIT"
] | null | null | null | from sqlalchemy.orm.collections import InstrumentedList
| 31.936508 | 74 | 0.630716 |
e1e8ce55d278ecec5ff0a778a7af4a2bbb524f3a | 1,274 | py | Python | src/robotide/context/coreplugins.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-27T08:48:24.000Z | 2019-06-27T08:48:24.000Z | src/robotide/context/coreplugins.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/context/coreplugins.py | veryl-technologies/t24-tests-ide | 16cd803895916a785c0e1fec3f71f9388c21edc9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 42.466667 | 87 | 0.78022 |
e1e993089a256f12c7dadf856a619e12a83973e8 | 918 | py | Python | backend/apps/api/system/v1/serializers/groups.py | offurface/smsta | b8a1f2e6efe6c71703c8d57e8aae255ad213863c | [
"MIT"
] | null | null | null | backend/apps/api/system/v1/serializers/groups.py | offurface/smsta | b8a1f2e6efe6c71703c8d57e8aae255ad213863c | [
"MIT"
] | null | null | null | backend/apps/api/system/v1/serializers/groups.py | offurface/smsta | b8a1f2e6efe6c71703c8d57e8aae255ad213863c | [
"MIT"
] | null | null | null | from rest_framework import serializers
from ... import models
| 21.348837 | 67 | 0.606754 |
e1ea50469b885baae0f3ea29526541040d09f40f | 6,629 | py | Python | cinebot_mini/web_utils/blender_client.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini/web_utils/blender_client.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini/web_utils/blender_client.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | from cinebot_mini import SERVERS
import requests
import numpy as np
import json
| 25.996078 | 69 | 0.577312 |
e1eb5b5cf0257ffeb6de52c29326fb2195c7a273 | 6,733 | py | Python | gem5-configs/configs-microbench-tests/run_controlbenchmarks.py | TCHERNET/parsec-tests2 | 775b299a890d0d552877ed510240aa59c630eaa3 | [
"BSD-3-Clause"
] | 5 | 2020-05-20T12:24:29.000Z | 2021-07-20T01:49:30.000Z | gem5-configs/configs-microbench-tests/run_controlbenchmarks.py | TCHERNET/parsec-tests2 | 775b299a890d0d552877ed510240aa59c630eaa3 | [
"BSD-3-Clause"
] | 26 | 2020-04-03T15:01:48.000Z | 2021-06-09T19:08:31.000Z | gem5-configs/configs-microbench-tests/run_controlbenchmarks.py | TCHERNET/parsec-tests2 | 775b299a890d0d552877ed510240aa59c630eaa3 | [
"BSD-3-Clause"
] | 3 | 2020-07-04T14:51:29.000Z | 2021-09-16T04:33:45.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018 The Regents of the University of California
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power
from __future__ import print_function
import argparse
import m5
from m5.objects import TimingSimpleCPU, DerivO3CPU
from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory
from m5.objects import Root
from m5.objects import *
from system import BaseTestSystem
from system import InfMemory, SingleCycleMemory, SlowMemory
# Branch predictor params
# If indirect Predictor is disabled use BTB with these params
btbEntries = 512
btbTagSize = 19
ipred = SimpleIndirectPredictor()
#CPU Configs
# Add more CPUs Configs under test before this
valid_configs = [Simple_LocalBP, Simple_BiModeBP, Simple_TournamentBP, Simple_LTAGEBP, DefaultO3_LocalBP, DefaultO3_BiModeBP, DefaultO3_TournamentBP, DefaultO3_LTAGEBP]
valid_configs = {cls.__name__[:-2]:cls for cls in valid_configs}
# Add more Memories under test before this
valid_memories = [InfMemory, SingleCycleMemory, SlowMemory]
valid_memories = {cls.__name__[:-6]:cls for cls in valid_memories}
parser = argparse.ArgumentParser()
parser.add_argument('config', choices = valid_configs.keys())
parser.add_argument('memory_model', choices = valid_memories.keys())
parser.add_argument('binary', type = str, help = "Path to binary to run")
args = parser.parse_args()
system = MySystem()
system.setTestBinary(args.binary)
root = Root(full_system = False, system = system)
m5.instantiate()
exit_event = m5.simulate()
if exit_event.getCause() != 'exiting with last active thread context':
print("Benchmark failed with bad exit cause.")
print(exit_event.getCause())
exit(1)
if exit_event.getCode() != 0:
print("Benchmark failed with bad exit code.")
print("Exit code {}".format(exit_event.getCode()))
exit(1)
print("{} ms".format(m5.curTick()/1e9))
| 40.077381 | 168 | 0.776771 |
e1ebe5e056a585344fff7992dae1cbba59732df5 | 1,273 | py | Python | poezio/args.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | null | null | null | poezio/args.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | null | null | null | poezio/args.py | hrnciar/poezio | 12b8af11df35dda535412b0c02ba792890095a7d | [
"Zlib"
] | null | null | null | """
Module related to the argument parsing
There is a fallback to the deprecated optparse if argparse is not found
"""
from pathlib import Path
from argparse import ArgumentParser, SUPPRESS
from poezio.version import __version__
def parse_args(CONFIG_PATH: Path):
"""
Parse the arguments from the command line
"""
parser = ArgumentParser('poezio')
parser.add_argument(
"-c",
"--check-config",
dest="check_config",
action='store_true',
help='Check the config file')
parser.add_argument(
"-d",
"--debug",
dest="debug",
help="The file where debug will be written",
metavar="DEBUG_FILE")
parser.add_argument(
"-f",
"--file",
dest="filename",
default=CONFIG_PATH / 'poezio.cfg',
type=Path,
help="The config file you want to use",
metavar="CONFIG_FILE")
parser.add_argument(
'-v',
'--version',
action='version',
version='Poezio v%s' % __version__,
)
parser.add_argument(
"--custom-version",
dest="custom_version",
help=SUPPRESS,
metavar="VERSION",
default=__version__
)
options = parser.parse_args()
return options
| 24.480769 | 71 | 0.593087 |
e1ed3b48fe37cb69350c8b6542e4845c264e91f2 | 1,125 | py | Python | src/mist/api/poller/schedulers.py | vladimir-ilyashenko/mist.api | f77c451679732ac1cfdafa85ad023c7c170faec4 | [
"Apache-2.0"
] | null | null | null | src/mist/api/poller/schedulers.py | vladimir-ilyashenko/mist.api | f77c451679732ac1cfdafa85ad023c7c170faec4 | [
"Apache-2.0"
] | null | null | null | src/mist/api/poller/schedulers.py | vladimir-ilyashenko/mist.api | f77c451679732ac1cfdafa85ad023c7c170faec4 | [
"Apache-2.0"
] | null | null | null | from celerybeatmongo.schedulers import MongoScheduler
from mist.api.sharding.mixins import ShardManagerMixin
from mist.api.poller.models import PollingSchedule
from mist.api.poller.models import OwnerPollingSchedule
from mist.api.poller.models import CloudPollingSchedule
from mist.api.poller.models import MachinePollingSchedule
import datetime
| 26.162791 | 74 | 0.826667 |
e1ee5eb16b3e9a592172165671953d6cc3271d6d | 13,939 | py | Python | datasets/hdd_classif.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | 4 | 2021-05-31T16:53:35.000Z | 2021-11-30T03:03:34.000Z | datasets/hdd_classif.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | 3 | 2022-02-02T20:41:56.000Z | 2022-02-24T11:47:44.000Z | datasets/hdd_classif.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | null | null | null | from collections import Counter
import json
from pathlib import Path
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from bootstrap.lib.logger import Logger
from bootstrap.datasets import transforms as bootstrap_tf
try:
from .hdd import HDD
except:
from hdd import HDD
if __name__ == "__main__":
split = "val"
fps = 3
dir_data = Path("/datasets_local/HDD")
nb_threads = 0
horizon = 2
win_size = 21
layer = "goal"
batch_size = 12
use_navig = False
im_size = "small"
dataset = HDDClassif(dir_data,
split,
win_size,
im_size,
layer, # "goal" or "cause"
use_navig=use_navig,
fps=fps,
horizon=horizon, # in seconds
batch_size=batch_size,
debug=False,
shuffle=False,
pin_memory=False,
nb_threads=0)
vidname_to_index = {}
for idx, sequence in enumerate(dataset.index):
vid_name = sequence[0].parent.name
if vid_name not in vidname_to_index:
vidname_to_index[vid_name] = []
vidname_to_index[vid_name].append(idx)
batch_sampler = SequentialBatchSampler(vidname_to_index, batch_size)
N = 0
for batch in batch_sampler:
print(batch)
N += 1
# item = dataset[5]
# loader = dataset.make_batch_loader(batch_size,
# shuffle=False)
# for idx, batch in enumerate(loader):
# break | 38.084699 | 193 | 0.522204 |
e1f01e5ef61eacab7ab09c6ac2aca35cf6f0b034 | 921 | py | Python | 1W/6/3.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | 2 | 2021-11-25T13:38:36.000Z | 2021-11-25T13:42:56.000Z | 1W/6/3.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | null | null | null | 1W/6/3.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | 1 | 2021-11-25T13:38:43.000Z | 2021-11-25T13:38:43.000Z | # https://www.hackerrank.com/challenges/one-week-preparation-kit-jesse-and-cookies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
import heapq
#
# Complete the 'cookies' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY A
#
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
A = list(map(int, input().rstrip().split()))
result = cookies(k, A)
fptr.write(str(result) + '\n')
fptr.close() | 20.021739 | 90 | 0.633008 |
e1f029d6dec3a3f66d804dec3fb860fb4b271b25 | 3,895 | py | Python | toughio/capillarity/_base.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 21 | 2020-03-05T20:03:58.000Z | 2022-03-14T23:17:42.000Z | toughio/capillarity/_base.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 60 | 2020-02-14T22:53:01.000Z | 2022-03-26T07:24:19.000Z | toughio/capillarity/_base.py | keurfonluu/toughio | 1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f | [
"BSD-3-Clause-LBNL"
] | 6 | 2020-02-28T08:15:36.000Z | 2022-03-13T23:26:24.000Z | from abc import ABCMeta, abstractmethod, abstractproperty
import numpy
__all__ = [
"BaseCapillarity",
]
# See <https://stackoverflow.com/questions/35673474/using-abc-abcmeta-in-a-way-it-is-compatible-both-with-python-2-7-and-python-3-5>
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
| 29.507576 | 132 | 0.562773 |
e1f08688ada9b36c08693a0c6eb7ff57ba0e5786 | 23,988 | py | Python | gui.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 4 | 2022-01-31T14:26:39.000Z | 2022-02-06T06:34:27.000Z | gui.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | 2 | 2021-11-30T12:19:27.000Z | 2021-11-30T12:42:10.000Z | gui.py | NejcHirci/material-addon | c08e2081413c3319b712c2f7193ac8013f601382 | [
"MIT"
] | null | null | null | import bpy
import glob
from bpy.types import Panel, Operator
from bpy.app.handlers import persistent
import os
import threading
from queue import Queue
from pathlib import Path
from . mix_ops import *
from . matgan_ops import *
from . neural_ops import *
cache_path = os.path.join(Path(__file__).parent.resolve(), '.cache')
# Redraw all function
# Thread function for reading output
def update_active_mat(self, context):
active_obj = bpy.context.active_object
if active_obj:
if context.scene.SelectWorkflow == 'MatGAN':
base_name = "matgan_mat"
elif context.scene.SelectWorkflow == 'NeuralMAT':
base_name = "neural_mat"
elif context.scene.SelectWorkflow == 'MixMAT':
base_name = "mix_mat"
name = f"{active_obj.name}_{base_name}"
if name not in bpy.data.materials:
mat = bpy.data.materials[base_name].copy()
mat.name = name
else:
mat = bpy.data.materials[name]
active_obj.active_material = mat
if context.scene.SelectWorkflow == 'MatGAN' and 'MaterialGAN_Path' in active_obj:
bpy.context.scene.matgan_properties.directory = active_obj['MaterialGAN_Path']
elif context.scene.SelectWorkflow == 'NeuralMAT' and 'Neural_Path' in active_obj:
bpy.context.scene.neural_properties.directory = active_obj['Neural_Path']
elif context.scene.SelectWorkflow == 'MixMAT' and 'Algorithmic_Path' in active_obj:
bpy.context.scene.mixmat_properties.directory = active_obj['Algorithmic_Path']
# Copy files to .cache folder
| 42.306878 | 144 | 0.557654 |
e1f180db019536ccc2e9f00c32c47da031376111 | 4,266 | py | Python | run.py | kbeyer/RPi-LED-SpectrumAnalyzer | f5a5f1210f02188599eb308f5737392ce8c93218 | [
"MIT"
] | 14 | 2015-01-09T12:26:06.000Z | 2021-03-22T22:16:53.000Z | run.py | kbeyer/RPi-LED-SpectrumAnalyzer | f5a5f1210f02188599eb308f5737392ce8c93218 | [
"MIT"
] | 4 | 2015-07-19T07:20:51.000Z | 2017-02-01T16:11:22.000Z | run.py | kbeyer/RPi-LED-SpectrumAnalyzer | f5a5f1210f02188599eb308f5737392ce8c93218 | [
"MIT"
] | 4 | 2016-03-07T12:12:08.000Z | 2018-03-04T21:57:13.000Z | """ Main entry point for running the demo. """
# Standard library
import time
import sys
# Third party library
import alsaaudio as aa
# Local library
from char import show_text
from hs_logo import draw_logo
from leds import ColumnedLEDStrip
from music import calculate_levels, read_musicfile_in_chunks, calculate_column_frequency
from shairplay import initialize_shairplay, shutdown_shairplay, RaopCallbacks
COLUMNS = 12
GAP_LEDS = 0
TOTAL_LEDS = 100
SKIP_LEDS = 4
SAMPLE_RATE = 44100
NUM_CHANNELS = 2
FORMAT = aa.PCM_FORMAT_S16_LE
PERIOD_SIZE = 2048
frequency_limits = calculate_column_frequency(200, 10000, COLUMNS)
if __name__ == '__main__':
from textwrap import dedent
input_types = ('local', 'linein', 'airplay')
usage = dedent("""\
Usage: %s <input-type> [additional arguments]
input-type: should be one of %s
To play a local file, you can pass the path to the file as an additional
argument.
""") % (sys.argv[0], input_types)
if len(sys.argv) == 1:
print usage
sys.exit(1)
input_type = sys.argv[1]
led_strip = get_led_strip()
if input_type == 'local':
path = sys.argv[2] if len(sys.argv) > 2 else 'sample.mp3'
analyze_audio_file(led_strip, path)
elif input_type == 'airplay':
analyze_airplay_input(led_strip)
elif input_type == 'linein':
analyze_line_in(led_strip)
else:
print usage
sys.exit(1)
| 28.44 | 109 | 0.665495 |
e1f1f1c95fd75ee0bf2a6e9603b88f2d439ebe8f | 2,924 | py | Python | 2020/07/solution.py | dglmoore/advent-of-code | ca6e39a842a84ad5271891535c9323e057261d44 | [
"MIT"
] | null | null | null | 2020/07/solution.py | dglmoore/advent-of-code | ca6e39a842a84ad5271891535c9323e057261d44 | [
"MIT"
] | null | null | null | 2020/07/solution.py | dglmoore/advent-of-code | ca6e39a842a84ad5271891535c9323e057261d44 | [
"MIT"
] | null | null | null | import re
if __name__ == '__main__':
with open("test.txt") as handle:
lines = handle.readlines()
print("Part I: ", part1(lines))
print("Part II:", part2(lines))
| 35.658537 | 79 | 0.591313 |
e1f2c620730a24383f1404677c275f4158ee87bb | 1,981 | py | Python | src/m6_your_turtles.py | polsteaj/01-IntroductionToPython | 155f56f66a5746baa4d5319d4e79c14aa857199b | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | polsteaj/01-IntroductionToPython | 155f56f66a5746baa4d5319d4e79c14aa857199b | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | polsteaj/01-IntroductionToPython | 155f56f66a5746baa4d5319d4e79c14aa857199b | [
"MIT"
] | null | null | null | """
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Alec Polster.
"""
import rosegraphics as rg
###############################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
window = rg.TurtleWindow()
my_turtle = rg.SimpleTurtle('turtle')
my_turtle.pen = rg.Pen('blue', 10)
my_turtle.speed = 10
your_turtle = rg.SimpleTurtle()
your_turtle.pen = rg.Pen('red', 5)
your_turtle.speed = 10
your_turtle.pen_up()
your_turtle.forward(3)
your_turtle.pen_down()
size = 300
for k in range(15):
my_turtle.draw_square(size)
my_turtle.pen_up()
my_turtle.right(45)
my_turtle.forward(10)
my_turtle.left(45)
my_turtle.pen_down()
your_turtle.draw_square(size-100)
your_turtle.pen_up()
your_turtle.right(45)
your_turtle.forward(10)
your_turtle.left(45)
your_turtle.pen_down()
size = size - 20
window.close_on_mouse_click()
| 33.576271 | 79 | 0.594144 |
e1f30a4f4d1925bf5687b7cf412adf4bd33cee9b | 84 | py | Python | docs/ResearchSession/manage.py | VoIlAlex/pytorchresearch | c4f08cd0ec6b78788e682005c099aef4582640cb | [
"MIT"
] | 1 | 2020-12-13T20:25:27.000Z | 2020-12-13T20:25:27.000Z | docs/ResearchSession/manage.py | VoIlAlex/pytorchresearch | c4f08cd0ec6b78788e682005c099aef4582640cb | [
"MIT"
] | null | null | null | docs/ResearchSession/manage.py | VoIlAlex/pytorchresearch | c4f08cd0ec6b78788e682005c099aef4582640cb | [
"MIT"
] | null | null | null | from backbone import entry_point
if __name__ == '__main__':
entry_point.main()
| 16.8 | 32 | 0.738095 |
e1f4c12b169ff0fc2c245e310a2a7024653caedb | 116 | py | Python | base.py | chenzhangyu/WeiboOAuth | a00cc5983e989bb2ea8907b8d590a0a6c750d804 | [
"MIT"
] | 1 | 2019-10-10T08:26:08.000Z | 2019-10-10T08:26:08.000Z | base.py | chenzhangyu/WeiboOAuth | a00cc5983e989bb2ea8907b8d590a0a6c750d804 | [
"MIT"
] | null | null | null | base.py | chenzhangyu/WeiboOAuth | a00cc5983e989bb2ea8907b8d590a0a6c750d804 | [
"MIT"
] | 1 | 2019-04-12T09:42:03.000Z | 2019-04-12T09:42:03.000Z | # encoding=utf-8
__author__ = 'lance'
import tornado.web
| 12.888889 | 46 | 0.75 |
e1f4f6334ab0ff9c96e987467be3ce874e28f3d7 | 2,958 | py | Python | paddlers/custom_models/cd/cdnet.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 40 | 2022-02-28T02:07:28.000Z | 2022-03-31T09:54:29.000Z | paddlers/custom_models/cd/cdnet.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 5 | 2022-03-15T12:13:33.000Z | 2022-03-31T15:54:08.000Z | paddlers/custom_models/cd/cdnet.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 20 | 2022-02-28T02:07:31.000Z | 2022-03-31T11:40:40.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
if __name__ == "__main__":
t1 = paddle.randn((1, 3, 512, 512), dtype="float32")
t2 = paddle.randn((1, 3, 512, 512), dtype="float32")
model = CDNet(6, 2)
pred = model(t1, t2)[0]
print(pred.shape)
| 38.921053 | 75 | 0.610886 |
e1f4fb4322ad7bde9174a243c1005f58f9c30795 | 1,948 | py | Python | contrib/make-leap-seconds.py | dmgerman/ntpsec | 28dde8422e1a949e50663ae965d58c2fdbc782b9 | [
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null | contrib/make-leap-seconds.py | dmgerman/ntpsec | 28dde8422e1a949e50663ae965d58c2fdbc782b9 | [
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null | contrib/make-leap-seconds.py | dmgerman/ntpsec | 28dde8422e1a949e50663ae965d58c2fdbc782b9 | [
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | 1 | 2021-09-24T18:19:49.000Z | 2021-09-24T18:19:49.000Z | #!/usr/bin/env python
"""\
make-leap-seconds.py - make leap second file for testing
Optional args are date of leap second: YYYY-MM-DD
and expiration date of file.
Defaults are start of tomorrow (UTC), and 28 days after the leap.
"Start of tomorow" is as soon as possible for testing.
"""
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import print_function, division
import datetime
import sha
import sys
import time
JAN_1970 = 2208988800 # convert Unix/POSIX epoch to NTP epoch
epoch = datetime.datetime.utcfromtimestamp(0)
args = sys.argv[1:]
leap = time.time()
days = int(leap/86400)
leap = (days+1)*86400
if len(args) > 0:
leapdate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
leap = (leapdate - epoch).total_seconds()
leap = int(leap)
args = args[1:]
expire = leap + 28*86400
if len(args) > 0:
expiredate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
expire = (expiredate - epoch).total_seconds()
expire = int(expire)
args = args[1:]
leap_txt = time.asctime(time.gmtime(leap))
leap = str(leap+JAN_1970)
expire_txt = time.asctime(time.gmtime(expire))
expire = str(expire+JAN_1970)
update = int(time.time())
update_txt = time.asctime(time.gmtime(update))
update = str(update+JAN_1970)
tai = "40" # hardwired
# File format
#
# # is comment
# #$ xxx Update Date
# #@ xxx Expiration Date
# #h SHA1 hash of payload
#
# #$ 3676924800
# #@ 3707596800
# 2272060800 10 # 1 Jan 1972
# #h dacf2c42 2c4765d6 3c797af8 2cf630eb 699c8c67
#
# All dates use NTP epoch of 1900-01-01
sha1 = sha.new()
print("%s %s # %s" % (leap, tai, leap_txt))
sha1.update(leap)
sha1.update(tai)
print("#@ %s # %s" % (expire, expire_txt))
sha1.update(expire)
print("#$ %s # %s" % (update, update_txt))
sha1.update(update)
digest = sha1.hexdigest()
print("#h %s %s %s %s %s" %
(digest[0:8], digest[8:16], digest[16:24], digest[24:32], digest[32:40]))
# end
| 24.35 | 79 | 0.664784 |
e1f5bc34418af89095c0d30d7b41fe28a2137a99 | 695 | py | Python | tests/profiling/test_scheduler.py | uniq10/dd-trace-py | ca9ce1fe552cf03c2828bcd160e537336aa275d5 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-17T14:55:46.000Z | 2020-10-17T14:55:46.000Z | tests/profiling/test_scheduler.py | uniq10/dd-trace-py | ca9ce1fe552cf03c2828bcd160e537336aa275d5 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:56:55.000Z | 2020-12-22T16:56:55.000Z | tests/profiling/test_scheduler.py | uniq10/dd-trace-py | ca9ce1fe552cf03c2828bcd160e537336aa275d5 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-12-22T16:54:02.000Z | 2020-12-22T16:54:02.000Z | # -*- encoding: utf-8 -*-
from ddtrace.profiling import event
from ddtrace.profiling import exporter
from ddtrace.profiling import recorder
from ddtrace.profiling import scheduler
| 23.965517 | 68 | 0.689209 |
e1f73d543e655fe197f206bbd67ec8e450d4935c | 5,546 | py | Python | scrape_reviews/scrape_reviews/spiders/imdb_spider.py | eshwarkoka/sentiment_analysis_on_movie_reviews | 16ad65904ea1446f0b5d2f432e48581414e12c04 | [
"MIT"
] | null | null | null | scrape_reviews/scrape_reviews/spiders/imdb_spider.py | eshwarkoka/sentiment_analysis_on_movie_reviews | 16ad65904ea1446f0b5d2f432e48581414e12c04 | [
"MIT"
] | 2 | 2020-09-09T16:48:28.000Z | 2020-09-09T16:48:36.000Z | scrape_reviews/scrape_reviews/spiders/imdb_spider.py | eshwarkoka/sentiment_analysis_on_movie_reviews | 16ad65904ea1446f0b5d2f432e48581414e12c04 | [
"MIT"
] | null | null | null | import scrapy,json,re,time,os,glob
from scrapy.exceptions import CloseSpider
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
#get all the imdb xpaths from xpaths.json file
with open('./locators/xpaths.json') as f:
xpaths = json.load(f)
imdb = xpaths["imdb"][0]
#define all the required variables
movie_name = ''
project_path = r'/Users/eshwar/Documents/projects/sentiment_analysis_on_movie_reviews/'
scraped_reviews_path = project_path + "data/scraped_reviews/"
predicted_reviews_path = project_path + "data/predicted_reviews/"
chrome_driver_path = project_path+"scrape_reviews/chrome_driver/chromedriver"
| 36.728477 | 128 | 0.638118 |
e1f89f4c50e5d75fea57eee72158205ed8c1ffe8 | 423 | py | Python | backend/notifications/admin.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 3 | 2019-02-24T23:30:19.000Z | 2019-03-27T20:06:53.000Z | backend/notifications/admin.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 1 | 2019-03-30T08:58:06.000Z | 2019-03-30T08:58:06.000Z | backend/notifications/admin.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 1 | 2019-03-01T20:10:19.000Z | 2019-03-01T20:10:19.000Z | from django.contrib import admin
from notifications.models import Notification
| 17.625 | 45 | 0.553191 |
e1f95d627c633bc21a45b92e2b2fbf936f530ed6 | 1,916 | py | Python | logistic-regression/code.py | kalpeshsnaik09/ga-learner-dsmp-repo | b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4 | [
"MIT"
] | null | null | null | logistic-regression/code.py | kalpeshsnaik09/ga-learner-dsmp-repo | b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4 | [
"MIT"
] | null | null | null | logistic-regression/code.py | kalpeshsnaik09/ga-learner-dsmp-repo | b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4 | [
"MIT"
] | null | null | null | # --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df=pd.read_csv(path)
print(df.head())
X=df.drop(columns='insuranceclaim')
y=df['insuranceclaim']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.boxplot(X_train['bmi'])
plt.show()
q_value=X_train['bmi'].quantile(0.95)
print(y_train.value_counts())
# Code ends here
# --------------
import seaborn as sns
# Code starts here
relation=X_train.corr()
print(relation)
sns.pairplot(X_train)
plt.show()
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols=['children','sex','region','smoker']
fig,axes=plt.subplots(2,2)
for i in range(2):
for j in range(2):
col=cols[i*2+j]
sns.countplot(X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr=LogisticRegression(random_state=9)
grid=GridSearchCV(estimator=lr,param_grid=parameters)
grid.fit(X_train,y_train)
y_pred=grid.predict(X_test)
accuracy=accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score=roc_auc_score(y_test,y_pred)
y_pred_proba=grid.predict_proba(X_test)[:,1]
fpr,tpr,_=metrics.roc_curve(y_test,y_pred)
roc_auc=roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| 20.602151 | 80 | 0.731733 |
e1fa2fd607868b6a76f691220804b86d0b59aec1 | 2,227 | py | Python | macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 180 | 2018-09-20T07:27:40.000Z | 2022-03-19T07:55:42.000Z | macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 80 | 2018-09-26T18:55:56.000Z | 2022-02-10T02:03:26.000Z | macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 72 | 2018-08-30T00:49:15.000Z | 2022-02-15T23:22:40.000Z | """Convert a Caffe model file to TensorFlow checkpoint format.
Assume that the network built is a equivalent (or a sub-) to the Caffe
definition.
"""
import tensorflow as tf
from nets import caffe_scope
from nets import nets_factory
slim = tf.contrib.slim
# =========================================================================== #
# Main flags.
# =========================================================================== #
tf.app.flags.DEFINE_string(
'model_name', 'ssd_300_vgg', 'Name of the model to convert.')
tf.app.flags.DEFINE_string(
'num_classes', 21, 'Number of classes in the dataset.')
tf.app.flags.DEFINE_string(
'caffemodel_path', None,
'The path to the Caffe model file to convert.')
FLAGS = tf.app.flags.FLAGS
# =========================================================================== #
# Main converting routine.
# =========================================================================== #
if __name__ == '__main__':
tf.app.run()
| 33.238806 | 79 | 0.579704 |
e1faa2d284c1670dec2da5bc75095f1370cf8e94 | 1,211 | py | Python | setup.py | danihodovic/django-toolshed | 78d559db662488bafbd3f701f4c0c5304ae151d9 | [
"MIT"
] | 3 | 2021-08-09T11:59:16.000Z | 2021-08-09T12:44:54.000Z | setup.py | danihodovic/django-toolshed | 78d559db662488bafbd3f701f4c0c5304ae151d9 | [
"MIT"
] | null | null | null | setup.py | danihodovic/django-toolshed | 78d559db662488bafbd3f701f4c0c5304ae151d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import re
from setuptools import find_packages, setup
version = get_version("django_toolshed", "__init__.py")
readme = open("README.md").read()
setup(
name="django-toolshed",
version=version,
description="""Your project description goes here""",
long_description=readme,
author="Dani Hodovic",
author_email="you@example.com",
url="https://github.com/danihodovic/django-toolshed",
packages=find_packages(),
include_package_data=True,
install_requires=[],
license="MIT",
keywords="django,app",
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django :: 2.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
],
)
| 28.162791 | 88 | 0.652353 |
e1fcd5a6b602e7b63e359f1c120e157503211aa4 | 5,686 | py | Python | detection/models/roi_extractors/roi_align.py | waiiinta/object_detection_lab | 6af56ab1c0f595181d87163c62e613398ac96af8 | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | detection/models/roi_extractors/roi_align.py | waiiinta/object_detection_lab | 6af56ab1c0f595181d87163c62e613398ac96af8 | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | detection/models/roi_extractors/roi_align.py | waiiinta/object_detection_lab | 6af56ab1c0f595181d87163c62e613398ac96af8 | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | import tensorflow as tf
from detection.utils.misc import *
| 45.854839 | 155 | 0.577559 |
e1ff64213edb5548904c05273b193883e930a827 | 150 | py | Python | examples/simple_regex/routes/__init__.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | examples/simple_regex/routes/__init__.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | examples/simple_regex/routes/__init__.py | nekonoshiri/tiny-router | 3bb808bcc9f9eb368ee390179dfc5e9d48cf8600 | [
"MIT"
] | null | null | null | from ..router import Router
from . import create_user, get_user
router = Router()
router.include(get_user.router)
router.include(create_user.router)
| 21.428571 | 35 | 0.8 |
c0003725e83dcd344816d0f9a584c175d9cf972f | 712 | py | Python | poetry/packages/constraints/any_constraint.py | vanyakosmos/poetry | b218969107e49dc57e65dbc0d349e83cbe1f44a8 | [
"MIT"
] | 2 | 2019-06-19T15:07:58.000Z | 2019-11-24T14:08:55.000Z | poetry/packages/constraints/any_constraint.py | vanyakosmos/poetry | b218969107e49dc57e65dbc0d349e83cbe1f44a8 | [
"MIT"
] | 18 | 2020-01-15T04:11:31.000Z | 2020-06-30T13:24:27.000Z | poetry/packages/constraints/any_constraint.py | vanyakosmos/poetry | b218969107e49dc57e65dbc0d349e83cbe1f44a8 | [
"MIT"
] | 1 | 2021-04-08T03:26:23.000Z | 2021-04-08T03:26:23.000Z | from .base_constraint import BaseConstraint
from .empty_constraint import EmptyConstraint
| 18.736842 | 45 | 0.622191 |
c0008a22baca00584adf6ac2a3f849b58fa45823 | 2,147 | py | Python | lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | lib/rucio/db/sqla/migrate_repo/versions/22d887e4ec0a_create_sources_table.py | brianv0/rucio | 127a36fd53e5b4d9eb14ab02fe6c36443d78bfd0 | [
"Apache-2.0"
] | null | null | null | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2015
"""Create sources table
Revision ID: 22d887e4ec0a
Revises: 1a80adff031a
Create Date: 2015-03-30 11:37:20.737582
"""
from alembic import context, op
import sqlalchemy as sa
from rucio.db.sqla.types import GUID
# revision identifiers, used by Alembic.
revision = '22d887e4ec0a'
down_revision = '1a80adff031a'
| 39.759259 | 133 | 0.633442 |
c000ce6357216e513a1258f8b804cf4a615522f7 | 4,024 | py | Python | algorithms/FdGars/FdGars_main.py | ss1004124654/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | [
"Apache-2.0"
] | 51 | 2021-05-24T08:38:52.000Z | 2022-03-28T13:14:21.000Z | algorithms/FdGars/FdGars_main.py | aqeelferoze/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | [
"Apache-2.0"
] | 6 | 2021-06-20T05:21:19.000Z | 2022-02-26T21:58:25.000Z | algorithms/FdGars/FdGars_main.py | aqeelferoze/DGFraud-TF2 | 18c2bcc03e850afb7d9b507464b366cad30d675f | [
"Apache-2.0"
] | 18 | 2021-06-01T12:36:51.000Z | 2022-03-30T15:18:34.000Z | """
This code is attributed to Yingtong Dou (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection in TensorFlow 2.X)
https://github.com/safe-graph/DGFraud-TF2
"""
import argparse
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras import optimizers
from algorithms.FdGars.FdGars import FdGars
from utils.data_loader import load_data_dblp
from utils.utils import preprocess_adj, preprocess_feature, sample_mask
# init the common args, expect the model specific args
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=123, help='random seed')
parser.add_argument('--epochs', type=int, default=200,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=512,
help='batch size')
parser.add_argument('--train_size', type=float, default=0.2,
help='training set percentage')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
parser.add_argument('--weight_decay', type=float, default=0.001,
help='weight decay')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--nhid', type=int, default=64,
help='number of hidden units in GCN')
args = parser.parse_args()
# set seed
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
def FdGars_main(support: list,
features: tf.SparseTensor,
label: tf.Tensor, masks: list,
args: argparse.ArgumentParser().parse_args()) -> None:
"""
Main function to train, val and test the model
:param support: a list of the sparse adjacency matrices
:param features: node feature tuple for all nodes {coords, values, shape}
:param label: the label tensor for all nodes
:param masks: a list of mask tensors to obtain the train, val, test data
:param args: additional parameters
"""
model = FdGars(args.input_dim, args.nhid, args.output_dim, args)
optimizer = optimizers.Adam(lr=args.lr)
# train
for epoch in tqdm(range(args.epochs)):
with tf.GradientTape() as tape:
train_loss, train_acc = model([support, features, label, masks[0]])
grads = tape.gradient(train_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
val_loss, val_acc = model([support, features, label, masks[1]],
training=False)
if epoch % 10 == 0:
print(
f"train_loss: {train_loss:.4f}, "
f"train_acc: {train_acc:.4f},"
f"val_loss: {val_loss:.4f},"
f"val_acc: {val_acc:.4f}")
# test
_, test_acc = model([support, features, label, masks[2]], training=False)
print(f"Test acc: {test_acc:.4f}")
if __name__ == "__main__":
# load the data
adj_list, features, [idx_train, _, idx_val, _, idx_test, _], y = \
load_data_dblp(meta=False, train_size=args.train_size)
# convert to dense tensors
train_mask = tf.convert_to_tensor(sample_mask(idx_train, y.shape[0]))
val_mask = tf.convert_to_tensor(sample_mask(idx_val, y.shape[0]))
test_mask = tf.convert_to_tensor(sample_mask(idx_test, y.shape[0]))
label = tf.convert_to_tensor(y, dtype=tf.float32)
# normalize the adj matrix and feature matrix
features = preprocess_feature(features)
support = preprocess_adj(adj_list[0])
# initialize the model parameters
args.input_dim = features[2][1]
args.output_dim = y.shape[1]
args.train_size = len(idx_train)
args.num_features_nonzero = features[1].shape
# cast sparse matrix tuples to sparse tensors
features = tf.cast(tf.SparseTensor(*features), dtype=tf.float32)
support = [tf.cast(tf.SparseTensor(*support), dtype=tf.float32)]
FdGars_main(support, features, label,
[train_mask, val_mask, test_mask], args)
| 36.917431 | 79 | 0.671223 |
c0014ae28f168834414c0db2df301fe99732fb5d | 849 | py | Python | problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | [
"MIT"
] | null | null | null | problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | [
"MIT"
] | null | null | null | problem3a.py | mvignoul/phys218_example | de40ca54ecaa493c171a7f032bcc2c50ad929a64 | [
"MIT"
] | null | null | null | """ find the Schwarzschild radius of the Sun in m using pint"""
import pint
def schwarz_rad(mass):
""" Given a mass, find the Schwarzschild radius """
star = Sun(mass)
radius = star.schwarz()
return radius
if __name__ == "__main__":
MASS = 1.0
RAD = schwarz_rad(MASS)
print(RAD)
| 29.275862 | 79 | 0.63722 |
c00233a09c18a5f027b1634d9d3dd63a23d04cbb | 1,009 | py | Python | morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 8 | 2018-12-08T01:41:58.000Z | 2020-12-21T15:30:12.000Z | morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 17 | 2019-02-05T15:01:32.000Z | 2020-04-28T16:17:42.000Z | morpfw/authn/pas/user/rulesprovider.py | morpframework/morpfw | b867e5809d6c52e8839586670a29fcd179ce64c7 | [
"Apache-2.0"
] | 2 | 2018-12-08T05:03:37.000Z | 2019-03-20T07:15:21.000Z | from ....crud.rulesprovider.base import RulesProvider
from .. import exc
from ..app import App
from ..utils import has_role
from .model import UserCollection, UserModel
| 34.793103 | 87 | 0.718533 |
c003ee8ec790d27dd3cd5b33bab3613edcd51ffa | 1,523 | py | Python | dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | [
"Unlicense"
] | 1 | 2020-08-24T00:26:08.000Z | 2020-08-24T00:26:08.000Z | dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | [
"Unlicense"
] | null | null | null | dsa_extras/library/codec_code/huffman.py | palette-swapped-serra/dsa-extras | 99544453da510b886b2a4c47cf2eceabee329cd2 | [
"Unlicense"
] | null | null | null | from dsa.parsing.line_parsing import line_parser
from dsa.parsing.token_parsing import make_parser
_parser = line_parser(
'Huffman table entry',
make_parser(
'Huffman table entry data',
('integer', 'encoded bit sequence'),
('hexdump', 'decoded bytes')
)
)
| 26.258621 | 67 | 0.54432 |
c006629acd80aec892cca41444a68366c027a55b | 6,486 | py | Python | evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | evap/evaluation/tests/test_auth.py | Sohn123/EvaP | 8b0ba8365cb673ef59829cf8db5ab829472a9c58 | [
"MIT"
] | null | null | null | from unittest.mock import patch
import urllib
from django.urls import reverse
from django.core import mail
from django.conf import settings
from django.test import override_settings
from model_bakery import baker
from evap.evaluation import auth
from evap.evaluation.models import Contribution, Evaluation, UserProfile
from evap.evaluation.tests.tools import WebTest
| 47 | 130 | 0.707216 |
c006bcd2ec1c5a47b7a93378891b836502179c96 | 1,827 | py | Python | gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | [
"Apache-2.0"
] | 136 | 2020-08-24T08:18:16.000Z | 2022-03-31T13:45:12.000Z | gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | [
"Apache-2.0"
] | 5 | 2020-10-07T08:44:36.000Z | 2021-12-17T06:00:57.000Z | gwd/converters/spike2kaggle.py | kazakh-shai/kaggle-global-wheat-detection | b26295ea257f73089f1a067b70b4a7ee638f6b83 | [
"Apache-2.0"
] | 28 | 2020-08-24T11:07:07.000Z | 2022-01-01T13:07:54.000Z | import argparse
import os.path as osp
from glob import glob
import cv2
import pandas as pd
from tqdm import tqdm
from gwd.converters import kaggle2coco
if __name__ == "__main__":
main()
| 38.0625 | 111 | 0.642036 |
c008713b35128a47b245f9ad063e4cc7dcc2e046 | 5,090 | py | Python | shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | 2 | 2017-06-26T18:06:58.000Z | 2017-10-11T21:45:29.000Z | shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | shuttl/tests/test_views/test_organization.py | shuttl-io/shuttl-cms | 50c85db0de42e901c371561270be6425cc65eccc | [
"MIT"
] | null | null | null | import json
from shuttl import app
from shuttl.tests import testbase
from shuttl.Models.Reseller import Reseller
from shuttl.Models.organization import Organization, OrganizationDoesNotExistException
| 40.72 | 117 | 0.621022 |
c00b0d921904cae0f3219c2a2df1410ec3c0ae18 | 3,477 | py | Python | emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | [
"MIT"
] | 193 | 2015-06-20T23:46:05.000Z | 2021-02-16T14:04:29.000Z | emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | [
"MIT"
] | 4 | 2015-08-23T15:25:55.000Z | 2016-01-06T11:29:20.000Z | emissary/controllers/load.py | LukeB42/Emissary | 31629a8baedc91a9b60c551a01b2b45372b9a8c7 | [
"MIT"
] | 21 | 2015-07-05T12:20:06.000Z | 2019-07-12T08:07:46.000Z | # This file contains functions designed for
# loading cron tables and storing new feeds.
from emissary import db
from sqlalchemy import and_
from emissary.controllers.utils import spaceparse
from emissary.controllers.cron import parse_timings
from emissary.models import APIKey, Feed, FeedGroup
def create_feed(log, db, key, group, feed):
"""
Takes a key object, a group name and a dictionary
describing a feed ({name:,url:,schedule:,active:})
and reliably attaches a newly created feed to the key
and group.
"""
if not type(feed) == dict:
log('Unexpected type when creating feed for API key "%s"' % key.name)
return
for i in ['name', 'schedule', 'active', 'url']:
if not i in feed.keys():
log('%s: Error creating feed. Missing "%s" field from feed definition.' % (key.name, i))
return
f = Feed.query.filter(and_(Feed.key == key, Feed.name == feed['name'])).first()
fg = FeedGroup.query.filter(and_(FeedGroup.key == key, FeedGroup.name == group)).first()
if f:
if f.group:
log('%s: Error creating feed "%s" in group "%s", feed already exists in group "%s".' % \
(key.name, feed['name'], group, f.group.name))
return
elif fg:
log('%s: %s: Adding feed "%s"' % (key.name, fg.name, f.name))
fg.append(f)
db.session.add(fg)
db.session.add(f)
db.session.commit()
return
if not fg:
log('%s: Creating feed group %s.' % (key.name, group))
fg = FeedGroup(name=group)
key.feedgroups.append(fg)
try:
parse_timings(feed['schedule'])
except Exception, e:
log('%s: %s: Error creating "%s": %s' % \
(key.name, fg.name, feed['name'], e.message))
log('%s: %s: Creating feed "%s"' % (key.name, fg.name, feed['name']))
f = Feed(
name=feed['name'],
url=feed['url'],
active=feed['active'],
schedule=feed['schedule']
)
fg.feeds.append(f)
key.feeds.append(f)
db.session.add(key)
db.session.add(fg)
db.session.add(f)
db.session.commit()
def parse_crontab(filename):
    """
    Get a file descriptor on filename and
    create feeds and groups for API keys therein.

    The crontab format (as handled below): lines starting with
    ``apikey: <key>`` select the API key for subsequent entries, and
    lines starting with ``http`` define a feed as
    ``<url> "<feed name>" "<group name>" <5-field cron schedule>``.
    Exits the interpreter (SystemExit) on unreadable files or bad keys.
    """
    # read filename into a string named crontab
    try:
        fd = open(filename, "r")
    except OSError:
        print "Error opening %s" % filename
        raise SystemExit
    crontab = fd.read()
    fd.close()
    # keep a resident api key on hand
    key = None
    for i, line in enumerate(crontab.split('\n')):
        # Set the APIKey we're working with when we find a line starting
        # with apikey:
        if line.startswith("apikey:"):
            if ' ' in line:
                key_str = line.split()[1]
                key = APIKey.query.filter(APIKey.key == key_str).first()
            if not key:
                print 'Malformed or unknown API key at line %i in %s: %s' % (i+1, filename, line)
                raise SystemExit
            else:
                print 'Using API key "%s".' % key.name
        if line.startswith("http"):
            feed = {'active': True}
            # Grab the URL and set the string to the remainder
            feed['url'] = line.split().pop(0)
            line = ' '.join(line.split()[1:])
            # Grab names and groups (quoted/space-separated parsing done
            # by spaceparse; returns falsy on failure).
            names = spaceparse(line)
            if not names:
                print "Error parsing feed or group name at line %i in %s: %s" % (i+1, filename, line)
                continue
            feed['name'], group = names[:2]
            # The schedule should be the last five items
            schedule = line.split()[-5:]
            try:
                parse_timings(schedule)
            except Exception, e:
                print "Error parsing schedule at line %i in %s: %s" % (i+1, filename, e.message)
                continue
            feed['schedule'] = ' '.join(schedule)
            # NOTE(review): ``log`` is not defined in this function and is
            # not imported at the top of the module as shown — this call
            # looks like it would raise NameError; confirm where ``log``
            # is meant to come from.
            create_feed(log, db, key, group, feed)
| 27.816 | 91 | 0.6543 |
c00f497cdfab4a0df082b81815ffb6293fc4eaf2 | 7,661 | py | Python | vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | [
"MIT"
] | null | null | null | vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | [
"MIT"
] | 10 | 2021-11-30T22:01:30.000Z | 2022-03-18T14:50:08.000Z | vodgen/main.py | Oveof/Vodgen | 3e4b9a715f385b76dc34d82ac188b6d3db170957 | [
"MIT"
] | null | null | null | """Vodgen app"""
from msilib.schema import Directory
import sys
import json
import re
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox,
QFileDialog, QLabel, QLineEdit, QMainWindow, QPlainTextEdit, QPushButton, QVBoxLayout, QWidget)
from videocutter import create_video
from thumbnail import Thumbnail, Player, Config, ImageInfo, MatchInfo
import sys
from os.path import exists
#sys.stdout = open("vodgen.log", "w")
import logging
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.warning, filename="./vodgen.log")
if not exists("./characterinfo.json"):
logging.error("characterinfo.json could not be found!")
exit()
if not exists("./config.json"):
logging.error("config.json could not be found!")
exit()
def formatTitle(title):
    """
    Parse a VOD title string into player/round/game information.

    Expected title shape (inferred from the splits below — confirm):
    ``<prefix>: <game name> <round> - <team1>(<chars>) vs <team2>(<chars>)``
    where teams may contain multiple players joined by " + ".

    Returns a tuple ``(player_list, tournament_round, game_name)`` where
    player_list is a list of Player objects (name, character, team number
    0 or 1, ordinal position).

    Raises InvalidRoundName when the round is not a Winners/Losers/Grand
    Finals round, and MissingPlayer1Character / MissingPlayer2Character
    when a team has no parenthesised character list.  (These exception
    classes are defined elsewhere in this module.)
    """
    # Everything between ": " and " - " is "<game> <round>".
    game_info = title.split(": ")[1].split(" - ")[0]
    # The round is assumed to be the last two words, e.g. "Winners Finals".
    tournament_round = ' '.join(game_info.split(' ')[-2:])
    #gameRound = gameInfo.split(' ', 2)
    game_name = game_info.split(' ')[0]
    # Strip the round keyword off the end to recover multi-word game names.
    if "Winners" in game_info:
        game_name = game_info.split(' Winners')[0]
    elif "Losers" in game_info:
        game_name = game_info.split(' Losers')[0]
    elif "Grand Finals" in game_info:
        game_name = game_info.split(' Grand')[0]
    else:
        raise InvalidRoundName()
    # Everything after the first "-" holds "<team1> vs <team2>".
    player_info = title.split("-")[1]
    team1 = player_info.split("vs")[0].strip()
    team1_players = team1.split("(")[0].split(" + ")
    # Characters are the parenthesised, comma-separated list after a team.
    team1_characters_search = re.search(r"\(([A-Za-z0-9_, .+]+)\)", team1)
    if team1_characters_search == None:
        raise MissingPlayer1Character()
    team1_characters = team1_characters_search.group(1).split(", ")[0].split(" + ")
    team2 = player_info.split("vs")[1].strip()
    team2_players = team2.split("(")[0].split(" + ")
    team2_characters_search = re.search(r"\(([A-Za-z0-9_, .+]+)\)", team2)
    if team2_characters_search == None:
        raise MissingPlayer2Character
    team2_characters = team2_characters_search.group(1).split(", ")[0].split(" + ")
    player_names = team1_players + team2_players
    player_characters = team1_characters + team2_characters
    player_list = []
    for x in range(len(player_names)):
        # First half of the combined list belongs to team 0, rest to team 1.
        if len(player_names) / 2 > x:
            team_num = 0
        else:
            team_num = 1
        player_list.append(Player(player_names[x], player_characters[x], team_num, x+1))
    return player_list, tournament_round, game_name
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec()
#sys.stdout.close()
| 38.497487 | 150 | 0.638951 |
c01048af256422693f245a8c170084866f81cf42 | 1,733 | py | Python | bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | [
"Apache-2.0"
] | 2 | 2018-02-09T08:12:34.000Z | 2020-06-16T17:45:33.000Z | bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | [
"Apache-2.0"
] | 2 | 2018-02-09T07:54:32.000Z | 2018-03-09T08:51:31.000Z | bin/plpproject.py | stefanct/pulp-tools | 63a05d59908534ad01133d0111e181fa69d00ce3 | [
"Apache-2.0"
] | 6 | 2018-03-08T11:12:22.000Z | 2019-12-05T12:36:47.000Z |
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import plptools as plp
| 23.739726 | 74 | 0.705713 |
c0115ad71776b57663adb5064185c84d654f136a | 5,177 | py | Python | tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 65 | 2015-04-21T10:41:18.000Z | 2022-01-02T16:25:40.000Z | tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 85 | 2015-01-20T14:23:52.000Z | 2022-02-19T17:08:46.000Z | tests/shell/test_console.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 32 | 2015-03-13T07:43:05.000Z | 2020-04-24T07:56:53.000Z | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the shell console
:author: Thomas Calmant
"""
# Pelix
from pelix.utilities import to_str, to_bytes
# Standard library
import random
import string
import sys
import threading
import time
# Tests
try:
import unittest2 as unittest
except ImportError:
import unittest
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
try:
import subprocess
except ImportError:
# Can't run the test if we can't start another process
pass
else:
| 30.633136 | 80 | 0.459533 |
c01227c807be8c1f87a3e23c71237c6860b77b30 | 708 | py | Python | src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | [
"Apache-2.0"
] | 3 | 2015-11-07T14:45:20.000Z | 2018-01-27T13:06:25.000Z | src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | [
"Apache-2.0"
] | null | null | null | src/util/utils.py | 5agado/intro-ai | dfb7cd636ad8f8ac2d88053f9d3f279730b8608a | [
"Apache-2.0"
] | null | null | null | import os
import math
| 27.230769 | 94 | 0.577684 |
c0126570af4c13122b92f578f7e7cd7fb226531b | 3,530 | py | Python | scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | [
"MIT"
] | null | null | null | scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | [
"MIT"
] | null | null | null | scripts/models/xgboost/test-xgboost_tuning3.py | jmquintana79/utilsDS | 1693810b6f10024542b30fdfedbfcd0518f32945 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Juan Quintana
# @Date: 2018-09-26 10:01:02
# @Last Modified by: Juan Quintana
# @Last Modified time: 2018-09-26 16:04:24
"""
XGBOOST Regressor with Bayesian tuning: OPTION 3
In this case it will be used hyperopt-sklearn and his native algorithm
"xgboost_regression".
NOTE: scikit-learn tools is not working for this estimator.
Reference: https://github.com/hyperopt/hyperopt-sklearn
"""
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import sys
sys.path.append('../../')
from datasets import solar
from tools.reader import get_dcol
from preprocessing.scalers.normalization import Scaler
from models.metrics import metrics_regression
from tools.timer import *
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, KFold
import time
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
from sklearn.metrics import r2_score, mean_absolute_error
import os
os.environ['OMP_NUM_THREADS'] = str(2)
if __name__ == '__main__':
main()
| 32.990654 | 113 | 0.654958 |
c0128df7aa9cde7c55a1d29edac835d168b71fd9 | 719 | py | Python | EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | EstruturaDeRepeticao/exercicio32.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | # O Departamento Estadual de Meteorologia lhe contratou para desenvolver um programa que leia as um conjunto indeterminado de temperaturas, e informe ao final a menor e a maior temperaturas informadas, bem como a mdia das temperaturas.
temperaturas = []
while True:
graus = float(input("Digite a temperatura em graus (tecle 0 para parar): "))
temperaturas.append(graus)
media = sum(temperaturas) / len(temperaturas)
if graus == 0:
temperaturas.pop()
print("A maior temperatura registrada: {}C".format(max(temperaturas)))
print("A menor temperatura registrada: {}C".format(min(temperaturas)))
print("A temperatura mdia registrada: {}C".format(media))
break | 47.933333 | 236 | 0.709318 |
c012b837e3e30a6eafa5b481e3b7199fb7fec744 | 369 | py | Python | src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | [
"Apache-2.0"
] | null | null | null | src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | [
"Apache-2.0"
] | null | null | null | src/utils/tools.py | Xuenew/2c | 2e6ada011bcc8bbe19d2e745fcc9eff1fc31a520 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Created by howie.hu at 2021/4/7.
Description
Changelog: all notable changes to this file will be documented
"""
import hashlib
def md5_encryption(string: str) -> str:
    """Return the hexadecimal MD5 digest of *string*.

    The input is UTF-8 encoded before hashing.

    :param string: text to hash
    :return: 32-character lowercase hex digest
    """
    return hashlib.md5(string.encode("utf-8")).hexdigest()
| 18.45 | 66 | 0.631436 |
c0145129a570eee9c990a840954e93502103b3c8 | 14,612 | py | Python | anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | [
"Apache-2.0"
] | 2 | 2020-09-30T00:11:09.000Z | 2021-10-04T13:00:38.000Z | anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | [
"Apache-2.0"
] | 10 | 2020-05-11T20:29:28.000Z | 2022-01-13T01:41:27.000Z | anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/gradual/annotation.py | space-scl/emacs.d | 6285c38714023b72a023fe24cbcb5e4fcdcdb949 | [
"Apache-2.0"
] | 1 | 2020-01-25T20:08:59.000Z | 2020-01-25T20:08:59.000Z | """
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
"""
import re
from parso import ParserSyntaxError, parse
from jedi._compatibility import force_unicode
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import ContextSet, NO_CONTEXTS
from jedi.evaluate.gradual.typing import TypeVar, LazyGenericClass, \
AbstractAnnotatedClass
from jedi.evaluate.gradual.typing import GenericClass
from jedi.evaluate.helpers import is_string
from jedi.evaluate.compiled import builtin_from_name
from jedi import debug
from jedi import parser_utils
def eval_annotation(context, annotation):
    """
    Evaluates an annotation node. This means that it evaluates the part of
    `int` here:

        foo: int = 3

    Also checks for forward references (strings)
    """
    context_set = context.eval_node(annotation)
    if len(context_set) != 1:
        # Annotations should resolve to exactly one object; anything else
        # is suspicious, so warn and return the set unchanged.
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (annotation, context_set))
        return context_set

    evaled_context = list(context_set)[0]
    if is_string(evaled_context):
        # PEP 484 forward reference: the annotation is a string literal
        # whose contents are themselves an expression to evaluate.
        result = _get_forward_reference_node(context, evaled_context.get_safe_value())
        if result is not None:
            return context.eval_node(result)
    return context_set
def _split_comment_param_declaration(decl_text):
    """
    Split decl_text on commas, but group generic expressions
    together.

    For example, given "foo, Bar[baz, biz]" we return
    ['foo', 'Bar[baz, biz]'].
    """
    try:
        # Parse the declaration as Python so that commas nested inside
        # subscripts (generics) are not treated as separators.
        node = parse(decl_text, error_recovery=False).children[0]
    except ParserSyntaxError:
        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
        return []

    if node.type == 'name':
        # A single bare name — there is nothing to split.
        return [node.get_code().strip()]

    params = []
    try:
        children = node.children
    except AttributeError:
        # Node kinds without children (e.g. literals) yield no params.
        return []
    else:
        # Keep only the child nodes that look like type expressions,
        # skipping the comma operators between them.
        for child in children:
            if child.type in ['name', 'atom_expr', 'power']:
                params.append(child.get_code().strip())

    return params
def _infer_param(execution_context, param):
    """
    Infers the type of a function parameter, using type annotations.

    Supports both Python 3 annotations (``def f(x: int)``) and Python 2
    style type comments (``# type: (int) -> str``) placed after the
    function signature.  Returns NO_CONTEXTS when no usable annotation
    is found for this parameter.
    """
    annotation = param.annotation
    if annotation is None:
        # If no Python 3-style annotation, look for a Python 2-style comment
        # annotation.
        # Identify parameters to function in the same sequence as they would
        # appear in a type comment.
        all_params = [child for child in param.parent.children
                      if child.type == 'param']

        node = param.parent.parent
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        # Capture the parenthesised parameter list of the type comment.
        match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
        if not match:
            return NO_CONTEXTS
        params_comments = _split_comment_param_declaration(match.group(1))

        # Find the specific param being investigated
        index = all_params.index(param)
        # If the number of parameters doesn't match length of type comment,
        # ignore first parameter (assume it's self).
        if len(params_comments) != len(all_params):
            debug.warning(
                "Comments length != Params length %s %s",
                params_comments, all_params
            )
        # Local import to avoid a circular dependency at module load time.
        from jedi.evaluate.context.instance import InstanceArguments
        if isinstance(execution_context.var_args, InstanceArguments):
            if index == 0:
                # Assume it's self, which is already handled
                return NO_CONTEXTS
            index -= 1
        if index >= len(params_comments):
            return NO_CONTEXTS

        param_comment = params_comments[index]
        # Evaluate the comment fragment like a forward-reference string.
        return _evaluate_annotation_string(
            execution_context.function_context.get_default_param_context(),
            param_comment
        )
    # Annotations are like default params and resolve in the same way.
    context = execution_context.function_context.get_default_param_context()
    return eval_annotation(context, annotation)
def infer_type_vars_for_execution(execution_context, annotation_dict):
    """
    Some functions use type vars that are not defined by the class, but rather
    only defined in the function. See for example `iter`. In those cases we
    want to:

    1. Search for undefined type vars.
    2. Infer type vars with the execution state we have.
    3. Return the union of all type vars that have been found.

    :param execution_context: the function execution being analysed
    :param annotation_dict: mapping of parameter name -> annotation node
    :return: dict of type-var name -> inferred context set
    """
    context = execution_context.function_context.get_default_param_context()

    annotation_variable_results = {}
    executed_params, _ = execution_context.get_executed_params_and_issues()
    for executed_param in executed_params:
        try:
            annotation_node = annotation_dict[executed_param.string_name]
        except KeyError:
            # Parameter without an annotation — nothing to infer from.
            continue

        annotation_variables = find_unknown_type_vars(context, annotation_node)
        if annotation_variables:
            # Infer unknown type var
            annotation_context_set = context.eval_node(annotation_node)
            # star_count distinguishes plain params (0) from *args (1)
            # and **kwargs (2), which wrap the actual values.
            star_count = executed_param._param_node.star_count
            actual_context_set = executed_param.infer(use_hints=False)
            if star_count == 1:
                # *args: the annotation describes each element, so merge
                # the element types of the iterable.
                actual_context_set = actual_context_set.merge_types_of_iterate()
            elif star_count == 2:
                # TODO _dict_values is not public.
                actual_context_set = actual_context_set.try_merge('_dict_values')
            for ann in annotation_context_set:
                _merge_type_var_dicts(
                    annotation_variable_results,
                    _infer_type_vars(ann, actual_context_set),
                )

    return annotation_variable_results
def _infer_type_vars(annotation_context, context_set):
    """
    This function tries to find information about undefined type vars and
    returns a dict from type var name to context set.

    This is for example important to understand what `iter([1])` returns.
    According to typeshed, `iter` returns an `Iterator[_T]`:

        def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

    This functions would generate `int` for `_T` in this case, because it
    unpacks the `Iterable`.
    """
    type_var_dict = {}
    if isinstance(annotation_context, TypeVar):
        # A bare type var maps directly to the class of the actual values.
        return {annotation_context.py__name__(): context_set.py__class__()}
    elif isinstance(annotation_context, LazyGenericClass):
        name = annotation_context.py__name__()
        if name == 'Iterable':
            # Iterable[T]: recurse into T with the element types of the
            # actual iterable values.
            given = annotation_context.get_generics()
            if given:
                for nested_annotation_context in given[0]:
                    _merge_type_var_dicts(
                        type_var_dict,
                        _infer_type_vars(
                            nested_annotation_context,
                            context_set.merge_types_of_iterate()
                        )
                    )
        elif name == 'Mapping':
            # Mapping[K, V]: match the key and value generics against the
            # actual mapping's key/value contexts.
            given = annotation_context.get_generics()
            if len(given) == 2:
                for context in context_set:
                    try:
                        method = context.get_mapping_item_contexts
                    except AttributeError:
                        # Actual value is not mapping-like; skip it.
                        continue
                    key_contexts, value_contexts = method()

                    for nested_annotation_context in given[0]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                key_contexts,
                            )
                        )
                    for nested_annotation_context in given[1]:
                        _merge_type_var_dicts(
                            type_var_dict,
                            _infer_type_vars(
                                nested_annotation_context,
                                value_contexts,
                            )
                        )
    return type_var_dict
| 35.990148 | 94 | 0.638653 |
c01466f2b1b58f8291be4e054c30cb12aa407427 | 326 | py | Python | string_30.py | Technicoryx/python_strings_inbuilt_functions | 78892d043c6c6d65affe3bd4906ba0162c5d6604 | [
"MIT"
] | null | null | null | string_30.py | Technicoryx/python_strings_inbuilt_functions | 78892d043c6c6d65affe3bd4906ba0162c5d6604 | [
"MIT"
] | null | null | null | string_30.py | Technicoryx/python_strings_inbuilt_functions | 78892d043c6c6d65affe3bd4906ba0162c5d6604 | [
"MIT"
] | null | null | null | """Below Python Programme demonstrate rpartition
functions in a string"""
string = "Python is fun"
# 'is' separator is found
print(string.rpartition('is '))
# 'not' separator is not found
print(string.rpartition('not '))
string = "Python is fun, isn't it"
# splits at last occurence of 'is'
print(string.rpartition('is'))
| 21.733333 | 48 | 0.717791 |
c014e0fef503433734848ae3b6b9307338d2ae08 | 4,583 | py | Python | env/lib/python3.8/site-packages/unidecode/x054.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
] | 48 | 2021-11-20T08:17:53.000Z | 2022-03-19T13:57:15.000Z | venv/lib/python3.6/site-packages/unidecode/x054.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
] | 392 | 2015-07-30T14:37:05.000Z | 2022-03-21T16:56:09.000Z | venv/lib/python3.6/site-packages/unidecode/x054.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
] | 15 | 2015-10-01T21:31:08.000Z | 2020-05-05T00:03:27.000Z | data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
| 17.694981 | 19 | 0.382064 |
c0176451a4af477e1653a56580ea468230721ad1 | 2,023 | py | Python | merge_sort.py | BCLaird/refreshers | 135e21fdb4396d7b2c558cb08b7e9abf9db7c768 | [
"Unlicense"
] | null | null | null | merge_sort.py | BCLaird/refreshers | 135e21fdb4396d7b2c558cb08b7e9abf9db7c768 | [
"Unlicense"
] | null | null | null | merge_sort.py | BCLaird/refreshers | 135e21fdb4396d7b2c558cb08b7e9abf9db7c768 | [
"Unlicense"
] | null | null | null | import sys
import unittest
def merge(nums1, nums2):
    """
    Merge two sorted lists into one combined sorted list.

    Runs in O(len(nums1) + len(nums2)).  The previous implementation
    repeatedly called ``pop(0)``, which is O(n) per call (every pop
    shifts the remaining elements), making the merge quadratic and
    destructively emptying both input lists.  This version walks the
    lists with indices instead, so the inputs are left untouched.

    The merge is stable: on ties, elements of nums1 come first.

    :param nums1: Sorted list of numbers.
    :param nums2: Sorted list of numbers.
    :return: Combined sorted list of numbers.
    """
    merged = []
    i = j = 0
    while i < len(nums1) and j < len(nums2):
        if nums1[i] <= nums2[j]:
            merged.append(nums1[i])
            i += 1
        else:
            merged.append(nums2[j])
            j += 1
    # At most one of these slices is non-empty; append the leftovers.
    merged.extend(nums1[i:])
    merged.extend(nums2[j:])
    return merged
def merge_sort(nums):
    """
    Sort a list of numbers using top-down merge sort.

    Fixes vs. the previous version:
    - uses floor division (``//``) for the midpoint: on Python 3,
      ``len(nums) / 2`` is a float and slicing with it raises TypeError;
    - treats lists of length <= 1 as the base case, so ``merge_sort([])``
      terminates (previously the empty list never hit the length-1 base
      case and recursed forever).

    :param nums: List of numbers to sort.
    :return: Sorted list of numbers.
    """
    if len(nums) <= 1:
        # A list of zero or one elements is already sorted.
        return nums
    mid = len(nums) // 2
    left = merge_sort(nums[:mid])
    right = merge_sort(nums[mid:])
    return _merge_sorted(left, right)


def _merge_sorted(left, right):
    """Merge two sorted lists in O(len(left) + len(right)) without mutating them."""
    out = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            out.append(left[i])
            i += 1
        else:
            out.append(right[j])
            j += 1
    out.extend(left[i:])
    out.extend(right[j:])
    return out
if __name__ == "__main__":
    # Announce test mode, then hand control to unittest's discovery/runner;
    # unittest.main() collects any TestCase classes defined in this module
    # (presumably defined outside the visible chunk — confirm) and
    # sys.exit propagates its result as the process exit code.
    sys.stdout.write("Bryan Laird merge_sort module. Test mode.\n")
    sys.exit(unittest.main())
| 25.2875 | 68 | 0.565991 |
c017dec4951ea873a5632989e93e1321faa87a5d | 1,548 | py | Python | tests/test_utils/test_textio.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 2,261 | 2021-04-08T03:45:41.000Z | 2022-03-31T23:37:46.000Z | tests/test_utils/test_textio.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 789 | 2021-04-08T05:40:13.000Z | 2022-03-31T09:42:39.000Z | tests/test_utils/test_textio.py | hongxuenong/mmocr | e8e3a059f8f2e4fca96af37751c33563fc48e2ba | [
"Apache-2.0"
] | 432 | 2021-04-08T03:56:16.000Z | 2022-03-30T18:44:43.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from mmocr.utils import list_from_file, list_to_file
# Fixture data covering edge cases for the list <-> file round-trip:
# empty list, whitespace-only entries, single values of several types,
# mixed-type rows, and empty strings.  Presumably consumed by the
# list_from_file/list_to_file tests below (truncated out of view).
lists = [
    [],
    [' '],
    ['\t'],
    ['a'],
    [1],
    [1.],
    ['a', 'b'],
    ['a', 1, 1.],
    [1, 1., 'a'],
    ['', ''],
    ['', 'nol', '', ''],
]
| 32.25 | 77 | 0.505168 |
c01a0bc60c407d254aecde1ca9086a30bc750870 | 2,638 | py | Python | License Plate Detection.py | jairajsahgal/License_Plate_and_Face_Recognition | 6a9762f2ca90730e828b3d256418b073b9e80cb0 | [
"Apache-2.0"
] | null | null | null | License Plate Detection.py | jairajsahgal/License_Plate_and_Face_Recognition | 6a9762f2ca90730e828b3d256418b073b9e80cb0 | [
"Apache-2.0"
] | null | null | null | License Plate Detection.py | jairajsahgal/License_Plate_and_Face_Recognition | 6a9762f2ca90730e828b3d256418b073b9e80cb0 | [
"Apache-2.0"
] | null | null | null | import cv2
from Text_Detection import detect_characters, detect_string, detect_words
import re
from live_recognition import facial_recognition
#
####################################################
# --- Capture / detection configuration -------------------------------
frameWidth = 640
frameHeight = 480
# Haar cascade trained on (Russian-format) licence plates; path is
# relative to the working directory — confirm it exists at runtime.
nPlateCascade = cv2.CascadeClassifier("../../Resources/haarcascade_russian_plate_number.xml")
minArea=500          # minimum bounding-box area (px^2) to accept as a plate
color=(255,0,255)    # BGR colour used for rectangles and text
name=None            # set once by facial_recognition on the first plate hit
# count = 0
# Two-letter Indian state codes (upper- and lower-case) used to decide
# whether OCR output looks like a real plate number.
state_codes = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA', 'GJ', 'HR', 'HP', 'JH', 'KA', 'KL', 'MP', 'MH', 'MN', 'ML', 'MZ', 'NL', 'OD', 'PB', 'RJ', 'SK', 'TN', 'TR', 'UP', 'WB', 'TS','ap', 'ar', 'as', 'br', 'cg', 'ga', 'gj', 'hr', 'hp', 'jh', 'ka', 'kl', 'mp', 'mh', 'mn', 'ml', 'mz', 'nl', 'od', 'pb', 'rj', 'sk', 'tn', 'tr', 'up', 'wb', 'ts']
######################################################
# cap = cv2.VideoCapture("C:\\Users\\jaira\\PycharmProjects\\opencv_tutorial\\Resources\\test.mp4")
cap=cv2.VideoCapture(0,cv2.CAP_DSHOW)
cap.set(3, frameWidth)   # property 3 = frame width
cap.set(4, frameHeight)  # property 4 = frame height
cap.set(10,150)          # property 10 = brightness
success, img = cap.read()
# --- Main capture loop: detect plates, OCR them, then run face
# --- recognition once a plausible plate number is seen.  Press 'q' to quit.
while success:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 4)
    for (x, y, w, h) in numberPlates:
        area = w*h
        if area > minArea:
            # Outline the candidate plate on the live frame.
            cv2.rectangle(img=img,pt1=(x,y),pt2=(x+w,y+h),
                          color=color,thickness=2)
            # cv2.putText(img=img,text="Number Plate",org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
            # Crop the region of interest and show it in its own window.
            imgRoi=img[y:y+h,x:x+w]
            cv2.moveWindow("ROI",40,30)
            cv2.imshow(winname="ROI",mat=imgRoi)
            temp=detect_words(imgRoi)
            # Accept the OCR result only if it contains a state code and
            # matches the plate shape: 10 chars, digits at positions 2-3
            # and at the end (e.g. KA01AB1234).
            for i in state_codes:
                if i in temp:
                    # NOTE(review): temp2 is computed but never used —
                    # possibly meant to replace ``temp`` in the checks below.
                    temp2 = ''.join(ch for ch in temp if ch.isalnum() and ch!="." and ch!="_")
                    if temp[-2:].isnumeric() and temp[2:4].isnumeric() and len(temp)==10:
                        cv2.putText(img=img,text=temp,org=(x,y-5),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,color=color,fontScale=1,thickness=2)
                        print(temp)
                        if name==None:
                            # Run face recognition only once per session.
                            name,face_img=facial_recognition(img)
                            cv2.imshow("Face Recognition",face_img)
    cv2.imshow("Result", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # except:
    # break
cv2.destroyAllWindows() | 41.873016 | 338 | 0.559515 |
c01a696f221a5d8d2da0a1df4899941c79bacd5a | 17,268 | py | Python | LogParser/LTEV2.py | a22057916w/python_advance | c964ad3237b503f5ef83e1add12d8007113690b1 | [
"MIT"
] | null | null | null | LogParser/LTEV2.py | a22057916w/python_advance | c964ad3237b503f5ef83e1add12d8007113690b1 | [
"MIT"
] | null | null | null | LogParser/LTEV2.py | a22057916w/python_advance | c964ad3237b503f5ef83e1add12d8007113690b1 | [
"MIT"
] | null | null | null | ##! python3
##==============================================================================
## Copyright (c) 2021 COMPAL Electronic Inc. All rights reserved.
## This program contains proprietary and confidential information.
## All rights reserved except as may be permitted by prior written consent.
##
## Compal STiD NPSD Test Program Release Notification.
##
## ModuleName:
## LTE.py (Log to Excel)
##
## Abstract:
## Parsing log info to a excel with 4 sheets.
## 1. Read log file: parse -> store (a list of dict)
## 2. Read the INI threshold data: store as dict
## 3. New excel workbook: by openpyxl
## 4. Set worksheet according to Step 1: by dict and DataFrame
## 5. Set condition formating for each sheet
## according to Step 2: by dict
## 6. Save the workbook to xlsx file
##
## Author:
## 25-Oct-2021 Willy Chen
##
## Revision History:
## Rev 1.0.0.1 25-Oct-2021 Willy
## First create.
##==============================================================================
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"
#[ParseLogPath]
g_strLogDir = "./Log/Pass"
#/====================================================================\#
#| Functions of printing log of LTE.py |#
#\====================================================================/#
def getDateTimeFormat():
strDateTime = "[%s]" % (time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
return strDateTime
def printLog(strPrintLine):
strFileName = os.path.basename(__file__).split('.')[0]
fileLog = codecs.open(g_strFileName + ".log", 'a', "utf-8")
print(strPrintLine)
fileLog.write("%s%s\r\n" % (getDateTimeFormat(), strPrintLine))
fileLog.close()
if __name__ == "__main__":
global g_strFileName, g_strINIPath, g_nMethodIndex
g_strFileName = os.path.basename(__file__).split('.')[0]
g_strINIPath = os.path.join(os.getcwd(), g_strFileName + ".ini")
g_nMethodIndex = 1
printLog("========== Start ==========")
printLog("[I][main] Python " + sys.version)
printLog("[I][main] %s.py %s" % (g_strFileName, g_strVersion))
# ------------ find the target file --------------
try:
LogParser = cLogParser()
LogParser.log_to_excel()
except Exception as e:
printLog("[E][main] Unexpected Error: " + str(e))
printLog("========== End ==========")
| 47.180328 | 154 | 0.527044 |
c01a9c714b265a55e25bf66dffd00ac40d13d9db | 1,504 | py | Python | cnn/test2.py | INFINITSY/darts | 684f97e407ee044a14c375f4a3078398a4b802bc | [
"Apache-2.0"
] | null | null | null | cnn/test2.py | INFINITSY/darts | 684f97e407ee044a14c375f4a3078398a4b802bc | [
"Apache-2.0"
] | null | null | null | cnn/test2.py | INFINITSY/darts | 684f97e407ee044a14c375f4a3078398a4b802bc | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Number of skip-connections selected at successive search checkpoints (one
# entry per 5-epoch step) for several NAS configurations being compared.
# darts_025 = [0, 0, 0, 0, 2, 5, 6, 7, 8]
darts_025 = [0, 0, 0, 2, 3, 5, 7, 8]
darts_05 = [0, 0, 3, 3, 4, 4, 5, 7, 7]
adas_025_9 = [0, 0, 0, 0, 3, 5, 7]
adas_05_9 = [0, 0, 1, 4, 5, 6, 6, 7, 7, 7, 7]
adas_05_95 = []  # NOTE(review): never populated nor plotted in this script
adas_05_97 = [0, 0, 0, 2, 4, 4, 4, 4, 4, 6, 8]
mile = [0, 0, 0, 2, 4, 4, 4, 3, 4, 4, 4]
mile_adas_025_9 = [0, 0, 0, 0, 3, 4, 5, 5, 6, 6, 6]
mile_adas_05_9 = [0, 0, 0, 3, 4, 5, 5, 5, 5, 6, 6]
mile_adas_05_95 = [0, 0, 0, 0, 1, 1, 5, 5, 6, 6, 6]
mile_adas_05_97 = [0, 0, 0, 0, 0, 3, 3, 4, 4, 4, 4]
# Each x-range must produce exactly len(y) points: range(0, 36, 5) -> 8
# values for darts_025, range(0, 51, 5) -> 11 values for the MiLeNAS runs.
plt.plot(range(0, 36, 5), darts_025, '-o', label='DARTS, lr: 0.025')
# plt.plot(range(0, 41, 5), darts_05, '-o', label='DARTS, lr: 0.05')
#
# # plt.plot(range(0, 31, 5), adas_025_9, '-o', label='DARTS+Adas, lr: 0.025, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_9, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.9')
# # plt.plot(range(0, 51, 5), adas_05_97, '-o', label='DARTS+Adas, lr: 0.05, beta: 0.97')
plt.plot(range(0, 51, 5), mile, '--o', label='MiLeNAS, lr: 0.025')
plt.plot(range(0, 51, 5), mile_adas_025_9, '--o', label='MiLeNAS+Adas, lr: 0.025, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_9, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.9')
plt.plot(range(0, 51, 5), mile_adas_05_95, '--o', label='MiLeNAS+Adas, lr: 0.05, beta: 0.95')
plt.plot(range(0, 51, 5), mile_adas_05_97, '--o', linewidth=3.0, label='MiLeNAS+Adas, lr: 0.05, beta: 0.97')
plt.xlabel('Epoch')
plt.ylabel('#Skip-connection')
plt.legend()
plt.show()
| 44.235294 | 108 | 0.571809 |
c01b7158b50aafc1ed3b64cfb1feeaebd488a0fb | 21,040 | py | Python | great_international/panels/capital_invest.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2018-03-20T11:19:07.000Z | 2021-10-05T07:53:11.000Z | great_international/panels/capital_invest.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 802 | 2018-02-05T14:16:13.000Z | 2022-02-10T10:59:21.000Z | great_international/panels/capital_invest.py | uktrade/directory-cms | 8c8d13ce29ea74ddce7a40f3dd29c8847145d549 | [
"MIT"
] | 6 | 2019-01-22T13:19:37.000Z | 2019-07-01T10:35:26.000Z | from wagtail.admin.edit_handlers import (
InlinePanel, HelpPanel, FieldPanel, FieldRowPanel, MultiFieldPanel,
PageChooserPanel,
)
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from core.helpers import make_translated_interface
from core.panels import SearchEngineOptimisationPanel
| 37.841727 | 117 | 0.506369 |
c01b9112e0f0afc9d0edfc412d08f777b3d1b9d7 | 137 | py | Python | _6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | _6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | _6_EXERCISE_BASIC SYNTAX, CONDITIONAL STATEMENTS AND LOOPS/_7_Maximum_Multiple.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | divisor = int(input())
bound = int(input())
for num in range(bound, 0, -1):
if num % divisor == 0:
print(num)
break
| 17.125 | 31 | 0.540146 |
c01c83fcecb0cd27f766c4572561b04ffed5866c | 595 | py | Python | Task1C.py | benkw26/IA-Flood-Warning-Project | ded20ebd52f4d328d7437682cffebf6843d26aa3 | [
"MIT"
] | null | null | null | Task1C.py | benkw26/IA-Flood-Warning-Project | ded20ebd52f4d328d7437682cffebf6843d26aa3 | [
"MIT"
] | null | null | null | Task1C.py | benkw26/IA-Flood-Warning-Project | ded20ebd52f4d328d7437682cffebf6843d26aa3 | [
"MIT"
] | null | null | null | from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
def run():
    """Requirements for Task 1C"""
    # Build the full station list from the flood-monitoring data feed.
    stations = build_station_list()
    # Coordinates of Cambridge City Centre (latitude, longitude).
    CambCoord = (52.2053, 0.1218)
    # Search radius — presumably kilometres; confirm against
    # stations_within_radius in floodsystem.geo.
    radius = 10
    near_cambstations = stations_within_radius(stations, CambCoord, radius)
    # Print the matching station names alphabetically.
    print(sorted([station.name for station in near_cambstations]))
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run() | 31.315789 | 75 | 0.719328 |
c01ee7097d54131caabca59efa7cd0ede8253fd5 | 2,072 | py | Python | fixture/project.py | Sashatq/bugtrack | ce8bcac2b9041b5ea34de30a10a3431fc62ec21a | [
"Apache-2.0"
] | null | null | null | fixture/project.py | Sashatq/bugtrack | ce8bcac2b9041b5ea34de30a10a3431fc62ec21a | [
"Apache-2.0"
] | null | null | null | fixture/project.py | Sashatq/bugtrack | ce8bcac2b9041b5ea34de30a10a3431fc62ec21a | [
"Apache-2.0"
] | null | null | null | from model.objects import Objects
import time
| 37 | 95 | 0.639479 |
c0200d7fd7135bdc552ee8dcbd7eedcc4a90fd2d | 4,597 | py | Python | john_doe/cities/hungary.py | xioren/JohnDoe | 4bd16f394709cac246438c8ffd650b4b301cb2b7 | [
"MIT"
] | null | null | null | john_doe/cities/hungary.py | xioren/JohnDoe | 4bd16f394709cac246438c8ffd650b4b301cb2b7 | [
"MIT"
] | null | null | null | john_doe/cities/hungary.py | xioren/JohnDoe | 4bd16f394709cac246438c8ffd650b4b301cb2b7 | [
"MIT"
] | null | null | null | cities = [
'Budapest',
'Debrecen',
'Miskolc',
'Szeged',
'Pecs',
'Zuglo',
'Gyor',
'Nyiregyhaza',
'Kecskemet',
'Szekesfehervar',
'Szombathely',
'Jozsefvaros',
'Paradsasvar',
'Szolnok',
'Tatabanya',
'Kaposvar',
'Bekescsaba',
'Erd',
'Veszprem',
'Erzsebetvaros',
'Zalaegerszeg',
'Kispest',
'Sopron',
'Eger',
'Nagykanizsa',
'Dunaujvaros',
'Hodmezovasarhely',
'Salgotarjan',
'Cegled',
'Ozd',
'Baja',
'Vac',
'Szekszard',
'Papa',
'Gyongyos',
'Kazincbarcika',
'Godollo',
'Gyula',
'Hajduboszormeny',
'Kiskunfelegyhaza',
'Ajka',
'Oroshaza',
'Mosonmagyarovar',
'Dunakeszi',
'Kiskunhalas',
'Esztergom',
'Jaszbereny',
'Komlo',
'Nagykoros',
'Mako',
'Budaors',
'Szigetszentmiklos',
'Tata',
'Szentendre',
'Hajduszoboszlo',
'Siofok',
'Torokszentmiklos',
'Hatvan',
'Karcag',
'Gyal',
'Monor',
'Keszthely',
'Varpalota',
'Bekes',
'Dombovar',
'Paks',
'Oroszlany',
'Komarom',
'Vecses',
'Mezotur',
'Mateszalka',
'Mohacs',
'Csongrad',
'Kalocsa',
'Kisvarda',
'Szarvas',
'Satoraljaujhely',
'Hajdunanas',
'Balmazujvaros',
'Mezokovesd',
'Tapolca',
'Szazhalombatta',
'Balassagyarmat',
'Tiszaujvaros',
'Dunaharaszti',
'Fot',
'Dabas',
'Abony',
'Berettyoujfalu',
'Puspokladany',
'God',
'Sarvar',
'Gyomaendrod',
'Kiskoros',
'Pomaz',
'Mor',
'Sarospatak',
'Batonyterenye',
'Bonyhad',
'Gyomro',
'Tiszavasvari',
'Ujfeherto',
'Nyirbator',
'Sarbogard',
'Nagykata',
'Budakeszi',
'Pecel',
'Pilisvorosvar',
'Sajoszentpeter',
'Szigethalom',
'Balatonfured',
'Hajduhadhaz',
'Kisujszallas',
'Dorog',
'Kormend',
'Marcali',
'Barcs',
'Tolna',
'Tiszafured',
'Kiskunmajsa',
'Tiszafoldvar',
'Albertirsa',
'Nagyatad',
'Tiszakecske',
'Toeroekbalint',
'Koszeg',
'Celldomolk',
'Heves',
'Mezobereny',
'Szigetvar',
'Pilis',
'Veresegyhaz',
'Bicske',
'Edeleny',
'Lajosmizse',
'Kistarcsa',
'Hajdusamson',
'Csorna',
'Nagykallo',
'Isaszeg',
'Sarkad',
'Kapuvar',
'Ullo',
'Siklos',
'Toekoel',
'Maglod',
'Paszto',
'Szerencs',
'Turkeve',
'Szeghalom',
'Kerepes',
'Jaszapati',
'Janoshalma',
'Tamasi',
'Kunszentmarton',
'Hajdudorog',
'Vasarosnameny',
'Solymar',
'Rackeve',
'Derecske',
'Kecel',
'Nadudvar',
'Ocsa',
'Dunafoldvar',
'Fehergyarmat',
'Kiskunlachaza',
'Kunszentmiklos',
'Szentgotthard',
'Devavanya',
'Biatorbagy',
'Kunhegyes',
'Lenti',
'Ercsi',
'Balatonalmadi',
'Polgar',
'Tura',
'Suelysap',
'Fuzesabony',
'Jaszarokszallas',
'Gardony',
'Tarnok',
'Nyiradony',
'Zalaszentgrot',
'Sandorfalva',
'Soltvadkert',
'Nyergesujfalu',
'Bacsalmas',
'Csomor',
'Putnok',
'Veszto',
'Kistelek',
'Zirc',
'Halasztelek',
'Mindszent',
'Acs',
'Enying',
'Letavertes',
'Nyirtelek',
'Szentlorinc',
'Felsozsolca',
'Solt',
'Fegyvernek',
'Nagyecsed',
'Encs',
'Ibrany',
'Mezokovacshaza',
'Ujszasz',
'Bataszek',
'Balkany',
'Sumeg',
'Tapioszecso',
'Szabadszallas',
'Battonya',
'Polgardi',
'Mezocsat',
'Totkomlos',
'Piliscsaba',
'Szecseny',
'Fuzesgyarmat',
'Kaba',
'Pusztaszabolcs',
'Teglas',
'Mezohegyes',
'Jaszladany',
'Tapioszele',
'Aszod',
'Diosd',
'Taksony',
'Tiszalok',
'Izsak',
'Komadi',
'Lorinci',
'Alsozsolca',
'Kartal',
'Dunavarsany',
'Erdokertes',
'Janossomorja',
'Kerekegyhaza',
'Balatonboglar',
'Szikszo',
'Domsod',
'Nagyhalasz',
'Kisber',
'Kunmadaras',
'Berhida',
'Kondoros',
'Melykut',
'Jaszkiser',
'Csurgo',
'Csorvas',
'Nagyszenas',
'Ujkigyos',
'Tapioszentmarton',
'Tat',
'Egyek',
'Tiszaluc',
'Orbottyan',
'Rakoczifalva',
'Hosszupalyi',
'Paty',
'Elek',
'Vamospercs',
'Morahalom',
'Bugyi',
'Emod',
'Labatlan',
'Csakvar',
'Algyo',
'Kenderes',
'Csenger',
'Fonyod',
'Rakamaz',
'Martonvasar',
'Devecser',
'Orkeny',
'Tokaj',
'Tiszaalpar',
'Kemecse',
'Korosladany'
]
| 16.301418 | 24 | 0.513161 |
c02010efda9ce4c421135232fa4f140efb168b1f | 969 | py | Python | carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 2 | 2020-10-30T15:07:28.000Z | 2020-12-22T04:29:50.000Z | carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 45 | 2020-09-22T12:47:55.000Z | 2022-03-12T00:48:18.000Z | carbon0/carbon_quiz/migrations/0010_auto_20200909_0853.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 1 | 2020-09-08T15:48:13.000Z | 2020-09-08T15:48:13.000Z | # Generated by Django 3.1.1 on 2020-09-09 12:53
from django.db import migrations, models
| 25.5 | 79 | 0.522188 |
c02051ed0ef783ea63f4159e47ac37ce14107e5a | 353 | py | Python | wstack/cli/input.py | CCSGroupInternational/wstack | 3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e | [
"Apache-2.0"
] | null | null | null | wstack/cli/input.py | CCSGroupInternational/wstack | 3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e | [
"Apache-2.0"
] | null | null | null | wstack/cli/input.py | CCSGroupInternational/wstack | 3b6d75cc6897a0e33d9a3ebb20a2f1642205d51e | [
"Apache-2.0"
] | null | null | null | import json
from ..webstack import run as webstack_run
| 29.416667 | 59 | 0.668555 |
c02268e8d6e0cd77362adcfa474291e89d12f983 | 4,396 | py | Python | imgtags.py | Donearm/scripts | ad3429dc4b69e6108f538bf1656216c7a192c9fd | [
"OML"
] | 25 | 2015-02-23T00:07:14.000Z | 2022-03-27T01:57:41.000Z | imgtags.py | Donearm/scripts | ad3429dc4b69e6108f538bf1656216c7a192c9fd | [
"OML"
] | null | null | null | imgtags.py | Donearm/scripts | ad3429dc4b69e6108f538bf1656216c7a192c9fd | [
"OML"
] | 7 | 2015-11-25T22:04:37.000Z | 2020-02-18T22:11:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2011-2019, Gianluca Fiore
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
###############################################################################
#
# Requirements: Python 3.7 or later, Py3exiv
#
__author__ = "Gianluca Fiore"
__license__ = "GPL"
__version__ = "0.2"
__date__ = "20190912"
__email__ = "forod.g@gmail.com"
__status__ = "beta"
import sys
import argparse
import os.path
import py3exiv
def argument_parser():
    """Build and evaluate the command-line interface.

    Returns the parsed argparse namespace with attributes ``force``,
    ``image``, ``delete`` and ``tags``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f", "--force",
        dest="force",
        action="store_true",
        help="force writing of tags regardless of them being already present")
    parser.add_argument(
        "-i", "--image",
        dest="image",
        action="store",
        required=True,
        help="the image")
    parser.add_argument(
        "-d", "--delete",
        dest="delete",
        action="store_true",
        help="delete all tags present in an image")
    parser.add_argument(
        dest="tags",
        action="store",
        nargs="*",
        help="the tags to be written into the file")
    return parser.parse_args()
def write_tags(image, key, tags):
    """Write each tags into the iptc key inside an image. Tags must be a list"""
    # ``image`` is a pyexiv2.ImageMetadata instance; assigning an IptcTag
    # replaces any existing value under ``key``, and write() persists the
    # change to the file on disk.
    image[key] = pyexiv2.IptcTag(key, tags)
    image.write()
def delete_tags(metadata, key):
    """Delete the tag stored under ``key`` from ``metadata`` in place.

    Returns 1 (after printing a notice) when the key is not present;
    returns None on success, matching the original contract.
    """
    try:
        # Idiomatic form of the original metadata.__delitem__(key).
        del metadata[key]
    except KeyError:
        print("There's not a %s tag in this image, exiting..." % key)
        return 1
def main():
    """Command-line entry point: show, write, overwrite or delete the IPTC
    keyword tags ('Iptc.Application2.Keywords') of a single image file.

    Returns 0 on all handled paths and 1 when no valid image was supplied.
    May call sys.exit(0) directly when the user declines an overwrite.
    """
    options = argument_parser()
    image = os.path.abspath(options.image)
    # Only handle common raster formats that py3exiv2 can tag.
    if os.path.isfile(image) and image.endswith(('jpg', 'JPG', 'jpeg', 'JPEG', 'png', 'PNG', 'tiff', 'TIFF')):
        m = pyexiv2.ImageMetadata(image)
        m.read()
        # (Removed unused iptc/xmp/exif key-list locals from the original.)
        if options.delete:
            # Delete all keyword tags, if any exist.
            try:
                # Indexing raises KeyError when the key is absent.
                m['Iptc.Application2.Keywords']
                delete_tags(m, 'Iptc.Application2.Keywords')
                print("Deleting tags")
                m.write()
                return 0
            except KeyError:
                # There are already no tags: nothing to do.
                print(("%s has no tags, nothing to delete" % options.image))
                return 0
        if not options.tags:
            # Without tags given the user probably just wants to see the
            # already-present tags (if any).
            try:
                k = m['Iptc.Application2.Keywords']
                print(("%s is already tagged with %s " % (options.image, k.value)))
                return 0
            except KeyError:  # was a bare except; only KeyError is expected
                print(("%s has no tags set" % options.image))
                return 0
        else:
            try:
                k = m['Iptc.Application2.Keywords']
                if options.force:
                    # Force switch enabled: write tags without questions.
                    write_tags(m, 'Iptc.Application2.Keywords', options.tags)
                else:
                    print("There are already these tags present:\n")
                    for t in k.value:
                        print(t)
                    s = input("\nDo you want to overwrite them with %s ? [y/n] " % options.tags)
                    if s in ('y', 'Y'):
                        print("Writing tags")
                        write_tags(m, 'Iptc.Application2.Keywords', options.tags)
                    else:
                        print("Exiting...")
                        sys.exit(0)
            except KeyError:
                # No previously set tag with this name: write fresh tags.
                print("Writing tags")
                write_tags(m, 'Iptc.Application2.Keywords', options.tags)
    else:
        print("No image given")
        # Bug fix: the error path previously fell through returning None,
        # which made sys.exit(status) exit with 0 on bad input.
        return 1
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status
    # (sys.exit(None) exits with 0).
    status = main()
    sys.exit(status)
| 33.30303 | 110 | 0.526615 |
c024a36ba5d1c4863b44768937e8d76c18a7d61a | 4,344 | py | Python | notes-to-self/trace.py | guilledk/trio | d09c21df3ffe401ee4314d869d82a886bd776e3c | [
"Apache-2.0",
"MIT"
] | 4,681 | 2017-03-10T22:38:41.000Z | 2022-03-31T11:47:44.000Z | notes-to-self/trace.py | guilledk/trio | d09c21df3ffe401ee4314d869d82a886bd776e3c | [
"Apache-2.0",
"MIT"
] | 2,143 | 2017-03-11T05:58:32.000Z | 2022-03-31T10:29:00.000Z | notes-to-self/trace.py | guilledk/trio | d09c21df3ffe401ee4314d869d82a886bd776e3c | [
"Apache-2.0",
"MIT"
] | 313 | 2017-03-11T05:24:33.000Z | 2022-03-23T18:26:02.000Z | import trio
import os
import json
from itertools import count
# Experiment with generating Chrome Event Trace format, which can be browsed
# through chrome://tracing or other mechanisms.
#
# Screenshot: https://files.gitter.im/python-trio/general/fp6w/image.png
#
# Trace format docs: https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview#
#
# Things learned so far:
# - I don't understand how the ph="s"/ph="f" flow events work I think
# they're supposed to show up as arrows, and I'm emitting them between tasks
# that wake each other up, but they're not showing up.
# - I think writing out json synchronously from each event is creating gaps in
# the trace; maybe better to batch them up to write up all at once at the
# end
# - including tracebacks would be cool
# - there doesn't seem to be any good way to group together tasks based on
# nurseries. this really limits the value of this particular trace
# format+viewer for us. (also maybe we should have an instrumentation event
# when a nursery is opened/closed?)
# - task._counter should maybe be public
# - I don't know how to best show task lifetime, scheduling times, and what
# the task is actually doing on the same plot. if we want to show particular
# events like "called stream.send_all", then the chrome trace format won't
# let us also show "task is running", because neither kind of event is
# strictly nested inside the other
t = Trace(open("/tmp/t.json", "w"))
trio.run(parent, instruments=[t])
| 29.154362 | 109 | 0.570672 |
c024d083863172cd08c0e34544cc15c4d39eca0b | 345 | py | Python | common/data_refinery_common/models/__init__.py | dongbohu/ccdl_test | be50b7ca35fba28676b594ba3f003b0b581abcb7 | [
"BSD-3-Clause"
] | null | null | null | common/data_refinery_common/models/__init__.py | dongbohu/ccdl_test | be50b7ca35fba28676b594ba3f003b0b581abcb7 | [
"BSD-3-Clause"
] | 3 | 2020-06-05T17:18:10.000Z | 2021-06-10T20:55:12.000Z | common/data_refinery_common/models/__init__.py | dongbohu/ccdl_test | be50b7ca35fba28676b594ba3f003b0b581abcb7 | [
"BSD-3-Clause"
] | null | null | null | from data_refinery_common.models.surveys import SurveyJob, SurveyJobKeyValue
from data_refinery_common.models.batches import (
BatchStatuses,
Batch,
BatchKeyValue,
File
)
from data_refinery_common.models.jobs import (
WorkerJob,
DownloaderJob,
ProcessorJob
)
from data_refinery_common.models.organism import Organism
| 24.642857 | 76 | 0.791304 |
c0263e8b4a30de418c75ebe0717861acba376145 | 1,144 | py | Python | src/client_sample.py | ryoutoku/gunicorn-soap | 5b7c6bedb7fda1486eb4402114276bdc7fd0e77c | [
"MIT"
] | null | null | null | src/client_sample.py | ryoutoku/gunicorn-soap | 5b7c6bedb7fda1486eb4402114276bdc7fd0e77c | [
"MIT"
] | null | null | null | src/client_sample.py | ryoutoku/gunicorn-soap | 5b7c6bedb7fda1486eb4402114276bdc7fd0e77c | [
"MIT"
] | null | null | null | from zeep import Client
from models import RequestParameter
if __name__ == '__main__':
main()
| 22 | 66 | 0.532343 |
c028333a0436a3c88c477f3244b6bd0fca21d64d | 1,600 | py | Python | rest_api/views.py | vikash98k/django-rest-api | 51c83a5d9c65f03b4b790ac965cd2222c6326752 | [
"MIT"
] | 1 | 2021-11-15T03:29:24.000Z | 2021-11-15T03:29:24.000Z | rest_api/views.py | vikash98k/django-rest-api | 51c83a5d9c65f03b4b790ac965cd2222c6326752 | [
"MIT"
] | null | null | null | rest_api/views.py | vikash98k/django-rest-api | 51c83a5d9c65f03b4b790ac965cd2222c6326752 | [
"MIT"
] | null | null | null | from rest_framework import generics
from .permissions import IsOwner
from .serializers import BucketlistSerializer, UserSerializer
from .models import Bucketlist
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
| 39.02439 | 71 | 0.775625 |
c028ccbb3fd75e9d2a67753c98bec97a1ad49fb6 | 7,113 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/core/models/create_ip_sec_tunnel_bgp_session_details.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 37.240838 | 245 | 0.679882 |
c029732da347682368446ad63ee588078d4cc569 | 752 | py | Python | server_django/prikmeter/views.py | ttencate/smartmetertap | c768a5818766f897cb5dcd223286b173b31a3a65 | [
"BSD-3-Clause"
] | 1 | 2017-10-26T05:28:08.000Z | 2017-10-26T05:28:08.000Z | server_django/prikmeter/views.py | ttencate/smartmetertap | c768a5818766f897cb5dcd223286b173b31a3a65 | [
"BSD-3-Clause"
] | 9 | 2017-10-16T07:15:51.000Z | 2021-09-11T07:39:40.000Z | server_django/prikmeter/views.py | ttencate/smartmetertap | c768a5818766f897cb5dcd223286b173b31a3a65 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import auth, messages
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST, require_safe
| 26.857143 | 69 | 0.720745 |
c02a0889807a2eb0056cc9fc59fcd71cd6dcb6b8 | 146 | py | Python | scenarios/order_show/executable.py | trenton42/txbalanced | 9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14 | [
"MIT"
] | null | null | null | scenarios/order_show/executable.py | trenton42/txbalanced | 9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14 | [
"MIT"
] | null | null | null | scenarios/order_show/executable.py | trenton42/txbalanced | 9ee1b906d75b4b2fc3d2f5424dc3bbb9886c2b14 | [
"MIT"
] | null | null | null | import balanced
balanced.configure('ak-test-1o9QKwUCrwstHWO5sGxICtIJdQXFTjnrV')
order = balanced.Order.fetch('/orders/OR7qAh5x1cFzX0U9hD628LPa') | 29.2 | 64 | 0.842466 |
c02a0ebb0ba3ca2e51b79708e03d7785c34ec44d | 490 | py | Python | python/array/leetcode/move_zero.py | googege/algo-learn | 054d05e8037005c5810906d837de889108dad107 | [
"MIT"
] | 153 | 2020-09-24T12:46:51.000Z | 2022-03-31T21:30:44.000Z | python/array/leetcode/move_zero.py | googege/algo-learn | 054d05e8037005c5810906d837de889108dad107 | [
"MIT"
] | null | null | null | python/array/leetcode/move_zero.py | googege/algo-learn | 054d05e8037005c5810906d837de889108dad107 | [
"MIT"
] | 35 | 2020-12-22T11:07:06.000Z | 2022-03-09T03:25:08.000Z | from typing import List
#
| 19.6 | 51 | 0.418367 |
c02a3ab8ab4ad9227b45abfaa1e75b75d929e0e8 | 6,368 | py | Python | make_tfrecords.py | ssarfjoo/improvedsegan | df74761ed6404189ba26ccef40c38dddec334684 | [
"MIT"
] | 36 | 2017-10-26T04:15:48.000Z | 2021-08-10T02:10:18.000Z | make_tfrecords.py | ssarfjoo/improvedsegan | df74761ed6404189ba26ccef40c38dddec334684 | [
"MIT"
] | 4 | 2020-01-28T21:34:47.000Z | 2022-02-09T23:26:49.000Z | make_tfrecords.py | ssarfjoo/improvedsegan | df74761ed6404189ba26ccef40c38dddec334684 | [
"MIT"
] | 5 | 2018-09-03T11:50:41.000Z | 2021-12-25T08:58:45.000Z | from __future__ import print_function
import tensorflow as tf
import numpy as np
from collections import namedtuple, OrderedDict
from subprocess import call
import scipy.io.wavfile as wavfile
import argparse
import codecs
import timeit
import struct
import toml
import re
import sys
import os
def slice_signal(signal, window_size, stride=0.5):
    """Cut a 1-D signal into consecutive windows of ``window_size`` samples.

    Successive windows start ``int(window_size * stride)`` samples apart
    (stride=0.5 gives 50% overlap). A trailing chunk shorter than the
    window is discarded. Returns an int32 array of shape
    (n_windows, window_size).
    """
    assert signal.ndim == 1, signal.ndim
    total = signal.shape[0]
    hop = int(window_size * stride)
    windows = []
    # range() raises ValueError on a zero hop, same as the original zip of
    # two stepped ranges.
    for start in range(0, total, hop):
        stop = start + window_size
        if stop > total:
            # Starts only grow, so every later window would overrun too.
            break
        windows.append(signal[start:stop])
    return np.array(windows, dtype=np.int32)
def encoder_proc(wav_filename, noisy_path, out_file, wav_canvas_size, baseline_dir=None):
    """ Read and slice the wav and noisy files and write to TFRecords.
    out_file: TFRecordWriter.
    """
    # The noisy (and optional baseline) file shares the clean file's basename.
    ppath, wav_fullname = os.path.split(wav_filename)
    noisy_filename = os.path.join(noisy_path, wav_fullname)
    # NOTE(review): read_and_slice and _bytes_feature are not defined in this
    # excerpt — presumably helpers elsewhere in the file; confirm signatures
    # against the full source.
    wav_signals = read_and_slice(wav_filename, wav_canvas_size)
    noisy_signals = read_and_slice(noisy_filename, wav_canvas_size)
    if not baseline_dir is None:
        baseline_filename = os.path.join(baseline_dir, wav_fullname)
        baseline_signals = read_and_slice(baseline_filename, wav_canvas_size)
    # Clean and noisy recordings must slice into identically shaped windows.
    assert wav_signals.shape == noisy_signals.shape, noisy_signals.shape
    if baseline_dir is None:
        # Two-field records: paired clean/noisy windows.
        for (wav, noisy) in zip(wav_signals, noisy_signals):
            wav_raw = wav.tostring()
            noisy_raw = noisy.tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'wav_raw': _bytes_feature(wav_raw),
                'noisy_raw': _bytes_feature(noisy_raw)}))
            out_file.write(example.SerializeToString())
    else:
        # Three-field records: clean/noisy/baseline window triples.
        for (wav, noisy, base) in zip(wav_signals, noisy_signals, baseline_signals):
            wav_raw = wav.tostring()
            noisy_raw = noisy.tostring()
            baseline_raw = base.tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'wav_raw': _bytes_feature(wav_raw),
                'noisy_raw': _bytes_feature(noisy_raw),
                'baseline_raw': _bytes_feature(baseline_raw)
                }))
            out_file.write(example.SerializeToString())
if __name__ == '__main__':
    # CLI driver: collect options, then hand off to main().
    # NOTE(review): main() is not defined in this excerpt — presumably in an
    # omitted part of the file; confirm against the full source.
    parser = argparse.ArgumentParser(description='Convert the set of txt and '
                                                 'wavs to TFRecords')
    parser.add_argument('--cfg', type=str, default='cfg/e2e_maker.cfg',
                        help='File containing the description of datasets '
                             'to extract the info to make the TFRecords.')
    parser.add_argument('--save_path', type=str, default='data/',
                        help='Path to save the dataset')
    parser.add_argument('--out_file', type=str, default='segan.tfrecords',
                        help='Output filename')
    parser.add_argument('--force-gen', dest='force_gen', action='store_true',
                        help='Flag to force overwriting existing dataset.')
    parser.set_defaults(force_gen=False)
    opts = parser.parse_args()
    main(opts)
| 43.319728 | 89 | 0.617619 |
c02a8e075173d16d5fa606436bcb12e60ab69d67 | 1,684 | py | Python | software.py | schamberg97/bullshitMgimoProj | 1577e7a2b0256b98f259a2cc6667194abd689ec8 | [
"Unlicense"
] | null | null | null | software.py | schamberg97/bullshitMgimoProj | 1577e7a2b0256b98f259a2cc6667194abd689ec8 | [
"Unlicense"
] | null | null | null | software.py | schamberg97/bullshitMgimoProj | 1577e7a2b0256b98f259a2cc6667194abd689ec8 | [
"Unlicense"
] | null | null | null | from myShop import MyShop
from myBot import MYBOT
from sMenu import Menu | 29.034483 | 127 | 0.71734 |
c02b02d3a9106d7127a9a094f2f01f8ba90e6fb6 | 22,963 | py | Python | app/src/main/Python/Translate.py | tangcan1600/XuMiJie | 2e47d519c1c62ec3eabb576d80f783dd62052f44 | [
"MIT"
] | null | null | null | app/src/main/Python/Translate.py | tangcan1600/XuMiJie | 2e47d519c1c62ec3eabb576d80f783dd62052f44 | [
"MIT"
] | null | null | null | app/src/main/Python/Translate.py | tangcan1600/XuMiJie | 2e47d519c1c62ec3eabb576d80f783dd62052f44 | [
"MIT"
] | null | null | null | import time, sys, os, hashlib, json, re
import requests, random, js2py
import urllib.request
import urllib.parse
#
languageMapCode = {
'': 'auto',
'': 'sq',
'': 'ar',
'': 'am',
'': 'az',
'': 'ga',
'': 'et',
'': 'eu',
'': 'be',
'': 'bg',
'': 'is',
'': 'pl',
'': 'bs',
'': 'fa',
'()': 'af',
'': 'da',
'': 'de',
'': 'ru',
'': 'fr',
'': 'tl',
'': 'fi',
'': 'fy',
'': 'km',
'': 'ka',
'': 'gu',
'': 'kk',
'': 'ht',
'': 'ko',
'': 'ha',
'': 'nl',
'': 'ky',
'': 'gl',
'': 'ca',
'': 'cs',
'': 'kn',
'': 'co',
'': 'hr',
'': 'ku',
'': 'la',
'': 'lv',
'': 'lo',
'': 'lt',
'': 'lb',
'': 'ro',
'': 'mg',
'': 'mt',
'': 'mr',
'': 'ml',
'': 'ms',
'': 'mk',
'': 'mi',
'': 'mn',
'': 'bn',
'': 'my',
'': 'hmn',
'': 'xh',
'': 'zu',
'': 'ne',
'': 'no',
'': 'pa',
'': 'pt',
'': 'ps',
'': 'ny',
'': 'ja',
'': 'sv',
'': 'sm',
'': 'sr',
'': 'st',
'': 'si',
'': 'eo',
'': 'sk',
'': 'sl',
'': 'sw',
'': 'gd',
'': 'ceb',
'': 'so',
'': 'tg',
'': 'te',
'': 'ta',
'': 'th',
'': 'tr',
'': 'cy',
'': 'ur',
'': 'uk',
'': 'uz',
'': 'es',
'': 'iw',
'': 'el',
'': 'haw',
'': 'sd',
'': 'hu',
'': 'sn',
'': 'hy',
'': 'ig',
'': 'it',
'': 'yi',
'': 'hi',
'': 'su',
'': 'id',
'': 'jw',
'': 'en',
'': 'yo',
'': 'vi',
'': 'zh-CN',
'()': 'zh-TW'
}
""" """
| 36.276461 | 1,459 | 0.512041 |
c02c417a57c64011bc9f7af79d7ad7b2fc564c8d | 132 | py | Python | examples/basic.py | EmbarkStudios/Python-xNormal | a4f005220d31d1e9085a7cbcc1ef46e70cff2753 | [
"BSD-2-Clause"
] | 52 | 2015-04-26T19:46:37.000Z | 2021-12-23T01:45:16.000Z | examples/basic.py | EmbarkStudios/Python-xNormal | a4f005220d31d1e9085a7cbcc1ef46e70cff2753 | [
"BSD-2-Clause"
] | 3 | 2015-10-22T08:07:38.000Z | 2019-08-02T18:13:59.000Z | examples/basic.py | EmbarkStudios/Python-xNormal | a4f005220d31d1e9085a7cbcc1ef46e70cff2753 | [
"BSD-2-Clause"
] | 10 | 2016-08-24T14:02:07.000Z | 2021-11-10T02:40:47.000Z | import xNormal
xNormal.run("piano_high.obj", "piano_low.obj", "piano.png", width=256, height=256, gen_normals = True, gen_ao = True) | 66 | 117 | 0.742424 |
c02c5be917c2e7c70614350c8ed104d79b0759b4 | 1,266 | py | Python | jina/executors/evaluators/rank/recall.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | jina/executors/evaluators/rank/recall.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | jina/executors/evaluators/rank/recall.py | yk/jina | ab66e233e74b956390f266881ff5dc4e0110d3ff | [
"Apache-2.0"
] | null | null | null | from typing import Sequence, Any, Optional
from . import BaseRankingEvaluator
| 38.363636 | 120 | 0.648499 |
c02c896bba97da4b352ffab0bd4675b7575d7153 | 7,995 | py | Python | tests/tests_main.py | insilications/tqdm-clr | b09a24af7ffe5c85ed0e8e64b33059b43b1be020 | [
"MIT"
] | 22,617 | 2015-06-03T20:26:05.000Z | 2022-03-31T22:25:42.000Z | tests/tests_main.py | insilications/tqdm-clr | b09a24af7ffe5c85ed0e8e64b33059b43b1be020 | [
"MIT"
] | 1,230 | 2015-06-03T13:56:41.000Z | 2022-03-30T06:03:12.000Z | tests/tests_main.py | insilications/tqdm-clr | b09a24af7ffe5c85ed0e8e64b33059b43b1be020 | [
"MIT"
] | 1,445 | 2015-06-03T14:01:33.000Z | 2022-03-29T14:41:52.000Z | """Test CLI usage."""
import logging
import subprocess # nosec
import sys
from functools import wraps
from os import linesep
from tqdm.cli import TqdmKeyError, TqdmTypeError, main
from tqdm.utils import IS_WIN
from .tests_tqdm import BytesIO, _range, closing, mark, raises
def restore_sys(func):
    """Decorates `func(capsysbin)` to save & restore `sys.(stdin|argv)`."""
    # NOTE(review): the wrapper function `inner` returned below is not
    # defined in this excerpt (apparently lost in extraction); as written
    # this raises NameError. Confirm against the full test module.
    return inner
def norm(bytestr):
    """Normalise line endings."""
    # On platforms whose separator is already "\n" the input passes through
    # untouched; elsewhere every platform separator becomes "\n".
    if linesep == "\n":
        return bytestr
    return bytestr.replace(linesep.encode(), b"\n")
if sys.version_info[:2] >= (3, 8):
test_pipes = mark.filterwarnings("ignore:unclosed file:ResourceWarning")(
test_pipes)
def test_main_import():
    """Test main CLI import"""
    N = 123
    # Remember the real stdin/argv so they can be restored afterwards.
    _SYS = sys.stdin, sys.argv
    # test direct import
    sys.stdin = [str(i).encode() for i in _range(N)]
    sys.argv = ['', '--desc', 'Test CLI import',
                '--ascii', 'True', '--unit_scale', 'True']
    try:
        # Importing tqdm.__main__ runs the CLI against the faked stdin/argv.
        import tqdm.__main__  # NOQA, pylint: disable=unused-variable
    finally:
        sys.stdin, sys.argv = _SYS
| 32.632653 | 86 | 0.591245 |
c02ddc618f6444651370434e959eed89c5b43ed2 | 2,881 | py | Python | plugins/commands.py | Kalpesh0/Project01 | 42383a3aa4a3f17ab69dd01357bfbb0740ba965b | [
"MIT"
] | null | null | null | plugins/commands.py | Kalpesh0/Project01 | 42383a3aa4a3f17ab69dd01357bfbb0740ba965b | [
"MIT"
] | null | null | null | plugins/commands.py | Kalpesh0/Project01 | 42383a3aa4a3f17ab69dd01357bfbb0740ba965b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @REQUEST_M0viz
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from script import script
| 36.468354 | 373 | 0.532107 |
c02e20b826436ffe36a314fe9b02b8f0b79df9d5 | 3,583 | py | Python | gpjax/utils.py | thomaspinder/GPJax | 929fcb88d13d15bb10e1175491dbc3e79622325a | [
"Apache-2.0"
] | 44 | 2020-12-03T14:07:39.000Z | 2022-03-14T17:45:34.000Z | gpjax/utils.py | thomaspinder/GPJax | 929fcb88d13d15bb10e1175491dbc3e79622325a | [
"Apache-2.0"
] | 28 | 2020-12-05T08:54:45.000Z | 2022-03-01T09:56:50.000Z | gpjax/utils.py | thomaspinder/GPJax | 929fcb88d13d15bb10e1175491dbc3e79622325a | [
"Apache-2.0"
] | 7 | 2021-02-05T12:37:57.000Z | 2022-03-13T13:00:20.000Z | from copy import deepcopy
from typing import Tuple
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve
from multipledispatch import dispatch
from .types import Array
def I(n: int) -> Array:
    """Construct the n x n identity matrix.

    :param n: The number of rows (and columns) of the matrix.
    :return: An identity matrix of shape (n, n).
    """
    # jnp.identity(n) is equivalent to jnp.eye(n) for a square matrix.
    return jnp.identity(n)
def concat_dictionaries(a: dict, b: dict) -> dict:
    """Combine two dictionaries into a new one.

    Neither input is modified; when a key appears in both, the value from
    ``b`` (the second dictionary) wins.
    """
    combined = dict(a)
    combined.update(b)
    return combined
def merge_dictionaries(base_dict: dict, in_dict: dict) -> dict:
    """Return a complete dictionary keyed by ``base_dict``'s keys.

    For every key of ``base_dict``, the value is taken from ``in_dict``
    when that key is present there, otherwise from ``base_dict``. Keys
    that exist only in ``in_dict`` are ignored. The purpose is that
    ``base_dict`` is a complete set of values and an incomplete
    ``in_dict`` can be used to update specific key-value pairs.

    Unlike the original implementation, neither input is mutated; a new
    dictionary is returned. (The original wrote the merged values back
    into ``base_dict`` in place, silently mutating the caller's data.)

    :param base_dict: Complete dictionary of key-value pairs.
    :param in_dict: Subset of key-value pairs whose values take precedence.
    :return: A merged dictionary with the same keys as ``base_dict``.
    """
    return {k: in_dict.get(k, v) for k, v in base_dict.items()}
def sort_dictionary(base_dict: dict) -> dict:
    """Return a new dictionary whose items are ordered by key.

    :param base_dict: The unsorted dictionary.
    :return: A dictionary sorted alphabetically on its keys.
    """
    ordered_items = sorted(base_dict.items(), key=lambda kv: kv[0])
    return dict(ordered_items)
def unstandardise(
    x: jnp.DeviceArray, xmean: jnp.DeviceArray, xstd: jnp.DeviceArray
) -> jnp.DeviceArray:
    """Map a standardised matrix back onto its original scale.

    Inverts the usual ``(x - mean) / std`` standardisation using a
    previously computed mean and standard deviation.

    :param x: A standardised matrix.
    :param xmean: A mean vector.
    :param xstd: A standard deviation vector.
    :return: A matrix of unstandardised values.
    """
    rescaled = x * xstd
    return rescaled + xmean
def as_constant(parameter_set: dict, params: list) -> Tuple[dict, dict]:
    """Split a parameter dictionary into trainable and constant parts.

    Deep-copies ``parameter_set`` (the input is never mutated), then
    moves each key named in ``params`` out of the copy into a separate
    dictionary of constants.

    :param parameter_set: Full dictionary of parameters.
    :param params: Names of the keys to treat as constants.
    :return: A tuple of (remaining parameters, extracted constants).
    """
    remaining = deepcopy(parameter_set)
    constants = {}
    for name in params:
        constants[name] = remaining.pop(name)
    return remaining, constants
| 33.485981 | 117 | 0.706391 |
c02f4130e68bf1161bbd1638142bdd926a75ebe7 | 999 | py | Python | tools/replace_version.py | jasmcaus/image-deep-learning-keras | 11ff37867c2cb86a92aceb0ac24accb3607e3635 | [
"MIT"
] | 681 | 2020-08-13T09:34:41.000Z | 2022-03-19T15:38:04.000Z | tools/replace_version.py | jasmcaus/image-deep-learning-keras | 11ff37867c2cb86a92aceb0ac24accb3607e3635 | [
"MIT"
] | 30 | 2020-11-03T19:23:14.000Z | 2021-10-13T17:19:34.000Z | tools/replace_version.py | jasmcaus/image-deep-learning-keras | 11ff37867c2cb86a92aceb0ac24accb3607e3635 | [
"MIT"
] | 127 | 2020-11-03T19:14:30.000Z | 2022-03-17T12:01:32.000Z | import os
replace_version((1,8,0), (3,9,1))
| 39.96 | 99 | 0.585586 |
c03051f9b68bca498dcb3a06e10906ed145e2649 | 10,625 | py | Python | DashExperiments/make_plot.py | magruener/reconstructing-proprietary-video-streaming-algorithms | 29917b64e25a81561db7629fbd97e4a935146825 | [
"MIT"
] | 9 | 2020-09-07T17:24:13.000Z | 2022-03-12T23:41:47.000Z | DashExperiments/make_plot.py | magruener/reconstructing-proprietary-video-streaming-algorithms | 29917b64e25a81561db7629fbd97e4a935146825 | [
"MIT"
] | null | null | null | DashExperiments/make_plot.py | magruener/reconstructing-proprietary-video-streaming-algorithms | 29917b64e25a81561db7629fbd97e4a935146825 | [
"MIT"
] | 2 | 2020-11-13T06:49:54.000Z | 2021-01-28T12:24:20.000Z | import argparse
import math
import matplotlib.pyplot as plt
import os
import numpy as np
import shutil
import pandas as pd
import seaborn as sns
sns.set()
sns.set_context("talk")
NUM_BINS = 100
path = '../Data/Video_Info/Pensieve_Info/PenieveVideo_video_info'
video_mappings = {}
video_mappings['300'] = '320x180x30_vmaf_score'
video_mappings['750'] = '640x360x30_vmaf_score'
video_mappings['1200'] = '768x432x30_vmaf_score'
video_mappings['1850'] = '1024x576x30_vmaf_score'
video_mappings['2850'] = '1280x720x30_vmaf_score'
video_mappings['4300'] = '1280x720x60_vmaf_score'
metric_list = ["reward_vmaf", "reward_br", "rebuf", "br_avg", "vmaf_avg", "switching_vmaf", "switching_br"]
#MINERVA
rebuf_penalty = 25
switching_penalty = 2.5
segment_lenght = 4.0
pensieve_video_csv = load_csv()
#
#def get_qoe(abr, trace):
# logdir = os.path.join(args.result_dir, abr + "-" + trace, "result")
# logfile = os.path.join(logdir, abr + "_rewards_0.log")
#
# reward = 0
#
#
# with open(logfile, "r") as fin:
# reward_lines = fin.readlines()
#
# if (len(reward_lines) != args.video_chunks):
# if len(reward_lines) < args.video_chunks:
# to_clean.append(logfile)
# print("{} has {} chunks instead of {}".format(logfile, len(reward_lines), args.video_chunks))
# print("Skip, please")
# return None
#
# for i, r_line in enumerate(reward_lines):
# if i > 0: # skip first
# reward += float(r_line.split()[-1])
#
# return reward
if __name__ == "__main__":
main()
| 34.836066 | 174 | 0.609318 |
c0319cff4f20f991c3fca5acb4a16d11e559fce4 | 936 | py | Python | aiopoke/objects/utility/common_models/encounter.py | beastmatser/aiopokeapi | 6ffe10bf8db0b6349cabf5b5b01b738214f805d0 | [
"MIT"
] | 3 | 2021-10-03T13:49:47.000Z | 2022-03-31T03:12:30.000Z | aiopoke/objects/utility/common_models/encounter.py | beastmatser/aiopokeapi | 6ffe10bf8db0b6349cabf5b5b01b738214f805d0 | [
"MIT"
] | 3 | 2022-01-18T07:31:08.000Z | 2022-01-18T07:32:09.000Z | aiopoke/objects/utility/common_models/encounter.py | beastmatser/aiopokeapi | 6ffe10bf8db0b6349cabf5b5b01b738214f805d0 | [
"MIT"
] | 1 | 2022-01-19T12:35:09.000Z | 2022-01-19T12:35:09.000Z | from typing import TYPE_CHECKING, Any, Dict, List
from aiopoke.utils.minimal_resources import MinimalResource
from aiopoke.utils.resource import Resource
if TYPE_CHECKING:
from aiopoke.objects.resources import EncounterConditionValue, EncounterMethod
| 28.363636 | 86 | 0.681624 |