hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4d72a1c0cc4948586f7b3056d01b428148d2c15b | 942 | py | Python | src/ipyradiant/tests/test_servicepatch_rdflib.py | sanbales/ipyradiant | e798f3575cb08b539c04129b0c7ee74771246101 | [
"BSD-3-Clause"
] | null | null | null | src/ipyradiant/tests/test_servicepatch_rdflib.py | sanbales/ipyradiant | e798f3575cb08b539c04129b0c7ee74771246101 | [
"BSD-3-Clause"
] | null | null | null | src/ipyradiant/tests/test_servicepatch_rdflib.py | sanbales/ipyradiant | e798f3575cb08b539c04129b0c7ee74771246101 | [
"BSD-3-Clause"
] | null | null | null | """ Unit Tests for Service Patch
"""
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import ipyradiant
import rdflib
LINKEDDATA_QUERY = """
SELECT DISTINCT ?s ?p ?o
WHERE {
SERVICE <http://linkeddata.uriburner.com/sparql>
{
SELECT ?s ?p ?o
WHERE {?s ?p ?o}
}
}
"""
PATCHED_LINKEDDATA_QUERY = """
SELECT DISTINCT ?s ?p ?o
WHERE {
service <http://linkeddata.uriburner.com/sparql>
{
SELECT ?s ?p ?o
WHERE {?s ?p ?o}
}
}
"""
| 22.97561 | 65 | 0.569002 |
4d72f8b8e24cd048f0649233ff63ab5878c4cdbc | 6,552 | py | Python | tests/plugins/project/test_git.py | cloudblue/connect-cli | 4b0b460782a27e5d96a106579f082cbccfe10c67 | [
"Apache-2.0"
] | 12 | 2020-10-10T10:53:16.000Z | 2022-02-16T10:15:56.000Z | tests/plugins/project/test_git.py | cloudblue/connect-cli | 4b0b460782a27e5d96a106579f082cbccfe10c67 | [
"Apache-2.0"
] | 37 | 2020-09-28T12:00:52.000Z | 2021-12-20T12:38:25.000Z | tests/plugins/project/test_git.py | cloudblue/connect-cli | 4b0b460782a27e5d96a106579f082cbccfe10c67 | [
"Apache-2.0"
] | 11 | 2020-11-04T18:17:01.000Z | 2022-02-23T08:18:07.000Z | import subprocess
from collections import OrderedDict
import pytest
from connect.cli.plugins.project import git
def test_connect_version_tag_invalid_comparison():
assert not git.ConnectVersionTag(str) == 10
def test_list_tags(mocker):
mock_subprocess_run = mocker.patch('connect.cli.plugins.project.git.subprocess.run')
mock_subprocess_called_process_error = mocker.patch(
'connect.cli.plugins.project.git.subprocess.CompletedProcess',
)
mock_subprocess_called_process_error.stdout = b"""commit1 refs/tags/21.1
commit2 refs/tags/21.10
commit3 refs/tags/21.11
commit4 refs/tags/21.9"""
mock_subprocess_run.return_value = mock_subprocess_called_process_error
tags = git._list_tags('dummy.repo')
assert tags == {'21.1': 'commit1', '21.10': 'commit2', '21.11': 'commit3', '21.9': 'commit4'}
def test_list_tags_error(mocker):
mock_subprocess_run = mocker.patch('connect.cli.plugins.project.git.subprocess.run')
mock_subprocess_called_process = mocker.patch(
'connect.cli.plugins.project.git.subprocess.CompletedProcess',
)
mock_subprocess_called_process.check_returncode.side_effect = subprocess.CalledProcessError(1, [])
mock_subprocess_run.return_value = mock_subprocess_called_process
with pytest.raises(git.GitException):
git._list_tags('dummy.repo')
| 32.117647 | 108 | 0.498626 |
4d7317541546d8bbcf5ef1e2b7e13eb1d36a67cc | 737 | py | Python | src/dao/user.py | lokaimoma/Flask-QR-Code-Web-APP | 5789753757aa1939119a799cbc6bda023ea75bbc | [
"MIT"
] | 2 | 2022-03-05T18:54:15.000Z | 2022-03-24T12:19:22.000Z | src/dao/user.py | lokaimoma/Flask-QR-Code-Web-APP | 5789753757aa1939119a799cbc6bda023ea75bbc | [
"MIT"
] | null | null | null | src/dao/user.py | lokaimoma/Flask-QR-Code-Web-APP | 5789753757aa1939119a799cbc6bda023ea75bbc | [
"MIT"
] | null | null | null | # Created by Kelvin_Clark on 3/5/2022, 6:37 PM
from typing import Optional
from src.models.entities.user import User
from src import database as db
| 24.566667 | 56 | 0.640434 |
4d735b8b416b377c0ccb95d07dcbca87d8a4365f | 839 | py | Python | dmoj/dmpg18g1.py | pi-guy-in-the-sky/competitive-programming | e079f6caf07b5de061ea4f56218f9b577e49a965 | [
"MIT"
] | null | null | null | dmoj/dmpg18g1.py | pi-guy-in-the-sky/competitive-programming | e079f6caf07b5de061ea4f56218f9b577e49a965 | [
"MIT"
] | null | null | null | dmoj/dmpg18g1.py | pi-guy-in-the-sky/competitive-programming | e079f6caf07b5de061ea4f56218f9b577e49a965 | [
"MIT"
] | 1 | 2020-10-25T05:46:57.000Z | 2020-10-25T05:46:57.000Z | # Problem ID: dmpg18g1
# By Alexander Cai 2019-12-09
# Solved
import sys
FILLED = -1
EMPTY = 0
data = sys.stdin.read().split('\n')
n, k = map(int, data[0].split())
chairs = [FILLED for _ in range(n)]
for j in map(int, data[1].split()):
chairs[j-1] = EMPTY
for i, j in enumerate(map(int, data[2].split())):
if chairs[j-1] == EMPTY:
chairs[j-1] = FILLED
else:
chairs[j-1] = i+1 # index of student
s = []
nremaining = chairs.count(EMPTY)
i = 0
while nremaining > 0:
if chairs[i] == FILLED:
pass
elif chairs[i] == EMPTY:
if len(s) > 0:
s.pop()
chairs[i] = FILLED
nremaining -= 1
else: # student at this index -- underneath them is filled
s.append(chairs[i]) # add them to stack
chairs[i] = FILLED
i = (i+1) % n
print(s[0])
| 20.975 | 62 | 0.555423 |
4d735bc291be770b9f45354d1d88f6bbaa19cdc6 | 25,884 | py | Python | tests/test_base.py | tailhook/trafaret | 1968a0561e42d98094a5b90adbd6b0de49e7a4d9 | [
"BSD-2-Clause"
] | null | null | null | tests/test_base.py | tailhook/trafaret | 1968a0561e42d98094a5b90adbd6b0de49e7a4d9 | [
"BSD-2-Clause"
] | null | null | null | tests/test_base.py | tailhook/trafaret | 1968a0561e42d98094a5b90adbd6b0de49e7a4d9 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import trafaret as t
from collections import Mapping as AbcMapping
from trafaret import extract_error, ignore, DataError
from trafaret.extras import KeysSubset
# res = @guard(a=String, b=Int, c=String)
# def fn(a, b, c="default"):
# '''docstring'''
# return (a, b, c)
# res = fn.__module__ = None
# res = help(fn)
# self.assertEqual(res, Help on function fn:
# <BLANKLINE>
# fn(*args, **kwargs)
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# **********************************************************************
# File "/Users/mkrivushin/w/trafaret/trafaret/__init__.py", line 1260, in trafaret.guard
# Failed example:
# help(fn)
# Expected:
# Help on function fn:
# <BLANKLINE>
# fn(*args, **kwargs)
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# Got:
# Help on function fn:
# <BLANKLINE>
# fn(a, b, c='default')
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# res = fn("foo", 1)
# self.assertEqual(res, ('foo', 1, 'default')
# res = extract_error(fn, "foo", 1, 2)
# self.assertEqual(res, {'c': 'value is not a string'}
# res = extract_error(fn, "foo")
# self.assertEqual(res, {'b': 'is required'}
# res = g = guard(Dict())
# res = c = Forward()
# res = c << Dict(name=str, children=List[c])
# res = g = guard(c)
# res = g = guard(Int())
# self.assertEqual(res, Traceback (most recent call last):
# ...
# RuntimeError: trafaret should be instance of Dict or Forward
# res = a = Int >> ignore
# res = a.check(7)
# ***Test Failed*** 2 failures.
# res = _dd(fold({'a__a': 4}))
# self.assertEqual(res, "{'a': {'a': 4}}"
# res = _dd(fold({'a__a': 4, 'a__b': 5}))
# self.assertEqual(res, "{'a': {'a': 4, 'b': 5}}"
# res = _dd(fold({'a__1': 2, 'a__0': 1, 'a__2': 3}))
# self.assertEqual(res, "{'a': [1, 2, 3]}"
# res = _dd(fold({'form__a__b': 5, 'form__a__a': 4}, 'form'))
# self.assertEqual(res, "{'a': {'a': 4, 'b': 5}}"
# res = _dd(fold({'form__a__b': 5, 'form__a__a__0': 4, 'form__a__a__1': 7}, 'form'))
# self.assertEqual(res, "{'a': {'a': [4, 7], 'b': 5}}"
# res = repr(fold({'form__1__b': 5, 'form__0__a__0': 4, 'form__0__a__1': 7}, 'form'))
# self.assertEqual(res, "[{'a': [4, 7]}, {'b': 5}]"
# res = _dd(unfold({'a': 4, 'b': 5}))
# self.assertEqual(res, "{'a': 4, 'b': 5}"
# res = _dd(unfold({'a': [1, 2, 3]}))
# self.assertEqual(res, "{'a__0': 1, 'a__1': 2, 'a__2': 3}"
# res = _dd(unfold({'a': {'a': 4, 'b': 5}}))
# self.assertEqual(res, "{'a__a': 4, 'a__b': 5}"
# res = _dd(unfold({'a': {'a': 4, 'b': 5}}, 'form'))
# self.assertEqual(res, "{'form__a__a': 4, 'form__a__b': 5}"
# res = from trafaret import Int
# res = class A(object):
# class B(object):
# d = {'a': 'word'}
# res = dict((DeepKey('B.d.a') >> 'B_a').pop(A))
# self.assertEqual(res, {'B_a': 'word'}
# res = dict((DeepKey('c.B.d.a') >> 'B_a').pop({'c': A}))
# self.assertEqual(res, {'B_a': 'word'}
# res = dict((DeepKey('B.a') >> 'B_a').pop(A))
# self.assertEqual(res, {'B.a': DataError(Unexistent key)}
# res = dict(DeepKey('c.B.d.a', to_name='B_a', trafaret=Int()).pop({'c': A}))
# self.assertEqual(res, {'B_a': DataError(value can't be converted to int)}
| 37.731778 | 112 | 0.564789 |
4d73ae8092a1ed93a90f98b70314fba1d7c65aff | 6,107 | py | Python | libdesktop/desktopfile.py | bharadwaj-raju/libdesktop | 4d6b815755c76660b6ef4d2db6f54beff38c0db7 | [
"MIT"
] | 12 | 2016-07-08T12:59:24.000Z | 2021-03-29T01:01:56.000Z | libdesktop/desktopfile.py | bharadwaj-raju/libdesktop | 4d6b815755c76660b6ef4d2db6f54beff38c0db7 | [
"MIT"
] | 4 | 2016-10-12T10:28:19.000Z | 2016-11-08T10:04:30.000Z | libdesktop/desktopfile.py | bharadwaj-raju/libdesktop | 4d6b815755c76660b6ef4d2db6f54beff38c0db7 | [
"MIT"
] | 1 | 2016-10-02T00:15:21.000Z | 2016-10-02T00:15:21.000Z | # coding: utf-8
# This file is part of libdesktop
# The MIT License (MIT)
#
# Copyright (c) 2016 Bharadwaj Raju
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess as sp
from libdesktop import system
import sys
def construct(name, exec_, terminal=False, additional_opts={}):
'''Construct a .desktop file and return it as a string.
Create a standards-compliant .desktop file, returning it as a string.
Args:
name (str) : The program's name.
exec\_ (str) : The command.
terminal (bool): Determine if program should be run in a terminal emulator or not. Defaults to ``False``.
additional_opts (dict): Any additional fields.
Returns:
str: The constructed .desktop file.
'''
desktop_file = '[Desktop Entry]\n'
desktop_file_dict = {
'Name': name,
'Exec': exec_,
'Terminal': 'true' if terminal else 'false',
'Comment': additional_opts.get('Comment', name)
}
desktop_file = ('[Desktop Entry]\nName={name}\nExec={exec_}\n'
'Terminal={terminal}\nComment={comment}\n')
desktop_file = desktop_file.format(name=desktop_file_dict['Name'],
exec_=desktop_file_dict['Exec'],
terminal=desktop_file_dict['Terminal'],
comment=desktop_file_dict['Comment'])
if additional_opts is None:
additional_opts = {}
for option in additional_opts:
if not option in desktop_file_dict:
desktop_file += '%s=%s\n' % (option, additional_opts[option])
return desktop_file
def execute(desktop_file, files=None, return_cmd=False, background=False):
'''Execute a .desktop file.
Executes a given .desktop file path properly.
Args:
desktop_file (str) : The path to the .desktop file.
files (list): Any files to be launched by the .desktop. Defaults to empty list.
return_cmd (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``.
background (bool): Run command in background. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``. Returns command instead of running it. Else returns nothing.
'''
# Attempt to manually parse and execute
desktop_file_exec = parse(desktop_file)['Exec']
for i in desktop_file_exec.split():
if i.startswith('%'):
desktop_file_exec = desktop_file_exec.replace(i, '')
desktop_file_exec = desktop_file_exec.replace(r'%F', '')
desktop_file_exec = desktop_file_exec.replace(r'%f', '')
if files:
for i in files:
desktop_file_exec += ' ' + i
if parse(desktop_file)['Terminal']:
# Use eval and __import__ to bypass a circular dependency
desktop_file_exec = eval(
('__import__("libdesktop").applications.terminal(exec_="%s",'
' keep_open_after_cmd_exec=True, return_cmd=True)') %
desktop_file_exec)
if return_cmd:
return desktop_file_exec
desktop_file_proc = sp.Popen([desktop_file_exec], shell=True)
if not background:
desktop_file_proc.wait()
def locate(desktop_filename_or_name):
'''Locate a .desktop from the standard locations.
Find the path to the .desktop file of a given .desktop filename or application name.
Standard locations:
- ``~/.local/share/applications/``
- ``/usr/share/applications``
Args:
desktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application.
Returns:
list: A list of all matching .desktop files found.
'''
paths = [
os.path.expanduser('~/.local/share/applications'),
'/usr/share/applications']
result = []
for path in paths:
for file in os.listdir(path):
if desktop_filename_or_name in file.split(
'.') or desktop_filename_or_name == file:
# Example: org.gnome.gedit
result.append(os.path.join(path, file))
else:
file_parsed = parse(os.path.join(path, file))
try:
if desktop_filename_or_name.lower() == file_parsed[
'Name'].lower():
result.append(file)
elif desktop_filename_or_name.lower() == file_parsed[
'Exec'].split(' ')[0]:
result.append(file)
except KeyError:
pass
for res in result:
if not res.endswith('.desktop'):
result.remove(res)
if not result and not result.endswith('.desktop'):
result.extend(locate(desktop_filename_or_name + '.desktop'))
return result
def parse(desktop_file_or_string):
'''Parse a .desktop file.
Parse a .desktop file or a string with its contents into an easy-to-use dict, with standard values present even if not defined in file.
Args:
desktop_file_or_string (str): Either the path to a .desktop file or a string with a .desktop file as its contents.
Returns:
dict: A dictionary of the parsed file.'''
if os.path.isfile(desktop_file_or_string):
with open(desktop_file_or_string) as f:
desktop_file = f.read()
else:
desktop_file = desktop_file_or_string
result = {}
for line in desktop_file.split('\n'):
if '=' in line:
result[line.split('=')[0]] = line.split('=')[1]
for key, value in result.items():
if value == 'false':
result[key] = False
elif value == 'true':
result[key] = True
if not 'Terminal' in result:
result['Terminal'] = False
if not 'Hidden' in result:
result['Hidden'] = False
return result
| 31 | 136 | 0.713935 |
4d74aef73934a96f618c4d0192a7d28c451a6713 | 2,887 | py | Python | losses.py | abhinav-2912/Polyp-Segmentation | 82f2a309bafb073dc7c12ec85d196c9367b4c702 | [
"MIT"
] | null | null | null | losses.py | abhinav-2912/Polyp-Segmentation | 82f2a309bafb073dc7c12ec85d196c9367b4c702 | [
"MIT"
] | null | null | null | losses.py | abhinav-2912/Polyp-Segmentation | 82f2a309bafb073dc7c12ec85d196c9367b4c702 | [
"MIT"
] | null | null | null | import os
import cv2
import keras
import numpy as np
import albumentations as A
import tensorflow as tf
from keras import backend as K
def iou(y_true, y_pred, label: int):
"""
Return the Intersection over Union (IoU) for a given label.
Args:
y_true: the expected y values as a one-hot
y_pred: the predicted y values as a one-hot or softmax output
label: the label to return the IoU for
Returns:
the IoU for the given label
"""
# extract the label values using the argmax operator then
# calculate equality of the predictions and truths to the label
y_true = K.cast(K.equal(K.argmax(y_true), label), K.floatx())
y_pred = K.cast(K.equal(K.argmax(y_pred), label), K.floatx())
# calculate the |intersection| (AND) of the labels
intersection = K.sum(y_true * y_pred)
# calculate the |union| (OR) of the labels
union = K.sum(y_true) + K.sum(y_pred) - intersection
# avoid divide by zero - if the union is zero, return 1
# otherwise, return the intersection over union
return K.switch(K.equal(union, 0), 1.0, intersection / union)
def mean_iou(y_true, y_pred):
"""
Return the Intersection over Union (IoU) score.
Args:
y_true: the expected y values as a one-hot
y_pred: the predicted y values as a one-hot or softmax output
Returns:
the scalar IoU value (mean over all labels)
"""
# get number of labels to calculate IoU for
num_labels = K.int_shape(y_pred)[-1]
# initialize a variable to store total IoU in
total_iou = K.variable(0)
# iterate over labels to calculate IoU for
for label in range(num_labels):
total_iou = total_iou + iou(y_true, y_pred, label)
# divide total IoU by number of labels to get mean IoU
return total_iou / num_labels
# def iou_metric(y_true, y_pred):
SMOOTH = 1e-01
def iou_coef(y_true, y_pred, smooth=SMOOTH):
"""
IoU = (|X & Y|)/ (|X or Y|)
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
union = K.sum((y_true,-1) + K.sum(y_pred,-1)) - intersection
return (intersection + smooth) / (union + smooth)
# return iou_coef | 37.012821 | 108 | 0.66124 |
4d74b34c2ad769580f0628389b7964d48ab6bbb5 | 1,119 | py | Python | opsramp/monitoring.py | gauravphagrehpe/python-opsramp | 9ed8397cff70f866b9ed9bd1d7010a189481c62c | [
"Apache-2.0"
] | 6 | 2020-03-30T12:53:44.000Z | 2022-03-05T11:37:12.000Z | opsramp/monitoring.py | gauravphagrehpe/python-opsramp | 9ed8397cff70f866b9ed9bd1d7010a189481c62c | [
"Apache-2.0"
] | 20 | 2019-07-10T13:24:00.000Z | 2021-01-25T14:26:23.000Z | opsramp/monitoring.py | gauravphagrehpe/python-opsramp | 9ed8397cff70f866b9ed9bd1d7010a189481c62c | [
"Apache-2.0"
] | 21 | 2019-06-26T12:05:33.000Z | 2022-03-28T11:44:39.000Z | #!/usr/bin/env python
#
# A minimal Python language binding for the OpsRamp REST API.
#
# monitoring.py
# Classes related to monitoring templates and similar things.
#
# (c) Copyright 2019-2021 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opsramp.api import ORapi
| 31.083333 | 74 | 0.74084 |
4d756cbf6b405451463d1b3c51d20e4b11805c50 | 5,045 | py | Python | tests/sources/doctest/load_test.py | akshitdewan/ok-client | 3c5eca17100eed808023a815654cfe1c95179080 | [
"Apache-2.0"
] | 30 | 2018-07-10T17:32:49.000Z | 2022-01-03T16:50:56.000Z | tests/sources/doctest/load_test.py | akshitdewan/ok-client | 3c5eca17100eed808023a815654cfe1c95179080 | [
"Apache-2.0"
] | 62 | 2018-08-07T18:43:33.000Z | 2022-02-17T20:53:03.000Z | tests/sources/doctest/load_test.py | akshitdewan/ok-client | 3c5eca17100eed808023a815654cfe1c95179080 | [
"Apache-2.0"
] | 26 | 2018-11-13T22:12:47.000Z | 2022-03-20T00:42:26.000Z | from client import exceptions as ex
from client.sources import doctest
from client.sources.doctest import models
import mock
import unittest
import os.path
| 34.087838 | 90 | 0.643409 |
4d77116fa77002bdedc5a81cb80ae1a9e3ac2069 | 19,585 | py | Python | lib/python3.8/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_cosmosdbaccount_info.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_cosmosdbaccount_info.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_cosmosdbaccount_info.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cosmosdbaccount_info
short_description: Get Azure Cosmos DB Account facts
description:
- Get facts of Azure Cosmos DB Account.
options:
resource_group:
description:
- Name of an Azure resource group.
name:
description:
- Cosmos DB database account name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
retrieve_keys:
description:
- Retrieve keys and connection strings.
type: str
choices:
- all
- readonly
retrieve_connection_strings:
description:
- Retrieve connection strings.
type: bool
extends_documentation_fragment:
- azure.azcollection.azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Get instance of Database Account
community.azure.azure_rm_cosmosdbaccount_info:
resource_group: myResourceGroup
name: testaccount
- name: List instances of Database Account
azure_rm_cosmosdbaccousnt_info:
resource_group: myResourceGroup
'''
RETURN = '''
accounts:
description: A list of dictionaries containing facts for Database Account.
returned: always
type: complex
contains:
id:
description:
- The unique resource identifier of the database account.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccount
s/testaccount"
resource_group:
description:
- Name of an Azure resource group.
returned: always
type: str
sample: myResourceGroup
name:
description:
- The name of the database account.
returned: always
type: str
sample: testaccount
location:
description:
- The location of the resource group to which the resource belongs.
returned: always
type: str
sample: westus
kind:
description:
- Indicates the type of database account.
returned: always
type: str
sample: global_document_db
consistency_policy:
description:
- Consistency policy.
returned: always
type: complex
contains:
default_consistency_level:
description:
- Default consistency level.
returned: always
type: str
sample: session
max_interval_in_seconds:
description:
- Maximum interval in seconds.
returned: always
type: int
sample: 5
max_staleness_prefix:
description:
- Maximum staleness prefix.
returned: always
type: int
sample: 100
failover_policies:
description:
- The list of new failover policies for the failover priority change.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
read_locations:
description:
- Read locations.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount-eastus.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state.
returned: always
type: str
sample: Succeeded
write_locations:
description:
- Write locations.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount-eastus.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state.
returned: always
type: str
sample: Succeeded
database_account_offer_type:
description:
- Offer type.
returned: always
type: str
sample: Standard
ip_range_filter:
description:
- Enable IP range filter.
returned: always
type: str
sample: 10.10.10.10
is_virtual_network_filter_enabled:
description:
- Enable virtual network filter.
returned: always
type: bool
sample: true
enable_automatic_failover:
description:
- Enable automatic failover.
returned: always
type: bool
sample: true
enable_cassandra:
description:
- Enable Cassandra.
returned: always
type: bool
sample: true
enable_table:
description:
- Enable Table.
returned: always
type: bool
sample: true
enable_gremlin:
description:
- Enable Gremlin.
returned: always
type: bool
sample: true
virtual_network_rules:
description:
- List of Virtual Network ACL rules configured for the Cosmos DB account.
type: list
contains:
subnet:
description:
- Resource id of a subnet.
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNet
works/testvnet/subnets/testsubnet1"
ignore_missing_vnet_service_endpoint:
description:
- Create Cosmos DB account without existing virtual network service endpoint.
type: bool
enable_multiple_write_locations:
description:
- Enable multiple write locations.
returned: always
type: bool
sample: true
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state of Cosmos DB.
returned: always
type: str
sample: Succeeded
primary_master_key:
description:
- Primary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
secondary_master_key:
description:
- Primary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
primary_readonly_master_key:
description:
- Primary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
secondary_readonly_master_key:
description:
- Primary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
connection_strings:
description:
- List of connection strings.
type: list
returned: when requested
contains:
connection_string:
description:
- Description of connection string.
type: str
returned: always
sample: Primary SQL Connection String
description:
description:
- Connection string.
type: str
returned: always
sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=fSEjathnk6ZeBTrXkud9j5kfhtSEQ
q3dpJxJga76h9BZkK2BJJrDzSO6DDn6yKads017OZBZ1YZWyq1cW4iuvA=="
tags:
description:
- Tags assigned to the resource. Dictionary of "string":"string" pairs.
returned: always
type: dict
sample: { "tag1":"abc" }
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _camel_to_snake
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.cosmosdb import CosmosDB
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
if __name__ == '__main__':
main()
| 37.591171 | 150 | 0.531223 |
4d781dcbcf008c3f1865cdc43e7018be27ebf9f4 | 382 | py | Python | process.py | seiferteric/readon | f4926b8c382bb3dfd6076c50ac0250aaabfab248 | [
"MIT"
] | null | null | null | process.py | seiferteric/readon | f4926b8c382bb3dfd6076c50ac0250aaabfab248 | [
"MIT"
] | null | null | null | process.py | seiferteric/readon | f4926b8c382bb3dfd6076c50ac0250aaabfab248 | [
"MIT"
] | null | null | null |
from rdflib import Graph
import json
import glob
books = {}
rdf_files = glob.glob("gutindex/cache/epub/*/*.rdf")
i = 1
for rdf_file in rdf_files:
g = Graph()
g.parse(rdf_file)
for s,p,o in g:
if 'title' in p:
books[str(o)] = str(s)
print(i, str(o))
i+=1
with open("gutindex_titles.json", "w") as f:
json.dump(books, f)
| 17.363636 | 52 | 0.565445 |
4d7873d4b9675dd4cab3d904b988b8e5a2734851 | 3,188 | py | Python | qrand/caches/cache.py | LaurentAjdnik/qrand | 69a4fb049e1d50af5aaa52a6b417d58490169769 | [
"Apache-2.0"
] | 18 | 2020-10-28T19:04:39.000Z | 2022-03-02T17:23:32.000Z | qrand/caches/cache.py | LaurentAjdnik/qrand | 69a4fb049e1d50af5aaa52a6b417d58490169769 | [
"Apache-2.0"
] | 25 | 2021-04-05T01:11:24.000Z | 2022-02-10T16:13:43.000Z | qrand/caches/cache.py | LaurentAjdnik/qrand | 69a4fb049e1d50af5aaa52a6b417d58490169769 | [
"Apache-2.0"
] | 16 | 2020-12-02T14:59:12.000Z | 2022-01-04T19:17:47.000Z | ## _____ _____
## | __ \| __ \ AUTHOR: Pedro Rivero
## | |__) | |__) | ---------------------------------
## | ___/| _ / DATE: May 18, 2021
## | | | | \ \ ---------------------------------
## |_| |_| \_\ https://github.com/pedrorrivero
##
## Copyright 2021 Pedro Rivero
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from abc import ABC, abstractmethod
###############################################################################
## BIT CACHE INTERFACE
###############################################################################
| 26.131148 | 79 | 0.500627 |
4d78fa2ba57faef589529bf708bb570e9f71a9a8 | 203 | py | Python | logger/logger.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | 4 | 2016-09-24T22:09:49.000Z | 2017-05-17T12:51:48.000Z | logger/logger.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | 2 | 2016-11-29T05:50:01.000Z | 2021-02-13T18:07:20.000Z | logger/logger.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | null | null | null |
"""
Log multiple instances to same file.
"""
| 13.533333 | 36 | 0.507389 |
4d793706457e29ac7c89f01a4a25bd52a787ecca | 3,959 | py | Python | Stream/TransportStream.py | SeuSQ/SimpleShark | a7da2aa26b3e6f67f160008c0a19078ccfd3ab98 | [
"MIT"
] | 1 | 2019-10-18T15:44:58.000Z | 2019-10-18T15:44:58.000Z | Stream/TransportStream.py | SeuSQ/SimpleShark | a7da2aa26b3e6f67f160008c0a19078ccfd3ab98 | [
"MIT"
] | null | null | null | Stream/TransportStream.py | SeuSQ/SimpleShark | a7da2aa26b3e6f67f160008c0a19078ccfd3ab98 | [
"MIT"
] | null | null | null | from Stream.Stream import Stream
| 36.657407 | 96 | 0.417782 |
4d79c1863feabc09d18bc9b268afe13bc22098ed | 9,775 | py | Python | cea_calibration/validation.py | cooling-singapore/CalibCEA | 63013d46ec92faf8c28c3e937944ae803a3a9088 | [
"MIT"
] | 1 | 2020-10-08T08:24:47.000Z | 2020-10-08T08:24:47.000Z | cea_calibration/validation.py | cooling-singapore/CalibCEA | 63013d46ec92faf8c28c3e937944ae803a3a9088 | [
"MIT"
] | 2 | 2020-11-04T15:59:54.000Z | 2021-11-16T02:22:30.000Z | cea_calibration/validation.py | cooling-singapore/CalibCEA | 63013d46ec92faf8c28c3e937944ae803a3a9088 | [
"MIT"
] | 1 | 2020-11-04T15:48:13.000Z | 2020-11-04T15:48:13.000Z | """
This tool compares measured data (observed) with model outputs (predicted), used in procedures of calibration and validation
"""
from __future__ import division
from __future__ import print_function
import os
from math import sqrt
import pandas as pd
from sklearn.metrics import mean_squared_error as calc_mean_squared_error
import cea.config
import cea.inputlocator
from cea_calibration.global_variables import *
# from cea.constants import MONTHS_IN_YEAR_NAMES
# import cea.examples.global_variables as global_variables
# def outputdatafolder(self):
# return self._ensure_folder(self.scenario, 'outputs', 'data')
#
#
# def get_calibrationresults(self):
# """scenario/outputs/data/calibration_results/calibrationresults.csv"""
# return os.path.join(self.scenario, 'outputs', 'data', 'calibration_results', 'calibrationresults.csv')
#
#
# def get_project_calibrationresults(self):
# """project/outputs/calibration_results/calibrationresults.csv"""
# return os.path.join(self.project, 'outputs', 'calibration_results', 'calibrationresults.csv')
#
#
# def get_totaloccupancy(self):
# """scenario/outputs/data/totaloccupancy.csv"""
# return os.path.join(self.scenario, "outputs", "data", "totaloccupancy.csv")
#
#
# def get_measurements_folder(self):
# return self._ensure_folder(self.scenario, 'inputs', 'measurements')
#
#
# def get_annual_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'annual_measurements.csv')
#
#
# def get_monthly_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'monthly_measurements.csv')
#
#
# def get_global_monthly_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'monthly_measurements.csv')
# global_validation_n_calibrated = []
# global_validation_percentage = []
MONTHS_IN_YEAR_NAMES = ['JANUARY', 'FEBRUARY', 'MARCH', 'APRIL',
'MAY', 'JUNE', 'JULY', 'AUGUST', 'SEPTEMBER',
'OCTOBER', 'NOVEMBER', 'DECEMBER']
__author__ = "Luis Santos"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Luis Santos, Jimeno Fonseca, Daren Thomas"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def validation(scenario_list,
               locators_of_scenarios,
               measured_building_names_of_scenarios,
               monthly=True,
               load='GRID',
               ):
    """
    Compare observed (measured) and predicted (model output) demand values.

    Monthly data is compared in terms of NMBE and CvRMSE (following ASHRAE
    Guideline 14-2014). A measurements input folder has to be created, with
    a csv of monthly data provided as input for this tool. The input file
    contains: Name (CEA ID) | ZipCode (optional) | Monthly Data (JAN - DEC)
    | Type of equivalent variable in CEA (GRID_kWh is the default for total
    electricity consumption).

    The function prints the NMBE and CvRMSE for each building and appends
    the number of calibrated buildings and the percentage of calibrated
    buildings to the module-level lists ``global_validation_n_calibrated``
    and ``global_validation_percentage`` (presumably provided by the
    ``global_variables`` star import at the top of this module).

    :param scenario_list: list of scenario paths, parallel to the other lists.
    :param locators_of_scenarios: one CEA InputLocator per scenario.
    :param measured_building_names_of_scenarios: per scenario, the names of
        the buildings that have measured data.
    :param monthly: when True, run the monthly comparison (the only mode
        implemented in this function).
    :param load: prefix of the demand column to validate; 'GRID' selects the
        ``GRID_kWh`` column of the demand results.
    :return: the summed score over all scenarios.
    """
    ## monthly validation
    if monthly:
        number_of_buildings = 0
        print("monthly validation")
        # One output row per scenario: buildings calibrated and scenario score.
        validation_output = pd.DataFrame(columns=['scenario', 'calibrated_buildings', 'score'])
        for scenario, locator, measured_building_names in zip(scenario_list, locators_of_scenarios,
                                                              measured_building_names_of_scenarios):
            list_of_scores = []
            number_of_calibrated = []
            number_of_buildings = number_of_buildings + len(measured_building_names)
            # get measured data for the buildings of this scenario
            monthly_measured_data = pd.read_csv(locator.get_monthly_measurements())
            # loop over the measured buildings of this scenario
            for building_name in measured_building_names:  # buildings that have real data available
                # extract the measured monthly series for this building
                print('For building', building_name, 'the errors are')
                fields_to_extract = ['Name'] + MONTHS_IN_YEAR_NAMES
                monthly_measured_demand = monthly_measured_data[fields_to_extract].set_index('Name')
                monthly_measured_demand = monthly_measured_demand.loc[building_name]
                # reshape the row (indexed by month name) into a two-column frame
                monthly_measured_demand = pd.DataFrame({'Month': monthly_measured_demand.index.values,
                                                        'measurements': monthly_measured_demand.values})
                # extract model output: hourly demand of the requested load
                hourly_modelled_data = pd.read_csv(locator.get_demand_results_file(building_name),
                                                   usecols=['DATE', load + '_kWh'])
                hourly_modelled_data['DATE'] = pd.to_datetime(hourly_modelled_data['DATE'])
                # month number -> month name, to match the measurements header
                look_up = {1: 'JANUARY', 2: 'FEBRUARY', 3: 'MARCH', 4: 'APRIL', 5: 'MAY',
                           6: 'JUNE', 7: 'JULY', 8: 'AUGUST', 9: 'SEPTEMBER', 10: 'OCTOBER', 11: 'NOVEMBER',
                           12: 'DECEMBER'}
                # this step is required to allow the conversion from hourly to monthly data
                monthly_modelled_data = hourly_modelled_data.resample('M', on='DATE').sum()  # sum because data is in kWh
                monthly_modelled_data['Month'] = monthly_modelled_data.index.month
                monthly_modelled_data['Month'] = monthly_modelled_data.apply(lambda x: look_up[x['Month']], axis=1)
                # align modelled and measured values month by month
                monthly_data = monthly_modelled_data.merge(monthly_measured_demand, on='Month')
                # calculate errors -- calc_errors_per_building / calc_building_score
                # are not defined in this file; presumably they come from the
                # global_variables star import. TODO confirm.
                cv_root_mean_squared_error, normalized_mean_biased_error = calc_errors_per_building(load, monthly_data)
                ind_calib_building, ind_score_building = calc_building_score(cv_root_mean_squared_error, monthly_data,
                                                                             normalized_mean_biased_error)
                # collect per-building results for the scenario aggregation below
                number_of_calibrated.append(ind_calib_building)
                list_of_scores.append(ind_score_building)
            n_scenario_calib = sum(number_of_calibrated)
            scenario_score = sum(list_of_scores)
            scenario_name = os.path.basename(scenario)
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0; this
            # presumably relies on the pandas version pinned by the project.
            validation_output = validation_output.append(
                {'scenario': scenario_name, 'calibrated_buildings': n_scenario_calib, 'score': scenario_score},
                ignore_index=True)
        n_calib = validation_output['calibrated_buildings'].sum()
        score = validation_output['score'].sum()
        global_validation_n_calibrated.append(n_calib)
        global_validation_percentage.append((n_calib / number_of_buildings) * 100)
        print('The number of calibrated buildings is', n_calib)
        print('The final score is', score)
    # NOTE(review): `score` is only bound inside the `if monthly:` branch, so
    # calling with monthly=False raises UnboundLocalError here -- confirm
    # whether a non-monthly mode was ever intended.
    return score
def main(config):
    """
    Script entry point: run the monthly validation for a single scenario.

    Wraps the scenario described by ``config`` into the per-scenario lists
    that :func:`validation` expects and validates the total electricity
    consumption (the ``GRID`` load).

    :param config: configuration object with at least ``scenario`` and
        ``plugins`` set
    :type config: cea.config.Configuration
    :return: None
    """
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(config.scenario, config.plugins)
    measured_building_names = get_measured_building_names(locator)
    # validation() operates on lists of scenarios; wrap the single one.
    validation([config.scenario],
               [locator],
               [measured_building_names],
               monthly=True,
               load='GRID',
               )


if __name__ == '__main__':
    main(cea.config.Configuration())
| 47.916667 | 190 | 0.69289 |
4d7ab81b22aae5458f8deb86c17e6d6e000d4ad5 | 1,337 | py | Python | apps/jobs/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 349 | 2020-08-04T10:21:01.000Z | 2022-03-23T08:31:29.000Z | apps/jobs/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 2 | 2021-01-07T06:17:05.000Z | 2021-04-01T06:01:30.000Z | apps/jobs/settings/config.py | rainydaygit/testtcloudserver | 8037603efe4502726a4d794fb1fc0a3f3cc80137 | [
"MIT"
] | 70 | 2020-08-24T06:46:14.000Z | 2022-03-25T13:23:27.000Z | try:
from public_config import *
except ImportError:
pass
HOST = '0.0.0.0'
PORT = 9038
SERVICE_NAME = 'jobs'
SERVER_ENV = 'prod'
SQLALCHEMY_POOL_SIZE = 10
SQLALCHEMY_POOL_RECYCLE = 3600
JOBS = [
{ # 10:30
# 1
'id': 'credit-check-daily', # id,
'func': 'apps.jobs.business.jobs:JobsBusiness.credit_check_daily', #
'args': None, #
'trigger': 'cron', #
'day_of_week': 'mon-fri', # 1 - 5
'hour': 11, # 11
'minute': 30, #
# 'trigger': 'interval', #
# 'hours': 10
# 'seconds': 10
},
{
# cidata
'id': 'cijob_update', # id,
'func': 'apps.extention.business.cidata:CiJobBusiness.update_jenkins_data', #
'args': None, #
'trigger': 'interval', #
'hours': 10
# 'seconds': 10
},
{
# redis
'id': 'get_statistics_route_job', # id,
'func': 'apps.public.daos.public:get_statistics_route_job', #
'args': None, #
'trigger': 'interval', #
'day_of_week': 'mon-fri', # 1 - 5
'hour': 3, # 3
# 'minute': 5, #
}
]
| 27.285714 | 89 | 0.535527 |
4d7ad64476a0364d8dddccf7527a5fab1bf906d6 | 918 | py | Python | exercieses/openInventory/mouseTrial.py | SimSam115/orsr_bbb | 6b81f0637fedc00258114d4660ceadb9dd286909 | [
"MIT"
] | null | null | null | exercieses/openInventory/mouseTrial.py | SimSam115/orsr_bbb | 6b81f0637fedc00258114d4660ceadb9dd286909 | [
"MIT"
] | null | null | null | exercieses/openInventory/mouseTrial.py | SimSam115/orsr_bbb | 6b81f0637fedc00258114d4660ceadb9dd286909 | [
"MIT"
] | null | null | null | import pyautogui, pygetwindow, time
screen = pygetwindow.getWindowsWithTitle('Old School RuneScape')[0]
centerRatio = (2.8,2.8)
#
while True:
boxs = list(pyautogui.locateAllOnScreen("miniTree.png"))
box = boxs[len(boxs)//2]
if box:
pyautogui.click(box)
#tinder = pyautogui.locateCenterOnScreen("tinderbox.png")
#pyautogui.click(tinder)
time.sleep(7)
move("left",1)
time.sleep(3)
#print("found")
| 28.6875 | 100 | 0.603486 |
4d7eddf6f9e62ec0bab631e8436135068d6e2ad3 | 2,251 | py | Python | cmake/make_geant4_env.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | cmake/make_geant4_env.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | cmake/make_geant4_env.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | #!/usr/bin/python
'''
Produces POSIX commands to setup the environment variables for Geant4.
Required command line arguments:
1: Location of geant4.sh script.
2: Version of Geant4.
'''
import os
import sys
import re
import subprocess as subp
from codecs import encode,decode
geant4_sh, geant4_version = sys.argv[1:]
# vars and standard directory names
geant4_vars = {
"G4ABLADATA" : "G4ABLA",
"G4LEDATA" : "G4EMLOW",
"G4LEVELGAMMADATA" : "PhotonEvaporation",
"G4NEUTRONHPDATA" : "G4NDL",
"G4NEUTRONXSDATA" : "G4NEUTRONXS",
"G4PIIDATA" : "G4PII",
"G4RADIOACTIVEDATA": "RadioactiveDecay",
"G4REALSURFACEDATA": "RealSurface",
"G4ENSDFSTATEDATA" : "G4ENSDFSTATE2.2",
"G4SAIDXSDATA" : "G4SAIDDATA1.1"
}
geant4_env = {}
# try to get vars from geant4.sh script
if os.path.isfile(geant4_sh):
p = subp.Popen("/bin/bash",
stdin=subp.PIPE,
stdout=subp.PIPE,
cwd=os.path.dirname(geant4_sh),
env={})
penv = decode(p.communicate(encode("source geant4.sh && env"))[0].strip())
for line in penv.split("\n"):
sep = line.index("=")
var = line[:sep]
value = line[sep+1:]
if var in geant4_vars:
geant4_env[var] = value
formatted_pairs = []
for var in geant4_vars:
value = None
if var in os.environ:
# warn user that existing environment variables override this script,
# but don't complain if we are just running inside an env-shell.sh
value = os.environ[var]
if not "I3_SHELL" in os.environ:
sys.stderr.write(("Warning: Geant4 environment variable already set {0}={1}, "
"this overrides automatic detection\n")
.format(var, value))
elif var in geant4_env:
value = geant4_env[var]
if value is None:
sys.stderr.write(("Warning: Geant4 environment variable {0} could not be set, "
"g4-based modules may crash\n").format(var))
else:
formatted_pairs.append("{0}={1}".format(var, value))
# extra formatting for env-shell.sh
sys.stdout.write(" \\\n\t".join(formatted_pairs))
| 31.263889 | 90 | 0.60462 |
4d8114110ca0855b70f6d4767fc2d9b841ade8dd | 454 | py | Python | bin/run_server.py | syedwaseemjan/EXIFExtractor | 97da85c0552bb0a616f04bab1bc0785ae8b35fb6 | [
"MIT"
] | null | null | null | bin/run_server.py | syedwaseemjan/EXIFExtractor | 97da85c0552bb0a616f04bab1bc0785ae8b35fb6 | [
"MIT"
] | null | null | null | bin/run_server.py | syedwaseemjan/EXIFExtractor | 97da85c0552bb0a616f04bab1bc0785ae8b35fb6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from __future__ import absolute_import
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(PROJECT_DIR)
sys.path.append(os.path.abspath(os.path.join(PROJECT_DIR, "app")))
if __name__ == "__main__":
from app.main import Main
aws_bucket_name = None
if len(sys.argv) > 1:
aws_bucket_name = sys.argv[1]
Main().load_images(aws_bucket_name)
| 23.894737 | 81 | 0.722467 |
4d8164d0a3cb6cea0dddeadbc81ca630b7412f3a | 4,906 | py | Python | src/python/nimbusml/internal/core/feature_extraction/image/pixelextractor.py | justinormont/NimbusML-1 | 110b0f9577f3eb2886897c9a0e7632b400239c8a | [
"MIT"
] | 2 | 2019-03-01T01:22:54.000Z | 2019-07-10T19:57:38.000Z | src/python/nimbusml/internal/core/feature_extraction/image/pixelextractor.py | justinormont/NimbusML-1 | 110b0f9577f3eb2886897c9a0e7632b400239c8a | [
"MIT"
] | null | null | null | src/python/nimbusml/internal/core/feature_extraction/image/pixelextractor.py | justinormont/NimbusML-1 | 110b0f9577f3eb2886897c9a0e7632b400239c8a | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
PixelExtractor
"""
__all__ = ["PixelExtractor"]
from ....entrypoints.transforms_imagepixelextractor import \
transforms_imagepixelextractor
from ....utils.utils import trace
from ...base_pipeline_item import BasePipelineItem, DefaultSignature
| 31.651613 | 94 | 0.58459 |
4d81eb54f286004b0054b57c3655bf67b6dd1eb6 | 2,019 | py | Python | leetcode/17.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | [
"Apache-2.0"
] | 1 | 2018-11-29T01:01:32.000Z | 2018-11-29T01:01:32.000Z | leetcode/17.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | [
"Apache-2.0"
] | null | null | null | leetcode/17.py | 1005281342/learn | c9d1e2e256842d9b4846c4870ac72e83d172b20e | [
"Apache-2.0"
] | null | null | null | #
# @lc app=leetcode.cn id=17 lang=python3
#
# [17]
#
# https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number/description/
#
# algorithms
# Medium (47.70%)
# Total Accepted: 18K
# Total Submissions: 37.5K
# Testcase Example: '"23"'
#
# 2-9
#
# 1
#
#
#
# :
#
# "23"
# ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
#
#
# :
#
#
#
# NOTE(review): the Solution class (providing letterCombinations) is not
# visible in this chunk -- presumably defined above in the full file.
# `product` is unused in the visible code; it may be used by that class.
from itertools import product
# Ad-hoc driver: expected output for "23" is the 9 letter combinations of
# keys 2 (abc) and 3 (def).
S = Solution()
print(S.letterCombinations("23"))
| 22.943182 | 85 | 0.430411 |
4d831472f18b104577a3fb6f299305d975452191 | 15,031 | py | Python | mujoco/setup1/re_split_demo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | mujoco/setup1/re_split_demo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | mujoco/setup1/re_split_demo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | import random
import argparse
from ast import Global
from dis import dis
from glob import glob
from itertools import count
from math import dist
from logger import *
import json
import gym
from matplotlib.pyplot import axis
import scipy.optimize
import pdb
import torch
from torch.autograd import Variable
from jax_rl.agents import AWACLearner, SACLearner
from jax_rl.datasets import ReplayBuffer
from jax_rl.evaluation import evaluate
from jax_rl.utils import make_env
import numpy as np
import pickle
import random
import copy
from sklearn.cluster import KMeans# import ant
# import swimmer
# import reacher
# import walker
# import halfcheetah
# import inverted_double_pendulum
import sys
sys.path.append('../all_envs')
import swimmer
import walker
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--env-name', default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 1)')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='random seed (default: 1)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--save_path', type=str, default= 'temp', metavar='N',
help='path to save demonstrations on')
parser.add_argument('--xml', type=str, default= None, metavar='N',
help='For diffent dynamics')
parser.add_argument('--demo_files', nargs='+')
parser.add_argument('--test_demo_files', nargs='+')
parser.add_argument('--ratio', type=float, nargs='+')
parser.add_argument('--eval-interval', type=int, default=1000)
parser.add_argument('--restore_model', default=None)
parser.add_argument('--mode')
parser.add_argument('--discount', type=float, default=0.9)
parser.add_argument('--discount_train', action='store_true')
parser.add_argument('--fixed_train', action='store_true')
parser.add_argument('--algo', default='sac', help='the algorithm of RL')
parser.add_argument('--max_steps', type=int, default=int(1e6), help='the maximum number of steps')
parser.add_argument('--start_training', type=int, default=int(1e4), help='Number of training steps to start training.')
args = parser.parse_args()
logger = CompleteLogger('log/'+ args.env_name + '/'+ os.path.splitext(args.xml)[0] + 'resplit')
# re-define datasets
json.dump(vars(args), logger.get_args_file(), sort_keys=True, indent=4)
target_env_name = os.path.splitext(args.xml)[0]
global iters
# if __name__ == '__main__':
# #
# #
# print('')
# n = 4
# G_list = [[1,1,1,0], [1,1,1,0], [1,1,1,1], [0,0,1,1]]
# print('')
# # for i in range(n):
# # G_list.append(input().split(','))
# x = [0 for i in range(n)]
# G_list = np.array(G_list)
# while(not is_all_clear(G_list)):
# print(G_list)
# global bestn
# bestn = 0
# Max_Clique(0)
# print(bestx,bestn)
# pdb.set_trace()
# update_graph(G=G_list, nodes_to_delete=bestx)
# def re_split_demos(demos_all):
# size = len(demos_all)
# traj_len = len(demos_all[0])
# pdb.set_trace()
# dist_matrix = np.zeros((size, size)) # 200 * 200
# look_1 = np.expand_dims(np.array(demos_all), axis=0) # 1 * 200 * 1000 * 18
# look_2 = np.expand_dims(np.array(demos_all), axis=1) # 200 * 1 * 1000 * 18
# dist_matrix = np.sum(abs(look_1 - look_2), axis=-1) # 200 * 200 * 1000
# # dist_matrix = np.linalg.norm(look_1 - look_2, axis=-1)
# dist_matrix = np.mean(dist_matrix, dim=-1)
# # for i in range(size):
# # for j in range(size):
# # dist_matrix[i][j] = calculate_traj_dist(demos_all[i], demos_all[j])
# global graph_matrix
# # # clique
# # graph_matrix = dist_matrix < (dist_matrix.mean() * 1.1)
# # independent
# graph_matrix = dist_matrix > (dist_matrix.mean() * 0.9)
# print("sample graph:", graph_matrix[0])
# graph_matrix = graph_matrix.astype(int)
# split_done = False
# split_clique=[]
# while(not split_done):
# global x
# # print(G_list)
# global bestn
# global iters
# x = [0 for i in range(size)]
# bestn = 0
# iters = 0
# # pdb.set_trace()
# Max_Clique(0, size=size)
# print(bestx, bestn)
# update_graph(G=graph_matrix, nodes_to_delete=bestx)
# # pdb.set_trace()
# clique = [i for i, x in enumerate(bestx) if x == 1]
# if len(clique) > int(0.1 * size):
# split_clique.append(clique)
# split_done = is_all_clear(graph_matrix)
# print('re_cluster id:', split_clique)
# pdb.set_trace()
# # save new demo clique
# raw_demos = {}
# for i in range(len(split_clique)):
# save_demo_path = '../demo/walker2d/re_split_{}_batch_00.pkl'.format(i)
# raw_demos['obs'] = [demos_all[idx] for idx in split_clique[i]]
# pickle.dump(raw_demos, open(save_demo_path, 'wb'))
# return a list of neigjbor of x (including self)
# main
if args.mode == 'pair':
    # NOTE(review): `load_pairs` is not defined in this chunk, and the range
    # iterates test_demo_files while indexing demo_files / ratio -- confirm
    # the two lists are always the same length.
    demos = [load_pairs(args.demo_files[i:i+1], args.ratio[i]) for i in range(len(args.test_demo_files))]
elif args.mode == 'traj':
    # load all demos
    # NOTE(review): `load_demos` is not defined in this chunk either;
    # presumably provided elsewhere in the project.
    demos_all, init_obs_all = load_demos(args.demo_files, args.ratio)
    test_demos = []
    test_init_obs = []
    # clean dataset: trajectories shorter than 1000 steps terminated early
    # and are treated as non-expert demonstrations to be dropped.
    not_expert = []
    for i in range(len(demos_all)):
        if len(demos_all[i]) < 1000:
            not_expert.append(i) # not expert traj?
        if i % 5 == 0:
            print("len demos {}:{}".format(i, len(demos_all[i])))
    # pdb.set_trace()
    # Delete back-to-front so earlier indices remain valid while deleting.
    for i in reversed(not_expert):
        del demos_all[i]
    demos_all = np.array(demos_all)
    # norm: normalize along axis 1 -- presumably per-trajectory magnitude;
    # the result is unused in the visible code. TODO confirm.
    demos_all_normed = (demos_all / np.expand_dims(np.linalg.norm(demos_all,axis=1), axis=1))
    # NOTE(review): re_split_demos is commented out earlier in this file, so
    # this call raises NameError unless it is defined elsewhere -- confirm.
    re_split_demos(demos_all=demos_all)
    # random_split_demo(demos=demos_all)
    # kmeans = KMeans(n_clusters=4, random_state=0).fit(demos_all)
    # print(kmeans.labels_)
    # pdb.set_trace()
    # for i in range(len(args.test_demo_files)):
    #     demos_single, init_obs_single = load_demos(args.test_demo_files[i:i+1], args.ratio)
    #     test_demos.append(demos_single) # 4 * 50 * 1000 * 18
    #     test_init_obs.append(init_obs_single) # 4 * 0?
    # pdb.set_trace()
| 38.940415 | 132 | 0.635487 |
4d8333c69b2cee30bebf3bc76fe51963641f9990 | 7,500 | py | Python | slicer/bin/Python/tpycl/tpycl.py | pabloduque0/WMH_AttGatedUnet_CustomLoss | 3503b40c031494ca866dced1421d95f7b2e311fe | [
"MIT"
] | null | null | null | slicer/bin/Python/tpycl/tpycl.py | pabloduque0/WMH_AttGatedUnet_CustomLoss | 3503b40c031494ca866dced1421d95f7b2e311fe | [
"MIT"
] | null | null | null | slicer/bin/Python/tpycl/tpycl.py | pabloduque0/WMH_AttGatedUnet_CustomLoss | 3503b40c031494ca866dced1421d95f7b2e311fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# tpycl.py is the python support code to allow calling of python-wrapped
# vtk code from tcl scripts
#
# the main class is tpycl, and scripts can
#
import sys
import os
import Tkinter
from __main__ import slicer
import qt
if __name__ == "__main__":
    # Instantiate the tpycl bridge (class defined earlier in this file,
    # outside this chunk) and hand it the CLI arguments.
    tp = tpycl()
    tp.main(sys.argv[1:])
| 29.761905 | 124 | 0.6212 |
4d848e3435cc8a2b14ed24badeae8ae87d28d72a | 473 | py | Python | 0171 [Easy] Hex to Bmp/script.py | jwthomson/dailyprogrammer | 44eb1c4e0ec9e8c8660721b24c949013fe3acdc6 | [
"MIT"
] | null | null | null | 0171 [Easy] Hex to Bmp/script.py | jwthomson/dailyprogrammer | 44eb1c4e0ec9e8c8660721b24c949013fe3acdc6 | [
"MIT"
] | null | null | null | 0171 [Easy] Hex to Bmp/script.py | jwthomson/dailyprogrammer | 44eb1c4e0ec9e8c8660721b24c949013fe3acdc6 | [
"MIT"
] | null | null | null | hex_strings = [
"FF 81 BD A5 A5 BD 81 FF",
"AA 55 AA 55 AA 55 AA 55",
"3E 7F FC F8 F8 FC 7F 3E",
"93 93 93 F3 F3 93 93 93",
]
if __name__ == "__main__":
for x in range(len(hex_strings)):
hex_data = hex_strings[x].split(' ')
hex_data_to_image(hex_data)
| 26.277778 | 76 | 0.623679 |
4d865dc35190b4ae2e09f8e09f56d08962508268 | 264 | py | Python | core/kryptos/scripts/strat.py | czr1803/kryptos | 2e8e20b81c7486283ab39df053146048f5496474 | [
"MIT"
] | 45 | 2019-01-27T13:47:51.000Z | 2022-03-13T00:25:21.000Z | core/kryptos/scripts/strat.py | czr1803/kryptos | 2e8e20b81c7486283ab39df053146048f5496474 | [
"MIT"
] | 64 | 2019-01-24T06:43:13.000Z | 2020-03-09T17:14:52.000Z | core/kryptos/scripts/strat.py | produvia/kryptos | 2e8e20b81c7486283ab39df053146048f5496474 | [
"MIT"
] | 4 | 2019-11-21T10:49:17.000Z | 2021-09-30T03:33:00.000Z | import click
from kryptos.scripts import build_strategy, stress_worker, kill_strat
cli.add_command(build_strategy.run, "build")
cli.add_command(stress_worker.run, "stress")
cli.add_command(kill_strat.run, "kill")
| 20.307692 | 69 | 0.772727 |
4d869dccaeface891a8710af79793bcca714b0e5 | 2,334 | py | Python | scripts/album_times.py | TypicalFence/lainonlife | 7af0cf3fe8e48e6affdb3e79d2a89e1c399371b3 | [
"MIT"
] | 48 | 2017-04-29T20:13:52.000Z | 2022-03-23T09:48:56.000Z | scripts/album_times.py | ech1/lainonlife | c5bee94d8dec03d586c62e241d2af5c250e1dde9 | [
"MIT"
] | 46 | 2017-04-27T18:39:43.000Z | 2022-03-29T13:09:53.000Z | scripts/album_times.py | TypicalFence/lainonlife | 7af0cf3fe8e48e6affdb3e79d2a89e1c399371b3 | [
"MIT"
] | 12 | 2017-04-29T20:20:13.000Z | 2021-09-20T11:29:14.000Z | #!/usr/bin/env python3
"""Radio scheduling program.
Usage:
album_times.py [--host=HOST] PORT
Options:
--host=HOST Hostname of MPD [default: localhost]
-h --help Show this text
Prints out the last scheduling time of every album.
"""
from datetime import datetime
from docopt import docopt
from mpd import MPDClient
def album_sticker_get(client, album, sticker):
    """Return the album-level sticker stored on the album's first track.

    MPD only implements stickers for songs, so album stickers are kept on
    the first song of the album under an "album_"-prefixed sticker name.
    Returns None when the album has no tracks.
    """
    tracks = client.find("album", album)
    if not tracks:
        return None
    first_track = tracks[0]
    return client.sticker_get("song", first_track["file"], "album_" + sticker)
def list_albums(client):
    """Print every album with its last-scheduled time, oldest first.

    Albums named "" or "Lainchan Radio Transitions" are excluded. Albums
    whose sticker is missing, unparsable, or whose album has no tracks are
    treated as never scheduled (timestamp 0).

    :param client: connected MPD client
    """
    # Get all albums, minus the transition jingles and the empty tag.
    albums = client.list("album")
    all_albums = list(
        filter(lambda a: a not in ["", "Lainchan Radio Transitions"], albums)
    )

    # Group albums by when they were last scheduled.
    albums_by_last_scheduled = {}
    for album in all_albums:
        # Get the last scheduled time, defaulting to 0.  album_sticker_get
        # returns None for an album without tracks, and int(None) raises
        # TypeError (not ValueError), so catch both.
        try:
            last_scheduled = int(album_sticker_get(client, album, "last_scheduled"))
        except (TypeError, ValueError):
            last_scheduled = 0
        albums_by_last_scheduled.setdefault(last_scheduled, []).append(album)

    # Print each timestamp bucket exactly once, oldest first.  (Keeping a
    # separate list of timestamps would repeat a bucket once per album that
    # shares the same timestamp.)
    for last_scheduled in sorted(albums_by_last_scheduled):
        dt = datetime.utcfromtimestamp(last_scheduled)
        albums = albums_by_last_scheduled[last_scheduled]
        print("{}: {}".format(dt.strftime("%Y-%m-%d %H:%M:%S"), albums))
if __name__ == "__main__":
    args = docopt(__doc__)
    # PORT arrives from docopt as a string; reject non-integer values early.
    try:
        args["PORT"] = int(args["PORT"])
    except ValueError:
        print("PORT must be an integer")
        exit(1)
    # Connect to MPD; any connection failure exits with status 2.
    try:
        client = MPDClient()
        client.connect(args["--host"], args["PORT"])
    except Exception as e:
        print(f"could not connect to MPD: {e.args[0]}")
        exit(2)
    list_albums(client)
| 28.120482 | 84 | 0.652099 |
4d8737a112ba3cd3448b92d237da96606f4b3fb2 | 1,116 | py | Python | pico-examples/usb/device/dev_lowlevel/dev_lowlevel_loopback.py | TheMindVirus/tinyusb | 397f5f916d84841d878ab75cadae007af13220a1 | [
"MIT"
] | null | null | null | pico-examples/usb/device/dev_lowlevel/dev_lowlevel_loopback.py | TheMindVirus/tinyusb | 397f5f916d84841d878ab75cadae007af13220a1 | [
"MIT"
] | null | null | null | pico-examples/usb/device/dev_lowlevel/dev_lowlevel_loopback.py | TheMindVirus/tinyusb | 397f5f916d84841d878ab75cadae007af13220a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# sudo pip3 install pyusb
import usb.core
import usb.util
# find our device
dev = usb.core.find(idVendor=0x0000, idProduct=0x0001)
# was it found?
if dev is None:
raise ValueError('Device not found')
# get an endpoint instance
cfg = dev.get_active_configuration()
intf = cfg[(0, 0)]
outep = usb.util.find_descriptor(
intf,
# match the first OUT endpoint
custom_match= \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_OUT)
inep = usb.util.find_descriptor(
intf,
# match the first IN endpoint
custom_match= \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_IN)
assert inep is not None
assert outep is not None
test_string = "Hello World!"
outep.write(test_string)
from_device = inep.read(len(test_string))
print("Device Says: {}".format(''.join([chr(x) for x in from_device])))
| 22.77551 | 72 | 0.646057 |
4d87cbb67ddde96fe36f99d2c52da4c04b9e08f6 | 1,426 | py | Python | aiidalab_sssp/inspect/subwidgets/summary.py | aiidalab/aiidalab-sssp | 4f06d7fdff32b86996e85b6c65b372a41f0192a4 | [
"MIT"
] | null | null | null | aiidalab_sssp/inspect/subwidgets/summary.py | aiidalab/aiidalab-sssp | 4f06d7fdff32b86996e85b6c65b372a41f0192a4 | [
"MIT"
] | 1 | 2022-03-28T10:22:31.000Z | 2022-03-28T10:22:31.000Z | aiidalab_sssp/inspect/subwidgets/summary.py | aiidalab/aiidalab-sssp | 4f06d7fdff32b86996e85b6c65b372a41f0192a4 | [
"MIT"
] | 1 | 2021-09-30T08:47:39.000Z | 2021-09-30T08:47:39.000Z | import ipywidgets as ipw
import traitlets
from IPython.display import clear_output
| 30.340426 | 86 | 0.590463 |
4d88b9099339ce1c59838e9b514d20d29f9eb74a | 2,846 | py | Python | src/ska_pst_lmc/management/management_device.py | ska-telescope/ska-pst-lmc | d567f874bf55f49269416d0d83b5a80373a1281c | [
"BSD-3-Clause"
] | null | null | null | src/ska_pst_lmc/management/management_device.py | ska-telescope/ska-pst-lmc | d567f874bf55f49269416d0d83b5a80373a1281c | [
"BSD-3-Clause"
] | null | null | null | src/ska_pst_lmc/management/management_device.py | ska-telescope/ska-pst-lmc | d567f874bf55f49269416d0d83b5a80373a1281c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of the SKA PST LMC project
#
# Distributed under the terms of the BSD 3-clause new license.
# See LICENSE for more info.
"""This module implements the PstManagement device."""
from __future__ import annotations
from typing import Optional
from ska_tango_base.csp.controller_device import CspSubElementController
from tango.server import device_property, run
# PyTango imports
# from tango import AttrQuality, AttrWriteType, DebugIt, DevState, DispLevel, PipeWriteType
# from tango.server import Device, attribute, command, device_property, run
__all__ = ["PstManagement", "main"]
# ----------
# Run server
# ----------
def main(args: Optional[list] = None, **kwargs: dict) -> int:
    """
    Entry point for module.

    Starts the Tango device server with ``PstManagement`` as its only
    device class, forwarding all arguments to :func:`tango.server.run`.

    :param args: positional arguments
    :param kwargs: named arguments
    :return: exit code
    :rtype: int
    """
    device_classes = (PstManagement,)
    exit_code = run(device_classes, args=args, **kwargs)
    return exit_code
if __name__ == "__main__":
main()
| 26.849057 | 91 | 0.641251 |
4d895a735abdd884483a6e9345d2c27f5444c080 | 1,469 | py | Python | change_mac_ip.py | Anon123-tech/WiFi_Pentest_Guide | 969c5018645bc22fce663c42d709320eece1837a | [
"MIT"
] | null | null | null | change_mac_ip.py | Anon123-tech/WiFi_Pentest_Guide | 969c5018645bc22fce663c42d709320eece1837a | [
"MIT"
] | 1 | 2021-10-01T04:19:27.000Z | 2021-10-01T04:19:27.000Z | change_mac_ip.py | Anon123-tech/WiFi_Pentest_Guide | 969c5018645bc22fce663c42d709320eece1837a | [
"MIT"
] | null | null | null | import sys,os
import argparse as arg
import nmap
import urllib2
parser = arg.ArgumentParser()
parser.add_argument("-a", "--address", help="IP address", required=True)
parser.add_argument("-i", "--interface", help="Interface", required=True)
argument = parser.parse_args()
main()
| 27.716981 | 89 | 0.645337 |
4d8a3cc2d4d2015a3e0297d36d0b96dcf9279316 | 713 | py | Python | cartografo/argument_parser.py | mendrugory/cartografo | 2cd58dfa3c954447f39f084abd28031a47d924d7 | [
"MIT"
] | 4 | 2019-01-16T07:49:51.000Z | 2020-02-14T21:25:21.000Z | cartografo/argument_parser.py | mendrugory/cartografo | 2cd58dfa3c954447f39f084abd28031a47d924d7 | [
"MIT"
] | null | null | null | cartografo/argument_parser.py | mendrugory/cartografo | 2cd58dfa3c954447f39f084abd28031a47d924d7 | [
"MIT"
] | null | null | null | import argparse
from cartografo import DEFAULT_OBJECT, DEFAULT_TARGET
__parser = argparse.ArgumentParser()
__get_argparser()
__arguments = __parser.parse_args() | 35.65 | 95 | 0.750351 |
4d8becef069dc2415fd75f8eb057517bc3d380a9 | 343 | py | Python | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/slice.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/slice.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | Python_lxf/Python_Basic_Operation/Python_Basic/basicValueAndFunc/slice.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | L = ['michael', 'sarah', 'tracy', 'bob', 'jack']
# N
r = []
n = 3
for i in range(n):
r.append(L[i])
print(r)
# pythonslice.
m = 0
print(L[m:n], L[:n], L[-n:-1])
L = list(range(100))
print(L[:10], '\r', L[-10:], '\r', L[10:20], '\r', L[:10:2], '\r', L[::5])
print((0, 1, 2, 3, 4, 5)[:3])
print('ABCDEFG'[:3], 'ABCDEFG'[::2])
| 16.333333 | 74 | 0.483965 |
4d8d2dfaa3afe8aa6965d0899dc098740dc88c7a | 3,330 | py | Python | test/test_timematcher.py | magus0219/clockwork | 78c08afdd14f226d7f5c13af633d41a2185ebb7f | [
"MIT"
] | null | null | null | test/test_timematcher.py | magus0219/clockwork | 78c08afdd14f226d7f5c13af633d41a2185ebb7f | [
"MIT"
] | null | null | null | test/test_timematcher.py | magus0219/clockwork | 78c08afdd14f226d7f5c13af633d41a2185ebb7f | [
"MIT"
] | null | null | null | '''
Created on Feb 17, 2014
@author: magus0219
'''
import unittest,datetime
from util.dateutil import DateUtil
from core.timematcher import TimeMatcher
from core.timepattern import TimePattern
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testUnvaidValue']
unittest.main()
| 47.571429 | 102 | 0.562162 |
4d8dccb89c879711e7ed922439a880fb09054040 | 276 | py | Python | pets/api/urls.py | tekodan/DaleLaPata | 7a998f617d88c3f71fe5da896f2197fc0043a731 | [
"MIT"
] | 1 | 2019-05-06T18:44:43.000Z | 2019-05-06T18:44:43.000Z | pets/api/urls.py | tekodan/DaleLaPata | 7a998f617d88c3f71fe5da896f2197fc0043a731 | [
"MIT"
] | 10 | 2021-03-18T21:20:55.000Z | 2022-03-11T23:33:03.000Z | pets/api/urls.py | koyoo-maxwel/findyourpet | ae5978f9ddd9e116d332734d2a45c76d7c6ac1f6 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from api import views
urlpatterns = [
url(r'^pets/$', views.ListPets.as_view(), name='list_pets'),
url(r'^cities/$', views.CityList.as_view(), name='city-list'),
url(r'^states/$', views.StateList.as_view(), name='state-list'),
]
| 27.6 | 68 | 0.663043 |
4d8e2ab661763c22c669a9c4b0bb47f144d56291 | 1,503 | py | Python | app-engine-utility-service/toggleIndex.py | isabella232/gov-meetings-made-searchable | bcbb13544fbfb8e5d5a12c66885fdb54ae52584a | [
"Apache-2.0"
] | 26 | 2019-03-06T15:47:21.000Z | 2022-03-30T17:25:20.000Z | app-engine-utility-service/toggleIndex.py | google/gov-meetings-made-searchable | bcbb13544fbfb8e5d5a12c66885fdb54ae52584a | [
"Apache-2.0"
] | 4 | 2021-02-08T20:27:35.000Z | 2021-09-08T00:50:23.000Z | app-engine-utility-service/toggleIndex.py | isabella232/gov-meetings-made-searchable | bcbb13544fbfb8e5d5a12c66885fdb54ae52584a | [
"Apache-2.0"
] | 8 | 2019-03-06T18:48:16.000Z | 2021-08-14T14:33:33.000Z | #!/usr/bin/env python
# This is not an officially supported Google product, though support
# will be provided on a best-effort basis.
# Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ujson
import webapp2
import utilities
app = webapp2.WSGIApplication([
("/toggleIndex", main)], debug = True
)
| 25.913793 | 77 | 0.727878 |
4d8e30f69b17eb91d913983be8d413e7774df4f1 | 1,131 | py | Python | app.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 37 | 2022-01-12T07:07:59.000Z | 2022-03-31T10:25:46.000Z | app.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 1 | 2022-01-25T12:24:57.000Z | 2022-02-03T10:45:00.000Z | app.py | Jianghuchengphilip/Master-art-punk | 4102d82148bf571e0cd418e363c51fa8486c5a43 | [
"Apache-2.0"
] | 10 | 2022-01-12T07:29:37.000Z | 2022-03-28T23:37:42.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""=================================================
@Author
@Date 2021/9/22 17:04
@Desc
=================================================="""
from colors import ColorMultiImage
import settings
from model import training
import csv
if __name__ == '__main__':
generate_color = ColorMultiImage()
stickers = settings.module # mousecattle
if settings.train:
color_model_path = training(settings.color_data_path)
print(":" + color_model_path)
if settings.color_style == 1:
f = open(settings.color_model_path, "r+", encoding="utf-8-sig")
reader = csv.reader(f)
colors_max = len(list(reader))
print(f"{colors_max}")
for amount in range(0, settings.n): #
pixel = generate_color.merges(stickers)
colors_number = generate_color.colors_number
generate_color.generate(pixel, settings.color_output_name,str(amount),settings.color_model_path,settings.color_style,colors_number)
print(f"INFO:{str(amount)}{settings.color_output_name}") | 41.888889 | 139 | 0.640141 |
4d8fb7a2d431e379774da24a3d509b9f7f50930d | 3,345 | py | Python | indivsims-dist.py | LohmuellerLab/Forward_Neanderthal | bee8b9ab6afc61942526a3e842c499fbe9cf6fdc | [
"MIT"
] | 3 | 2016-04-02T14:02:36.000Z | 2018-11-07T18:36:02.000Z | indivsims-dist.py | LohmuellerLab/Forward_Neanderthal | bee8b9ab6afc61942526a3e842c499fbe9cf6fdc | [
"MIT"
] | null | null | null | indivsims-dist.py | LohmuellerLab/Forward_Neanderthal | bee8b9ab6afc61942526a3e842c499fbe9cf6fdc | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
usage: demoselsim.py outfilename popn h pct
"""
import numpy
import sys
if __name__ == "__main__":
main()
| 21.862745 | 70 | 0.581764 |
4d90bf3108f4644d16656af50720d7f25c3d7eaf | 4,692 | py | Python | core/modules.py | egecakmak/SAnD | 73cc3560450312cd2916b45ac043cc763539e5ad | [
"MIT"
] | 43 | 2019-12-27T12:46:31.000Z | 2022-03-12T06:52:01.000Z | core/modules.py | LuisMoralesAlonso/SAnD | d6d214b3681ef2f14b76a6e32f86c0c69022e2ee | [
"MIT"
] | 7 | 2020-04-27T13:16:45.000Z | 2021-12-13T13:06:47.000Z | core/modules.py | LuisMoralesAlonso/SAnD | d6d214b3681ef2f14b76a6e32f86c0c69022e2ee | [
"MIT"
] | 8 | 2020-01-11T17:08:59.000Z | 2021-04-10T15:15:21.000Z | import math
import torch
import numpy as np
import torch.nn as nn
| 32.358621 | 95 | 0.589301 |
4d92c6dffc16135c5125b569d46c22e978986d36 | 4,350 | py | Python | samcli/commands/local/cli_common/options.py | trenton/aws-sam-cli | 11db934d3584c17fb5ba94d0e92e291c2c91d7c9 | [
"Apache-2.0"
] | 1 | 2019-12-24T17:27:09.000Z | 2019-12-24T17:27:09.000Z | samcli/commands/local/cli_common/options.py | ShreyaGangishetty/aws-sam-cli | f896920468770f3407a3035b9c8e04902578d556 | [
"Apache-2.0"
] | 1 | 2021-06-02T02:44:08.000Z | 2021-06-02T02:44:08.000Z | samcli/commands/local/cli_common/options.py | CavHack/aws-sam-cli | 9355b7b613af907055b9ea5fb199f5d6d501c490 | [
"Apache-2.0"
] | null | null | null | """
Common CLI options for invoke command
"""
import click
from samcli.commands._utils.options import template_click_option, docker_click_options, parameter_override_click_option
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
def get_application_dir():
    """Locate the per-user "AWS SAM" application config directory.

    Returns
    -------
    Path
        Path representing the application config directory
    """
    # TODO: Get the config directory directly from `GlobalConfig`
    config_dir = click.get_app_dir("AWS SAM", force_posix=True)
    return Path(config_dir)
def get_default_layer_cache_dir():
    """Build the default layer cache directory path.

    The cache lives in a ``layers-pkg`` subdirectory of the application
    config directory.

    Returns
    -------
    str
        String representing the layer cache directory
    """
    return str(get_application_dir() / "layers-pkg")
def invoke_common_options(f):
    """
    Common CLI options shared by "local invoke" and "local start-api" commands

    Decorates ``f`` with the shared "sam local" Click options: template,
    env-vars, parameter overrides, debugging, Docker, layer-cache and
    image-build settings.

    :param f: Callback passed by Click
    :return: the same callback, wrapped with all common Click options
    """
    invoke_options = (
        [
            template_click_option(),
            click.option(
                "--env-vars",
                "-n",
                type=click.Path(exists=True),
                help="JSON file containing values for Lambda function's environment variables.",
            ),
            parameter_override_click_option(),
            click.option(
                "--debug-port",
                "-d",
                help="When specified, Lambda function container will start in debug mode and will expose this "
                "port on localhost.",
                envvar="SAM_DEBUG_PORT",
            ),
            click.option(
                "--debugger-path", help="Host path to a debugger that will be mounted into the Lambda container."
            ),
            click.option(
                "--debug-args", help="Additional arguments to be passed to the debugger.", envvar="DEBUGGER_ARGS"
            ),
            click.option(
                "--docker-volume-basedir",
                "-v",
                envvar="SAM_DOCKER_VOLUME_BASEDIR",
                help="Specifies the location basedir where the SAM file exists. If the Docker is running on "
                "a remote machine, you must mount the path where the SAM file exists on the docker machine "
                "and modify this value to match the remote machine.",
            ),
            click.option("--log-file", "-l", help="logfile to send runtime logs to."),
            click.option(
                "--layer-cache-basedir",
                type=click.Path(exists=False, file_okay=False),
                envvar="SAM_LAYER_CACHE_BASEDIR",
                help="Specifies the location basedir where the Layers your template uses will be downloaded to.",
                # NOTE: this default is computed once, when this module is imported.
                default=get_default_layer_cache_dir(),
            ),
        ]
        + docker_click_options()
        + [
            click.option(
                "--force-image-build",
                is_flag=True,
                help="Specify whether CLI should rebuild the image used for invoking functions with layers.",
                envvar="SAM_FORCE_IMAGE_BUILD",
                default=False,
            )
        ]
    )
    # Reverse the list to maintain ordering of options in help text printed with --help
    for option in reversed(invoke_options):
        option(f)
    return f
| 31.294964 | 119 | 0.570115 |
4d9318a61357e8ed5c3736eac9a6f69296fcd981 | 3,083 | py | Python | Leetcode/week_6/multi_threaded_merge_sort.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | 1 | 2021-07-07T00:55:23.000Z | 2021-07-07T00:55:23.000Z | Leetcode/week_6/multi_threaded_merge_sort.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | Leetcode/week_6/multi_threaded_merge_sort.py | SamSamhuns/wallbreakers_projekts | c07b555127ee89d6f461cea7cd87811c382086ff | [
"MIT"
] | null | null | null | import threading
# _recursive_sort(arr, 0, len(arr) - 1)
if __name__ == "__main__":
ar = [2, 4, 1, 2, 4, 5, 8, 2, 351, 2, 0]
thread1 = threading.Thread(
target=_recursive_sort, args=(ar, 0, len(ar) // 2),)
thread2 = threading.Thread(
target=_recursive_sort, args=(ar, (len(ar) // 2) + 1, len(ar) - 1,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
_merge(ar, 0, len(ar) // 2, len(ar) - 1)
print(ar)
| 25.907563 | 76 | 0.472267 |
4d93299efc0720f63f6f0dc30928e10e3a67f707 | 830 | py | Python | examples/reactjs-nodejs/deploy.py | py-mina-deploy/py-mina | 029bbe6f183afece1ccc2e2d3d11873c5560e8f4 | [
"MIT"
] | 7 | 2017-09-21T17:00:59.000Z | 2021-06-18T06:03:19.000Z | examples/reactjs-nodejs/deploy.py | py-mina-deploy/py-mina | 029bbe6f183afece1ccc2e2d3d11873c5560e8f4 | [
"MIT"
] | null | null | null | examples/reactjs-nodejs/deploy.py | py-mina-deploy/py-mina | 029bbe6f183afece1ccc2e2d3d11873c5560e8f4 | [
"MIT"
] | 2 | 2018-03-20T07:51:37.000Z | 2020-05-03T14:30:55.000Z | """
Deploy NodeJs application
https://github.com/react-boilerplate/react-boilerplate
"""
from py_mina import *
from py_mina.subtasks import git_clone, create_shared_paths, link_shared_paths, rollback_release
# Settings - shared
set('verbose', True)
set('shared_dirs', ['node_modules', 'tmp'])
set('shared_files', [])
# Tasks
| 13.606557 | 96 | 0.708434 |
4d9622c35c6cf1b3dd5d13ccfc59f523a7821253 | 4,653 | py | Python | altair_saver/savers/_saver.py | RoyalTS/altair_saver | 31febb5faf7c3d6d27c2f5fe4045635099143042 | [
"BSD-3-Clause"
] | null | null | null | altair_saver/savers/_saver.py | RoyalTS/altair_saver | 31febb5faf7c3d6d27c2f5fe4045635099143042 | [
"BSD-3-Clause"
] | null | null | null | altair_saver/savers/_saver.py | RoyalTS/altair_saver | 31febb5faf7c3d6d27c2f5fe4045635099143042 | [
"BSD-3-Clause"
] | null | null | null | import abc
import json
from typing import Any, Dict, IO, Iterable, List, Optional, Union
import altair as alt
from altair_saver.types import Mimebundle, MimebundleContent, JSONDict
from altair_saver._utils import (
extract_format,
fmt_to_mimetype,
infer_mode_from_spec,
maybe_open,
)
def save(
self, fp: Optional[Union[IO, str]] = None, fmt: Optional[str] = None
) -> Optional[Union[str, bytes]]:
"""Save a chart to file
Parameters
----------
fp : file or filename (optional)
Location to save the result. For fmt in ["png", "pdf"], file must be binary.
For fmt in ["svg", "vega", "vega-lite"], file must be text. If not specified,
the serialized chart will be returned.
fmt : string (optional)
The format in which to save the chart. If not specified and fp is a string,
fmt will be determined from the file extension.
Returns
-------
chart : string, bytes, or None
If fp is None, the serialized chart is returned.
If fp is specified, the return value is None.
"""
if fmt is None:
if fp is None:
raise ValueError("Must specify either `fp` or `fmt` when saving chart")
fmt = extract_format(fp)
if fmt not in self.valid_formats[self._mode]:
raise ValueError(f"Got fmt={fmt}; expected one of {self.valid_formats}")
content = self._serialize(fmt, "save")
if fp is None:
if isinstance(content, dict):
return json.dumps(content)
return content
if isinstance(content, dict):
with maybe_open(fp, "w") as f:
json.dump(content, f, indent=2)
elif isinstance(content, str):
with maybe_open(fp, "w") as f:
f.write(content)
elif isinstance(content, bytes):
with maybe_open(fp, "wb") as f:
f.write(content)
else:
raise ValueError(
f"Unrecognized content type: {type(content)} for fmt={fmt!r}"
)
return None
| 33.235714 | 92 | 0.573823 |
4d9a26770685da502961cca228e3f8b5f696e2a2 | 172 | py | Python | tests/conftest.py | s0undt3ch/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 2 | 2022-01-02T23:47:32.000Z | 2022-01-07T11:14:15.000Z | tests/conftest.py | UfSoft/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 1 | 2022-01-17T12:47:37.000Z | 2022-01-17T12:47:37.000Z | tests/conftest.py | s0undt3ch/mommas-cookbook | ccca526eee9241f12674cad8c1e1da1a900cef82 | [
"Apache-2.0"
] | 1 | 2022-01-10T18:49:36.000Z | 2022-01-10T18:49:36.000Z | # Import our project so that our custom logging gets setup early enough
from __future__ import annotations
import mcookbook # noqa: F401 # pylint: disable=unused-import
| 34.4 | 71 | 0.796512 |
4d9a39367847fba7d83c619b3792a5d2c6b7b745 | 401 | py | Python | shoottikala/privileges.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | null | null | null | shoottikala/privileges.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | 9 | 2017-02-15T20:36:49.000Z | 2017-05-26T12:10:43.000Z | shoottikala/privileges.py | conikuvat/shootti-ilmo | bf5ab15e20173994bac25e6b5cd3aec42f671f05 | [
"MIT"
] | null | null | null | from .exceptions import AccessDenied
| 26.733333 | 55 | 0.678304 |
4d9ba15c517c23c2ecfacb361d428d6f96edb488 | 1,976 | py | Python | scrape-scripts/co2-coalition.py | ClimateMisinformation/infrastructure | f0940b6f1814b302ff328d2f1d8a04ffa2acde64 | [
"Apache-2.0"
] | null | null | null | scrape-scripts/co2-coalition.py | ClimateMisinformation/infrastructure | f0940b6f1814b302ff328d2f1d8a04ffa2acde64 | [
"Apache-2.0"
] | null | null | null | scrape-scripts/co2-coalition.py | ClimateMisinformation/infrastructure | f0940b6f1814b302ff328d2f1d8a04ffa2acde64 | [
"Apache-2.0"
] | null | null | null | import os
from bs4 import BeautifulSoup
import html2text
import pandas
data_dir = 'co2-coalition'
data_text_dir = os.path.join(data_dir, 'text')
data_file_name = 'co2-coalition.csv'
html_converter = html2text.HTML2Text()
html_converter.body_width = 0
html_converter.ignore_images = True
f = open('html/faq.html', 'r')
content = f.read()
f.close()
faq_soup = BeautifulSoup(content, 'html.parser')
entries = {
'id' : [],
'title' : [],
'text_file_name' : [],
}
entry_index = 0
title = html_converter.handle(str(faq_soup.find('span', 'span-title2'))).strip()
content = html_converter.handle(str(faq_soup.find('p', 'p1')))
text_file_name = make_file_name(entry_index) + '.txt'
save_text(data_text_dir, text_file_name, content)
entries['id'].append(entry_index)
entries['title'].append(title)
entries['text_file_name'].append(text_file_name)
entry_index += 1
faq_entries_container = faq_soup.find('div', 'vc_tta-panels-container')
faq_entries = faq_entries_container.find_all('div', 'vc_tta-panel')
print(f'Found {len(faq_entries)} entries')
for entry in faq_entries:
title = get_text(entry, 'span', 'vc_tta-title-text', do_strip = True).capitalize()
print(f' Entry {entry_index} : {title}')
content = get_text(entry.find('div', 'vc_tta-panel-body'), 'div', 'wpb_wrapper')
text_file_name = make_file_name(entry_index) + '.txt'
save_text(data_text_dir, text_file_name, content)
entries['id'].append(entry_index)
entries['title'].append(title)
entries['text_file_name'].append(text_file_name)
entry_index += 1
d = pandas.DataFrame(entries)
d.to_csv(data_file_name, index = False)
| 22.976744 | 84 | 0.723178 |
4d9e067a4c732861782bebe97c9870ee7872595c | 218 | py | Python | tareas/3/FranciscoRodrigo-SanchezBeatriz/common/random_proc.py | Miguelp-rez/sistop-2019-2 | 428444217ba0cc98030a9d84d8b415dcddad9b65 | [
"CC-BY-4.0"
] | null | null | null | tareas/3/FranciscoRodrigo-SanchezBeatriz/common/random_proc.py | Miguelp-rez/sistop-2019-2 | 428444217ba0cc98030a9d84d8b415dcddad9b65 | [
"CC-BY-4.0"
] | null | null | null | tareas/3/FranciscoRodrigo-SanchezBeatriz/common/random_proc.py | Miguelp-rez/sistop-2019-2 | 428444217ba0cc98030a9d84d8b415dcddad9b65 | [
"CC-BY-4.0"
] | null | null | null | import random
| 19.818182 | 36 | 0.600917 |
4d9e28ca22e75c89217adddb5f64ba79edc5981d | 1,753 | py | Python | fooof/tests/test_analysis.py | anchandm/fooof | dcc93b14c4a6987ce7e394696af3221dd2a7bbd6 | [
"Apache-2.0"
] | 1 | 2019-03-26T16:30:43.000Z | 2019-03-26T16:30:43.000Z | fooof/tests/test_analysis.py | anchandm/fooof | dcc93b14c4a6987ce7e394696af3221dd2a7bbd6 | [
"Apache-2.0"
] | null | null | null | fooof/tests/test_analysis.py | anchandm/fooof | dcc93b14c4a6987ce7e394696af3221dd2a7bbd6 | [
"Apache-2.0"
] | null | null | null | """Test functions for FOOOF analysis."""
import numpy as np
from fooof.analysis import *
###################################################################################################
###################################################################################################
| 28.274194 | 99 | 0.551055 |
4d9ef0e46af27e7dc0c401a2f1d44362cba6b228 | 2,677 | py | Python | dary_heap.py | fepz/AyCC | 72a184c3da075677a2a7e5aebe50d1ceb6627ccf | [
"MIT"
] | null | null | null | dary_heap.py | fepz/AyCC | 72a184c3da075677a2a7e5aebe50d1ceb6627ccf | [
"MIT"
] | null | null | null | dary_heap.py | fepz/AyCC | 72a184c3da075677a2a7e5aebe50d1ceb6627ccf | [
"MIT"
] | null | null | null | import math
# The code is based on from http://www.cs.cmu.edu/~ckingsf/class/02713-s13/src/mst.py
# Heap item
# d-ary Heap
| 30.770115 | 89 | 0.537542 |
4d9f5affe61b40083c20917f0cdf236631978825 | 679 | py | Python | S1c_Option2.py | tatytita20/TatianaOrtizG | bfc9e4a84fe16063871ca3210373f5cd5d05ec00 | [
"BSD-2-Clause"
] | null | null | null | S1c_Option2.py | tatytita20/TatianaOrtizG | bfc9e4a84fe16063871ca3210373f5cd5d05ec00 | [
"BSD-2-Clause"
] | null | null | null | S1c_Option2.py | tatytita20/TatianaOrtizG | bfc9e4a84fe16063871ca3210373f5cd5d05ec00 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import cv2
import numpy as np
imagen = cv2.imread('wheel.png')
gray = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
_,th = cv2.threshold(gray,100,255,cv2.THRESH_BINARY)
#Para versiones OpenCV3:
img1,contornos1,hierarchy1 = cv2.findContours(th, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
img2,contornos2,hierarchy2 = cv2.findContours(th, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(imagen, contornos1, -1, (0,0,255), 2)
print ('len(contornos1[2])=',len(contornos1[2]))
print ('len(contornos2[2])=',len(contornos2[2]))
cv2.imshow('imagen',imagen)
cv2.imshow('th',th)
cv2.waitKey(0)
cv2.destroyAllWindows()
# In[ ]:
| 21.903226 | 92 | 0.73785 |
4da1078413c6ede4c933c888d3cfb359e4bb4c92 | 2,014 | py | Python | PythonFSDAM/combine_works.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | PythonFSDAM/combine_works.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | PythonFSDAM/combine_works.py | MauriceKarrenbrock/PythonFSDAM | efd4a1717af37d6598aaaca0fa520f735cf254b0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#############################################################
# Copyright (c) 2020-2021 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
"""functions to combine bound and unbound works
"""
import numpy as np
def combine_non_correlated_works(works_1, works_2):
    """combines 2 non correlated sets of work values

    If you have 2 sets of work values (for example
    bound and unbound in the case of vDSSB) that are
    uncorrelated you can combine them in order to
    get N * M resulting works. It is equivalent to
    convoluting the 2 probability distributions.

    Parameters
    ------------
    works_1 : numpy.array
        the first set of works values to combine
    works_2 : numpy.array
        the second set of works values to combine

    Returns
    -----------
    numpy.array :
        a 1-D array N * M long containing the combined
        work values, ordered as works_1[0] + works_2,
        then works_1[1] + works_2, and so on

    Notes
    ---------
    for more information check out this paper:
    Virtual Double-System Single-Box: A Nonequilibrium
    Alchemical Technique for Absolute Binding Free Energy
    Calculations: Application to Ligands of the SARS-CoV-2 Main Protease
    Marina Macchiagodena, Marco Pagliai, Maurice Karrenbrock,
    Guido Guarnieri, Francesco Iannone, and Piero Procacci
    Journal of Chemical Theory and Computation 2020 16 (11), 7160-7172
    DOI: 10.1021/acs.jctc.0c00634
    section 2 "THEORETICAL BACKGROUND"
    """
    # Flatten first: the previous implementation sized chunks with
    # len(works_2) (first-dimension length) while allocating with
    # works_2.size, which disagreed for multi-dimensional input.
    # Ravelling makes both consistent.
    flat_1 = np.ravel(works_1)
    flat_2 = np.ravel(works_2)

    # Broadcasting computes every pairwise sum (the outer sum) in a
    # single vectorized pass, replacing the Python-level loop; row-major
    # ravel preserves the original output ordering.
    return (flat_1[:, np.newaxis] + flat_2[np.newaxis, :]).ravel()
| 30.059701 | 72 | 0.604767 |
4da13726f32134c310d04e7e245dd18f5b4f2d9a | 8,622 | py | Python | modules/pointrnn_cell_impl.py | hehefan/PointRNN-PyTorch | 4d32a3dbb03ca423d5b79c6c9ae848b75cee724a | [
"MIT"
] | 35 | 2020-03-16T08:40:57.000Z | 2022-03-14T21:14:56.000Z | modules/pointrnn_cell_impl.py | hehefan/PointRNN-PyTorch | 4d32a3dbb03ca423d5b79c6c9ae848b75cee724a | [
"MIT"
] | 4 | 2021-02-23T12:33:47.000Z | 2021-12-29T06:44:34.000Z | modules/pointrnn_cell_impl.py | hehefan/PointRNN-PyTorch | 4d32a3dbb03ca423d5b79c6c9ae848b75cee724a | [
"MIT"
] | 5 | 2020-08-12T05:37:45.000Z | 2021-12-13T02:51:34.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
if __name__ == '__main__':
radius = 1
nsamples = 4
in_channels = 128
out_channels = 256
lstm = PointLSTMCell(radius, nsamples, in_channels, out_channels).to('cuda')
batch_size = 32
npoints = 1024
P1 = torch.zeros([batch_size, npoints, 3], dtype=torch.float32).to('cuda')
X1 = torch.zeros([batch_size, in_channels, npoints], dtype=torch.float32).to('cuda')
P2 = torch.zeros([batch_size, npoints, 3], dtype=torch.float32).to('cuda')
H2 = torch.zeros([batch_size, out_channels, npoints], dtype=torch.float32).to('cuda')
C2 = torch.zeros([batch_size, out_channels, npoints], dtype=torch.float32).to('cuda')
P1, H1, C1 = lstm((P1, X1), (P2, H2, C2))
print(P1.shape)
print(H1.shape)
print(C1.shape)
| 34.488 | 163 | 0.588727 |
4da267cbfd8f27c90210ee8dd60a31e0b0b8e755 | 347 | py | Python | rssplus/forms.py | Davidhw/infocatch | ddeb5d31b1eb353f41f444c5c6eec2c5d1b1ba72 | [
"BSD-2-Clause"
] | null | null | null | rssplus/forms.py | Davidhw/infocatch | ddeb5d31b1eb353f41f444c5c6eec2c5d1b1ba72 | [
"BSD-2-Clause"
] | null | null | null | rssplus/forms.py | Davidhw/infocatch | ddeb5d31b1eb353f41f444c5c6eec2c5d1b1ba72 | [
"BSD-2-Clause"
] | null | null | null | from django import forms
'''
javascriptChoices = ((2,"Keep Javascript",),(1,"Remove Some Javascript"),(0,"Remove All Javascript"))
keepJavascript = forms.ChoiceField(choices=javascriptChoices,label=" Website Javascript")
'''
| 34.7 | 105 | 0.737752 |
4da29c483d3fd7e63823596e88d9d101fcad2df3 | 78 | py | Python | wsgi.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 12 | 2021-12-18T10:41:10.000Z | 2022-03-29T23:00:29.000Z | wsgi.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 103 | 2021-12-07T08:51:15.000Z | 2022-03-31T13:28:48.000Z | wsgi.py | FlexMeasures/flexmeasures | a4367976d37ac5721b8eb3ce8a2414595e52c678 | [
"Apache-2.0"
] | 3 | 2022-01-18T04:45:48.000Z | 2022-03-14T09:48:22.000Z | from flexmeasures.app import create as create_app
application = create_app()
| 19.5 | 49 | 0.820513 |
4da2cd364bac0f635be3f42e807ba80193bddce5 | 839 | py | Python | mbus/MBusAddress.py | droid4control/python-mbus | 8e26c1847c06e57bc0e878ef3d6610dc9ba913b4 | [
"BSD-3-Clause"
] | 23 | 2015-05-19T15:57:40.000Z | 2021-03-18T11:33:22.000Z | mbus/MBusAddress.py | Sensenode/python-mbus | 9b598ada5b3da17bb513cf78e5b4a8f2a3f9a1f1 | [
"BSD-3-Clause"
] | 14 | 2015-09-20T20:26:22.000Z | 2020-05-13T16:39:15.000Z | mbus/MBusAddress.py | neurobat/python-mbus | 8e26c1847c06e57bc0e878ef3d6610dc9ba913b4 | [
"BSD-3-Clause"
] | 22 | 2015-07-27T08:50:44.000Z | 2022-03-19T01:17:18.000Z | from ctypes import Structure, Union, c_int, c_byte, c_char_p
# Inner union
| 22.675676 | 60 | 0.605483 |
4da3d5ac7b735f55566862fbadcd345f662d67b0 | 1,162 | py | Python | settings_template.py | Pierre-Thibault/memberCardGenerator | e05b421d3f50453d3603fd5513383e77378e9ccb | [
"MIT"
] | 2 | 2017-09-29T17:06:19.000Z | 2021-05-10T22:30:50.000Z | settings_template.py | Pierre-Thibault/memberCardGenerator | e05b421d3f50453d3603fd5513383e77378e9ccb | [
"MIT"
] | 1 | 2021-09-07T23:43:40.000Z | 2021-09-07T23:43:40.000Z | settings_template.py | Pierre-Thibault/memberCardGenerator | e05b421d3f50453d3603fd5513383e77378e9ccb | [
"MIT"
] | 1 | 2022-01-10T13:42:54.000Z | 2022-01-10T13:42:54.000Z | # -*- coding: utf-8 -*-
# Copy this file and renamed it settings.py and change the values for your own project
# The csv file containing the information about the member.
# There is three columns: The name, the email and the member type: 0 regular, 1 life time
CSV_FILE = "path to csv file"
# The svg file for regular member. {name} and {email} are going to be replaced with the corresponding values from the
# csv file
SVG_FILE_REGULAR = "path to svg regular member file"
# Same as SVG_FILE_REGULAR but for life time member
SVG_FILE_LIFE_TIME = "path to svg life time member file"
# Destination folder where the member cards will be generated. If the folder does not exist yet it will be created.
DEST_GENERATED_FOLDER = "path to folder that will contain the generated files"
# The message file used as the text body for the email message. UTF-8.
MSG_FILE = "/Users/pierre/Documents/LPA/CA/carte_membre_msg"
# SMTP configuration
SMPT_HOST = "myserver.com"
SMPT_PORT = 587
SMTP_USER = "user_name"
SMTP_PASSWORD = "password"
# Email configuration
EMAIL_FROM = "some_email@something.com"
EMAIL_SUBJECT = "subject"
EMAIL_PDF = "name of attachment file.pdf"
| 36.3125 | 117 | 0.766781 |
4da52c1e9bb8246e1bfb4b704fffa7ba5aef097c | 633 | py | Python | korea_client_prospect.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | 1 | 2019-10-04T18:20:43.000Z | 2019-10-04T18:20:43.000Z | korea_client_prospect.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | null | null | null | korea_client_prospect.py | DataFinnovation/api-demos-python | 1b5cf3334c537b9a09bcb8973c030ad7f19dd2ba | [
"Apache-2.0"
] | null | null | null | """the names of companies which filed fields with certain words in them"""
from df_wrappers import facts_stringquery
def main():
    """Print the names of companies whose filings matched the query."""
    # This query is easier to express in the DF query-string language.
    query_string = """
    filingsource:"Korea FSS" AND
    fieldname:(hedge OR (foreign AND exchange) OR (interest AND rate))
    """
    # Send the query off to the API wrapper.
    response = facts_stringquery(query_string, False)
    # Deduplicate company names across all result hits.
    unique_names = {hit['source']['companyname'] for hit in response['hits']}
    for company_name in unique_names:
        print(str(company_name))
main()
# eof
| 26.375 | 74 | 0.680885 |
4da5665c468aeff1b89db108dda338732a06bdb4 | 2,413 | py | Python | libs/CacheSimulator.py | architecture-helper/architecture-helper-python | 89c8e2c8ed051f5d5bcbe2283c5228a745c05e4c | [
"MIT"
] | 2 | 2020-06-15T13:08:10.000Z | 2020-06-16T13:56:04.000Z | libs/CacheSimulator.py | architecture-helper/architecture-helper-python | 89c8e2c8ed051f5d5bcbe2283c5228a745c05e4c | [
"MIT"
] | null | null | null | libs/CacheSimulator.py | architecture-helper/architecture-helper-python | 89c8e2c8ed051f5d5bcbe2283c5228a745c05e4c | [
"MIT"
] | 2 | 2020-10-31T13:21:55.000Z | 2020-10-31T13:26:34.000Z | DEBUG = False
from typing import List, Tuple
| 30.935897 | 106 | 0.613344 |
4da56c87210d815ae3ce12ef22d4660f4c50a5e6 | 4,015 | py | Python | src/db_triggers.py | serong/saypy | 19118fcf34093389c689bf540cf53521667b59f7 | [
"MIT"
] | null | null | null | src/db_triggers.py | serong/saypy | 19118fcf34093389c689bf540cf53521667b59f7 | [
"MIT"
] | null | null | null | src/db_triggers.py | serong/saypy | 19118fcf34093389c689bf540cf53521667b59f7 | [
"MIT"
] | null | null | null | """
db_triggers.py
~~~~~~~~~~~~~~
:aciklama:
Veritabanina veri girisi ve gerekli triggerlar icin
:yazar: github.com/serong
"""
import sqlite3
import db as saydb
| 27.128378 | 100 | 0.547696 |
4da617061fb7260c745a228403e1f72dc1155bd6 | 17,088 | py | Python | phathom/utils.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | 1 | 2018-04-18T11:54:29.000Z | 2018-04-18T11:54:29.000Z | phathom/utils.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | 2 | 2018-04-05T20:53:52.000Z | 2018-11-01T16:37:39.000Z | phathom/utils.py | chunglabmit/phathom | 304db7a95e898e9b03d6b2640172752d21a7e3ed | [
"MIT"
] | null | null | null | import contextlib
import os
import pickle
import numpy as np
from itertools import product, starmap
import multiprocessing
import tqdm
import sys
if sys.platform.startswith("linux"):
is_linux = True
import tempfile
else:
is_linux = False
import mmap
# import pyina.launchers
# from pyina.ez_map import ez_map
# TODO: Convert utils.py module to use pathlib module
def make_dir(path):
"""Makes a new directory at the provided path only if it doesn't already exist.
Parameters
----------
path : str
The path of the directory to make
"""
if not os.path.exists(path):
os.makedirs(path)
return os.path.abspath(path)
def files_in_dir(path):
"""Searches a path for all files
Parameters
----------
path : str
The directory path to check for files
Returns
-------
list
list of all files and subdirectories in the input path (excluding . and ..)
"""
return sorted(os.listdir(path))
def tifs_in_dir(path):
"""Searches input path for tif files
Parameters
----------
path : str
path of the directory to check for tif images
Returns
-------
tif_paths : list
list of paths to tiffs in path
tif_filenames : list
list of tiff filenames (with the extension) in path
"""
abspath = os.path.abspath(path)
files = files_in_dir(abspath)
tif_paths = []
tif_filenames = []
for f in files:
if f.endswith('.tif') or f.endswith('.tiff'):
tif_paths.append(os.path.join(abspath, f))
tif_filenames.append(f)
return tif_paths, tif_filenames
def load_metadata(path):
"""Loads a metadata.pkl file within provided path
Parameters
----------
path : str
path of a directory containing a 'metadata.pkl' file
Returns
-------
dict
dictionary containing the stored metadata
"""
return pickle_load(os.path.join(path, 'metadata.pkl'))
def pickle_save(path, data):
"""Pickles data and saves it to provided path
Parameters
----------
path : str
path of the pickle file to create / overwrite
data : dict
dictionary with data to be pickled
"""
with open(path, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def pickle_load(path):
"""Un-pickles a file provided at the input path
Parameters
----------
path : str
path of the pickle file to read
Returns
-------
dict
data that was stored in the input pickle file
"""
with open(path, 'rb') as f:
return pickle.load(f)
def chunk_dims(img_shape, chunk_shape):
"""Calculate the number of chunks needed for a given image shape
Parameters
----------
img_shape : tuple
whole image shape
chunk_shape : tuple
individual chunk shape
Returns
-------
nb_chunks : tuple
a tuple containing the number of chunks in each dimension
"""
return tuple(int(np.ceil(i/c)) for i, c in zip(img_shape, chunk_shape))
def chunk_coordinates(shape, chunks):
"""Calculate the global coordaintes for each chunk's starting position
Parameters
----------
shape : tuple
shape of the image to chunk
chunks : tuple
shape of each chunk
Returns
-------
start_coords : ndarray
the starting indices of each chunk
"""
nb_chunks = chunk_dims(shape, chunks)
start = []
for indices in product(*tuple(range(n) for n in nb_chunks)):
start.append(tuple(i*c for i, c in zip(indices, chunks)))
return np.asarray(start)
def box_slice_idx(start, stop):
"""Creates an index tuple for a bounding box from `start` to `stop` using slices
Parameters
----------
start : array-like
index of box start
stop : array-like
index of box stop (index not included in result)
Returns
-------
idx : tuple
index tuple for bounding box
"""
return tuple(np.s_[a:b] for a, b in zip(start, stop))
def extract_box(arr, start, stop):
"""Indexes `arr` from `start` to `stop`
Parameters
----------
arr : array-like or SharedMemory
input array to index
start : array-like
starting index of the slice
stop : array-like
ending index of the slice. The element at this index is not included.
Returns
-------
box : ndarray
resulting box from `arr`
"""
idx = box_slice_idx(start, stop)
if isinstance(arr, SharedMemory):
with arr.txn() as a:
box = a[idx]
else:
box = arr[idx]
return box
def insert_box(arr, start, stop, data):
"""Indexes `arr` from `start` to `stop` and inserts `data`
Parameters
----------
arr : array-like
input array to index
start : array-like
starting index of the slice
stop : array-like
ending index of the slice. The element at this index is not included.
data : array-like
sub-array to insert into `arr`
Returns
-------
box : ndarray
resulting box from `arr`
"""
idx = box_slice_idx(start, stop)
if isinstance(arr, SharedMemory):
with arr.txn() as a:
a[idx] = data
else:
arr[idx] = data
return arr
def pmap_chunks(f, arr, chunks=None, nb_workers=None, use_imap=False):
"""Maps a function over an array in parallel using chunks
The function `f` should take a reference to the array, a starting index, and the chunk size.
Since each subprocess is handling it's own indexing, any overlapping should be baked into `f`.
Caution: `arr` may get copied if not using memmap. Use with SharedMemory or Zarr array to avoid copies.
Parameters
----------
f : callable
function with signature f(arr, start_coord, chunks). May need to use partial to define other args.
arr : array-like
an N-dimensional input array
chunks : tuple, optional
the shape of chunks to use. Default tries to access arr.chunks and falls back to arr.shape
nb_workers : int, optional
number of parallel processes to apply f with. Default, cpu_count
use_imap : bool, optional
whether or not to use imap instead os starmap in order to get an iterator for tqdm.
Note that this requires input tuple unpacking manually inside of `f`.
Returns
-------
result : list
list of results for each chunk
"""
if chunks is None:
try:
chunks = arr.chunks
except AttributeError:
chunks = arr.shape
if nb_workers is None:
nb_workers = multiprocessing.cpu_count()
start_coords = chunk_coordinates(arr.shape, chunks)
args_list = []
for i, start_coord in enumerate(start_coords):
args = (arr, start_coord, chunks)
args_list.append(args)
if nb_workers > 1:
with multiprocessing.Pool(processes=nb_workers) as pool:
if use_imap:
results = list(tqdm.tqdm(pool.imap(f, args_list), total=len(args_list)))
else:
results = list(pool.starmap(f, args_list))
else:
if use_imap:
results = list(tqdm.tqdm(map(f, args_list), total=len(args_list)))
else:
results = list(starmap(f, args_list))
return results
def read_voxel_size(path, micron=True):
"""Reads in the voxel size stored in `path` CSV file with voxel dimensions in nanometers
:param path: path to CSV file containing integer values of voxel dimensions in nanometers
:param micron: Flag to return nanometers or micron
:return: voxel_size tuple in same order as in CSV
"""
with open(path, mode='r') as f:
line = f.readline().split('\n')[0]
dims = line.split(',')
voxel_size = tuple([int(d) / 1000 for d in dims])
return voxel_size
# mapper = None
#
#
# def parallel_map(fn, args):
# """Map a function over an argument list, returning one result per arg
#
# Parameters
# ----------
# fn : callable
# the function to execute
# args : list
# a list of the single argument to send through the function per invocation
#
# Returns
# -------
# list
# a list of results
#
# Notes
# -----
# The mapper is configured by two environment variables:
#
# PHATHOM_MAPPER - this is the name of one of the mapper classes. Typical
# choices are MpiPool or MpiScatter for OpenMPI and
# SlurmPool or SlurmScatter for SLURM. By default, it
# uses the serial mapper which runs on a single thread.
#
# PHATHOM_NODES - this is the number of nodes that should be used in
# parallel.
#
# By default, a serial mapper is returned if there is no mapper.
#
# Examples
# --------
# myresults = parallel_map(my_function, my_inputs)
#
# """
# global mapper
#
# if mapper is None:
# if "PHATHOM_MAPPER" in os.environ:
# mapper_name = os.environ["PHATHOM_MAPPER"]
# mapper_class = getattr(pyina.launchers, mapper_name)
# if "PHATHOM_NODES" in os.environ:
# nodes = os.environ["PHATHOM_NODES"]
# mapper = mapper_class(nodes)
# else:
# mapper = mapper_class()
# else:
# mapper = pyina.launchers.SerialMapper()
#
# return mapper.map(fn, args)
def shared_memory_to_zarr(memory, zarr, pool, offset, start=None, stop=None):
"""
Copy memory to ZARR array.
Note: offset, start and stop must be on chunk boundaries of the zarr array
:param memory: the memory array to copy to zarr
:param zarr: the zarr array
:param offset: the 3-tuple offset of the destination for the memory in
the zarr array
:param start: the 3-tuple start coordinates of the memory
:param stop: the 3-tuple stop coordinates of the memory
:param pool: the multiprocessing pool to use
"""
chunksize = zarr.chunks
shape = memory.shape
all_starts, all_stops = get_chunk_coords(chunksize, shape, start, stop)
args = [(memory, zarr, offset, a, b) for a, b in zip(all_starts, all_stops)]
pool.starmap(write_one_zarr, args)
def get_chunk_coords(chunksize, shape, start, stop):
"""
Get a sequence of chunk start coordinates and stop coordinates, given
a volume delimited by start and stop coordinates
:param chunksize: the size of a chunk in the zarr or blockfs array
:param shape: the shape of the zarr or blockfs array to handle edge case
:param start: a three-tuple of start coordinates, on a chunk boundary
:param stop: a three-tuple of stop coordinates, on a chunk boundary
:return: a sequence/iterator of start coordinates and of stop coordinates
giving the dimensions for each chunk in the volume
"""
if start is None:
start = (0, 0, 0)
if stop is None:
stop = shape
starts = [np.arange(a, b, c) for a, b, c in zip(start, stop, chunksize)]
stops = [np.minimum(a + b, c) for a, b, c in
zip(starts, chunksize, shape)]
all_starts = product(*starts)
all_stops = product(*stops)
return all_starts, all_stops
def memory_to_blockfs(memory, blockfs_dir, offset, start=None, stop=None):
"""
Write a block of memory to a Blockfs directory
:param memory: the memory to be written
:param blockfs_dir: the BlockFS directory to be written to. This must be
opened and the writer processes must have been started.
:param offset: the offset into the blockfs of the memory (a 3-tuple)
:param start: a three-tuple of start coordinates within the memory. Default
is from the start of memory.
:param stop: a three-tuple of stop coordinates within the memory. Default
is to the end of memory.
"""
chunksize = (blockfs_dir.z_block_size, blockfs_dir.y_block_size,
blockfs_dir.x_block_size)
shape = memory.shape
for (z0, y0, x0), (z1, y1, x1) in zip(*get_chunk_coords(
chunksize, shape, start, stop)):
blockfs_dir.write_block(memory[z0:z1, y0:y1, x0:x1],
x0 + offset[2], y0+offset[1], z0+offset[0]) | 30.031634 | 110 | 0.617743 |
4da7f0dcc82002a84f867e9fa7df76c1807a4a95 | 4,706 | py | Python | ys_code/src/skin_mb/data/diff_abun.py | sverbanic/ps2-npjBM | 646585d787e5ae2d553a04ea4960b36e9d05bf29 | [
"CC0-1.0"
] | null | null | null | ys_code/src/skin_mb/data/diff_abun.py | sverbanic/ps2-npjBM | 646585d787e5ae2d553a04ea4960b36e9d05bf29 | [
"CC0-1.0"
] | null | null | null | ys_code/src/skin_mb/data/diff_abun.py | sverbanic/ps2-npjBM | 646585d787e5ae2d553a04ea4960b36e9d05bf29 | [
"CC0-1.0"
] | null | null | null | from .result import Result
import numpy as np
import pandas as pd
| 36.48062 | 128 | 0.627922 |
4da96c829ebc724feb06739ebe6a1d31d5dda9bf | 49 | py | Python | while.py | egriswol/astr-119-hw-1 | e290355de8f48b9def3fdacf4779ac4a3c51a003 | [
"MIT"
] | null | null | null | while.py | egriswol/astr-119-hw-1 | e290355de8f48b9def3fdacf4779ac4a3c51a003 | [
"MIT"
] | 1 | 2018-10-18T17:49:41.000Z | 2018-10-18T17:49:41.000Z | while.py | egriswol/astr-119-hw-1 | e290355de8f48b9def3fdacf4779ac4a3c51a003 | [
"MIT"
] | 1 | 2018-10-18T01:31:32.000Z | 2018-10-18T01:31:32.000Z | i = 0
while (i<119):
print(i)
i+=10
| 7 | 14 | 0.408163 |
4da98c7999598efa40ea85ebd1e548863fec9e17 | 15,783 | py | Python | coremltools/test/neural_network/test_graph_passes.py | domcorvasce/coremltools | 391114169687b6ac9122174ff77d8072e6bf6b68 | [
"BSD-3-Clause"
] | 65 | 2019-10-02T09:56:22.000Z | 2022-03-16T22:41:14.000Z | coremltools/test/neural_network/test_graph_passes.py | domcorvasce/coremltools | 391114169687b6ac9122174ff77d8072e6bf6b68 | [
"BSD-3-Clause"
] | 51 | 2020-01-13T07:54:13.000Z | 2022-03-17T09:11:56.000Z | coremltools/test/neural_network/test_graph_passes.py | domcorvasce/coremltools | 391114169687b6ac9122174ff77d8072e6bf6b68 | [
"BSD-3-Clause"
] | 16 | 2020-03-06T09:26:03.000Z | 2022-02-05T05:35:05.000Z | import numpy as np
import unittest
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models import MLModel
from coremltools.models.neural_network.printer import print_network_spec
from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \
remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes
import copy
import pytest
DEBUG = False  # flip manually when debugging the graph passes

np.random.seed(100)  # fix the seed so randomly generated test tensors are reproducible

if __name__ == '__main__':
    # RUN_ALL_TESTS toggles between running the entire module and a single
    # hand-picked test case (useful while iterating on one pass).
    RUN_ALL_TESTS = True
    if RUN_ALL_TESTS:
        unittest.main()
    else:
        # NOTE(review): MLModelPassesTest is defined elsewhere in this module
        # (not visible in this fragment) — confirm the test name stays valid.
        suite = unittest.TestSuite()
        suite.addTest(MLModelPassesTest('test_load_constant_remove'))
        unittest.TextTestRunner().run(suite)
| 50.264331 | 120 | 0.593677 |
4da9f7a9d46841bf7af0a4af66fd041f70367d1f | 575 | py | Python | accounting_tech/migrations/0018_auto_20190403_1456.py | Tim-Ilin/asup_corp_site | 02a9573f2490ef8f31b3ba95bc351c2458d049e5 | [
"MIT"
] | null | null | null | accounting_tech/migrations/0018_auto_20190403_1456.py | Tim-Ilin/asup_corp_site | 02a9573f2490ef8f31b3ba95bc351c2458d049e5 | [
"MIT"
] | 8 | 2021-03-19T11:12:07.000Z | 2022-03-12T00:32:27.000Z | accounting_tech/migrations/0018_auto_20190403_1456.py | Tim-Ilin/asup_corp_site | 02a9573f2490ef8f31b3ba95bc351c2458d049e5 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-04-03 11:56
from django.db import migrations, models
import django.db.models.deletion
| 28.75 | 177 | 0.678261 |
4dac488dd2d30d64a8f251641aa08b45ff93ef4d | 500 | py | Python | tests/bgzip_test.py | Swiffers/puretabix | 5f0895c9b17560e76dd962a20844fffb565a4aed | [
"MIT"
] | 1 | 2021-07-07T00:18:47.000Z | 2021-07-07T00:18:47.000Z | tests/bgzip_test.py | Swiffers/puretabix | 5f0895c9b17560e76dd962a20844fffb565a4aed | [
"MIT"
] | null | null | null | tests/bgzip_test.py | Swiffers/puretabix | 5f0895c9b17560e76dd962a20844fffb565a4aed | [
"MIT"
] | null | null | null | from puretabix import get_bgzip_lines_parallel
| 35.714286 | 76 | 0.676 |
4dae657468dbf8a1f6bf472d8e316e6a356158ca | 887 | py | Python | setup.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | setup.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | setup.py | Eawag-SWW/datapool_client | a43c38f0f858a687d6354ef4d857beee59882c8a | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="datapool_client",
version="1.0",
description="Designed to access the datapool software developed by ETH Zurich - SIS and Eawag. "
"Find out more under https://datapool.readthedocs.io/en/latest/.",
author="Christian Foerster",
author_email="christian.foerster@eawag.ch",
license="MIT Licence",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.9",
],
install_requires=[
"pandas",
"numpy",
"psycopg2-binary",
"matplotlib",
"cufflinks",
"plotly",
"pyparsing==2.4.7",
"sqlalchemy",
"tqdm"
],
keywords="datapool_client, eawag, postgres",
packages=find_packages(),
include_package_data=True,
)
| 28.612903 | 100 | 0.611048 |
4daef32de5d0a4eada8ed86fdce977e9cb3a3093 | 117 | py | Python | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | [
"MIT"
] | null | null | null | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | [
"MIT"
] | null | null | null | mandaw/examples/window.py | mandaw2014/MandawEngineSDL | 597798e556751c57945b1ed6302f17cb6e9d8d22 | [
"MIT"
] | 1 | 2021-09-21T08:28:50.000Z | 2021-09-21T08:28:50.000Z | from mandaw import *
mandaw = Mandaw("Window!", width = 800, height = 600, bg_color = (0, 0, 0, 255))
mandaw.loop() | 23.4 | 80 | 0.641026 |
4daf588bf7222a0428a4b569a5e2c8de42912a40 | 1,333 | py | Python | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | [
"MIT"
] | null | null | null | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | [
"MIT"
] | null | null | null | bio-info/bio-info5.py | kyamada101/Python | a9be850b1818fb4784cb84e86b20cf2c61784e38 | [
"MIT"
] | null | null | null | import numpy as np
with open("./dice.txt",'r') as f:
input_str = f.read()
input_data=list(map(int,input_str))
inf = -float('inf')
N = len(input_data)-1
K = 2
trans_p = np.array([[0.95,0.1],[0.05,0.9]])
dice_p = np.array([[1/6,1/6,1/6,1/6,1/6,1/6],[1/10,1/10,1/10,1/10,1/10,1/2]])
transition_t = np.log(trans_p)
dice_t = np.log(dice_p)
X = np.array([[box() for l in range(K)] for k in range(N+1)])
run_viterbi(N,0)
with open('./dice_result.txt','w') as f:
f.write("Eyes of dice{}".format(input_str))
f.write("\n")
f.write("Anticipation is following \n")
trace(N,0) | 25.634615 | 104 | 0.534134 |
4dafa7a5729cfadd647edbfafb8f9ae3d3c677d0 | 10,464 | py | Python | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/tests/scheduling/schedule_config_test.py | Ket3r/script-server | 919a2b7eb29c7bba7acba8e374a0a5cc696bd859 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | from unittest import TestCase
from parameterized import parameterized
from scheduling.schedule_config import ScheduleConfig
from utils import date_utils
| 79.877863 | 115 | 0.549981 |
4db08c1ad06a4f3c0a888874af940f73222f14eb | 1,458 | py | Python | mojo/python/tests/bindings_constants_unittest.py | zbowling/mojo | 4d2ed40dc2390ca98a6fea0580e840535878f11c | [
"BSD-3-Clause"
] | 1 | 2020-04-28T14:35:10.000Z | 2020-04-28T14:35:10.000Z | mojo/python/tests/bindings_constants_unittest.py | TribeMedia/sky_engine | 4a3894ed246327931b198a7d64652bd0b615b036 | [
"BSD-3-Clause"
] | null | null | null | mojo/python/tests/bindings_constants_unittest.py | TribeMedia/sky_engine | 4a3894ed246327931b198a7d64652bd0b615b036 | [
"BSD-3-Clause"
] | 1 | 2020-04-28T14:35:11.000Z | 2020-04-28T14:35:11.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
# Generated files
# pylint: disable=F0401
import sample_service_mojom
import test_constants_mojom
| 37.384615 | 77 | 0.752401 |
4db107eb3d9119fdeaf236399451aa583978436f | 5,219 | py | Python | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | 1 | 2022-01-29T11:17:58.000Z | 2022-01-29T11:17:58.000Z | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | null | null | null | home/pi/blissflixx/lib/chanutils/chanutils.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | null | null | null | import requests, lxml.html, re
import htmlentitydefs, urllib, random
from lxml.cssselect import CSSSelector
from StringIO import StringIO
import cherrypy
import requests
from cachecontrol import CacheControl
_PROXY_LIST = None
_HEADERS = {
'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'accept-language':'en-GB,en-US;q=0.8,en;q=0.6',
'cache-control':'max-age=0',
#'user-agent':'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36',
'user-agent':'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A356 Safari/604.1',
'Client-ID':'tq6hq1srip0i37ipzuscegt7viex9fh' # Just for Twitch API
}
MOVIE_RE = re.compile(r'(.*)[\(\[]?([12][90]\d\d)[^pP][\(\[]?.*$')
SERIES_RE = re.compile(r'(.*)S(\d\d)E(\d\d).*$')
| 27.613757 | 153 | 0.637095 |
4db11a5afb53d4558aa8f33a19f180a1ecbc8f9d | 5,672 | py | Python | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | [
"Apache-2.0"
] | 39 | 2015-02-04T09:48:20.000Z | 2021-11-09T22:07:45.000Z | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | [
"Apache-2.0"
] | 37 | 2015-02-24T17:59:26.000Z | 2021-05-25T12:20:54.000Z | test/07-text-custom-field-list-test.py | hklarner/couchdb-mango | e519f224423ca4696a61d0065530103dd8c6651b | [
"Apache-2.0"
] | 21 | 2015-04-26T05:53:44.000Z | 2021-11-09T22:06:58.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import unittest
| 35.672956 | 79 | 0.571403 |
4db2f85a35fd948e670a5be341c3efca737c01ed | 6,534 | py | Python | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | [
"MIT"
] | 1 | 2021-05-04T01:33:00.000Z | 2021-05-04T01:33:00.000Z | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | [
"MIT"
] | null | null | null | tests/test_reviews_rest.py | miw-upm/betca-tpv-customer-support | e36946b934123a5c139924192a189c5ce8f3864c | [
"MIT"
] | 5 | 2021-04-02T15:42:31.000Z | 2022-03-07T09:02:16.000Z | from http import HTTPStatus
from unittest import TestCase, mock
import jwt
from fastapi.testclient import TestClient
from src.api.review_resource import REVIEWS
from src.config import config
from src.main import app
from src.models.article import Article
from src.models.review import Review
| 50.261538 | 120 | 0.718549 |
4db7b0957c01b75e339ff138abd1c5327cd961ef | 2,354 | py | Python | python/dset/write_dataset_W1BS.py | spongezhang/vlb | 52a6b2ab8608496182ac2a33c961344db4a84333 | [
"BSD-2-Clause"
] | 11 | 2017-09-08T16:32:46.000Z | 2022-02-02T15:28:22.000Z | python/dset/write_dataset_W1BS.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | [
"BSD-2-Clause"
] | 9 | 2017-09-13T20:22:51.000Z | 2019-03-13T02:38:25.000Z | python/dset/write_dataset_W1BS.py | albutko/vlb | 437245c0991948eeb36a277937a7e67d389041e4 | [
"BSD-2-Clause"
] | 3 | 2017-09-08T21:07:14.000Z | 2021-02-17T17:42:43.000Z | import json
sequence_name_list = ['A','G','L','map2photo','S']
description_list = ['Viewpoint Appearance','Viewpoint','ViewPoint Lighting','Map to Photo','Modality']
label_list = [
['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite'],
['adam', 'boat','ExtremeZoomA','face','fox','graf','mag','shop','there','vin'],
['amos1','bdom','brugge_square', 'GC2','light','madrid',\
'notredame15','paintedladies','rushmore','trevi','vatican'],
['map1', 'map2', 'map3', 'map4', 'map5', 'map6'],
['angiogram','brain1','EO-IR-2',\
'maunaloa','mms68','mms75','treebranch']
]
#label_list = [
# ['arch', 'obama', 'vprice0', 'vprice1', 'vprice2', 'yosemite']
# ]
json_data = {}
json_data['Dataset Name'] = 'W1BS'
json_data['Description'] = 'Baseline Stereo Benchmark'
json_data['url'] = 'http://cmp.felk.cvut.cz/wbs/datasets/W1BS_with_patches.tar.gz'
json_data['Sequence Number'] = len(sequence_name_list)
json_data['Sequence Name List'] = sequence_name_list
json_data['Sequences'] = []
for idx, sequence_name in enumerate(sequence_name_list):
sequence = {}
sequence['Name'] = sequence_name
sequence['Description'] = sequence_name
sequence['Label'] = description_list[idx]
sequence['Images'] = []
sequence['Image Number'] = len(label_list[idx])*2
sequence['Link Number'] = len(label_list[idx])
sequence['Links'] = []
for image_idx, image_label in enumerate(label_list[idx]):
image = {}
image['file'] = '{}/1/{}.bmp'.format(sequence_name,image_label)
image['id'] = str(image_label) + '_1'
image['label'] = str(image_label) + '_1'
sequence['Images'].append(image)
image = {}
image['file'] = '{}/2/{}.bmp'.format(sequence_name,image_label)
image['id'] = str(image_label) + '_2'
image['label'] = str(image_label) + '_2'
sequence['Images'].append(image)
link = {}
link['source'] = str(image_label) + '_1'
link['target'] = str(image_label) + '_2'
link['file'] = '{}/h/{}.txt'.format(sequence_name, image_label)
sequence['Links'].append(link)
json_data['Sequences'].append(sequence)
with open('./datasets/dataset_info/{}.json'.format('W1BS'),'w') as json_file:
json.dump(json_data, json_file, indent=2)
| 41.298246 | 102 | 0.607477 |
4db85d3fc2b5c525eb2343fee5b61c2b7cad4134 | 27 | py | Python | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | [
"BSD-2-Clause"
] | null | null | null | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | [
"BSD-2-Clause"
] | null | null | null | salman.py | Fayad-hub/Fayad-hub | b132b8266da8a36b6162feb69c67639067a90b69 | [
"BSD-2-Clause"
] | 1 | 2021-05-26T06:06:38.000Z | 2021-05-26T06:06:38.000Z | <html> salman.py
<html/>
| 6.75 | 16 | 0.592593 |
4db8e935817372c07e59b82af45086b871e6303e | 579 | py | Python | broadcast.py | InukaRanmira/Image-to-pdf | 44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911 | [
"MIT"
] | 1 | 2021-12-24T18:11:49.000Z | 2021-12-24T18:11:49.000Z | broadcast.py | InukaRanmira/Image-to-pdf | 44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911 | [
"MIT"
] | null | null | null | broadcast.py | InukaRanmira/Image-to-pdf | 44f7e33b13aba44c03c3ec5c7e4efe4efe0b1911 | [
"MIT"
] | null | null | null | from pyrogram import Client ,filters
import os
from helper.database import getid
ADMIN = int(os.environ.get("ADMIN", 1696230986))
| 30.473684 | 90 | 0.690846 |
4db99233bd49c358c3fdefaa1fa9186de53680eb | 11,351 | py | Python | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | [
"MIT"
] | null | null | null | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | [
"MIT"
] | null | null | null | scripts/calibration/cal_methods.py | jielyugt/calibration | 1b9be673fb7ff8cf481e875153b1a7649e3b6e67 | [
"MIT"
] | null | null | null | # Calibration methods including Histogram Binning and Temperature Scaling
import numpy as np
from scipy.optimize import minimize
from sklearn.metrics import log_loss
import pandas as pd
import time
from sklearn.metrics import log_loss, brier_score_loss
from tensorflow.keras.losses import categorical_crossentropy
from os.path import join
import sklearn.metrics as metrics
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.unpickle_probs import unpickle_probs
from utility.evaluation import ECE, MCE
def softmax(x):
    """
    Compute softmax values for each sets of scores in x.
    
    Parameters:
        x (numpy.ndarray): array containing m samples with n-dimensions (m,n)
    Returns:
        x_softmax (numpy.ndarray) softmaxed values for initial (m,n) array
    """
    # Subtract the per-row maximum (not the global maximum) before
    # exponentiating.  Softmax is invariant only to a constant shift
    # within a row; subtracting a single global max lets a row whose
    # logits are far below it underflow to all-zeros, producing 0/0 = NaN.
    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
    return e_x / e_x.sum(axis=1, keepdims=1)
def evaluate(probs, y_true, verbose = False, normalize = False, bins = 15):
    """
    Score a model's probabilistic predictions with several measures:
    Error Rate, ECE, MCE, NLL and Brier Score.

    Params:
        probs: a list containing probabilities for all the classes with a shape of (samples, classes)
        y_true: a list containing the actual class labels
        verbose: (bool) are the scores printed out. (default = False)
        normalize: (bool) in case of 1-vs-K calibration, the probabilities need to be normalized.
        bins: (int) - into how many bins are probabilities divided (default = 15)

    Returns:
        (error, ece, mce, loss, brier), returns various scoring measures
    """
    predictions = np.argmax(probs, axis=1)  # highest-probability class wins
    top_conf = np.max(probs, axis=1)
    if normalize:
        # 1-vs-K calibrated outputs need not sum to one, so rescale
        # the winning confidence by the row total.
        confidences = top_conf / np.sum(probs, axis=1)
    else:
        confidences = top_conf

    accuracy = metrics.accuracy_score(y_true, predictions) * 100
    error = 100 - accuracy

    # Calibration errors share the same bin width.
    width = 1 / bins
    ece = ECE(confidences, predictions, y_true, bin_size=width)
    mce = MCE(confidences, predictions, y_true, bin_size=width)

    loss = log_loss(y_true=y_true, y_pred=probs)

    # Probability the model assigned to each sample's true class.
    true_class_probs = np.array([row[label] for row, label in zip(probs, y_true)])
    brier = brier_score_loss(y_true=y_true, y_prob=true_class_probs)  # MSE on the true class

    if verbose:
        for title, value in (("Accuracy:", accuracy), ("Error:", error),
                             ("ECE:", ece), ("MCE:", mce),
                             ("Loss:", loss), ("brier:", brier)):
            print(title, value)

    return (error, ece, mce, loss, brier)
def cal_results(fn, path, files, m_kwargs = None, approach = "all"):
    """
    Calibrate model scores, using output from logits files and a given function (fn).

    Two approaches are implemented, "all" and "1-vs-K"; the approach chosen must
    match the kind of calibration function passed in.
    TODO: split calibration of single and all into separate functions for more use cases.

    Params:
        fn (class): class of the calibration method used. It must contain methods "fit" and "predict",
            where first fits the models and second outputs calibrated probabilities.
        path (string): path to the folder with logits files
        files (list of strings): pickled logits files ((logits_val, y_val), (logits_test, y_test))
        m_kwargs (dictionary): keyword arguments for the calibration class initialization
            (None means no extra arguments).
        approach (string): "all" for multiclass calibration and "1-vs-K" for 1-vs-K approach.

    Returns:
        df (pandas.DataFrame): dataframe with calibrated and uncalibrated results for all the input files.
    """
    # FIX: the default used to be the mutable literal {} (shared across calls);
    # use a None sentinel and build a fresh dict per call instead.
    if m_kwargs is None:
        m_kwargs = {}

    df = pd.DataFrame(columns=["Name", "Error", "ECE", "MCE", "Loss", "Brier"])

    total_t1 = time.time()

    for i, f in enumerate(files):
        # Model name is the middle part of "<prefix>_<name...>_<suffix>".
        name = "_".join(f.split("_")[1:-1])
        print(name)
        t1 = time.time()

        FILE_PATH = join(path, f)
        (logits_val, y_val), (logits_test, y_test) = unpickle_probs(FILE_PATH)

        if approach == "all":
            y_val = y_val.flatten()

            # One model calibrates all classes jointly on the validation logits.
            model = fn(**m_kwargs)
            model.fit(logits_val, y_val)

            probs_val = model.predict(logits_val)
            probs_test = model.predict(logits_test)

            # Uncalibrated baseline vs calibrated scores on the test set.
            error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True)  # Test before scaling
            error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False)

            print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))

        else:  # 1-vs-k models
            probs_val = softmax(logits_val)  # Softmax logits
            probs_test = softmax(logits_test)

            K = probs_test.shape[1]

            # Go through all the classes: calibrate each class column separately.
            for k in range(K):
                # Prep class labels (1 fixed true class, 0 other classes)
                # NOTE(review): the [:, 0] indexing assumes y_val is a (n, 1)
                # column vector in this branch - confirm against the pickles.
                y_cal = np.array(y_val == k, dtype="int")[:, 0]

                # Train model
                model = fn(**m_kwargs)
                model.fit(probs_val[:, k], y_cal)  # Get only one column with probs for given class "k"

                probs_val[:, k] = model.predict(probs_val[:, k])  # Predict new values based on the fittting
                probs_test[:, k] = model.predict(probs_test[:, k])

            # Replace NaN with 0, as it should be close to zero  # TODO is it needed?
            idx_nan = np.where(np.isnan(probs_test))
            probs_test[idx_nan] = 0

            idx_nan = np.where(np.isnan(probs_val))
            probs_val[idx_nan] = 0

            # Get results for test set (uncalibrated baseline vs calibrated).
            error, ece, mce, loss, brier = evaluate(softmax(logits_test), y_test, verbose=True, normalize=False)
            error2, ece2, mce2, loss2, brier2 = evaluate(probs_test, y_test, verbose=False, normalize=True)

            print("Error %f; ece %f; mce %f; loss %f, brier %f" % evaluate(probs_val, y_val, verbose=False, normalize=True))

        # Even rows: uncalibrated; odd rows: calibrated ("_calib" suffix).
        df.loc[i*2] = [name, error, ece, mce, loss, brier]
        df.loc[i*2+1] = [(name + "_calib"), error2, ece2, mce2, loss2, brier2]

        t2 = time.time()
        print("Time taken:", (t2-t1), "\n")

    total_t2 = time.time()
    print("Total time taken:", (total_t2-total_t1))

    return df
| 36.616129 | 124 | 0.602854 |
4db9d563f39ee0c9eaa0404dfef96153f8e1cbb5 | 2,259 | py | Python | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-24T21:10:03.000Z | 2019-05-22T13:19:35.000Z | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/nn/projection.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-25T01:36:44.000Z | 2019-12-13T15:04:03.000Z | """Module which implements various types of projections."""
from typing import List, Callable
import tensorflow as tf
from neuralmonkey.nn.utils import dropout
def maxout(inputs: tf.Tensor,
           size: int,
           scope: str = "MaxoutProjection") -> tf.Tensor:
    """Maxout projection (Goodfellow et al., 2013).

    http://arxiv.org/pdf/1302.4389.pdf

    Projects the input to twice the requested width and keeps, for each
    output unit, the larger of its two pre-activations:

        z = Wx + b
        y_i = max(z_{2i-1}, z_{2i})

    Arguments:
        inputs: A tensor or list of tensors. It should be 2D tensors with
            equal length in the first dimension (batch size)
        size: The size of dimension 1 of the output tensor.
        scope: The name of the scope used for the variables

    Returns:
        A tensor of shape batch x size
    """
    with tf.variable_scope(scope):
        # Dense projection to 2*size units, then pair them up and max-pool
        # over each pair to obtain the final size-wide output.
        pre_activations = tf.layers.dense(inputs, size * 2, name=scope)
        paired = tf.reshape(pre_activations, [-1, 1, 2, size])
        pooled = tf.nn.max_pool(
            paired, [1, 1, 2, 1], [1, 1, 2, 1], "SAME")
        return tf.reshape(pooled, [-1, size])
def glu(input_: tf.Tensor,
        gating_fn: Callable[[tf.Tensor], tf.Tensor] = tf.sigmoid) -> tf.Tensor:
    """Gated Linear Unit (Dauphin et al., 2016).

    http://arxiv.org/abs/1612.08083

    Splits the last dimension in half and multiplies the first half by a
    gate computed from the second half; the last dimension must be even.
    """
    shape = input_.get_shape().as_list()
    last_axis = len(shape) - 1
    if shape[-1] % 2 != 0:
        raise ValueError("Input size should be an even number")
    linear_part, gate_part = tf.split(input_, 2, axis=last_axis)
    return linear_part * gating_fn(gate_part)
| 29.723684 | 79 | 0.611332 |
4db9da7c1b63e7750aa55027e3ed9ae8620596ff | 148 | py | Python | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | 1 | 2018-08-13T09:57:34.000Z | 2018-08-13T09:57:34.000Z | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | null | null | null | test_default_application/application.py | Ca11MeE/dophon | 6737b0f0dc9ec2c2229865940c3c6d6ee326fc28 | [
"Apache-2.0"
] | null | null | null | # properties detail please read \
# default_properties.py in dophon_properties module \
# inside your used dophon module package.
# author: CallMeE
| 29.6 | 53 | 0.790541 |
4dbb39dfe98094fd9498b0f54ec487acfb06c3ae | 5,989 | py | Python | afk.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 1 | 2021-06-21T16:58:48.000Z | 2021-06-21T16:58:48.000Z | afk.py | bsoyka/sunset-bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-08-13T16:52:51.000Z | 2021-09-01T13:05:42.000Z | afk.py | sunset-vacation/bot | ea05000e52e1883ddba77ab754e5f733c8b3375c | [
"MIT"
] | 4 | 2021-06-21T22:16:12.000Z | 2021-08-11T21:01:19.000Z | from datetime import datetime
from textwrap import shorten
from typing import Optional, Union
import discord
from discord.abc import Messageable
from discord.errors import Forbidden
from discord.ext.commands import Bot, Cog, Context, check, command, is_owner
from discord.utils import get
from config import CONFIG
from database import Afk as DbAfk
from database import User as DbUser
from database import get_user
| 29.79602 | 135 | 0.574553 |
4dbb6e20146ebd5e06052b663dfca81bbe6df5e3 | 2,176 | pyde | Python | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | [
"MIT"
] | null | null | null | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | [
"MIT"
] | null | null | null | dice/dice.pyde | ahoefnagel/ProjectA-Digital-Components | 79d326f9beb433ded191187ef13d3b5a823057ef | [
"MIT"
] | null | null | null | # The shape of the numbers for the dice
dice = { 1: [[None, None, None], [None, "", None], [None, None, None]],
2: [["", None, None], [None, None, None], [None, None, ""]],
3: [["", None, None], [None, "", None], [None, None, ""]],
4: [["", None, ""], [None, None, None], ["", None, ""]],
5: [["", None, ""], [None, "", None], ["", None, ""]],
6: [["", None, ""], ["", None, ""], ["", None, ""]]}
dice_cnt = 3
| 32 | 103 | 0.483456 |
4dbc315472764792a06e13d3e501c508bdc38cb4 | 769 | py | Python | src/pdf/domain/encrypt.py | ichiro-kazusa/PDFCon | 529c22145bfd20919b015b5ba70e8bab33feed01 | [
"MIT"
] | null | null | null | src/pdf/domain/encrypt.py | ichiro-kazusa/PDFCon | 529c22145bfd20919b015b5ba70e8bab33feed01 | [
"MIT"
] | null | null | null | src/pdf/domain/encrypt.py | ichiro-kazusa/PDFCon | 529c22145bfd20919b015b5ba70e8bab33feed01 | [
"MIT"
] | null | null | null |
from ..command.encrypt import EncryptionCommand
| 24.806452 | 76 | 0.625488 |
4dbd2fe8b25b1e5fbb2859a59ee51157048af1ed | 709 | py | Python | photos/migrations/0002_auto_20190616_1812.py | savannah8/The-gallery | ddb95d08e81e874fe8b24046a2acc40be0a68cbe | [
"Unlicense"
] | null | null | null | photos/migrations/0002_auto_20190616_1812.py | savannah8/The-gallery | ddb95d08e81e874fe8b24046a2acc40be0a68cbe | [
"Unlicense"
] | 5 | 2020-06-05T21:16:21.000Z | 2021-09-08T01:03:08.000Z | photos/migrations/0002_auto_20190616_1812.py | savannah8/The-gallery | ddb95d08e81e874fe8b24046a2acc40be0a68cbe | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-06-16 15:12
from __future__ import unicode_literals
from django.db import migrations
| 21.484848 | 46 | 0.543018 |
4dbd6eb36e7e0009be0cae3c766793da9b62afee | 17,853 | py | Python | scripts/old_scripts/test1.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 1 | 2021-11-02T15:16:17.000Z | 2021-11-02T15:16:17.000Z | scripts/old_scripts/test1.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 5 | 2021-04-14T17:08:59.000Z | 2021-05-27T21:41:02.000Z | scripts/old_scripts/test1.py | noambuckman/mpc-multiple-vehicles | a20949c335f1af97962569eed112e6cef46174d9 | [
"MIT"
] | 2 | 2022-02-07T08:16:05.000Z | 2022-03-09T23:30:17.000Z | import time, datetime, argparse
import os, sys
import numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
import copy as cp
import pickle
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import casadi as cas
import src.MPC_Casadi as mpc
import src.TrafficWorld as tw
import src.IterativeBestResponseMPCMultiple as mibr
import src.car_plotting_multiple as cmplot
##########################################################
# SVO angle assigned below to every non-ambulance vehicle's theta_iamb.
svo_theta = np.pi/4.0
# random_seed = args.random_seed[0]
random_seed = 3
NEW = True  # True: create a fresh timestamped results directory tree
if NEW:
    optional_suffix = "ellipses"
    subdir_name = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + optional_suffix
    folder = "results/" + subdir_name + "/"
    os.makedirs(folder)
    os.makedirs(folder+"imgs/")
    os.makedirs(folder+"data/")
    os.makedirs(folder+"vids/")
    os.makedirs(folder+"plots/")
else:
    # Re-use the output directory of a previous run.
    subdir_name = "20200224-103456_real_dim_CA"
    folder = "results/" + subdir_name + "/"
print(folder)
if random_seed > 0:
    np.random.seed(random_seed)
#######################################################################
T = 3 # MPC Planning Horizon
dt = 0.3
N = int(T/dt) #Number of control intervals in MPC
n_rounds_mpc = 6
percent_mpc_executed = 0.5 ## This is the percent of MPC that is executed
number_ctrl_pts_executed = int(np.floor(N*percent_mpc_executed))
XAMB_ONLY = False  # True would skip the other vehicles' best responses
n_other = 2        # number of non-ambulance vehicles
n_rounds_ibr = 2   # iterative-best-response rounds per MPC step
world = tw.TrafficWorld(2, 0, 1000)
# large_world = tw.TrafficWorld(2, 0, 1000, 5.0)
#########################################################################
# Buffers for the *executed* closed-loop trajectories, stitched together
# from the executed portion of each MPC solve (6 states, 2 controls).
actual_xamb = np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1))
actual_uamb = np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed))
actual_xothers = [np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)) for i in range(n_other)]
actual_uothers = [np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed)) for i in range(n_other)]
actual_all_other_x0 = [np.zeros((6, 2*N)) for i in range(n_other)]
xamb = np.zeros(shape=(6, N+1))
t_start_time = time.time()
####################################################
## Create the Cars in this Problem
all_other_x0 = []   # per-vehicle initial states (6-vectors)
all_other_u = []    # per-vehicle initial control guesses (2 x N)
all_other_MPC = []  # per-vehicle MPC objects
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
next_x0 = 0  # longitudinal spawn position, advanced per even-indexed vehicle
for i in range(n_other):
    x1_MPC = mpc.MPC(dt)
    x1_MPC.n_circles = 3
    x1_MPC.theta_iamb = svo_theta
    x1_MPC.N = N
    x1_MPC.k_change_u_v = 0.001
    x1_MPC.max_delta_u = 50 * np.pi/180 * x1_MPC.dt
    x1_MPC.k_u_v = 0.01
    x1_MPC.k_u_delta = .00001
    # NOTE(review): k_change_u_v is assigned twice (0.001 above, 0.01 here);
    # this second value wins - confirm which was intended.
    x1_MPC.k_change_u_v = 0.01
    x1_MPC.k_change_u_delta = 0.001
    x1_MPC.k_s = 0
    x1_MPC.k_x = 0
    x1_MPC.k_x_dot = -1.0 / 100.0
    x1_MPC.k_lat = 0.001
    x1_MPC.k_lon = 0.0
    x1_MPC.k_phi_error = 0.001
    x1_MPC.k_phi_dot = 0.01
    ####Vehicle Initial Conditions
    # Alternate lanes; even-indexed vehicles also advance the spawn position.
    if i%2 == 0:
        lane_number = 0
        next_x0 += x1_MPC.L + 2*x1_MPC.min_dist
    else:
        lane_number = 1
    initial_speed = 0.75*x1_MPC.max_v
    traffic_world = world
    x1_MPC.fd = x1_MPC.gen_f_desired_lane(traffic_world, lane_number, True)
    x0 = np.array([next_x0, traffic_world.get_lane_centerline_y(lane_number), 0, 0, initial_speed, 0]).T
    ## Set the initial control of the other vehicles
    u1 = np.zeros((2,N))
    # u1[0,:] = np.clip(np.pi/180 *np.random.normal(size=(1,N)), -2 * np.pi/180, 2 * np.pi/180)
    SAME_SIDE = False
    if lane_number == 1 or SAME_SIDE:
        u1[0,0] = 2 * np.pi/180
    else:
        u1[0,0] = -2 * np.pi/180
    # NOTE(review): this unconditionally overrides the branch above, so the
    # initial steering is always zero - confirm this is deliberate.
    u1[0,0] = 0
    all_other_MPC += [x1_MPC]
    all_other_x0 += [x0]
    all_other_u += [u1]
# Settings for Ambulance (copied from the last vehicle's MPC, then retuned
# to be purely egoistic: theta_iamb = 0).
amb_MPC = cp.deepcopy(x1_MPC)
amb_MPC.theta_iamb = 0.0
amb_MPC.k_u_v = 0.0000
amb_MPC.k_u_delta = .01
amb_MPC.k_change_u_v = 0.0000
amb_MPC.k_change_u_delta = 0
amb_MPC.k_s = 0
amb_MPC.k_x = 0
amb_MPC.k_x_dot = -1.0 / 100.0
# NOTE(review): k_x and k_x_dot are re-assigned here, overriding the two
# lines above - confirm which weighting was intended.
amb_MPC.k_x = -1.0/100
amb_MPC.k_x_dot = 0
amb_MPC.k_lat = 0.00001
amb_MPC.k_lon = 0.0
# amb_MPC.min_v = 0.8*initial_speed
amb_MPC.max_v = 35 * 0.447 # m/s
amb_MPC.k_phi_error = 0.1
amb_MPC.k_phi_dot = 0.01
NO_GRASS = False  # True would shrink the drivable band by the grass strips
amb_MPC.min_y = world.y_min
amb_MPC.max_y = world.y_max
if NO_GRASS:
    amb_MPC.min_y += world.grass_width
    amb_MPC.max_y -= world.grass_width
amb_MPC.fd = amb_MPC.gen_f_desired_lane(world, 0, True)
x0_amb = np.array([0, 0, 0, 0, initial_speed , 0]).T
# NOTE(review): "%i" uses the final loop index, so only the last vehicle's
# MPC settings are pickled - presumably this dump belongs inside the loop.
pickle.dump(x1_MPC, open(folder + "data/"+"mpc%d"%i + ".p",'wb'))
pickle.dump(amb_MPC, open(folder + "data/"+"mpcamb" + ".p",'wb'))
########################################################################
#### SOLVE THE MPC #####################################################
for i_mpc in range(n_rounds_mpc):
    min_slack = np.infty
    actual_t = i_mpc * number_ctrl_pts_executed
    ###### Update the initial conditions for all vehicles: each vehicle
    ###### restarts from the last executed point of the previous MPC solve.
    if i_mpc > 0:
        x0_amb = xamb[:, number_ctrl_pts_executed]
        for i in range(len(all_other_x0)):
            all_other_x0[i] = all_other_x[i][:, number_ctrl_pts_executed]
    ###### Initial guess for the other u. This will be updated once the other vehicles
    ###### solve the best response to the ambulance. Initial guess just looks at the last solution.
    # Obtain a simulated trajectory from other vehicle control inputs
    all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
    all_other_x_des = [np.zeros(shape=(3, N+1)) for i in range(n_other)]
    for i in range(n_other):
        if i_mpc == 0:
            # FIX: controls are 2-dimensional (steering, acceleration) -
            # this array was previously allocated as np.zeros(shape=(6,N)),
            # which does not match the (2, N) control arrays used everywhere
            # else (u1 init, actual_uothers) and would break the simulation.
            all_other_u[i] = np.zeros(shape=(2, N))
        else:
            # Shift out the executed controls and pad by repeating the last one.
            all_other_u[i] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1)
        x_mpci, u_all_i, x_0_i = all_other_MPC[i], all_other_u[i], all_other_x0[i]
        all_other_x[i], all_other_x_des[i] = x_mpci.forward_simulate_all(x_0_i, u_all_i)
for i_rounds_ibr in range(n_rounds_ibr):
    ########## Solve the Ambulance MPC ##########
    # The ambulance responds to the other vehicles' current trajectories.
    response_MPC = amb_MPC
    response_x0 = x0_amb
    nonresponse_MPC_list = all_other_MPC
    nonresponse_x0_list = all_other_x0
    nonresponse_u_list = all_other_u
    nonresponse_x_list = all_other_x
    nonresponse_xd_list = all_other_x_des
    ################# Generate the warm starts ###############################
    u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
    ### Ambulance Warm Start
    if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
        u_warm_profiles["previous"] = uamb
    else:
        # take the control inputs of the last MPC and continue the ctrl
        if i_mpc > 0:
            u_warm_profiles["previous"] = np.concatenate((uamb[:, number_ctrl_pts_executed:], np.tile(uamb[:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
    #######################################################################
    # Try every warm start; keep the lowest-cost feasible solution.
    min_response_cost = 99999999
    for k_warm in u_warm_profiles.keys():
        u_warm = u_warm_profiles[k_warm]
        x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
        bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, None, nonresponse_MPC_list )
        k_slack = 10000.0
        k_CA = 0.000000000000000
        k_CA_power = 4
        wall_CA = True
        bri.k_slack = k_slack
        bri.k_CA = k_CA
        bri.k_CA_power = k_CA_power
        bri.world = world
        bri.wall_CA = wall_CA
        # for slack_var in bri.slack_vars_list: ## Added to constrain slacks
        #     bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
        INFEASIBLE = True
        bri.generate_optimization(N, T, response_x0, None, nonresponse_x0_list, 1, slack=False)
        bri.opti.set_initial(bri.u_opt, u_warm)
        bri.opti.set_initial(bri.x_opt, x_warm)
        bri.opti.set_initial(bri.x_desired, x_des_warm)
        ### Set the trajectories of the nonresponse vehicles (as given)
        for i in range(n_other):
            bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
            bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
        ### Solve the Optimization
        # Debugging
        # plot_range = [N]
        # bri.opti.callback(lambda i: bri.debug_callback(i, plot_range))
        # bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
        try:
            bri.solve(None, nonresponse_u_list)
            x1, u1, x1_des, _, _, _, _, _, _ = bri.get_solution()
            print("i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
            print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
            print("Dir:", subdir_name)
            print("k_warm", k_warm)
            INFEASIBLE = False
            # NOTE(review): min_slack is never updated after its np.infty
            # initialization, so this gate is always True - confirm intent.
            if bri.solution.value(bri.slack_cost) < min_slack:
                current_cost = bri.solution.value(bri.total_svo_cost)
                if current_cost < min_response_cost:
                    # New best ambulance response: adopt its trajectory.
                    uamb = u1
                    xamb = x1
                    xamb_des = x1_des
                    min_response_cost = current_cost
                    min_response_warm = k_warm
                    min_bri = bri
                    # file_name = folder + "data/"+'%03d'%ibr_sub_it
                    # mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
                    # mibr.save_costs(file_name, bri)
        except RuntimeError:
            print("Infeasibility: k_warm %s"%k_warm)
        # ibr_sub_it +=1
########### SOLVE FOR THE OTHER VEHICLES ON THE ROAD
# Each non-ambulance vehicle best-responds to the ambulance's new plan and
# to the other vehicles' current trajectories.
if not XAMB_ONLY:
    for i in range(len(all_other_MPC)):
        response_MPC = all_other_MPC[i]
        response_x0 = all_other_x0[i]
        nonresponse_MPC_list = all_other_MPC[:i] + all_other_MPC[i+1:]
        nonresponse_x0_list = all_other_x0[:i] + all_other_x0[i+1:]
        nonresponse_u_list = all_other_u[:i] + all_other_u[i+1:]
        nonresponse_x_list = all_other_x[:i] + all_other_x[i+1:]
        nonresponse_xd_list = all_other_x_des[:i] + all_other_x_des[i+1:]
        ################ Warm Start
        u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
        if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
            u_warm_profiles["previous"] = all_other_u[i]
        else:
            # take the control inputs of the last MPC and continue the ctrl
            if i_mpc > 0:
                u_warm_profiles["previous"] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1)
        min_response_cost = 99999999
        for k_warm in u_warm_profiles.keys():
            u_warm = u_warm_profiles[k_warm]
            x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
            bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, amb_MPC, nonresponse_MPC_list)
            bri.k_slack = k_slack
            bri.k_CA = k_CA
            bri.k_CA_power = k_CA_power
            bri.world = world
            bri.wall_CA = wall_CA
            INFEASIBLE = True
            bri.generate_optimization(N, T, response_x0, x0_amb, nonresponse_x0_list, 1, slack=False)
            bri.opti.set_initial(bri.u_opt, u_warm)
            bri.opti.set_initial(bri.x_opt, x_warm)
            bri.opti.set_initial(bri.x_desired, x_des_warm)
            ### Set the trajectories of the nonresponse vehicles (as given)
            bri.opti.set_value(bri.xamb_opt, xamb)
            # FIX: the inner loop previously reused the name "i", clobbering
            # the outer vehicle index, so the best response was written back
            # to the wrong vehicle. Use a distinct index instead.
            for j in range(len(nonresponse_x_list)):
                bri.opti.set_value(bri.allother_x_opt[j], nonresponse_x_list[j])
                bri.opti.set_value(bri.allother_x_desired[j], nonresponse_xd_list[j])
            try: ### Solve the Optimization
                bri.solve(uamb, nonresponse_u_list)
                x1_nr, u1_nr, x1_des_nr, _, _, _, _, _, _ = bri.get_solution()
                print("  i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
                print("  J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
                print("  Dir:", subdir_name)
                print("  k_warm", k_warm)
                INFEASIBLE = False
                if bri.solution.value(bri.slack_cost) < min_slack:
                    current_cost = bri.solution.value(bri.total_svo_cost)
                    if current_cost < min_response_cost:
                        # FIX: previously this *inserted* the new trajectory
                        # (list[:i] + [new] + list[i:]), which kept the stale
                        # entry, grew the lists on every accepted solve and
                        # shifted all later indices. Replace element i instead.
                        all_other_u[i] = u1_nr
                        all_other_x[i] = x1_nr
                        all_other_x_des[i] = x1_des_nr
                        min_response_cost = current_cost
                        min_response_warm = k_warm
                        min_bri = bri
            except RuntimeError:
                print("  Infeasibility: k_warm %s"%k_warm)
#
# End-of-round bookkeeping: persist this IBR round's result, stitch the
# executed portion into the closed-loop buffers, and plot diagnostics.
print(" IBR Done: Rd %02d / %02d"%(i_rounds_ibr, n_rounds_ibr))
file_name = folder + "data/"+'r%02d%03d'%(i_mpc, i_rounds_ibr)
if not INFEASIBLE:
    # NOTE(review): xothers, uothers and xothers_des are never defined in
    # this script - presumably these should be all_other_x, all_other_u and
    # all_other_x_des; as written this line raises NameError. Confirm.
    mibr.save_state(file_name, xamb, uamb, xamb_des, xothers, uothers, xothers_des)
    mibr.save_costs(file_name, bri)
    actual_t = i_mpc * number_ctrl_pts_executed
    # Copy the executed slice of the ambulance plan into the closed-loop log.
    actual_xamb[:,actual_t:actual_t+number_ctrl_pts_executed+1] = xamb[:,:number_ctrl_pts_executed+1]
    print(" MPC Done: Rd %02d / %02d"%(i_mpc, n_rounds_mpc))
    print(" Full MPC Solution", xamb[0:2,:])
    print(" Executed MPC", xamb[0:2,:number_ctrl_pts_executed+1])
    print(" Solution Costs...")
    for cost in bri.car1_costs_list:
        print("%.04f"%bri.solution.value(cost))
    print(min_bri.solution.value(min_bri.k_CA * min_bri.collision_cost), min_bri.solution.value(min_bri.collision_cost))
    print(min_bri.solution.value(min_bri.k_slack * min_bri.slack_cost), min_bri.solution.value(min_bri.slack_cost))
    print(" Save to...", file_name)
    actual_uamb[:,actual_t:actual_t+number_ctrl_pts_executed] = uamb[:,:number_ctrl_pts_executed]
    # Diagnostic plots for this MPC round.
    plot_range = range(N+1)
    for k in plot_range:
        cmplot.plot_multiple_cars( k, min_bri.responseMPC, xothers, xamb, True, None, None, None, min_bri.world, 0)
        plt.show()
    plt.plot(xamb[4,:],'--')
    plt.plot(xamb[4,:] * np.cos(xamb[2,:]))
    plt.ylabel("Velocity / Vx")
    plt.hlines(35*0.447,0,xamb.shape[1])
    plt.show()
    plt.plot(uamb[1,:],'o')
    plt.hlines(amb_MPC.max_v_u,0,xamb.shape[1])
    plt.ylabel("delta_u_v")
    plt.show()
    for i in range(len(xothers)):
        actual_xothers[i][:,actual_t:actual_t+number_ctrl_pts_executed+1] = xothers[i][:,:number_ctrl_pts_executed+1]
        actual_uothers[i][:,actual_t:actual_t+number_ctrl_pts_executed] = uothers[i][:,:number_ctrl_pts_executed]
        # all_other_u[i] = np.concatenate((uothers[i][:, number_ctrl_pts_executed:],uothers[i][:,:number_ctrl_pts_executed]),axis=1)
else:
    raise Exception("Xamb is None", i_mpc, i_rounds_ibr, "slack cost", bri.solution.value(bri.slack_cost))
# NOTE(review): "%.1d" is an unusual integer format for a float runtime -
# "%.1f" was probably intended; confirm before changing the log format.
print("Solver Done! Runtime: %.1d"%(time.time()-t_start_time))
4dbda883d570920c96f7e2803783e01d7ee5bd65 | 712 | py | Python | RenameMaps.py | uesp/uesp-dbmapscripts | f2bb914661423d19a5bd4b7c090af2b2142654c2 | [
"MIT"
] | null | null | null | RenameMaps.py | uesp/uesp-dbmapscripts | f2bb914661423d19a5bd4b7c090af2b2142654c2 | [
"MIT"
] | null | null | null | RenameMaps.py | uesp/uesp-dbmapscripts | f2bb914661423d19a5bd4b7c090af2b2142654c2 | [
"MIT"
] | null | null | null | import os
import sys
import shutil
import re
INPUT_PATH = "d:\\dbmaps\\test\\final\\"
OUTPUT_PATH = "d:\\dbmaps\\test\\zoom17\\"

# Tile filenames look like "<map>-<x>-<y>-<zoom>.<ext>".
TILE_PATTERN = re.compile('([a-zA-Z]+)-([0-9]+)-([0-9]+)-([0-9]+)\.')

for tile_name in os.listdir(INPUT_PATH):
    match = TILE_PATTERN.search(tile_name)
    if not match:
        continue  # not a tile file, skip it
    # Shift the tile coordinates and rename the map prefix to "db".
    shifted_x = int(match.group(2)) - 90
    shifted_y = int(match.group(3)) - 40
    zoom_level = match.group(4)
    new_name = f"db-{shifted_x}-{shifted_y}-{zoom_level}.jpg"
    print(f"Copying {tile_name} to {new_name}...")
    shutil.copyfile(INPUT_PATH + tile_name, OUTPUT_PATH + new_name)
| 25.428571 | 83 | 0.594101 |
4dbe3d037f49de1a5bf9a99b444cecb6fba61822 | 2,984 | py | Python | my/rtm.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | null | null | null | my/rtm.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | null | null | null | my/rtm.py | almereyda/HPI | c83bfbd21ce94a96f7af01ab0a82f20535f4aefb | [
"MIT"
] | null | null | null | """
[[https://rememberthemilk.com][Remember The Milk]] tasks and notes
"""
REQUIRES = [
'icalendar',
]
import re
from pathlib import Path
from typing import Dict, List, Optional, Iterator
from datetime import datetime
from .common import LazyLogger, get_files, group_by_key, cproperty, make_dict
from my.config import rtm as config
import icalendar # type: ignore
from icalendar.cal import Todo # type: ignore
logger = LazyLogger(__name__)
# TODO extract in a module to parse RTM's ical?
# TODO tz?
def is_completed(self) -> bool:
return self.get_status() == 'COMPLETED'
def __repr__(self):
return repr(self.todo)
def __str__(self):
return str(self.todo)
def dal():
last = get_files(config.export_path)[-1]
data = last.read_text()
return DAL(data=data, revision='TODO')
def all_tasks() -> Iterator[MyTodo]:
yield from dal().all_todos()
def active_tasks() -> Iterator[MyTodo]:
for t in all_tasks():
if not t.is_completed():
yield t
| 24.661157 | 78 | 0.612936 |
4dc1180d1faca62d2a5375c9a1f39fa3d254832a | 350 | py | Python | learnpython/fib.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2019-05-19T11:54:26.000Z | 2019-05-19T12:03:49.000Z | learnpython/fib.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 1 | 2020-11-27T07:55:15.000Z | 2020-11-27T07:55:15.000Z | learnpython/fib.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | [
"Apache-2.0"
] | 2 | 2021-09-06T18:06:12.000Z | 2021-12-31T07:44:43.000Z | # def fib2(n): # n
# result = []
# a, b = 0, 1
# while b < n:
# result.append(b)
# a, b = b, a+b
# return result
#
# a = fib2(500)
# print(a)
def recur_fibo(n):
    """Return the n-th Fibonacci number, with F(0) = 0 and F(1) = 1.

    The previous implementation recursed naively, taking exponential time;
    this iterative version returns identical values (including returning
    ``n`` unchanged for n <= 1, as before) in O(n) time.

    Params:
        n (int): index of the Fibonacci number to compute.

    Returns:
        int: the n-th Fibonacci number.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr


a = recur_fibo(10)
print(a)
4dc36673ab2928033fca50873ad953a0612cf554 | 820 | py | Python | taco/tests/aws_wrappers/dynamodb/integration/consts.py | Intsights/taco | f9a912d146d74a6539d31c33ec289eff3fbfca8f | [
"Apache-2.0"
] | 18 | 2019-09-05T07:53:26.000Z | 2021-02-15T18:23:45.000Z | taco/tests/aws_wrappers/dynamodb/integration/consts.py | Intsights/taco | f9a912d146d74a6539d31c33ec289eff3fbfca8f | [
"Apache-2.0"
] | null | null | null | taco/tests/aws_wrappers/dynamodb/integration/consts.py | Intsights/taco | f9a912d146d74a6539d31c33ec289eff3fbfca8f | [
"Apache-2.0"
] | null | null | null | import taco.aws_wrappers.dynamodb_wrapper.consts as dynamodb_consts
from taco.boto3.boto_config import Regions
DEFAULT_REGION = Regions.n_virginia.value
RESPONSE_KEY_NAME = 'Responses'
PRIMARY_KEY_NAME = 'KEY1'
ATTRIBUTE_DEFINITIONS = [
dynamodb_consts.property_schema(PRIMARY_KEY_NAME, dynamodb_consts.AttributeTypes.string_type.value)
]
PRIMARY_KEYS = [dynamodb_consts.property_schema(PRIMARY_KEY_NAME, dynamodb_consts.PrimaryKeyTypes.hash_type.value)]
ITEMS_TO_PUT_WITHOUT_PRIMARY_KEY = [{'q': 'qqq'}]
ITEMS_TO_PUT_WITH_MISMATCH_PRIMARY_KEY_VALUE = [{PRIMARY_KEY_NAME: 12}]
DEFAULT_PRIMARY_KEY_VALUE = '123abc'
VALID_ITEMS_TO_PUT = [{PRIMARY_KEY_NAME: DEFAULT_PRIMARY_KEY_VALUE}]
TABLE_ALREADY_EXISTS_MESSAGE = 'Table already exists'
SKIP_TABLE_DELETION_ERROR_MESSAGE = 'Table does not exists, skip deletion'
| 41 | 115 | 0.842683 |
4dc47970f015540fdb076bfa3a3c9a472b731090 | 1,790 | py | Python | examples/HMF_oxidation_WO3/model.py | flboudoire/chemical-kinetics | 70db1b3fc899f357d86834708950b9559b4d19fb | [
"MIT"
] | null | null | null | examples/HMF_oxidation_WO3/model.py | flboudoire/chemical-kinetics | 70db1b3fc899f357d86834708950b9559b4d19fb | [
"MIT"
] | null | null | null | examples/HMF_oxidation_WO3/model.py | flboudoire/chemical-kinetics | 70db1b3fc899f357d86834708950b9559b4d19fb | [
"MIT"
] | 2 | 2021-09-23T14:17:33.000Z | 2022-03-26T01:06:34.000Z |
import numpy as np
from scipy import constants
measured_species = ["HMF", "DFF", "HMFCA", "FFCA", "FDCA"]
all_species = measured_species.copy()
all_species.extend(["H_" + s for s in measured_species])
all_species.extend(["Hx_" + s for s in measured_species])
def derivatives(y, t, p):
"""
Calculates the derivatives from local values, used by scipy.integrate.solve_ivp
"""
c = {s:y[i] for i, s in enumerate(all_species)}
dc = dict()
dc["HMF"] = - (p["k11"] + p["k12"] + p["kH1"])*c["HMF"]
dc["DFF"] = p["k11"]*c["HMF"] - (p["k21"] + p["kH21"])*c["DFF"]
dc["HMFCA"] = p["k12"]*c["HMF"] - (p["k22"] + p["kH22"])*c["HMFCA"]
dc["FFCA"] = p["k21"]*c["DFF"] + p["k22"]*c["HMFCA"] - (p["k3"] + p["kH3"])*c["FFCA"]
dc["FDCA"] = p["k3"]*c["FFCA"] - p["kH4"]*c["FDCA"]
dc["H_HMF"] = p["kH1"]*c["HMF"] - p["kHx"]*c["H_HMF"]
dc["H_DFF"] = p["kH21"]*c["DFF"] - p["kHx"]*c["H_DFF"]
dc["H_HMFCA"] = p["kH22"]*c["HMFCA"] - p["kHx"]*c["H_HMFCA"]
dc["H_FFCA"] = p["kH3"]*c["FFCA"] - p["kHx"]*c["H_FFCA"]
dc["H_FDCA"] = p["kH4"]*c["FDCA"] - p["kHx"]*c["H_FDCA"]
dc["Hx_HMF"] = p["kHx"]*c["H_HMF"]
dc["Hx_DFF"] = p["kHx"]*c["H_DFF"]
dc["Hx_HMFCA"] = p["kHx"]*c["H_HMFCA"]
dc["Hx_FFCA"] = p["kHx"]*c["H_FFCA"]
dc["Hx_FDCA"] = p["kHx"]*c["H_FDCA"]
dy = [dc[name] for name in all_species]
return dy | 31.403509 | 96 | 0.488268 |
4dc54e3f4ce59c3a9f8980ef33d1443e375f1870 | 905 | py | Python | cayennelpp/tests/test_lpp_type_humidity.py | smlng/pycayennelpp | 28f2ba4fba602527d3369c9cfbce16b783916933 | [
"MIT"
] | 16 | 2019-02-18T10:57:51.000Z | 2022-03-29T01:54:51.000Z | cayennelpp/tests/test_lpp_type_humidity.py | smlng/pycayennelpp | 28f2ba4fba602527d3369c9cfbce16b783916933 | [
"MIT"
] | 40 | 2018-11-04T17:28:49.000Z | 2021-11-26T16:05:16.000Z | cayennelpp/tests/test_lpp_type_humidity.py | smlng/pycayennelpp | 28f2ba4fba602527d3369c9cfbce16b783916933 | [
"MIT"
] | 12 | 2018-11-09T19:06:36.000Z | 2021-05-21T17:44:28.000Z | import pytest
from cayennelpp.lpp_type import LppType
| 21.046512 | 43 | 0.667403 |
4dc60387a6447f2906c626319f97969c75a2af08 | 118 | py | Python | src/frogtips/api/__init__.py | FROG-TIPS/frog.tips-python-client | 16d1603151469522d90f352fe5bac828e4fb3e3d | [
"MIT"
] | 2 | 2019-11-04T04:00:56.000Z | 2019-11-21T19:53:36.000Z | src/frogtips/api/__init__.py | FROG-TIPS/frog.tips-python-client | 16d1603151469522d90f352fe5bac828e4fb3e3d | [
"MIT"
] | null | null | null | src/frogtips/api/__init__.py | FROG-TIPS/frog.tips-python-client | 16d1603151469522d90f352fe5bac828e4fb3e3d | [
"MIT"
] | 1 | 2019-11-21T19:53:40.000Z | 2019-11-21T19:53:40.000Z | from frogtips.api.Credentials import Credentials
from frogtips.api.Tip import Tip
from frogtips.api.Tips import Tips
| 23.6 | 48 | 0.838983 |
4dc612995d0e9e6a026d052503209dc02bc22a03 | 5,218 | py | Python | scripts/studs_dist.py | inesc-tec-robotics/carlos_controller | ffcc45f24dd534bb953d5bd4a47badd3d3d5223d | [
"BSD-3-Clause"
] | null | null | null | scripts/studs_dist.py | inesc-tec-robotics/carlos_controller | ffcc45f24dd534bb953d5bd4a47badd3d3d5223d | [
"BSD-3-Clause"
] | null | null | null | scripts/studs_dist.py | inesc-tec-robotics/carlos_controller | ffcc45f24dd534bb953d5bd4a47badd3d3d5223d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from mission_ctrl_msgs.srv import *
from studs_defines import *
import rospy
import time
import carlos_vision as crlv
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from carlos_controller.msg import StudsPoses
#import geometry_msgs.msg
#import std_msgs.msg
# incomming handlers
# publisher
# timer callback for state machine update
if __name__ == "__main__":
init_server()
| 30.87574 | 99 | 0.652166 |
4dc68b6c713419a1c2bd43c406530fcd60ac199b | 9,204 | py | Python | code/bodmas/utils.py | whyisyoung/BODMAS | 91e63bbacaa53060488c94e54af3a2fb91cfa88a | [
"BSD-2-Clause"
] | 18 | 2021-07-20T13:50:06.000Z | 2022-03-29T18:20:43.000Z | code/bodmas/utils.py | whyisyoung/BODMAS | 91e63bbacaa53060488c94e54af3a2fb91cfa88a | [
"BSD-2-Clause"
] | 1 | 2022-01-19T23:52:14.000Z | 2022-01-21T20:35:32.000Z | code/bodmas/utils.py | whyisyoung/BODMAS | 91e63bbacaa53060488c94e54af3a2fb91cfa88a | [
"BSD-2-Clause"
] | 2 | 2021-11-20T10:44:10.000Z | 2021-12-31T02:38:08.000Z | # -*- coding: utf-8 -*-
"""
utils.py
~~~~~~~~
Helper functions for setting up the environment and parsing args, etc.
"""
import os
os.environ['PYTHONHASHSEED'] = '0'
from numpy.random import seed
import random
random.seed(1)
seed(1)
import sys
import logging
import argparse
import pickle
import json
import numpy as np
import pandas as pd
import smtplib
import traceback
import lightgbm as lgb
from pprint import pformat
from collections import Counter
from email.mime.text import MIMEText
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
def parse_multiple_dataset_args():
"""Parse the command line configuration for a particular run.
Raises:
ValueError: if the tree value for RandomForest is negative.
Returns:
argparse.Namespace -- a set of parsed arguments.
"""
p = argparse.ArgumentParser()
p.add_argument('--task', default='binary', choices=['binary', 'multiclass'],
help='Whether to perform binary classification or multi-class classification.')
p.add_argument('--training-set',
help='Which extra dataset to use as training. Blue Hexagon first 3 months is the default training set.')
p.add_argument('--diversity', choices=['no', 'size', 'family', 'timestamp', 'timestamp_part', 'legacy', 'packed', 'family_fixed_size'],
help='Which diversity metric to use in the training set: size, timestamp, family, packed. \
"no" means the original setting: use the 3 months of bluehex dataset as the training set.')
p.add_argument('--setting-name', help='name for this particular setting, for saving corresponding data, model, and results')
p.add_argument('-c', '--classifier', choices=['rf', 'gbdt', 'mlp'],
help='The classifier used for binary classification or multi-class classification')
p.add_argument('--testing-time',
help='The beginning time and ending time (separated by comma) for a particular testing set (bluehex data)')
p.add_argument('--quiet', default=1, type=int, choices=[0, 1], help='whether to print DEBUG logs or just INFO')
p.add_argument('--retrain', type=int, choices=[0, 1], default=0,
help='Whether to retrain the classifier, default NO.')
p.add_argument('--seed', type=int, default=42, help='random seed for training and validation split.')
# sub-arguments for the family (binary) and family_fixed_size (binary) diversity and multi-class classification
p.add_argument('--families', type=int, help='add top N families from the first three months of bluehex.')
# sub-arguments for the MLP classifier.
p.add_argument('--mlp-hidden',
help='The hidden layers of the MLP classifier, e.g.,: "2400-1200-1200", would make the architecture as 2381-2400-1200-1200-2')
p.add_argument('--mlp-batch-size', default=32, type=int,
help='MLP classifier batch_size.')
p.add_argument('--mlp-lr', default=0.001, type=float,
help='MLP classifier Adam learning rate.')
p.add_argument('--mlp-epochs', default=50, type=int,
help='MLP classifier epochs.')
p.add_argument('--mlp-dropout', default=0.2, type=float,
help='MLP classifier Droput rate.')
# sub-arguments for the RandomForest classifier.
p.add_argument('--tree',
type=int,
default=100,
help='The n_estimators of RandomForest classifier when --classifier = "rf"')
args = p.parse_args()
if args.tree < 0:
raise ValueError('invalid tree value')
return args
def get_model_dims(model_name, input_layer_num, hidden_layer_num, output_layer_num):
"""convert hidden layer arguments to the architecture of a model (list)
Arguments:
model_name {str} -- 'MLP' or 'Contrastive AE'.
input_layer_num {int} -- The number of the features.
hidden_layer_num {str} -- The '-' connected numbers indicating the number of neurons in hidden layers.
output_layer_num {int} -- The number of the classes.
Returns:
[list] -- List represented model architecture.
"""
try:
if '-' not in hidden_layer_num:
dims = [input_layer_num, int(hidden_layer_num), output_layer_num]
else:
hidden_layers = [int(dim) for dim in hidden_layer_num.split('-')]
dims = [input_layer_num]
for dim in hidden_layers:
dims.append(dim)
dims.append(output_layer_num)
logging.debug(f'{model_name} dims: {dims}')
except:
logging.error(f'get_model_dims {model_name}\n{traceback.format_exc()}')
sys.exit(-1)
return dims
def parse_drift_args():
"""Parse the command line configuration for a particular run.
Raises:
ValueError: if the tree value for RandomForest is negative.
Returns:
argparse.Namespace -- a set of parsed arguments.
"""
p = argparse.ArgumentParser()
p.add_argument('--task', default='binary', choices=['binary', 'multiclass'],
help='Whether to perform binary classification or multi-class classification.')
p.add_argument('--setting-name', help='name for this particular setting, for saving corresponding data, model, and results')
p.add_argument('-c', '--classifier', choices=['rf', 'gbdt', 'mlp'],
help='The classifier used for binary classification or multi-class classification')
p.add_argument('--testing-time',
help='The beginning time and ending time (separated by comma) for a particular testing set (bluehex data)')
p.add_argument('--month-interval', type=int, default=1, help='specify how many months for sampling.')
# sub-arguments for the family (binary) and family_fixed_size (binary) diversity and multi-class classification
p.add_argument('--families', type=int, help='add top N families from the first three months of bluehex.')
p.add_argument('--quiet', default=1, type=int, choices=[0, 1], help='whether to print DEBUG logs or just INFO')
p.add_argument('--retrain', type=int, choices=[0, 1], default=0,
help='Whether to retrain the classifier, default NO.')
p.add_argument('--sample-ratio', default=0.01, type=float, help='how many samples to add back to the training set for retraining to combat concept drift.')
p.add_argument('--ember-ratio', default=0.3, type=float, help='how many Ember samples to train Transcend / CADE.')
p.add_argument('--seed', default=1, type=int, help='random seed for the random experiment')
args = p.parse_args()
return args
| 39.165957 | 159 | 0.661669 |
4dc6f19452f1928a12cb21256eb1100495d990ef | 2,539 | py | Python | session11.py | sahanashetty31/session_11_epai3_assignment | 4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647 | [
"MIT"
] | null | null | null | session11.py | sahanashetty31/session_11_epai3_assignment | 4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647 | [
"MIT"
] | null | null | null | session11.py | sahanashetty31/session_11_epai3_assignment | 4b2d7f299fea2d3cb6f8cb1c90804f3cc4976647 | [
"MIT"
] | null | null | null | import math
from functools import lru_cache
class Polygons:
def __init__(self, m, R):
if m < 3:
raise ValueError('m must be greater than 3')
self._m = m
self._R = R
self._polygons = [Polygon(i, R) for i in range(3, m+1)] | 25.39 | 102 | 0.568334 |
4dc6fa3514e2ac738a922e6c666fe8ccb1623cf7 | 1,937 | py | Python | Software for Other Building Blocks and Integration/PoW.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | Software for Other Building Blocks and Integration/PoW.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | Software for Other Building Blocks and Integration/PoW.py | fkerem/Cryptocurrency-Blockchain | 965268a09a6f8b3e700e8bbc741e49a4d54805c6 | [
"MIT"
] | null | null | null | """
PoW.py
"""
import DSA
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
| 29.8 | 90 | 0.588539 |
4dc7d26cc18475fbd2b690b1e9dc6d7f0d1003fa | 637 | py | Python | core/cdag/node/sub.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | core/cdag/node/sub.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | core/cdag/node/sub.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# SubNode
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional
# NOC modules
from .base import BaseCDAGNode, ValueType, Category
| 25.48 | 75 | 0.44113 |
4dc88ebb3a9af63d834dc6d3c95d28f963145c6a | 287 | py | Python | chapter03/3.5_simulate_output_layer.py | Myeonghan-Jeong/deep-learning-from-scratch | 0df7f9f352920545f5309e8e11c7cf879ad477e5 | [
"MIT"
] | null | null | null | chapter03/3.5_simulate_output_layer.py | Myeonghan-Jeong/deep-learning-from-scratch | 0df7f9f352920545f5309e8e11c7cf879ad477e5 | [
"MIT"
] | 3 | 2021-06-08T21:22:11.000Z | 2021-09-08T01:55:11.000Z | chapter03/3.5_simulate_output_layer.py | myeonghan-nim/deep-learning-from-scratch | fef3e327c49593b5df74728a1cba1144948a2999 | [
"MIT"
] | null | null | null | import numpy as np
# softmax function
# modified softmax function
| 15.105263 | 28 | 0.634146 |
4dc8b44f56e787d0b3156d5c7fc12d0fb557c818 | 1,770 | py | Python | example.py | luisfciencias/intro-cv | 2908d21dd8058acf13b5479a2cb409a6e00859c1 | [
"MIT"
] | null | null | null | example.py | luisfciencias/intro-cv | 2908d21dd8058acf13b5479a2cb409a6e00859c1 | [
"MIT"
] | 5 | 2020-01-28T22:54:12.000Z | 2022-02-10T00:26:51.000Z | example.py | luisfciencias/intro-cv | 2908d21dd8058acf13b5479a2cb409a6e00859c1 | [
"MIT"
] | null | null | null | # example of mask inference with a pre-trained model (COCO)
import sys
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.visualize import display_instances
from tools import load_config
# load config params - labels
cfg_dict = load_config('config.yaml')
class_names = cfg_dict['class_names']
# config settings for model inference
# replicate the model for pure inference
rcnn_model = MaskRCNN(mode='inference', model_dir='models/', config=ConfigParams())
# model weights input
<<<<<<< HEAD
rcnn_model.load_weights('models/mask_rcnn_coco.h5', by_name=True)
=======
>>>>>>> 2ffc4581f4632ec494d19a7af0f5912e7482a631
path_weights_file = 'models/mask_rcnn_coco.h5'
rcnn_model.load_weights(path_weights_file, by_name=True)
# single image input
path_to_image = sys.argv[1]
img = load_img(path_to_image)
# transition to array
img = img_to_array(img)
print('Image shape:', img.shape)
# make inference
results = rcnn_model.detect([img], verbose=0)
# the output is a list of dictionaries, where each dict has a single object detection
# {'rois': array([[ 30, 54, 360, 586]], dtype=int32),
# 'class_ids': array([21], dtype=int32),
# 'scores': array([0.9999379], dtype=float32),
# 'masks': huge_boolean_array_here ...
result_params = results[0]
# show photo with bounding boxes, masks, class labels and scores
display_instances(img,
result_params['rois'],
result_params['masks'],
result_params['class_ids'],
class_names,
result_params['scores'])
| 32.181818 | 85 | 0.719774 |
4dc8ffc44718b6bc253375644e19671ce86d5269 | 8,260 | py | Python | rubi/datasets/vqa2.py | abhipsabasu/rubi.bootstrap.pytorch | 9fa9639c1ee4a040958d976eeb5dca2dd2203980 | [
"BSD-3-Clause"
] | 83 | 2021-03-02T07:49:14.000Z | 2022-03-30T03:07:26.000Z | rubi/datasets/vqa2.py | abhipsabasu/rubi.bootstrap.pytorch | 9fa9639c1ee4a040958d976eeb5dca2dd2203980 | [
"BSD-3-Clause"
] | 14 | 2019-07-14T14:10:28.000Z | 2022-01-27T18:53:34.000Z | cfvqa/cfvqa/datasets/vqa2.py | yuleiniu/introd | a40407c7efee9c34e3d4270d7947f5be2f926413 | [
"Apache-2.0"
] | 14 | 2019-09-20T01:49:13.000Z | 2022-03-29T16:42:34.000Z | import os
import csv
import copy
import json
import torch
import numpy as np
from os import path as osp
from bootstrap.lib.logger import Logger
from bootstrap.lib.options import Options
from block.datasets.vqa_utils import AbstractVQA
from copy import deepcopy
import random
import tqdm
import h5py
| 45.635359 | 122 | 0.607385 |
4dcb123bec4d3c0380f0862774b7117039deb91f | 281 | py | Python | W3Schools/dates.py | FRX-DEV/Python-Practice-Challenges | 8cddfb8f4181f987aa71cb75dee1f65d4d766954 | [
"MIT"
] | null | null | null | W3Schools/dates.py | FRX-DEV/Python-Practice-Challenges | 8cddfb8f4181f987aa71cb75dee1f65d4d766954 | [
"MIT"
] | null | null | null | W3Schools/dates.py | FRX-DEV/Python-Practice-Challenges | 8cddfb8f4181f987aa71cb75dee1f65d4d766954 | [
"MIT"
] | null | null | null | import datetime
x = datetime.datetime.now()
print(x)
# 2021-07-13 22:55:43.029046
print(x.year)
print(x.strftime("%A"))
"""
2021
Tuesday
"""
x = datetime.datetime(2020, 5, 17)
print(x)
# 2020-05-17 00:00:00
x = datetime.datetime(2018, 6, 1)
print(x.strftime("%B"))
# June | 12.217391 | 34 | 0.647687 |
4dcb5c28ab7f560dea6a9712a1a25dda90260ee7 | 2,564 | py | Python | venv/lib/python2.7/site-packages/ebcli/objects/tier.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2017-01-17T09:09:07.000Z | 2018-12-19T14:06:22.000Z | venv/lib/python2.7/site-packages/ebcli/objects/tier.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 1 | 2020-06-03T13:57:07.000Z | 2020-06-22T10:27:48.000Z | venv/lib/python2.7/site-packages/ebcli/objects/tier.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 4 | 2017-08-13T09:09:31.000Z | 2020-11-04T04:58:58.000Z | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from ..objects.exceptions import NotFoundError
import re
| 31.654321 | 73 | 0.559282 |