hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b7c06f515d7439b0a77375fbc6fb115fb4977fe1 | 1,042 | py | Python | swarmlib/util/functions.py | nkoutsov/swarmlib | fa70a5d9de50de5dacd5d499eba3b6bb72c39c05 | [
"BSD-3-Clause"
] | null | null | null | swarmlib/util/functions.py | nkoutsov/swarmlib | fa70a5d9de50de5dacd5d499eba3b6bb72c39c05 | [
"BSD-3-Clause"
] | null | null | null | swarmlib/util/functions.py | nkoutsov/swarmlib | fa70a5d9de50de5dacd5d499eba3b6bb72c39c05 | [
"BSD-3-Clause"
] | null | null | null | # ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
#pylint: disable=invalid-name
import inspect
from functools import wraps
import landscapes.single_objective
import numpy as np
# Wrapper for landscapes.single_objective functions for inputs > 1d
# Add all functions from landscapes.single_objective
FUNCTIONS = {
name: wrap_landscapes_func(func)
for (name, func) in inspect.getmembers(
landscapes.single_objective, inspect.isfunction
)
if name not in ['colville', 'wolfe'] # Don't include 3D and 4D functions
}
| 33.612903 | 104 | 0.600768 |
b7c327b6206469cd0cf73575f1196729fde0be3b | 1,695 | py | Python | nps/network_entity.py | Dry8r3aD/penta-nps | a4c74a2cd90eb2f95158e2040b7eca7056b062db | [
"MIT"
] | 6 | 2016-09-25T07:26:22.000Z | 2022-03-16T06:30:05.000Z | nps/network_entity.py | Dry8r3aD/penta-nps | a4c74a2cd90eb2f95158e2040b7eca7056b062db | [
"MIT"
] | 14 | 2016-10-04T00:02:20.000Z | 2017-02-22T03:06:21.000Z | nps/network_entity.py | Dry8r3aD/penta-nps | a4c74a2cd90eb2f95158e2040b7eca7056b062db | [
"MIT"
] | 5 | 2016-10-06T04:53:32.000Z | 2019-12-08T13:48:58.000Z | # -*- coding: UTF-8 -*-
from collections import deque
# def set_use_nat_port(self, use_or_not):
# self._use_nat_port = use_or_not
#
# def get_use_nat_port(self):
# return self._use_nat_port
#
# def set_dut_nat_port(self, port):
# self._nat_port = port
#
# def get_dut_nat_port(self):
# return self._nat_port
#
# def get_nat_magic_number(self):
# return self._nat_magic_number
#
| 25.298507 | 54 | 0.645428 |
b7c3aa3be6cad1fc615356fe4a0db24f49f796d6 | 898 | py | Python | source/_sample/scipy/interp_spline_interest.py | showa-yojyo/notebook | 82c15074c24d64a1dfcb70a526bc1deb2ecffe68 | [
"MIT"
] | 14 | 2016-04-13T08:10:02.000Z | 2021-04-19T09:42:51.000Z | source/_sample/scipy/interp_spline_interest.py | showa-yojyo/note | 5f262ecda3df132cb66206c465d16e174061d6b9 | [
"MIT"
] | 88 | 2017-09-27T15:07:05.000Z | 2019-10-02T04:05:03.000Z | source/_sample/scipy/interp_spline_interest.py | showa-yojyo/note | 5f262ecda3df132cb66206c465d16e174061d6b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""interp_spline_interest.py: Demonstrate spline interpolation.
"""
from scipy.interpolate import splrep, splev
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
# Interest rates of Jan, Feb, Mar, Jun, Dec.
x = np.array([1, 2, 3, 6, 12])
y = np.array([0.080, 0.100, 0.112, 0.144, 0.266])
# Interpolate the rates.
tck = splrep(x, y)
# Print the spline curve.
np.set_printoptions(formatter={'float': '{:.3f}'.format})
print("knot vector:\n", tck[0])
print("control points:\n", tck[1])
print("degree:\n", tck[2])
# Evaluate interest rates for each month.
for i in range(1, 13):
print(f"month[{i:02d}]: {float(splev(i, tck)):.3f}%")
# Plot the interest curve.
time = np.linspace(1, 12, 1000, endpoint=True)
rate = splev(time, tck)
plt.figure()
plt.plot(time, rate, color='deeppink')
plt.xlabel("Month")
plt.ylabel("Rate (%)")
plt.show()
| 24.944444 | 63 | 0.679287 |
b7c3bf02cb16b87bf7d4abf283104f4f08eda387 | 1,351 | py | Python | Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py | Kuga23/Deep-Learning | 86980338208c702b6bfcbcfffdb18498e389a56b | [
"MIT"
] | 3 | 2022-01-16T14:46:57.000Z | 2022-02-20T22:40:16.000Z | Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py | Kuga23/Deep-Learning | 86980338208c702b6bfcbcfffdb18498e389a56b | [
"MIT"
] | null | null | null | Pytorch/Scratch CNN and Pytorch/part1-convnet/tests/test_sgd.py | Kuga23/Deep-Learning | 86980338208c702b6bfcbcfffdb18498e389a56b | [
"MIT"
] | 6 | 2021-09-29T11:42:37.000Z | 2022-02-02T02:33:51.000Z | import unittest
import numpy as np
from optimizer import SGD
from modules import ConvNet
from .utils import *
| 28.744681 | 104 | 0.624722 |
b7c3c9491c620a60056834ce6902dd96ab059f3b | 3,373 | py | Python | Scripts/simulation/tunable_utils/create_object.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/tunable_utils/create_object.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/tunable_utils/create_object.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\tunable_utils\create_object.py
# Compiled at: 2020-05-07 00:26:47
# Size of source mod 2**32: 4106 bytes
from crafting.crafting_tunable import CraftingTuning
from objects.components.state import TunableStateValueReference, CommodityBasedObjectStateValue
from objects.system import create_object
from sims4.random import weighted_random_item
from sims4.tuning.tunable import TunableReference, TunableTuple, TunableList, TunableRange, AutoFactoryInit, HasTunableSingletonFactory, TunableFactory
import crafting, services, sims4
logger = sims4.log.Logger('CreateObject')
| 54.403226 | 240 | 0.714794 |
b7c4849c094e9c707d5b2331ea5e37f6828cbb6d | 1,583 | py | Python | 题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/47.全排列-ii.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=47 lang=python3
#
# [47] II
#
# https://leetcode-cn.com/problems/permutations-ii/description/
#
# algorithms
# Medium (59.58%)
# Likes: 371
# Dislikes: 0
# Total Accepted: 78.7K
# Total Submissions: 132.1K
# Testcase Example: '[1,1,2]'
#
#
#
# :
#
# : [1,1,2]
# :
# [
# [1,1,2],
# [1,2,1],
# [2,1,1]
# ]
#
#
# @lc code=start
# @lc code=end
# def permuteUnique(self, nums: List[int]) -> List[List[int]]:
# def helper(nums,res,path):
# if not nums and path not in res:
# res.append(path)
# for i in range(len(nums)):
# helper(nums[:i]+nums[i+1:],res,path+[nums[i]])
# res = []
# helper(nums,res,[])
# return res
| 21.986111 | 67 | 0.475679 |
b7c4b41079ffcb026b138a48570833eeaf51d196 | 149 | py | Python | testing/run-tests.py | 8enmann/blobfile | 34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b | [
"Unlicense"
] | 21 | 2020-02-26T08:00:20.000Z | 2022-02-28T00:06:50.000Z | testing/run-tests.py | 8enmann/blobfile | 34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b | [
"Unlicense"
] | 146 | 2020-02-28T18:15:53.000Z | 2022-03-24T06:37:57.000Z | testing/run-tests.py | 8enmann/blobfile | 34bf6fac2a0cd4ff5eb5c3e4964914758f264c0b | [
"Unlicense"
] | 15 | 2020-04-10T08:31:57.000Z | 2022-02-28T03:43:02.000Z | import subprocess as sp
import sys
sp.run(["pip", "install", "-e", "."], check=True)
sp.run(["pytest", "blobfile"] + sys.argv[1:], check=True)
| 24.833333 | 58 | 0.604027 |
b7c583ce42f7da52ba4b620e07b7b1dce4f64729 | 6,467 | py | Python | examples/Components/collision/PrimitiveCreation.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | examples/Components/collision/PrimitiveCreation.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | examples/Components/collision/PrimitiveCreation.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | import Sofa
import random
from cmath import *
############################################################################################
# this is a PythonScriptController example script
############################################################################################
############################################################################################
# following defs are used later in the script
############################################################################################
# utility methods
falling_speed = 0
capsule_height = 5
capsule_chain_height = 5
| 34.216931 | 256 | 0.66043 |
b7c6df93916a72fa3dc3b5903a942a8fbc2d13cd | 350 | py | Python | examples/tensorboard/nested.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | 694 | 2018-11-30T01:06:30.000Z | 2022-03-31T14:46:26.000Z | examples/tensorboard/nested.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | 323 | 2018-11-05T17:44:34.000Z | 2022-03-31T16:56:41.000Z | examples/tensorboard/nested.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | 68 | 2019-04-01T04:24:47.000Z | 2022-02-24T17:22:04.000Z | import tensorboardX
with tensorboardX.SummaryWriter("foo") as w:
w.add_scalar("a", 1.0, 1)
w.add_scalar("a", 2.0, 2)
with tensorboardX.SummaryWriter("foo/bar") as w:
w.add_scalar("a", 3.0, 3)
w.add_scalar("a", 4.0, 4)
with tensorboardX.SummaryWriter("foo/bar/baz") as w:
w.add_scalar("a", 5.0, 5)
w.add_scalar("a", 6.0, 6)
| 25 | 52 | 0.634286 |
b7c7e5d7b1958fefce1bb2170ee1a05f5b0e1bc0 | 444 | py | Python | cobalt/__init__.py | NicolasDenoyelle/cobalt | 08742676214e728ed83f3a90a118b9c020a347fd | [
"BSD-3-Clause"
] | null | null | null | cobalt/__init__.py | NicolasDenoyelle/cobalt | 08742676214e728ed83f3a90a118b9c020a347fd | [
"BSD-3-Clause"
] | null | null | null | cobalt/__init__.py | NicolasDenoyelle/cobalt | 08742676214e728ed83f3a90a118b9c020a347fd | [
"BSD-3-Clause"
] | null | null | null | ###############################################################################
# Copyright 2020 UChicago Argonne, LLC.
# (c.f. AUTHORS, LICENSE)
# For more info, see https://xgitlab.cels.anl.gov/argo/cobalt-python-wrapper
# SPDX-License-Identifier: BSD-3-Clause
##############################################################################
import subprocess
from cobalt.cobalt import Cobalt, UserPolicy
__all__ = [ 'Cobalt', 'UserPolicy' ]
| 37 | 79 | 0.481982 |
b7c83d7466393b727423c1185dc55c5006258a81 | 859 | py | Python | anand.py | kyclark/py-grepper | ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e | [
"MIT"
] | null | null | null | anand.py | kyclark/py-grepper | ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e | [
"MIT"
] | null | null | null | anand.py | kyclark/py-grepper | ca7a17b1ffc2d666d62da6c80eb4cbc0bd2e547e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
orderNumbers = open("orders.txt", "r") #Order numbers to match
#Network path to a directory of files that has full details of the order
directoryEntries = os.scandir("")
outputFile = open("matchedData.dat", "w")
for entry in directoryEntries:
print("Currently parsing file ", entry.path)
fullOrderData = open(entry.path, "r")
#loop through each order from the ordernumber file
for orderNo in OrderNumbers:
for row in fullOrderData:
if orderNo.strip() in row:
outputFile.write(row)
#go back to start of orderdetails data to match on next order number
fullOrderData.seek(0)
#go back to order numbers again to match on the next order details file
orderNumbers.seek(0)
fullOrderData.close()
OrderNumbers.close()
outputFile.close()
print("done")
| 31.814815 | 76 | 0.696158 |
b7c97b1397f5b96121b2b0909bc775d38cbcd523 | 2,968 | py | Python | tests/test_manager.py | Vizzuality/cog_worker | ae12d2fc42945fedfea4a22394247db9a73d867e | [
"MIT"
] | 24 | 2021-08-23T14:51:02.000Z | 2021-12-20T09:45:10.000Z | tests/test_manager.py | Vizzuality/cog_worker | ae12d2fc42945fedfea4a22394247db9a73d867e | [
"MIT"
] | null | null | null | tests/test_manager.py | Vizzuality/cog_worker | ae12d2fc42945fedfea4a22394247db9a73d867e | [
"MIT"
] | 1 | 2021-08-24T01:09:36.000Z | 2021-08-24T01:09:36.000Z | import pytest
import rasterio as rio
from rasterio.io import DatasetWriter
from cog_worker import Manager
from rasterio import MemoryFile, crs
TEST_COG = "tests/roads_cog.tif"
def test_preview(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.preview(sample_function, max_size=123)
assert max(arr.shape) == 123, "Expected maximum array dimension to be 123px"
def test_tile(molleweide_manager, sample_function):
arr, bbox = molleweide_manager.tile(sample_function, x=1, y=2, z=3)
assert arr.shape == (1, 256, 256), "Expected 256x256 tile"
def test_chunk_execute(molleweide_manager, sample_function):
chunks = list(molleweide_manager.chunk_execute(sample_function, chunksize=123))
for arr, bbox in chunks:
assert max(arr.shape) <= 123, "Max chunk size should be 123px"
| 31.913978 | 83 | 0.686995 |
b7c9f4fcfbbd13ff61698bd25e58c747a3f4a5c0 | 1,031 | py | Python | CLIP/experiments/tagger/main_binary.py | ASAPP-H/clip2 | e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b | [
"MIT"
] | null | null | null | CLIP/experiments/tagger/main_binary.py | ASAPP-H/clip2 | e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b | [
"MIT"
] | 3 | 2021-09-08T02:07:49.000Z | 2022-03-12T00:33:51.000Z | CLIP/experiments/tagger/main_binary.py | ASAPP-H/clip2 | e8ba2a3cf4be01ec26bde5107c5a2813bddf8a3b | [
"MIT"
] | null | null | null | from train import train_model
from utils import *
import os
import sys
pwd = os.environ.get('CLIP_DIR')
DATA_DIR = "%s/data/processed/" % pwd
exp_name = "non_multilabel"
run_name = "sentence_structurel_with_crf"
train_file_name = "MIMIC_train_binary.csv"
dev_file_name = "MIMIC_val_binary.csv"
test_file_name = "test_binary.csv"
exp_name = "outputs_binary"
train = read_sentence_structure(os.path.join(DATA_DIR, train_file_name))
dev = read_sentence_structure(os.path.join(DATA_DIR, dev_file_name))
test = read_sentence_structure(os.path.join(DATA_DIR, test_file_name))
run_name = "binary"
if __name__ == "__main__":
main(sys.argv[1:])
| 25.775 | 72 | 0.696411 |
b7caeb322abf8aa00666ef3387b5272abace4020 | 528 | py | Python | persons/urls.py | nhieckqo/lei | f461d8dcbc8f9e037c661abb18b226aa6fa7acae | [
"MIT"
] | null | null | null | persons/urls.py | nhieckqo/lei | f461d8dcbc8f9e037c661abb18b226aa6fa7acae | [
"MIT"
] | null | null | null | persons/urls.py | nhieckqo/lei | f461d8dcbc8f9e037c661abb18b226aa6fa7acae | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'persons'
urlpatterns = [
path('', views.PersonsTableView.as_view(),name='persons_list'),
path('persons_details/<int:pk>',views.PersonsUpdateView.as_view(),name='persons_details_edit'),
path('persons_details/create',views.PersonsCreateView.as_view(),name='persons_details_add'),
path('persons_details/<int:pk>/delete',views.PersonsDeleteView.as_view(),name="persons_details_delete"),
path('persons_details/sort',views.event_gate, name='sort'),
]
| 40.615385 | 108 | 0.753788 |
b7cb10c335526f698fe7f642c39ab4db21115697 | 246 | py | Python | logxs/__version__.py | minlaxz/logxs | e225e7a3c69b01595e1f2c11552b70e4b1540d47 | [
"MIT"
] | null | null | null | logxs/__version__.py | minlaxz/logxs | e225e7a3c69b01595e1f2c11552b70e4b1540d47 | [
"MIT"
] | null | null | null | logxs/__version__.py | minlaxz/logxs | e225e7a3c69b01595e1f2c11552b70e4b1540d47 | [
"MIT"
] | null | null | null | __title__ = 'logxs'
__description__ = 'Replacing with build-in `print` with nice formatting.'
__url__ = 'https://github.com/minlaxz/logxs'
__version__ = '0.3.2'
__author__ = 'Min Latt'
__author_email__ = 'minminlaxz@gmail.com'
__license__ = 'MIT' | 35.142857 | 73 | 0.747967 |
b7cb5d32a878f3d9855d96b75ff3e715c839115f | 977 | py | Python | src/PyMud/Systems/system.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | src/PyMud/Systems/system.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | src/PyMud/Systems/system.py | NichCritic/pymud | 583ec16f5a75dc7b45146564b39851291dc07b6c | [
"MIT"
] | null | null | null | import time
| 23.261905 | 71 | 0.551689 |
b7cb98a29e28bbca96a3da9a3ddecb43eea2b232 | 2,918 | py | Python | hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | hytra/plugins/transition_feature_vector_construction/transition_feature_subtraction.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | from hytra.pluginsystem import transition_feature_vector_construction_plugin
import numpy as np
from compiler.ast import flatten
| 35.585366 | 91 | 0.48732 |
b7cbae55dbd90dfb87f2e9c515ec5098f54466ea | 5,438 | py | Python | sprites/player.py | hectorpadin1/FICGames | 6d75c3ef74f0d6d2881021833fe06cd67e207ab1 | [
"MIT"
] | null | null | null | sprites/player.py | hectorpadin1/FICGames | 6d75c3ef74f0d6d2881021833fe06cd67e207ab1 | [
"MIT"
] | null | null | null | sprites/player.py | hectorpadin1/FICGames | 6d75c3ef74f0d6d2881021833fe06cd67e207ab1 | [
"MIT"
] | 1 | 2022-03-29T15:38:18.000Z | 2022-03-29T15:38:18.000Z | from matplotlib.style import available
import pygame as pg
from sprites.character import Character
from pygame.math import Vector2
from settings import *
from math import cos, pi
from control import Controler
from sprites.gun import MachineGun, Pistol, Rifle
from managers.resourcemanager import ResourceManager as GR
from utils.observable import Observable
| 36.743243 | 142 | 0.590291 |
b7cbe1a4f3d3609804f5ba47a2634ce6c4505d36 | 716 | py | Python | yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 16 | 2017-01-17T15:20:43.000Z | 2021-03-19T05:45:14.000Z | yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 415 | 2016-12-20T17:20:45.000Z | 2018-09-23T07:59:23.000Z | yocto/poky/bitbake/lib/bb/ui/crumbs/__init__.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 10 | 2016-12-20T13:24:50.000Z | 2021-03-19T05:46:43.000Z | #
# Gtk+ UI pieces for BitBake
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
| 39.777778 | 73 | 0.765363 |
b7cc1da3745ec1958d532f60dd1185d8b2057b84 | 10,198 | py | Python | mytardisbf/migrations/0001_initial_data.py | keithschulze/mytardisbf | cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6 | [
"MIT"
] | 2 | 2020-07-09T01:21:00.000Z | 2022-02-06T17:33:57.000Z | mytardisbf/migrations/0001_initial_data.py | keithschulze/mytardisbf | cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6 | [
"MIT"
] | 14 | 2015-07-21T05:12:58.000Z | 2017-11-16T10:46:30.000Z | mytardisbf/migrations/0001_initial_data.py | keithschulze/mytardisbf | cc15fc9af89cf96c4d860c41fe5b0f366d4ee0d6 | [
"MIT"
] | 4 | 2015-08-04T10:57:29.000Z | 2017-11-28T10:50:33.000Z | # -*- coding: utf-8 -*-
from django.db import migrations
from tardis.tardis_portal.models import (
Schema,
ParameterName,
DatafileParameter,
DatafileParameterSet
)
from mytardisbf.apps import (
OMESCHEMA,
BFSCHEMA
)
def forward_func(apps, schema_editor):
"""Create mytardisbf schemas and parameternames"""
db_alias = schema_editor.connection.alias
ome_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="OME Metadata",
namespace="http://tardis.edu.au/schemas/bioformats/1",
subtype=None,
hidden=True,
type=3,
immutable=True,
defaults={
'namespace': OMESCHEMA
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="ome",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="OME Metadata",
units="xml",
order=1,
immutable=True,
schema=ome_schema,
defaults={
"full_name": "OMEXML Metadata"
}
)
series_schema, _ = Schema.objects\
.using(db_alias)\
.update_or_create(
name="Series Metadata",
namespace=BFSCHEMA,
subtype="",
hidden=False,
type=3,
immutable=True
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="id",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="ID",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"is_searchable": False
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="name",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Name",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="type",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Pixel Type",
units="",
order=9999,
immutable=True,
schema=series_schema,
defaults={
"name": "pixel_type"
}
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="dimensionorder",
data_type=2,
is_searchable=True,
choices="",
comparison_type=8,
full_name="Dimension Order",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeX",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeY",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizeZ",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeZ",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizec",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeC",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="sizet",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="SizeT",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizex",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size X",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizey",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Y",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="physicalsizez",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Voxel Size Z",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="timeincrement",
data_type=1,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Time Increment",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="excitationwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Excitation Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="samplesperpixel",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Samples per Pixel",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="emissionwavelength",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Emission Wavelength",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="pinholesize",
data_type=2,
is_searchable=True,
choices="",
comparison_type=1,
full_name="Pinhole Size",
units="",
order=9999,
immutable=True,
schema=series_schema
)
ParameterName.objects\
.using(db_alias)\
.update_or_create(
name="previewImage",
data_type=5,
is_searchable=False,
choices="",
comparison_type=1,
full_name="Preview",
units="image",
order=1,
immutable=True,
schema=series_schema,
defaults={
"name": "preview_image"
}
)
| 24.995098 | 80 | 0.499314 |
b7cc56e3520e5aa20afd04452b3d297df2206e1a | 1,473 | py | Python | ipmanagement/models.py | smilelhong/ip_manage | 7581c596a84e943dc5dea4122eca3de14263992b | [
"Apache-2.0"
] | null | null | null | ipmanagement/models.py | smilelhong/ip_manage | 7581c596a84e943dc5dea4122eca3de14263992b | [
"Apache-2.0"
] | null | null | null | ipmanagement/models.py | smilelhong/ip_manage | 7581c596a84e943dc5dea4122eca3de14263992b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
# Create your models here. | 64.043478 | 104 | 0.745418 |
b7d02035de2ed671a7db2b55074f9e4dd487d817 | 9,616 | py | Python | tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py | kkasperczyk-no/sdk-openthread | 385e19da1ae15f27872c2543b97276a42f102ead | [
"BSD-3-Clause"
] | null | null | null | tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py | kkasperczyk-no/sdk-openthread | 385e19da1ae15f27872c2543b97276a42f102ead | [
"BSD-3-Clause"
] | null | null | null | tests/scripts/thread-cert/border_router/MATN_05_ReregistrationToSameMulticastGroup.py | kkasperczyk-no/sdk-openthread | 385e19da1ae15f27872c2543b97276a42f102ead | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import pktverify
from pktverify import packet_verifier, packet_filter, consts
from pktverify.consts import MA1, PBBR_ALOC
import config
import thread_cert
# Test description:
# The purpose of this test case is to verify that a Primary BBR (DUT) can manage
# a re-registration of a device on its network to remain receiving multicasts.
# The test also verifies the usage of UDP multicast packets across backbone and
# internal Thread network.
#
# Topology:
# ----------------(eth)------------------
# | | |
# BR_1 (Leader) ---- BR_2 HOST
# | |
# | |
# Router_1 -----------+
#
BR_1 = 1
BR_2 = 2
ROUTER_1 = 3
HOST = 4
REG_DELAY = 10
UDP_HEADER_LENGTH = 8
if __name__ == '__main__':
unittest.main()
| 39.089431 | 119 | 0.629992 |
b7d0fb3e2eab434c02f0ab81e51febbe5297c8ae | 3,457 | py | Python | senseye_cameras/input/camera_pylon.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | 5 | 2020-03-20T17:07:35.000Z | 2022-01-25T23:48:52.000Z | senseye_cameras/input/camera_pylon.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | 5 | 2020-03-05T20:55:06.000Z | 2022-03-24T22:41:56.000Z | senseye_cameras/input/camera_pylon.py | senseye-inc/senseye-cameras | 9d9cdb95e64aaa8d08aa56bd9a79641263e65940 | [
"BSD-3-Clause"
] | null | null | null | import time
import logging
try:
from pypylon import pylon
except:
pylon = None
from . input import Input
log = logging.getLogger(__name__)
# writes the framenumber to the 8-11 bytes of the image as a big-endian set of octets
# converts time from a float in seconds to an int64 in microseconds
# writes the time to the first 7 bytes of the image as a big-endian set of octets
| 33.563107 | 102 | 0.60486 |
b7d28e8d5b3bd12fe72a9a971fff5626e0a64791 | 3,100 | py | Python | vise/tests/util/phonopy/test_phonopy_input.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 16 | 2020-07-14T13:14:05.000Z | 2022-03-04T13:39:30.000Z | vise/tests/util/phonopy/test_phonopy_input.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 10 | 2021-03-15T20:47:45.000Z | 2021-08-19T00:47:12.000Z | vise/tests/util/phonopy/test_phonopy_input.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 6 | 2020-03-03T00:42:39.000Z | 2022-02-22T02:34:47.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021. Distributed under the terms of the MIT License.
from phonopy.interface.calculator import read_crystal_structure
from phonopy.structure.atoms import PhonopyAtoms
from vise.util.phonopy.phonopy_input import structure_to_phonopy_atoms
import numpy as np
#
# def test_make_phonopy_input(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure,
# supercell_matrix=np.eye(3).tolist(),
# conventional_base=True)
# supercell_matrix = [[ 1., 1., 0.],
# [-1., 1., 0.],
# [ 0., 0., 1.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default(mc_structure, mc_structure_conv):
# actual = make_phonopy_input(unitcell=mc_structure)
# supercell_matrix = [[ 2., 2., 0.],
# [-2., 2., 0.],
# [ 0., 0., 2.]]
# supercell = mc_structure * supercell_matrix
# expected = PhonopyInput(unitcell=mc_structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
#
#
# def test_make_phonopy_input_default_hexa():
# structure = Structure(Lattice.hexagonal(1.0, 2.0), species=["H"],
# coords=[[0.0]*3])
# actual = make_phonopy_input(unitcell=structure)
# supercell_matrix = [[2, -1, 0], [2, 1, 0], [0, 0, 2]]
# supercell = structure * supercell_matrix
# expected = PhonopyInput(unitcell=structure,
# supercell=supercell,
# supercell_matrix=supercell_matrix)
# assert actual == expected
| 41.333333 | 73 | 0.59 |
b7d2c3d5b85f7571232ad665184ca7a2e111ef5a | 1,419 | py | Python | 2020/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | 2020/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | 2020/day15.py | andypymont/adventofcode | 912aa48fc5b31ec9202fb9654380991fc62afcd1 | [
"MIT"
] | null | null | null | """
2020 Day 15
https://adventofcode.com/2020/day/15
"""
from collections import deque
from typing import Dict, Iterable, Optional
import aocd # type: ignore
def main() -> None:
    """
    Fetch the real puzzle input and print the answers to both parts.
    """
    puzzle_input = aocd.get_data(year=2020, day=15)
    starting_numbers = (int(number) for number in puzzle_input.split(","))
    game = ElfMemoryGame(starting_numbers)
    # Part 1 stops at turn 2020; part 2 continues the same game to 30M turns.
    for part, turns in enumerate((2020, 30_000_000), start=1):
        game.extend(turns)
        print(f"Part {part}: {game.latest}")
if __name__ == "__main__":
main()
| 25.8 | 82 | 0.621564 |
b7d37af2b6bf8f16d281543414e0b3b8888f7e5c | 1,121 | py | Python | src/spring-cloud/azext_spring_cloud/_validators_enterprise.py | SanyaKochhar/azure-cli-extensions | ff845c73e3110d9f4025c122c1938dd24a43cca0 | [
"MIT"
] | 2 | 2021-03-23T02:34:41.000Z | 2021-06-03T05:53:34.000Z | src/spring-cloud/azext_spring_cloud/_validators_enterprise.py | SanyaKochhar/azure-cli-extensions | ff845c73e3110d9f4025c122c1938dd24a43cca0 | [
"MIT"
] | 21 | 2021-03-16T23:04:40.000Z | 2022-03-24T01:45:54.000Z | src/spring-cloud/azext_spring_cloud/_validators_enterprise.py | SanyaKochhar/azure-cli-extensions | ff845c73e3110d9f4025c122c1938dd24a43cca0 | [
"MIT"
] | 9 | 2021-03-11T02:59:39.000Z | 2022-02-24T21:46:34.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, unused-argument, redefined-builtin
from azure.cli.core.azclierror import ClientRequestError
from ._util_enterprise import is_enterprise_tier
| 56.05 | 131 | 0.667261 |
b7d4dda1b3752a19244c734487e74c4425e170d8 | 8,796 | py | Python | fluentql/function.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | 4 | 2020-04-15T10:50:03.000Z | 2021-07-22T12:23:50.000Z | fluentql/function.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | 2 | 2020-05-24T08:54:56.000Z | 2020-05-24T09:04:31.000Z | fluentql/function.py | RaduG/fluentql | 653a77bb95b40724eb58744f5f8dbed9c88eaebd | [
"MIT"
] | null | null | null | from typing import Any, TypeVar, Union
from types import MethodType, FunctionType
from .base_types import BooleanType, Constant, StringType, Collection, Referenceable
from .type_checking import TypeChecker
AnyArgs = TypeVar("AnyArgs")
NoArgs = TypeVar("NoArgs")
VarArgs = TypeVar("VarArgs")
T = TypeVar("T")
class ArithmeticF(WithOperatorSupport, F):
| 23.393617 | 96 | 0.624034 |
b7d5141df884819f6f2e7164679f65c6fbc05ccf | 5,741 | py | Python | trainer.py | tkuboi/my-Punctuator | 17c2c43f3397387b7c21a8ef25584c4fdab73f1b | [
"MIT"
] | 3 | 2018-11-29T02:12:12.000Z | 2020-01-15T10:52:38.000Z | trainer.py | tkuboi/my-Punctuator | 17c2c43f3397387b7c21a8ef25584c4fdab73f1b | [
"MIT"
] | 3 | 2020-01-15T10:52:25.000Z | 2020-05-03T17:24:56.000Z | trainer.py | tkuboi/my-Punctuator | 17c2c43f3397387b7c21a8ef25584c4fdab73f1b | [
"MIT"
] | 5 | 2018-11-19T13:37:31.000Z | 2021-06-25T07:03:38.000Z | """This script is for training and evaluating a model."""
import sys
import os
import traceback
import numpy as np
from functools import partial
from utils import *
from punctuator import Punctuator
from bidirectional_gru_with_gru import BidirectionalGruWithGru
from keras.callbacks import ModelCheckpoint
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Embedding, RepeatVector, Lambda, Dot, Multiply, Concatenate, Permute
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape, Flatten, ThresholdedReLU
from keras.optimizers import Adam
EMBEDDING_FILE = 'data/glove.6B.50d.txt'
MODEL_FILE = 'data/model.json'
WEIGHTS_FILE = 'data/model.h5'
TEXT_FILE = 'data/utterances.txt'
BATCH = 128
EPOCH = 1000
DEV_SIZE = 100
def load_text_data(textfile):
    """Read a whitespace-delimited text file into a flat list of words.

    Args:
        textfile: string representing a path name to a file

    Returns:
        list of word tokens, in file order
    """
    with open(textfile, 'r') as source:
        # str.split() with no arguments splits on any run of whitespace,
        # so blank lines contribute nothing.
        return [token for line in source for token in line.split()]
def main():
"""Train a model using lines of text contained in a file
and evaluates the model.
"""
#read golve vecs
#words, word_to_index, index_to_word, word_to_vec_map = read_glove_vecs(EMBEDDING_FILE)
#create word embedding matrix
#embedding_matrix = create_emb_matrix(word_to_index, word_to_vec_map)
embedding_matrix = None
#print('shape of embedding_matrix:', embedding_matrix.shape)
#load trainig text from a file
utterances = load_text_data(TEXT_FILE)
punctuator = Punctuator(None, None)
X, Y = punctuator.create_training_data(utterances[:3], False)
print(X.shape)
print(X.shape[1])
print(Y.shape)
#if a model already exists, load the model
if os.path.isfile(MODEL_FILE) and False:
punctuator.load_model(MODEL_FILE)
else:
model = BidirectionalGruWithGru.create_model(
input_shape=(X.shape[1], X.shape[2], ), embedding_matrix=None,
vocab_len=0, n_d1=128, n_d2=128, n_c=len(punctuator.labels))
print(model.summary())
punctuator.__model__ = model
#if the model has been already trained, use the pre-trained weights
if os.path.isfile(WEIGHTS_FILE):
punctuator.load_weights(WEIGHTS_FILE)
for i in range(100):
shuffle(utterances)
print(utterances[0])
#create an instance of Punctutor and create training data
X, Y = punctuator.create_training_data(utterances[:300000], False)
#shuffle the training data
shuffle(X,Y)
denom_Y = Y.swapaxes(0,1).sum((0,1))
print ('Summary of Y:', denom_Y)
print('shape of X:', X.shape)
print(X[0:10])
print('shape of Y:', Y.shape)
print(Y[0:10])
#define optimizer and compile the model
opt = Adam(lr=0.007, beta_1=0.9, beta_2=0.999, decay=0.01)
punctuator.compile(opt, loss='categorical_crossentropy', metrics=['accuracy'])
#split the training data into training set, test set, and dev set
t_size = int(X.shape[0] * 0.9)
train_X, train_Y = X[:t_size], Y[:t_size]
test_X, test_Y = X[t_size:-DEV_SIZE], Y[t_size:-DEV_SIZE]
dev_X, dev_Y = X[-DEV_SIZE:], Y[-DEV_SIZE:]
print (train_Y.swapaxes(0,1).sum((0,1)))
print (test_Y.swapaxes(0,1).sum((0,1)))
#train the model
punctuator.fit([train_X], train_Y, batch_size = BATCH,
epochs=EPOCH)
punctuator.save_model(MODEL_FILE)
punctuator.save_weights(WEIGHTS_FILE)
#evaluate the model on the dev set (or the test set)
for i,example in enumerate(dev_X):
prediction = punctuator.predict(example)
punctuator.check_result(prediction, dev_Y[i])
#manually evaluate the model on an example
examples = ["good morning chairman who I saw and members of the committee it's my pleasure to be here today I'm Elizabeth Ackles director of the office of rate payer advocates and I appreciate the chance to present on oris key activities from 2017 I have a short presentation and I'm going to move through it really quickly because you've had a long morning already and be happy to answer any questions that you have", "this was a measure that first was introduced back in 1979 known as the International bill of rights for women it is the first and only international instrument that comprehensively addresses women's rights within political cultural economic social and family life", "I'm Elizabeth Neumann from the San Francisco Department on the status of women Sita is not just about naming equal rights for women and girls it provides a framework to identify and address inequality", "we have monitored the demographics of commissioners and board members in San Francisco to assess the equality of political opportunities and after a decade of reports women are now half of appointees but white men are still over-represented and Asian and Latina men and women are underrepresented", "when the city and county faced a 300 million dollar budget deficit in 2003 a gender analysis of budget cuts by city departments identified the disproportionate effect on women and particularly women of color in the proposed layoffs and reduction of services"]
for example in examples:
words = example.split()
x = punctuator.create_live_data(words)
print x
for s in x:
print s
prediction = punctuator.predict(s)
result = punctuator.add_punctuation(prediction, words)
print(result)
if __name__ == "__main__":
main()
| 43.492424 | 1,454 | 0.708413 |
b7d54fe8e9a77f05bf236b9a737834d1a8f3821a | 5,719 | py | Python | gqn_v2/gqn_predictor.py | goodmattg/tf-gqn | a2088761f11a9806500dcaf28edc28ecd7fc514e | [
"Apache-2.0"
] | null | null | null | gqn_v2/gqn_predictor.py | goodmattg/tf-gqn | a2088761f11a9806500dcaf28edc28ecd7fc514e | [
"Apache-2.0"
] | null | null | null | gqn_v2/gqn_predictor.py | goodmattg/tf-gqn | a2088761f11a9806500dcaf28edc28ecd7fc514e | [
"Apache-2.0"
] | null | null | null | """
Contains a canned predictor for a GQN.
"""
import os
import json
import numpy as np
import tensorflow as tf
from .gqn_graph import gqn_draw
from .gqn_params import create_gqn_config
def _normalize_pose(pose):
"""
Converts a camera pose into the GQN format.
Args:
pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler degree
Returns:
[x, y, z, cos(yaw), sin(yaw), cos(pitch), sin(pitch)]
"""
norm_pose = np.zeros((7, ))
norm_pose[0:3] = pose[0:3]
norm_pose[3] = np.cos(np.deg2rad(pose[3]))
norm_pose[4] = np.sin(np.deg2rad(pose[3]))
norm_pose[5] = np.cos(np.deg2rad(pose[4]))
norm_pose[6] = np.sin(np.deg2rad(pose[4]))
# print("Normalized pose: %s -> %s" % (pose, norm_pose)) # DEBUG
return norm_pose
def clear_context(self):
    """Clears the current context.

    Empties both context buffers in place; all previously registered
    context frames and camera poses are discarded.
    """
    # .clear() mutates the existing containers rather than rebinding the
    # attributes, so any external references to the buffers stay valid.
    self._context_frames.clear()
    self._context_poses.clear()
def render_query_view(self, pose: np.ndarray):
    """
    Renders the scene from the given camera pose.

    Args:
      pose: [x, y, z, yaw, pitch]; x, y, z in [-1, 1]; yaw, pitch in euler
        degree; alternatively an already-normalized pose of length
        self._dim_pose.

    Returns:
      The predicted frame, with values clipped to [0.0, 1.0].
    """
    # Fix: the failure message previously used np.min(len(a), len(b));
    # np.min interprets its second positional argument as `axis`, so
    # composing the message itself raised instead of reporting the count.
    # The builtin min() is the intended two-argument minimum.
    assert len(self._context_frames) >= self._ctx_size \
        and len(self._context_poses) >= self._ctx_size, \
        "Not enough context points available. Required %d. Given: %d" % \
        (self._ctx_size, min(len(self._context_frames), len(self._context_poses)))
    assert pose.shape == (self._dim_pose, ) or pose.shape == (5, ), \
        "The pose's shape %s does not match the specification (either %s or %s)." % \
        (pose.shape, self._dim_pose, (5, ))
    if pose.shape == (5, ):  # assume un-normalized pose
        pose = _normalize_pose(pose)
    # Take the most recent _ctx_size context entries and add a leading
    # batch axis of size 1 to match the placeholders.
    ctx_frames = np.expand_dims(
        np.stack(self._context_frames[-self._ctx_size:]), axis=0)
    ctx_poses = np.expand_dims(
        np.stack(self._context_poses[-self._ctx_size:]), axis=0)
    query_pose = np.expand_dims(pose, axis=0)
    feed_dict = {
        self._ph_query_pose: query_pose,
        self._ph_ctx_frames: ctx_frames,
        self._ph_ctx_poses: ctx_poses
    }
    # Single fetched tensor is unpacked from the one-element result list.
    [pred_frame] = self._sess.run([self._net], feed_dict=feed_dict)
    # Clamp to the valid image intensity range before returning.
    pred_frame = np.clip(pred_frame, a_min=0.0, a_max=1.0)
    return pred_frame
| 36.660256 | 110 | 0.652736 |
b7d6284562e6fc98442dc3568881e4543f4597b6 | 6,054 | py | Python | mamba/post_solve_handling.py | xhochy/mamba | 249546a95abf358f116cc1b546bfb51e427001fd | [
"BSD-3-Clause"
] | null | null | null | mamba/post_solve_handling.py | xhochy/mamba | 249546a95abf358f116cc1b546bfb51e427001fd | [
"BSD-3-Clause"
] | null | null | null | mamba/post_solve_handling.py | xhochy/mamba | 249546a95abf358f116cc1b546bfb51e427001fd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
from conda.base.constants import DepsModifier, UpdateModifier
from conda._vendor.boltons.setutils import IndexedSet
from conda.core.prefix_data import PrefixData
from conda.models.prefix_graph import PrefixGraph
from conda._vendor.toolz import concatv
from conda.models.match_spec import MatchSpec
| 47.669291 | 96 | 0.652296 |
b7d668041de4ae36e76a177a55158ac9e8eab418 | 264 | py | Python | Young Physicist.py | techonair/Codeforces | 1f854424e2de69ea4fdf7c6cde8ab04eddfb4566 | [
"MIT"
] | null | null | null | Young Physicist.py | techonair/Codeforces | 1f854424e2de69ea4fdf7c6cde8ab04eddfb4566 | [
"MIT"
] | null | null | null | Young Physicist.py | techonair/Codeforces | 1f854424e2de69ea4fdf7c6cde8ab04eddfb4566 | [
"MIT"
# Count the lucky digits (4s and 7s) in the input number, then report
# whether that count is itself a lucky number, i.e. consists only of the
# digits 4 and 7. A count of 0 is not lucky ("0" contains a non-lucky digit).
num = input()
lucky = sum(1 for digit in num if digit in '47')
is_lucky_count = all(digit in '47' for digit in str(lucky))
print("YES" if is_lucky_count else "NO")
| 11 | 30 | 0.431818 |
b7d7bf07253855c146dc1edf490b5b90c54ec05e | 477 | py | Python | snakebids/utils/__init__.py | tkkuehn/snakebids | 641026ea91c84c4403f0a654d2aaf2bfa50eaa19 | [
"MIT"
] | null | null | null | snakebids/utils/__init__.py | tkkuehn/snakebids | 641026ea91c84c4403f0a654d2aaf2bfa50eaa19 | [
"MIT"
] | null | null | null | snakebids/utils/__init__.py | tkkuehn/snakebids | 641026ea91c84c4403f0a654d2aaf2bfa50eaa19 | [
"MIT"
] | null | null | null | from snakebids.utils.output import (
Mode,
get_time_hash,
prepare_output,
retrofit_output,
write_config_file,
write_output_mode,
)
from snakebids.utils.snakemake_io import (
glob_wildcards,
regex,
update_wildcard_constraints,
)
__all__ = [
"Mode",
"get_time_hash",
"glob_wildcards",
"prepare_output",
"regex",
"retrofit_output",
"update_wildcard_constraints",
"write_config_file",
"write_output_mode",
]
| 18.346154 | 42 | 0.681342 |
b7d83061ac773421e6029dc4c038d3f9bc4b0679 | 659 | py | Python | examples/custom_renderer/custom_renderer.py | victorbenichoux/vizno | 87ed98f66914a27e4b71d835734ca2a17a09412f | [
"MIT"
] | 5 | 2020-12-02T08:46:06.000Z | 2022-01-15T12:58:27.000Z | examples/custom_renderer/custom_renderer.py | victorbenichoux/vizno | 87ed98f66914a27e4b71d835734ca2a17a09412f | [
"MIT"
] | null | null | null | examples/custom_renderer/custom_renderer.py | victorbenichoux/vizno | 87ed98f66914a27e4b71d835734ca2a17a09412f | [
"MIT"
] | null | null | null | import pydantic
from vizno.renderers import ContentConfiguration, render
from vizno.report import Report
r = Report()
r.widget(CustomObject(parameter=10))
r.render("./output")
r.widget(
CustomObject(parameter=1000),
name="It works with a name",
description="and a description",
)
r.render("./output")
| 19.969697 | 56 | 0.728376 |
b7d854946bf40e07210624df5e0576dbd5f15fb1 | 945 | py | Python | coregent/net/core.py | landoffire/coregent | 908aaacbb7b2b9d8ea044d47b9518e8914dad08b | [
"Apache-2.0"
] | 1 | 2021-04-25T07:26:07.000Z | 2021-04-25T07:26:07.000Z | coregent/net/core.py | neurite-interactive/coregent | 908aaacbb7b2b9d8ea044d47b9518e8914dad08b | [
"Apache-2.0"
] | null | null | null | coregent/net/core.py | neurite-interactive/coregent | 908aaacbb7b2b9d8ea044d47b9518e8914dad08b | [
"Apache-2.0"
] | 2 | 2021-06-12T23:00:12.000Z | 2021-06-12T23:01:57.000Z | import abc
import collections.abc
import socket
__all__ = ['get_socket_type', 'get_server_socket', 'get_client_socket',
'SocketReader', 'SocketWriter', 'JSONReader', 'JSONWriter']
| 21 | 71 | 0.671958 |
b7d90dcc48241b77ca82bd93f406aefe69d173b9 | 360 | py | Python | hackdayproject/urls.py | alstn2468/Naver_Campus_Hackday_Project | e8c3b638638182ccb8b4631c03cf5cb153c7278a | [
"MIT"
] | 1 | 2019-11-15T05:03:54.000Z | 2019-11-15T05:03:54.000Z | hackdayproject/urls.py | alstn2468/Naver_Campus_Hackday_Project | e8c3b638638182ccb8b4631c03cf5cb153c7278a | [
"MIT"
] | null | null | null | hackdayproject/urls.py | alstn2468/Naver_Campus_Hackday_Project | e8c3b638638182ccb8b4631c03cf5cb153c7278a | [
"MIT"
] | null | null | null | from django.urls import path, include
from django.contrib import admin
import hackdayproject.main.urls as main_urls
import hackdayproject.repo.urls as repo_urls
# Project URL routing table: Django admin, social-auth OAuth endpoints,
# the main app at the site root, and the repo app under /repo/.
urlpatterns = [
    path('admin/', admin.site.urls),
    # social_django provides the OAuth login/complete/disconnect views
    path('oauth/', include('social_django.urls', namespace='social')),
    path('', include(main_urls)),
    path('repo/', include(repo_urls))
]
| 30 | 70 | 0.727778 |
b7d98d9548c561ff4d20a9c30014735028dc693b | 19,134 | py | Python | tests/test_ciftify_recon_all.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | tests/test_ciftify_recon_all.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | tests/test_ciftify_recon_all.py | lgrennan/ciftify | 8488423bd081370614b676a2e1d1a8dbfd9aba1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
import logging
import importlib
import copy
import os
from mock import patch
from nose.tools import raises
logging.disable(logging.CRITICAL)
ciftify_recon_all = importlib.import_module('ciftify.bin.ciftify_recon_all')
| 40.710638 | 89 | 0.669907 |
b7da270be2ee04de235dd0dfc5b966c52ba7cf65 | 35,831 | py | Python | Wrangle OSM Dataset.py | Boykai/Project-3-Wrangle-OpenStreetMap-Dataset | 493a4346ae12fb0fe853d4d07e4e8b03ef6a430f | [
"MIT"
] | 1 | 2017-09-01T11:07:26.000Z | 2017-09-01T11:07:26.000Z | Wrangle OSM Dataset.py | Boykai/Project-3-Wrangle-OpenStreetMap-Dataset | 493a4346ae12fb0fe853d4d07e4e8b03ef6a430f | [
"MIT"
] | null | null | null | Wrangle OSM Dataset.py | Boykai/Project-3-Wrangle-OpenStreetMap-Dataset | 493a4346ae12fb0fe853d4d07e4e8b03ef6a430f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on Tue Jan 17 16:19:36 2017
@author: Boykai
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET # Use cElementTree or lxml if too slow
from collections import defaultdict
import re
import pprint
import string
import codecs
import json
import os
from pymongo import MongoClient
def mongoAggregate(cursor):
    '''
    Takes in pymongo aggregate cursor object, iterates through each element
    within the aggregation, then returns the list of elements

    cursor: pymongo aggregate cursor object, which is iterated (a cursor object)

    @return: List of aggregation elements (a list)
    '''
    # Fix: the original built the list with a throwaway list comprehension
    # used only for its .append() side effect. list() consumes the cursor
    # directly and returns the same elements in the same order.
    return list(cursor)
if __name__ == '__main__':
# Get OSM File, which is Brooklyn OpenStreetMap
# https://mapzen.com/data/metro-extracts/metro/brooklyn_new-york/
xml_original_file = 'brooklyn_new-york.osm' # Original OSM File input name
xml_sample_file = 'sample.osm' # Sample OSM File output name
xml_cleaned_file = 'output.osm'
sample_size = 1
# Initialize and create OSM original file and sample file
if sample_size == 1:
xml_sample_file = xml_original_file
osm = OSMFile(xml_original_file, xml_sample_file, sample_size)
if sample_size != 1:
osm.createSampleFile()
# Initialize and clean street type tag attributes
print('\nInitialzing and getting street type tag attributes...')
cleanSt = CleanStreets(xml_sample_file)
# Audit street tag attributes and store vales in unexpected_street dict
# returns street type keys with street name values dict
print('\nPerforming audit on street types...')
audit_results = cleanSt.audit(xml_sample_file)
unexpected_streets = audit_results[0]
unexpected_zips = audit_results[1]
print('There are ' + str(len(unexpected_streets.values())) + ' unique unexpected streets.')
print('Dictionary of unexpected street name types with street names: ')
pprint.pprint(unexpected_streets)
print('\nThere are ' + str(len(unexpected_zips.values())) + ' unique unexpected zip codes.')
print('Dictionary of unexpected zip code types with street names: ')
pprint.pprint(unexpected_zips)
# Clean street values and store cleaned streets in clean_street_dict
print('\nCleaning street type values...')
clean_streets_dict = cleanSt.clean(unexpected_streets)
print('There are ' + str(len(cleanSt.getCleanStreetsDict().values())) + ' street names to be replaced.')
print('Dictionary of dirty street keys and clean street values: ')
pprint.pprint(clean_streets_dict)
# Find and write clean street names to XML file, save updated XML file
print('\nCreating new output.osm file with cleaned street types...')
cleanSt.writeClean(clean_streets_dict)
clean_audit_results = cleanSt.audit(xml_sample_file)
clean_unexpected_streets = clean_audit_results[0]
print('There are ' + str(len(clean_unexpected_streets.values())) + ' unique unexpected streets.')
print('New audit after street names have been replaced with clean street names: ')
pprint.pprint(clean_unexpected_streets)
if sample_size != 1:
print('\nDeleting XML sample file...')
#os.remove(xml_sample_file)
# Initialize and create JSON file from cleaned XML output.osm file
print('\nCreating new JSON file from cleaned XML file...')
js = JsonFile(xml_cleaned_file)
data = js.processMap()
print('\nDeleting XML cleaned file...')
os.remove(xml_cleaned_file)
# Initialize and create MongoDB database from JSON document list 'data'
print('\nCreating new MongoDB database \'brooklyn\' from cleaned JSON file...')
client = MongoClient('mongodb://localhost:27017')
db = client.osm_results
db.brooklyn.insert_many(data, bypass_document_validation=True)
del data[:]
# Run and output MongoDB querires and results
print('\nRunning MongoDB queries...')
print('\nTotal number of documents: ')
print('db.brooklyn.find().count()')
print(str(db.brooklyn.find().count()))
print('\nNumber of \'way\' type documents: ')
print('db.brooklyn.find({\'type\' :\'way\'}).count()')
print(str(db.brooklyn.find({'type' :'way'}).count()))
print('\nNumber of \'node\' type documents: ')
print('db.brooklyn.find({\'type\' :\'node\'}).count()')
print(str(db.brooklyn.find({'type' :'node'}).count()))
print('\nNumber of unique users: ')
print('len(db.brooklyn.distinct(\'created.user\'))')
print(str(len(db.brooklyn.distinct('created.user'))))
print('\nTop 1 contributing user: ')
top_contributor_pipeline = [{'$group':
{'_id':'$created.user',
'count':{'$sum':1}}},
{'$sort':
{'count':1}},
{'$limit':1}]
print('db.brooklyn.aggregate(' + str(top_contributor_pipeline) + ')')
top_contributor = mongoAggregate(db.brooklyn.aggregate(top_contributor_pipeline))
print(str(top_contributor[0]))
print('\nNumber of users appearing only once (having 1 post): ')
unique_user_count_pipeline =[{'$group':
{'_id':'$created.user',
'count':{'$sum':1}}},
{'$group':
{'_id':'$count',
'num_users':{'$sum':1}}},
{'$sort':
{'_id':1}},
{'$limit':1}]
print('db.brooklyn.aggregate(' + str(unique_user_count_pipeline) + ')')
unique_user_count = mongoAggregate(db.brooklyn.aggregate(unique_user_count_pipeline))
print(str(unique_user_count[0]))
print('\nTop 10 appearing amenities: ')
top_10_amenities_pipeline = [{'$match':
{'amenity':{'$exists':1}}},
{'$group':
{'_id':'$amenity',
'count':{'$sum':1}}},
{'$sort':
{'count':1}},
{"$limit":10}]
print('db.brooklyn.aggregate(' + str(top_10_amenities_pipeline) + ')')
top_10_amenities = mongoAggregate(db.brooklyn.aggregate(top_10_amenities_pipeline))
print(str(top_10_amenities))
print('\nHighest population religion: ')
most_pop_religion_pipeline = [{'$match':
{'amenity':{'$exists':1},
'amenity':'place_of_worship'}},
{'$group':
{'_id':'$religion',
'count':{'$sum':1}}},
{'$sort':
{'count':1}},
{'$limit':1}]
print('db.brooklyn.aggregate(' + str(most_pop_religion_pipeline) + ')')
most_pop_religion = mongoAggregate(db.brooklyn.aggregate(most_pop_religion_pipeline))
print(str(most_pop_religion[0]))
print('\nMost popular cuisines: ')
most_pop_cuisine_pipeline = [{'$match':
{'amenity':{'$exists':1},
'amenity':'restaurant'}},
{'$group':
{'_id':'$cuisine',
'count':{'$sum':1}}},
{'$sort':
{'count':1}},
{'$limit':2}]
print('db.brooklyn.aggregate(' + str(most_pop_cuisine_pipeline) + ')')
most_pop_cuisine = mongoAggregate(db.brooklyn.aggregate(most_pop_cuisine_pipeline))
print(str(most_pop_cuisine[0]))
print('\nPostal Codes: ')
postal_codes_pipeline = [{'$match':
{'address.postcode':{'$exists':1},
'address.postcode':'NaN'}},
{'$group':
{'_id':'$address.postcode',
'count':{'$sum':1}}},
{'$sort':{'count':1}}]
print('db.brooklyn.aggregate(' + str(postal_codes_pipeline) + ')')
postal_codes = mongoAggregate(db.brooklyn.aggregate(postal_codes_pipeline))
print(str(postal_codes[0])) | 40.996568 | 127 | 0.471966 |
b7da43e450c1cde9be925061435a5d471ad6ae05 | 640 | py | Python | Wrapping/Python/vtkmodules/__init__.py | cads-build/VTK | ee0c9688a082c88bfe070afc08f4eb0f0a546487 | [
"BSD-3-Clause"
] | 1 | 2019-09-11T12:30:57.000Z | 2019-09-11T12:30:57.000Z | Wrapping/Python/vtkmodules/__init__.py | AndyJMR/VTK | 3cc9e5f7539107e5dbaeadc2d28f7a8db6de8571 | [
"BSD-3-Clause"
] | null | null | null | Wrapping/Python/vtkmodules/__init__.py | AndyJMR/VTK | 3cc9e5f7539107e5dbaeadc2d28f7a8db6de8571 | [
"BSD-3-Clause"
] | null | null | null | r"""
Currently, this package is experimental and may change in the future.
"""
from __future__ import absolute_import
#------------------------------------------------------------------------------
# this little trick is for static builds of VTK. In such builds, if
# the user imports this Python package in a non-statically linked Python
# interpreter i.e. not of the of the VTK-python executables, then we import the
# static components importer module.
try:
from . import vtkCommonCore
except ImportError:
from . import _vtkpythonmodules_importer
#------------------------------------------------------------------------------
| 37.647059 | 79 | 0.582813 |
b7daad942b4ee13674b01a3bc7990323f036b3a5 | 1,176 | py | Python | Financely/basic_app/models.py | Frostday/Financely | 23226aca0ad21971cb61d13509e16651b304d207 | [
"MIT"
] | 8 | 2021-05-28T16:09:36.000Z | 2022-02-27T23:12:48.000Z | Financely/basic_app/models.py | Frostday/Financely | 23226aca0ad21971cb61d13509e16651b304d207 | [
"MIT"
] | null | null | null | Financely/basic_app/models.py | Frostday/Financely | 23226aca0ad21971cb61d13509e16651b304d207 | [
"MIT"
] | 8 | 2021-05-28T16:01:48.000Z | 2022-02-27T23:12:50.000Z | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
| 40.551724 | 119 | 0.747449 |
b7dd25cebefde2e55f7311a4ace4a861586de3c9 | 1,299 | py | Python | lims/models/shipping.py | razorlabs/BRIMS-backend | 2c5b7bd126debec459b775e9d11e96fc09975059 | [
"MIT"
] | 1 | 2020-03-20T23:00:24.000Z | 2020-03-20T23:00:24.000Z | lims/models/shipping.py | razorlabs/BRIMS-backend | 2c5b7bd126debec459b775e9d11e96fc09975059 | [
"MIT"
] | null | null | null | lims/models/shipping.py | razorlabs/BRIMS-backend | 2c5b7bd126debec459b775e9d11e96fc09975059 | [
"MIT"
] | 1 | 2020-03-09T09:57:25.000Z | 2020-03-09T09:57:25.000Z | from django.db import models
"""
ShipmentModels have a one to many relationship with boxes and aliquot
Aliquot and Box foreign keys to a ShipmentModel determine manifest contents
for shipping purposes (resolved in schema return for manifest view)
"""
| 34.184211 | 79 | 0.628176 |
b7defbba24700ce1dff5cfd0c991ccf13a0c39e0 | 1,857 | py | Python | part-2/2-iterators/Example-consuming_iterators_manually.py | boconlonton/python-deep-dive | c01591a4943c7b77d4d2cd90a8b23423280367a3 | [
"MIT"
] | null | null | null | part-2/2-iterators/Example-consuming_iterators_manually.py | boconlonton/python-deep-dive | c01591a4943c7b77d4d2cd90a8b23423280367a3 | [
"MIT"
] | null | null | null | part-2/2-iterators/Example-consuming_iterators_manually.py | boconlonton/python-deep-dive | c01591a4943c7b77d4d2cd90a8b23423280367a3 | [
"MIT"
] | null | null | null | """
Consuming Iterator manually
"""
from collections import namedtuple
def cast(data_type, value):
    """Cast *value* to the Python type named by *data_type*.

    Supported names are 'DOUBLE', 'STRING' and 'INT'. Any other name
    yields None, matching the implicit fall-through of a branch chain.
    """
    converters = {'DOUBLE': float, 'STRING': str, 'INT': int}
    converter = converters.get(data_type)
    return converter(value) if converter is not None else None
# cars = []
# with open('cars.csv') as file:
# row_index = 0
# for line in file:
# if row_index == 0:
# # Header row
# headers = line.strip('\n').split(';')
# Car = namedtuple('Car', headers)
# elif row_index == 1:
# data_types = line.strip('\n').split(';')
# # print('types', data_types)
# else:
# # data row
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
# # print(data)
# row_index += 1
# with open('cars.csv') as file:
# file_iter = iter(file)
# headers = next(file_iter).strip('\n').split(';')
# Car = namedtuple('Car', headers)
# data_types = next(file_iter).strip('\n').split(';')
# for line in file_iter:
# data = line.strip('\n').split(';')
# data = cast_row(data_types, data)
# car = Car(*data)
# cars.append(car)
with open('cars.csv') as file:
file_iter = iter(file)
headers = next(file_iter).strip('\n').split(';')
Car = namedtuple('Car', headers)
data_types = next(file_iter).strip('\n').split(';')
cars = [Car(*cast_row(
data_types,
line.strip('\n').split(';')
))
for line in file_iter]
print(cars)
| 27.308824 | 58 | 0.525579 |
b7dfa49c85bfb3c402f6a966ce46d040dfc275f6 | 1,675 | py | Python | instance_server/services/startpage.py | Geierhaas/developer-observatory | f2e840ab9a283ea82353a8c5bbb6b1905567fbe4 | [
"MIT"
] | 4 | 2017-08-26T04:51:52.000Z | 2022-01-02T23:07:48.000Z | instance_server/services/startpage.py | Geierhaas/developer-observatory | f2e840ab9a283ea82353a8c5bbb6b1905567fbe4 | [
"MIT"
] | 3 | 2020-11-04T11:13:55.000Z | 2021-03-08T19:47:52.000Z | instance_server/services/startpage.py | Geierhaas/developer-observatory | f2e840ab9a283ea82353a8c5bbb6b1905567fbe4 | [
"MIT"
] | 6 | 2017-10-24T14:44:05.000Z | 2022-01-13T14:26:05.000Z | #! Copyright (C) 2017 Christian Stransky
#!
#! This software may be modified and distributed under the terms
#! of the MIT license. See the LICENSE file for details.
from flask import Flask, redirect, request, make_response
from shutil import copyfile
import json
import requests
import os.path
import uuid
import urllib
app = Flask(__name__)
remote_task_file = "%landingURL%/get_ipynb/"
target_file = "/home/jupyter/tasks.ipynb"
user_data_file = "/home/jupyter/.instanceInfo"
if __name__ == '__main__':
#app.debug = True
app.run(host='127.0.0.1', port=60000)
| 34.183673 | 110 | 0.677612 |
b7e02aed4c2632acfe7ae12115128aac02a396d3 | 672 | py | Python | utils/linalg.py | cimat-ris/TrajectoryInference | 27d1d2d692df52b403cf6557ecba628f818cd380 | [
"Apache-2.0"
] | 6 | 2019-11-05T00:56:06.000Z | 2021-12-05T21:11:14.000Z | utils/linalg.py | cimat-ris/TrajectoryInference | 27d1d2d692df52b403cf6557ecba628f818cd380 | [
"Apache-2.0"
] | 2 | 2021-05-22T11:16:45.000Z | 2021-05-31T00:42:07.000Z | utils/linalg.py | cimat-ris/TrajectoryInference | 27d1d2d692df52b403cf6557ecba628f818cd380 | [
"Apache-2.0"
] | 1 | 2021-05-22T10:35:18.000Z | 2021-05-22T10:35:18.000Z | import numpy as np
import math
import logging
from termcolor import colored
# Check a matrix for: negative eigenvalues, asymmetry and negative diagonal values
| 29.217391 | 82 | 0.616071 |
b7e0fbad2360576b896a69e1a30c6d6156b68c38 | 282 | py | Python | problemsets/Codeforces/Python/A1020.py | juarezpaulino/coderemite | a4649d3f3a89d234457032d14a6646b3af339ac1 | [
"Apache-2.0"
] | null | null | null | problemsets/Codeforces/Python/A1020.py | juarezpaulino/coderemite | a4649d3f3a89d234457032d14a6646b3af339ac1 | [
"Apache-2.0"
] | null | null | null | problemsets/Codeforces/Python/A1020.py | juarezpaulino/coderemite | a4649d3f3a89d234457032d14a6646b3af339ac1 | [
"Apache-2.0"
] | null | null | null | """
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
I=lambda:map(int,input().split())
f=abs
n,_,a,b,k=I()
while k:
p,q,u,v=I()
P=[a,b]
if a<=q<=b:P+=[q]
if a<=v<=b:P+=[v]
print([min(f(q-x)+f(v-x)for x in P)+f(p-u),f(q-v)][p==u])
k-=1 | 17.625 | 59 | 0.521277 |
b7e289ea7bf92691efc481deeec6261bf7909c3b | 850 | py | Python | get_tweet.py | Na27i/tweet_generator | 92a5156e041982dd12d9850445f15a599fb6ec5e | [
"MIT"
] | null | null | null | get_tweet.py | Na27i/tweet_generator | 92a5156e041982dd12d9850445f15a599fb6ec5e | [
"MIT"
] | null | null | null | get_tweet.py | Na27i/tweet_generator | 92a5156e041982dd12d9850445f15a599fb6ec5e | [
"MIT"
] | null | null | null | import json
import sys

import pandas

# Select the credential module: no CLI argument -> main.py, otherwise sub.py.
args = sys.argv
if len(args) == 1:
    import main as settings
else:
    import sub as settings
from requests_oauthlib import OAuth1Session

CK = settings.CONSUMER_KEY
CS = settings.CONSUMER_SECRET
AT = settings.ACCESS_TOKEN
ATS = settings.ACCESS_TOKEN_SECRET
twitter = OAuth1Session(CK, CS, AT, ATS)

# Fetch up to 5 pages x 200 tweets of the authenticated user's timeline and
# save the tweet texts to tweetlist.csv.
tweetlist = []
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
params = {"count": 200}
for i in range(5):  # original read `for i range(5)`: a SyntaxError
    res = twitter.get(url, params=params)
    if res.status_code == 200:
        timelines = json.loads(res.text)
        if not timelines:
            break  # no older tweets left
        for tweet in timelines:
            tweetlist.append(tweet["text"])
        # Page backwards with max_id; without it every iteration would
        # re-fetch the same newest 200 tweets.
        params["max_id"] = timelines[-1]["id"] - 1
    else:
        print("(%d)" % res.status_code)

datafile = pandas.DataFrame(tweetlist)
datafile.to_csv("tweetlist.csv", encoding='utf_8_sig')
b7e377e1a140ad61d79142b999a2e7a703c9e2ef | 1,284 | py | Python | idact/detail/config/validation/validate_scratch.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/config/validation/validate_scratch.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/config/validation/validate_scratch.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | """This module contains a function for validating a scratch config entry."""
import re
from idact.detail.config.validation.validation_error_message import \
validation_error_message
VALID_SCRATCH_DESCRIPTION = ('Non-empty absolute path, or environment'
                             ' variable name.')

VALID_SCRATCH_REGEX = r"^(/.*)|(\$[A-Za-z][A-Za-z0-9]*)$"  # noqa, pylint: disable=line-too-long

_SCRATCH_PATTERN = re.compile(pattern=VALID_SCRATCH_REGEX)


def validate_scratch(scratch) -> str:
    """Validate a scratch config entry and return it unchanged.

    A valid entry is a string that is either an absolute path or an
    environment variable name (``$NAME``).

    :param scratch: Object to validate.

    :raises TypeError: When the value is not a string.
    :raises ValueError: When the string does not match the expected pattern.
    """
    def _invalid(exc_type):
        # Build the exception with a uniform validation error message.
        return exc_type(validation_error_message(
            label='scratch',
            value=scratch,
            expected=VALID_SCRATCH_DESCRIPTION,
            regex=VALID_SCRATCH_REGEX))

    if not isinstance(scratch, str):
        raise _invalid(TypeError)
    if _SCRATCH_PATTERN.match(scratch) is None:
        raise _invalid(ValueError)
    return scratch
| 29.181818 | 96 | 0.660436 |
b7e39de3f444fe8cb279979f19de1ae9ea72a25e | 10,135 | py | Python | paramak/parametric_components/blanket_fp.py | zmarkan/paramak | ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba | [
"MIT"
] | null | null | null | paramak/parametric_components/blanket_fp.py | zmarkan/paramak | ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba | [
"MIT"
] | null | null | null | paramak/parametric_components/blanket_fp.py | zmarkan/paramak | ecf9a46394adb4d6bb5744000ec6e2f74c30f2ba | [
"MIT"
] | null | null | null |
import warnings
from typing import Callable, List, Optional, Union
import mpmath
import numpy as np
import paramak
import sympy as sp
from paramak import RotateMixedShape, diff_between_angles
from paramak.parametric_components.tokamak_plasma_plasmaboundaries import \
PlasmaBoundaries
from scipy.interpolate import interp1d
def create_offset_points(self, thetas, offset):
    """generates a list of points following parametric equations with an
    offset

    Args:
        thetas (np.array): the angles in degrees.
        offset (callable): offset value (cm). offset=0 will follow the
            parametric equations.

    Returns:
        list: list of points [[R1, Z1, connection1], [R2, Z2, connection2],
        ...]
    """
    # create sympy objects and derivatives of the symbolic boundary curve
    theta_sp = sp.Symbol("theta")
    R_sp, Z_sp = self.distribution(theta_sp, pkg=sp)
    R_derivative = sp.diff(R_sp, theta_sp)
    Z_derivative = sp.diff(Z_sp, theta_sp)
    points = []
    for theta in thetas:
        # get local value of derivatives (tangent vector components)
        val_R_derivative = float(R_derivative.subs("theta", theta))
        val_Z_derivative = float(Z_derivative.subs("theta", theta))
        # get normal vector components (tangent rotated by -90 degrees)
        nx = val_Z_derivative
        ny = -val_R_derivative
        # normalise normal vector to unit length
        normal_vector_norm = (nx ** 2 + ny ** 2) ** 0.5
        nx /= normal_vector_norm
        ny /= normal_vector_norm
        # calculate outer points: step `offset(theta)` along the normal
        val_R_outer = self.distribution(theta)[0] + offset(theta) * nx
        val_Z_outer = self.distribution(theta)[1] + offset(theta) * ny
        if float(val_R_outer) > 0:
            points.append(
                [float(val_R_outer), float(val_Z_outer), "spline"])
        else:
            # A negative major radius means the offset shape crossed the
            # machine axis; flag the overlap instead of keeping the point.
            self._overlapping_shape = True
    return points
def distribution(self, theta, pkg=np):
    """Return the (R, Z) boundary point(s) at angle *theta* (degrees).

    Args:
        theta (float or np.array or sp.Symbol): the angle(s) in degrees.
        pkg (module, optional): numerical backend.  With ``np`` the result
            is numeric (float or array); with ``sp`` a symbolic sympy
            expression is built instead.  Defaults to np.

    Returns:
        (float, float) or (sympy.Add, sympy.Mul) or
        (numpy.array, numpy.array): the R and Z coordinates of the point
        with angle theta.
    """
    # Convert degrees to radians with a backend that keeps symbolic input
    # symbolic.
    if pkg == np:
        angle = np.radians(theta)
    else:
        angle = mpmath.radians(theta)
    # D-shaped plasma parameterisation: triangularity skews the angle,
    # elongation stretches the vertical extent.
    skewed = angle + self.triangularity * pkg.sin(angle)
    radial = self.major_radius + self.minor_radius * pkg.cos(skewed)
    vertical = (self.elongation * self.minor_radius * pkg.sin(angle)
                + self.vertical_displacement)
    return radial, vertical
| 36.456835 | 79 | 0.597139 |
b7e4658365995b8bd790113c73797283daaf0910 | 907 | py | Python | 3.7.1/solution.py | luxnlex/stepic-python | 92a4b25391f76935c3c2a70fb8552e7f93928d9b | [
"MIT"
] | 1 | 2021-05-07T18:20:51.000Z | 2021-05-07T18:20:51.000Z | 3.7.1/solution.py | luxnlex/stepic-python | 92a4b25391f76935c3c2a70fb8552e7f93928d9b | [
"MIT"
] | null | null | null | 3.7.1/solution.py | luxnlex/stepic-python | 92a4b25391f76935c3c2a70fb8552e7f93928d9b | [
"MIT"
] | 2 | 2017-12-27T07:51:57.000Z | 2020-08-03T22:10:55.000Z | s=str(input())
# Stepik cipher exercise.  `s` (read just above) is the plaintext alphabet;
# the next input line is the cipher alphabet.  The third input line is
# encoded with the mapping and the fourth is decoded back.
a=[]
# Split the plaintext alphabet string into a list of characters.
for i in range(len(s)):
    si=s[i]
    a.append(si)
b=[]
n=str(input())
# Split the cipher alphabet into characters as well.
for j in range(len(n)):
    sj=n[j]
    b.append(sj)
# p maps each plaintext character to its cipher character.
p={}
for pi in range(len(s)):
    key=s[pi]
    p[key]=0
j1=0
# Consume b from the front so that p[s[i]] ends up as n[i]
# (for duplicate characters the last occurrence wins).
for i in range(0,len(a)):
    key=a[i]
    while j1<len(b):
        bj=b[0]
        if key in p:
            p[key]=bj
            b.remove(bj)
        break
c=[]
si=str(input())
# Characters of the text to encode.
for si1 in range(0,len(si)):
    ci=si[si1]
    c.append(ci)
co=[]
# Encode: look each character up in p; characters outside the alphabet
# are silently skipped.
for ci in range(0,len(c)):
    if c[ci] in p:
        cco=c[ci]
        pco=p[cco]
        co.append(pco)
d=[]
di=str(input())
# Characters of the text to decode.
for sj1 in range(0,len(di)):
    dj=di[sj1]
    d.append(dj)
do=[]
# Decode by reverse lookup: find the key whose mapped value matches.
for di in range(0,len(d)):
    for key in p:
        pkey=key
        if p.get(key) == d[di]:
            ddo=pkey
            do.append(ddo)
# Print the encoded line, a newline, then the decoded line (no trailing
# newline after the last one).
for i in range (0,len(co)):
    print(co[i],end='')
print()
for j in range (0,len(do)):
    print(do[j],end='')
b7e4fae61f0aabd32e88f180183fcddc115ab0ca | 4,352 | py | Python | airbyte-integrations/connectors/source-plaid/source_plaid/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/source-plaid/source_plaid/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 29 | 2021-10-07T17:20:29.000Z | 2021-12-27T13:07:09.000Z | airbyte-integrations/connectors/source-plaid/source_plaid/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1 | 2021-07-30T07:24:51.000Z | 2021-07-30T07:24:51.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import datetime
import json
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple, Union
import plaid
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from plaid.api import plaid_api
from plaid.model.accounts_balance_get_request import AccountsBalanceGetRequest
from plaid.model.transactions_get_request import TransactionsGetRequest
SPEC_ENV_TO_PLAID_ENV = {
"production": plaid.Environment.Production,
"development": plaid.Environment.Development,
"sandbox": plaid.Environment.Sandbox,
}
class IncrementalTransactionStream(PlaidStream):
    """Plaid transactions stream with incremental sync cursored on the
    transaction date."""

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """Advance the stream state to the date of the latest record."""
        return {"date": latest_record.get("date")}

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: Optional[List[str]] = None,
        stream_slice: Optional[Mapping[str, Any]] = None,
        stream_state: Optional[Mapping[str, Any]] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Yield transactions newer than the saved state, oldest first.

        Records are sorted by date so that ``get_updated_state`` sees the
        most recent date last.
        """
        stream_state = stream_state or {}
        date = stream_state.get("date")
        if not date:
            # No saved state: start from the epoch to fetch everything.
            date = datetime.date.fromtimestamp(0)
        else:
            date = datetime.date.fromisoformat(date)

        if date >= datetime.datetime.utcnow().date():
            # State is already at (or past) today; nothing new to fetch.
            return
        # NOTE(review): self.client / self.access_token are presumably set
        # by PlaidStream's constructor -- not visible here.
        transaction_response = self.client.transactions_get(
            TransactionsGetRequest(access_token=self.access_token, start_date=date, end_date=datetime.datetime.utcnow().date())
        )
        yield from map(lambda x: x.to_dict(), sorted(transaction_response["transactions"], key=lambda t: t["date"]))
class SourcePlaid(AbstractSource):
| 35.966942 | 135 | 0.667509 |
b7e5547eb715244c2608406503ff045d83d45b75 | 17,939 | py | Python | demo/demo.py | taewhankim/DeepHRnet | c316b4a9f5f3002f6fcc0398c12d80de82195ef0 | [
"MIT"
] | null | null | null | demo/demo.py | taewhankim/DeepHRnet | c316b4a9f5f3002f6fcc0398c12d80de82195ef0 | [
"MIT"
] | null | null | null | demo/demo.py | taewhankim/DeepHRnet | c316b4a9f5f3002f6fcc0398c12d80de82195ef0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import shutil
from PIL import Image
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import cv2
import numpy as np
import time
import math
import _init_paths
import models
from config import cfg
from config import update_config
from core.function import get_final_preds
from utils.transforms import get_affine_transform
COCO_KEYPOINT_INDEXES = {
0: 'nose',
1: 'left_eye',
2: 'right_eye',
3: 'left_ear',
4: 'right_ear',
5: 'left_shoulder',
6: 'right_shoulder',
7: 'left_elbow',
8: 'right_elbow',
9: 'left_wrist',
10: 'right_wrist',
11: 'left_hip',
12: 'right_hip',
13: 'left_knee',
14: 'right_knee',
15: 'left_ankle',
16: 'right_ankle'
}
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
SKELETON = [
[5, 7], [7, 9],[5, 6],[6, 8], [8, 10]
]
## :
# SKELETON = [
# [1, 3], [1, 0], [2, 4], [2, 0], [0, 5], [0, 6], [5, 7], [7, 9], [6, 8], [8, 10], [5, 11], [6, 12], [11, 12],
# [11, 13], [13, 15], [12, 14], [14, 16]
#]
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
NUM_KPTS = 17
CTX = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def draw_pose(keypoints, img):
    """draw the keypoints and the skeletons.
    :params keypoints: the shape should be equal to [17,2]
    :params img: BGR image (modified in place)
    """
    # NOTE(review): SKELETON above only lists 5 arm/shoulder edges, so only
    # those joints are drawn even though 17 keypoints are expected; the
    # commented-out block below is the full-skeleton variant.
    #
    # assert keypoints.shape == (NUM_KPTS, 2)
    # for i in range(len(SKELETON)):
    #     kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
    #     x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
    #     x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
    #     cv2.circle(img, (int(x_a), int(y_a)), 6, CocoColors[i], -1)
    #     cv2.circle(img, (int(x_b), int(y_b)), 6, CocoColors[i], -1)
    #     cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 2)
    for i in range(len(SKELETON)):
        # Draw both joint endpoints, then the connecting bone.
        kpt_a, kpt_b = SKELETON[i][0], SKELETON[i][1]
        x_a, y_a = keypoints[kpt_a][0], keypoints[kpt_a][1]
        x_b, y_b = keypoints[kpt_b][0], keypoints[kpt_b][1]
        cv2.circle(img, (int(x_a), int(y_a)), 10, CocoColors[i], -1)
        cv2.circle(img, (int(x_b), int(y_b)), 10, CocoColors[i], -1)
        cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), CocoColors[i], 7)
def draw_bbox(box, img):
    """draw the detected bounding box on the image.
    :param box: two (x, y) corner points of the box
    :param img: BGR image (modified in place)
    """
    cv2.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=3)
def box_to_center_scale(box, model_image_width, model_image_height):
    """convert a box to center,scale information required for pose transformation
    Parameters
    ----------
    box : list of tuple
        list of length 2 with two tuples of floats representing
        bottom left and top right corner of a box
    model_image_width : int
    model_image_height : int

    Returns
    -------
    (numpy array, numpy array)
        Two numpy arrays, coordinates for the center of the box and the
        scale of the box (in units of 200 px, padded by 25%)
    """
    (x0, y0), (x1, y1) = box
    width = x1 - x0
    height = y1 - y0

    center = np.array([x0 + width * 0.5, y0 + height * 0.5], dtype=np.float32)

    # Expand the shorter side so the box matches the model's aspect ratio.
    aspect_ratio = model_image_width * 1.0 / model_image_height
    if width > aspect_ratio * height:
        height = width * 1.0 / aspect_ratio
    elif width < aspect_ratio * height:
        width = height * aspect_ratio

    pixel_std = 200
    scale = np.array([width * 1.0 / pixel_std, height * 1.0 / pixel_std],
                     dtype=np.float32)
    if center[0] != -1:
        scale = scale * 1.25

    return center, scale
if __name__ == '__main__':
main() | 36.911523 | 118 | 0.572997 |
b7e6129db622711592b894cfa7f14f8bbe198a09 | 2,749 | py | Python | feemodeldata/plotting/plotwaits.py | bitcoinfees/bitcoin-feemodel-data | 3eb09cf2a64b1aa23d328484bbcd7e4d55291898 | [
"MIT"
] | 2 | 2015-07-10T20:14:54.000Z | 2017-06-08T11:01:03.000Z | feemodeldata/plotting/plotwaits.py | bitcoinfees/bitcoin-feemodel-data | 3eb09cf2a64b1aa23d328484bbcd7e4d55291898 | [
"MIT"
] | null | null | null | feemodeldata/plotting/plotwaits.py | bitcoinfees/bitcoin-feemodel-data | 3eb09cf2a64b1aa23d328484bbcd7e4d55291898 | [
"MIT"
] | null | null | null | from __future__ import division
import sqlite3
from bisect import bisect_left
import plotly.plotly as py
from plotly.graph_objs import Scatter, Figure, Layout, Data, YAxis, XAxis
from feemodel.util import DataSample
from feemodel.app.predict import PVALS_DBFILE
from feemodeldata.plotting.plotrrd import BASEDIR
def get_txgroups(txs, feerates=(10000, 15000, 20000, 50000)):
    """Sort txs in place by feerate and split them into feerate bands.

    Each band i holds the txs with feerate in [feerates[i-1], feerates[i]);
    txs at or above the highest threshold are not returned.
    """
    txs.sort()
    txfeerates, _dum = zip(*txs)
    cut_points = [0]
    for feerate in feerates:
        cut_points.append(bisect_left(txfeerates, feerate))
    print("idxs are {}.".format(cut_points))
    return [txs[lo:hi] for lo, hi in zip(cut_points, cut_points[1:])]
| 28.936842 | 75 | 0.628592 |
b7e7c6200dfbf2600bb1a1bc581331cb427697e7 | 5,181 | py | Python | utils/pytorch_utils.py | shoegazerstella/BTC-ISMIR19 | fc4c8ef792711460d98b502ddc2e5befc800d2e5 | [
"MIT"
] | 1 | 2020-07-23T23:46:24.000Z | 2020-07-23T23:46:24.000Z | utils/pytorch_utils.py | shoegazerstella/BTC-ISMIR19 | fc4c8ef792711460d98b502ddc2e5befc800d2e5 | [
"MIT"
] | null | null | null | utils/pytorch_utils.py | shoegazerstella/BTC-ISMIR19 | fc4c8ef792711460d98b502ddc2e5befc800d2e5 | [
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import math
from utils import logger
use_cuda = torch.cuda.is_available()
# utility
# optimization
# reference: http://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#ReduceLROnPlateau
# model save and loading
# class weighted_BCELoss(Module):
# def __init__(self, mode):
# self.mode = mode
#
# def forward(self, input, target, weight=10):
# if not (input.size() == target.size()):
# raise ValueError("Target and input must have the same size. target size ({}) "
# "!= input size ({})".format(target.size(), input.size()))
# loss_matrix = - (torch.mul(target, input.log()) + torch.mul(1 - target, (1 - input).log()))
# one_matrix = Variable(torch.ones(input.size()))
# if use_cuda:
# one_matrix = one_matrix.cuda()
# if self.mode == 'one':
# weight_matrix = (weight - 1) * target + one_matrix
# elif self.mode == 'pitch':
#
# weighted_loss_matrix = torch.mul(loss_matrix, weight_matrix)
# return torch.mean(weighted_loss_matrix)
# loss
| 35.244898 | 107 | 0.640803 |
b7e805c3fdc6130f33ad7d70c4f57afa4833b9f9 | 3,630 | py | Python | ecosante/users/schemas/__init__.py | betagouv/recosante-api | 4560b2cf2ff4dc19597792fe15a3805f6259201d | [
"MIT"
] | 3 | 2021-09-24T14:07:51.000Z | 2021-12-14T13:48:34.000Z | ecosante/users/schemas/__init__.py | betagouv/recosante-api | 4560b2cf2ff4dc19597792fe15a3805f6259201d | [
"MIT"
] | 187 | 2021-03-25T16:43:49.000Z | 2022-03-23T14:40:31.000Z | ecosante/users/schemas/__init__.py | betagouv/recosante-api | 4560b2cf2ff4dc19597792fe15a3805f6259201d | [
"MIT"
] | null | null | null | from dataclasses import field
from marshmallow import Schema, ValidationError, post_load, schema
from marshmallow.validate import OneOf, Length
from marshmallow.fields import Bool, Str, List, Nested, Email
from flask_rebar import ResponseSchema, RequestSchema, errors
from ecosante.inscription.models import Inscription
from ecosante.utils.custom_fields import TempList
from ecosante.api.schemas.commune import CommuneSchema
from ecosante.extensions import celery
from indice_pollution.history.models import Commune as CommuneModel
from flask import request
| 43.214286 | 115 | 0.689532 |
b7ea33cae6c817255b7381a86f5b2cf3631857b7 | 933 | py | Python | Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 9 | 2022-03-22T16:45:17.000Z | 2022-03-25T20:22:35.000Z | Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | null | null | null | Course 01 - Getting Started with Python/Extra Studies/Basics/ex022.py | marcoshsq/python_practical_exercises | 77136cd4bc0f34acde3380ffdc5af74f7a960670 | [
"MIT"
] | 3 | 2022-03-22T17:03:38.000Z | 2022-03-29T17:20:55.000Z | import math
# Exercise 017: Right Triangle
"""Write a program that reads the length of the opposite side and the adjacent side of a right triangle.
Calculate and display the length of the hypotenuse."""
# To do this we will use the Pythagorean theorem: a^2 = b^2 + c^2
# Method 01, without the module Math:
# First we ask for the leg values
leg_a = float(input("Enter the value of leg a: "))
leg_b = float(input("Enter the value of leg b: "))
# Then we do the Pythagorean theorem: sqrt((leg_a^2)+(leg_b^2))
hyp = ((leg_a**2) + (leg_b**2)) ** 0.5
print(f"The triangle hypotenuse measures {hyp:.2f} m.u. ")
# Method 02, with the module using pow function:
hypo = math.sqrt(math.pow(leg_a, 2) + math.pow(leg_b, 2))
print(f"The triangle hypotenuse measures {hypo:.2f} m.u. ")
# Method 03 using the module with the hypotenuse function u.u
hypot = math.hypot(leg_a, leg_b)
print(f"The triangle hypotenuse measures {hypot:.2f} m.u. ")
| 38.875 | 104 | 0.710611 |
b7ebf597cf4af041d284ceb92dfc3840fcf8cea7 | 146 | py | Python | annuaire/commands/__init__.py | djacomy/layer-annuaire | b0312534e31dd98d98568a83918cf7dd583aa4c7 | [
"MIT"
] | null | null | null | annuaire/commands/__init__.py | djacomy/layer-annuaire | b0312534e31dd98d98568a83918cf7dd583aa4c7 | [
"MIT"
] | null | null | null | annuaire/commands/__init__.py | djacomy/layer-annuaire | b0312534e31dd98d98568a83918cf7dd583aa4c7 | [
"MIT"
] | null | null | null | """Package groups the different commands modules."""
from annuaire.commands import download, import_lawyers
__all__ = [download, import_lawyers]
| 29.2 | 54 | 0.80137 |
b7eda2093d6d54b12bba13592c13c99ac642ca74 | 15,883 | py | Python | eventsourcing/application/actors.py | vladimirnani/eventsourcing | f49d2b9aaa585073aca4dc20c59d46db5a14eb57 | [
"BSD-3-Clause"
] | 1 | 2020-02-10T08:12:31.000Z | 2020-02-10T08:12:31.000Z | eventsourcing/application/actors.py | vladimirnani/eventsourcing | f49d2b9aaa585073aca4dc20c59d46db5a14eb57 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/application/actors.py | vladimirnani/eventsourcing | f49d2b9aaa585073aca4dc20c59d46db5a14eb57 | [
"BSD-3-Clause"
] | null | null | null | import logging
from thespian.actors import *
from eventsourcing.application.process import ProcessApplication, Prompt
from eventsourcing.application.system import System, SystemRunner
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.interface.notificationlog import RecordManagerNotificationLog
logger = logging.getLogger()
# Todo: Send timer message to run slave every so often (in master or slave?).
DEFAULT_ACTORS_LOGCFG = {
'version': 1,
'formatters': {
'normal': {
'format': '%(levelname)-8s %(message)s'
}
},
'handlers': {
# 'h': {
# 'class': 'logging.FileHandler',
# 'filename': 'hello.log',
# 'formatter': 'normal',
# 'level': logging.INFO
# }
},
'loggers': {
# '': {'handlers': ['h'], 'level': logging.DEBUG}
}
}
# def start_multiproc_udp_base_system():
# start_actor_system(system_base='multiprocUDPBase')
#
#
# def start_multiproc_queue_base_system():
# start_actor_system(system_base='multiprocQueueBase')
| 37.637441 | 116 | 0.661336 |
b7edb2af66a1ef0492b215ff19713cb25d91778e | 4,517 | py | Python | sudoku/board.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | 5 | 2021-05-27T09:26:30.000Z | 2021-05-28T10:33:46.000Z | sudoku/board.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | null | null | null | sudoku/board.py | DariaMinieieva/sudoku_project | acfe6b6ff4e0343ad0dae597e783f9da40a7faee | [
"MIT"
] | 1 | 2021-05-28T08:43:05.000Z | 2021-05-28T08:43:05.000Z | """This module implements backtracking algorithm to solve sudoku."""
| 30.938356 | 92 | 0.556121 |
b7f128c1c030f4883afe9da12b85ac98f1c9b3dd | 9,603 | py | Python | openfl/component/ca/ca.py | saransh09/openfl-1 | beba571929a56771f2fc1671154a3dbe60b38785 | [
"Apache-2.0"
] | null | null | null | openfl/component/ca/ca.py | saransh09/openfl-1 | beba571929a56771f2fc1671154a3dbe60b38785 | [
"Apache-2.0"
] | 1 | 2022-03-02T18:07:11.000Z | 2022-03-10T02:43:12.000Z | openfl/component/ca/ca.py | saransh09/openfl-1 | beba571929a56771f2fc1671154a3dbe60b38785 | [
"Apache-2.0"
] | 1 | 2022-03-03T00:50:15.000Z | 2022-03-03T00:50:15.000Z | # Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Aggregator module."""
import base64
import json
import os
import platform
import shutil
import signal
import subprocess
import time
import urllib.request
from logging import getLogger
from pathlib import Path
from subprocess import call
import requests
from click import confirm
logger = getLogger(__name__)
TOKEN_DELIMITER = '.'
CA_STEP_CONFIG_DIR = Path('step_config')
CA_PKI_DIR = Path('cert')
CA_PASSWORD_FILE = Path('pass_file')
CA_CONFIG_JSON = Path('config/ca.json')
def get_system_and_architecture():
    """Return (system, architecture) of this machine, both lowercase.

    Machine names are normalised to the naming scheme used by the
    smallstep release artifacts (e.g. ``x86_64`` -> ``amd64``).
    """
    uname = platform.uname()
    system = uname.system.lower()

    aliases = {
        'x86_64': 'amd64',
        'armv6l': 'armv6',
        'armv7l': 'armv7',
        'aarch64': 'arm64',
    }
    machine = uname.machine.lower()
    # Fall back to the raw machine name when no alias applies.
    architecture = aliases.get(machine, machine)
    return system, architecture
def download_step_bin(url, grep_name, architecture, prefix='.', confirmation=True):
    """
    Download step binaries from github and unpack them under ``<prefix>/step``.

    Args:
        url: address of the github "latest release" API endpoint
        grep_name: name to grep over github assets
        architecture: architecture type to grep
        prefix: folder path to download into
        confirmation: request user confirmation or not
    """
    if confirmation:
        confirm('CA binaries from github will be downloaded now', default=True, abort=True)

    result = requests.get(url)
    if result.status_code != 200:
        logger.warning('Can\'t download binaries from github. Please try lately.')
        return
    # Keep only gzip assets whose name matches both the binary name and
    # the architecture.
    assets = result.json().get('assets', [])
    archive_urls = [
        a['browser_download_url']
        for a in assets
        if (grep_name in a['name'] and architecture in a['name']
            and 'application/gzip' in a['content_type'])
    ]
    if len(archive_urls) == 0:
        raise Exception('Applicable CA binaries from github were not found '
                        f'(name: {grep_name}, architecture: {architecture})')
    archive_url = archive_urls[-1]
    # NOTE(review): the download URL is deliberately switched from https to
    # http here -- confirm this is intended, it weakens transport security.
    archive_url = archive_url.replace('https', 'http')
    name = archive_url.split('/')[-1]
    logger.info(f'Downloading {name}')
    urllib.request.urlretrieve(archive_url, f'{prefix}/{name}')
    shutil.unpack_archive(f'{prefix}/{name}', f'{prefix}/step')
def get_token(name, ca_url, ca_path='.'):
    """
    Create authentication token.

    Args:
        name: common name for following certificate
            (aggregator fqdn or collaborator name)
        ca_url: full url of CA server
        ca_path: path to ca folder

    Returns:
        A ``<base64 token>.<base64 root CA cert>`` string, or None when the
        step CLI call fails.
    """
    ca_path = Path(ca_path)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    pki_dir = ca_path / CA_PKI_DIR
    step_path, _ = get_ca_bin_paths(ca_path)
    if not step_path:
        raise Exception('Step-CA is not installed!\nRun `fx pki install` first')

    priv_json = step_config_dir / 'secrets' / 'priv.json'
    pass_file = pki_dir / CA_PASSWORD_FILE
    root_crt = step_config_dir / 'certs' / 'root_ca.crt'
    try:
        # Ask the step CLI to mint a one-time token for `name`.
        token = subprocess.check_output(
            f'{step_path} ca token {name} '
            f'--key {priv_json} --root {root_crt} '
            f'--password-file {pass_file} 'f'--ca-url {ca_url}', shell=True)
    except subprocess.CalledProcessError as exc:
        logger.error(f'Error code {exc.returncode}: {exc.output}')
        return

    token = token.strip()
    token_b64 = base64.b64encode(token)

    with open(root_crt, mode='rb') as file:
        root_certificate_b = file.read()
    root_ca_b64 = base64.b64encode(root_certificate_b)

    # Base64 text never contains '.', so TOKEN_DELIMITER splits cleanly
    # back into the two parts on the receiving side (see certify()).
    return TOKEN_DELIMITER.join([
        token_b64.decode('utf-8'),
        root_ca_b64.decode('utf-8'),
    ])
def get_ca_bin_paths(ca_path):
    """Return the paths of the ``step`` and ``step-ca`` binaries.

    Looks inside ``<ca_path>/step`` for the unpacked release directories
    and returns ``(step, step_ca)``; either entry is None when no matching
    directory exists.
    """
    base = Path(ca_path) / 'step'
    step_bin = None
    step_ca_bin = None
    if base.exists():
        for entry in os.listdir(base):
            if 'step_' in entry:
                step_bin = base / entry / 'bin' / 'step'
            if 'step-ca' in entry:
                step_ca_bin = base / entry / 'bin' / 'step-ca'
    return step_bin, step_ca_bin
def certify(name, cert_path: Path, token_with_cert, ca_path: Path):
    """Create an envoy workspace.

    Decodes the ``<b64 token>.<b64 root cert>`` string produced by
    get_token(), writes the root CA certificate into ``cert_path`` and asks
    the CA (through the step CLI) to issue a certificate/key pair for
    ``name``.
    """
    os.makedirs(cert_path, exist_ok=True)

    # Base64 text never contains '.', so splitting on TOKEN_DELIMITER
    # yields exactly the two encoded parts.
    token, root_certificate = token_with_cert.split(TOKEN_DELIMITER)
    token = base64.b64decode(token).decode('utf-8')
    root_certificate = base64.b64decode(root_certificate)

    step_path, _ = get_ca_bin_paths(ca_path)
    if not step_path:
        # step CLI missing: try fetching it from github once, then re-check.
        url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
        system, arch = get_system_and_architecture()
        download_step_bin(url, f'step_{system}', arch, prefix=ca_path)
        step_path, _ = get_ca_bin_paths(ca_path)
        if not step_path:
            raise Exception('Step-CA is not installed!\nRun `fx pki install` first')
    with open(f'{cert_path}/root_ca.crt', mode='wb') as file:
        file.write(root_certificate)
    call(f'{step_path} ca certificate {name} {cert_path}/{name}.crt '
         f'{cert_path}/{name}.key --kty EC --curve P-384 -f --token {token}', shell=True)
def remove_ca(ca_path):
    """Kill the step-ca process and remove the ca directory (best effort)."""
    _check_kill_process('step-ca')
    shutil.rmtree(ca_path, ignore_errors=True)
def install(ca_path, ca_url, password):
    """
    Create certificate authority for federation.

    Args:
        ca_path: path to ca directory
        ca_url: url for ca server like: 'host:port'
        password: Simple password for encrypting root private keys
    """
    logger.info('Creating CA')

    ca_path = Path(ca_path)
    ca_path.mkdir(parents=True, exist_ok=True)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    # The step tools read their configuration location from STEPPATH.
    os.environ['STEPPATH'] = str(step_config_dir)
    step_path, step_ca_path = get_ca_bin_paths(ca_path)

    if not (step_path and step_ca_path and step_path.exists() and step_ca_path.exists()):
        # Binaries missing: fetch both step-ca (server) and step (CLI).
        confirm('CA binaries from github will be downloaded now', default=True, abort=True)
        system, arch = get_system_and_architecture()
        url = 'http://api.github.com/repos/smallstep/certificates/releases/latest'
        download_step_bin(url, f'step-ca_{system}', arch, prefix=ca_path, confirmation=False)
        url = 'http://api.github.com/repos/smallstep/cli/releases/latest'
        download_step_bin(url, f'step_{system}', arch, prefix=ca_path, confirmation=False)
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR
    if (not step_config_dir.exists()
            or confirm('CA exists, do you want to recreate it?', default=True)):
        # NOTE(review): _configure is defined elsewhere in this module.
        _create_ca(ca_path, ca_url, password)
        _configure(step_config_dir)
def run_ca(step_ca, pass_file, ca_json):
    """Run the CA server in the foreground (blocks until it exits).

    Args:
        step_ca: path to the step-ca binary
        pass_file: path to the CA password file
        ca_json: path to the step-ca JSON configuration
    """
    if _check_kill_process('step-ca', confirmation=True):
        logger.info('Up CA server')
        call(f'{step_ca} --password-file {pass_file} {ca_json}', shell=True)
def _check_kill_process(pstring, confirmation=False):
    """Kill process by name.

    Returns True when no matching process remains (none was running, or all
    were killed); False when the user declined the confirmation prompt.
    """
    pids = []
    # Find matching processes; `grep -v grep` drops the grep process itself.
    proc = subprocess.Popen(f'ps ax | grep {pstring} | grep -v grep',
                            shell=True, stdout=subprocess.PIPE)
    text = proc.communicate()[0].decode('utf-8')

    for line in text.splitlines():
        fields = line.split()
        # The first column of `ps ax` output is the PID.
        pids.append(fields[0])

    if len(pids):
        if confirmation and not confirm('CA server is already running. Stop him?', default=True):
            return False
        for pid in pids:
            os.kill(int(pid), signal.SIGKILL)
        # Give the killed processes a moment to disappear.
        time.sleep(2)
    return True
def _create_ca(ca_path: Path, ca_url: str, password: str):
    """Create a ca workspace.

    Writes the CA password file, runs ``step ca init`` and replaces the
    default provisioner with a JWK provisioner whose key pair is stored in
    the step config directory.

    Args:
        ca_path: path to the ca directory
        ca_url: url for the ca server like: 'host:port'
        password: password protecting the root private keys
    """
    # The redundant function-local `import os` was removed: os is already
    # imported at module level.
    pki_dir = ca_path / CA_PKI_DIR
    step_config_dir = ca_path / CA_STEP_CONFIG_DIR

    pki_dir.mkdir(parents=True, exist_ok=True)
    step_config_dir.mkdir(parents=True, exist_ok=True)

    with open(f'{pki_dir}/pass_file', 'w') as f:
        f.write(password)
    # The password file must be readable by the owner only.
    os.chmod(f'{pki_dir}/pass_file', 0o600)
    step_path, step_ca_path = get_ca_bin_paths(ca_path)
    assert (step_path and step_ca_path and step_path.exists() and step_ca_path.exists())

    logger.info('Create CA Config')
    os.environ['STEPPATH'] = str(step_config_dir)
    shutil.rmtree(step_config_dir, ignore_errors=True)
    name = ca_url.split(':')[0]
    # NOTE(review): '--name name' passes the literal string 'name' as the CA
    # name; probably intended to be f'--name {name}' -- confirm before changing.
    call(f'{step_path} ca init --name name --dns {name} '
         f'--address {ca_url} --provisioner prov '
         f'--password-file {pki_dir}/pass_file', shell=True)

    call(f'{step_path} ca provisioner remove prov --all', shell=True)
    call(f'{step_path} crypto jwk create {step_config_dir}/certs/pub.json '
         f'{step_config_dir}/secrets/priv.json --password-file={pki_dir}/pass_file', shell=True)
    call(
        f'{step_path} ca provisioner add provisioner {step_config_dir}/certs/pub.json',
        shell=True
    )
| 34.793478 | 97 | 0.656357 |
b7f17afa5fddb406481a5085256bccee3d1bcc8c | 574 | py | Python | bin/optimization/cosmo_optimizer_hod_only.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | null | null | null | bin/optimization/cosmo_optimizer_hod_only.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | 16 | 2016-11-04T22:24:32.000Z | 2018-05-01T22:53:39.000Z | bin/optimization/cosmo_optimizer_hod_only.py | mclaughlin6464/pearce | 746f2bf4bf45e904d66996e003043661a01423ba | [
"MIT"
] | 3 | 2016-10-04T08:07:52.000Z | 2019-05-03T23:50:01.000Z | from pearce.emulator import OriginalRecipe, ExtraCrispy
import numpy as np
# NOTE: this is a Python 2 script (bare `print` statements below).
# HDF5 file holding the emulator training data.
training_file = '/home/users/swmclau2/scratch/PearceRedMagicWpCosmo.hdf5'
em_method = 'gp'  # emulation method passed to ExtraCrispy
split_method = 'random'
# Scale factor a = 1 corresponds to redshift z = 0.
a = 1.0
z = 1.0/a - 1.0
# Parameters held fixed during emulation; 'r' left commented out.
fixed_params = {'z':z, 'cosmo': 1}#, 'r':0.18477483}
n_leaves, n_overlap = 5, 2
emu = ExtraCrispy(training_file,n_leaves, n_overlap, split_method, method = em_method, fixed_params=fixed_params,\
                 custom_mean_function = None)
# Optimize the emulator's metric/hyper-parameters.
results = emu.train_metric()
print results
print
# results.x is exponentiated here — presumably the optimizer works in log
# space; confirm against the emulator implementation.
print dict(zip(emu.get_param_names(), np.exp(results.x)))
| 23.916667 | 115 | 0.721254 |
b7f255f31605c7a9c29e736bc41dc0df25f503be | 294 | py | Python | tests/test_xmllint_map_html.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | null | null | null | tests/test_xmllint_map_html.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | 16 | 2020-09-11T11:07:09.000Z | 2020-12-06T16:42:18.000Z | tests/test_xmllint_map_html.py | sthagen/python-xmllint_map_html | 23363cfe1c126bc72efddf8fea084283375e2204 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import json
import pytest # type: ignore
import xmllint_map_html.xmllint_map_html as xmh
| 22.615385 | 60 | 0.714286 |
b7f62fa1d5695f548ee6f73816a2ab82ef2fbcfd | 1,318 | py | Python | apps/transmissions/views/transmissions.py | felipebarraza6/amamaule | 1da7cd542a7e610bc8fa230684770732a41520c9 | [
"MIT"
] | null | null | null | apps/transmissions/views/transmissions.py | felipebarraza6/amamaule | 1da7cd542a7e610bc8fa230684770732a41520c9 | [
"MIT"
] | null | null | null | apps/transmissions/views/transmissions.py | felipebarraza6/amamaule | 1da7cd542a7e610bc8fa230684770732a41520c9 | [
"MIT"
] | null | null | null | from rest_framework import mixins, viewsets, status
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from apps.transmissions.models import Transmission
from apps.transmissions.serializers import TransmissionModelSerializer, CommentModelserializer
from django_filters import rest_framework as filters
| 30.651163 | 94 | 0.636571 |
b7f67bcee29d8224470eff2f3efe74022a5ab08f | 4,751 | py | Python | amstramdam/events/game.py | felix-martel/multigeo | 2a1af9abae1fcef399744f6d88c4b1c25e8a25ab | [
"CC-BY-4.0",
"CC0-1.0"
] | 3 | 2020-11-28T15:00:56.000Z | 2021-04-06T14:10:47.000Z | amstramdam/events/game.py | felix-martel/amstramdam | 7142c34bda5aecfb5f7059a576a0ea7015a1edbc | [
"CC0-1.0",
"CC-BY-4.0"
] | 9 | 2021-04-11T17:28:57.000Z | 2022-02-19T13:53:35.000Z | amstramdam/events/game.py | felix-martel/multigeo | 2a1af9abae1fcef399744f6d88c4b1c25e8a25ab | [
"CC-BY-4.0",
"CC0-1.0"
] | 2 | 2020-11-17T09:34:50.000Z | 2020-11-28T14:57:58.000Z | from amstramdam import app, socketio, timers, manager
from flask import session
from flask_socketio import emit
from .types import GameEndNotification, GameEndPayload
from .utils import safe_cancel, wait_and_run
from ..game.types import GameName, Coordinates
| 30.261146 | 88 | 0.603662 |
b7f6d5055a8a870cf0186a412e583a2dc0833fd5 | 1,515 | py | Python | src/glod/unittests/in_out/test_statement_csv.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | null | null | null | src/glod/unittests/in_out/test_statement_csv.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | 1 | 2021-03-10T16:48:34.000Z | 2021-03-10T16:48:34.000Z | src/glod/unittests/in_out/test_statement_csv.py | gordon-elliott/glod | a381e21455d05d9c005942a3dee4ac67e10f366a | [
"MIT"
] | null | null | null |
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
from datetime import date
from decimal import Decimal
from io import StringIO
from unittest import TestCase
from glod.model.statement_item import StatementItem
from glod.model.account import Account
from glod.in_out.statement_item import statement_item_csv
| 25.677966 | 75 | 0.570957 |
b7f7145927c059a2c43b18ff8ea2eb1911103a21 | 1,072 | py | Python | ExifExtractor.py | MalwareJunkie/PythonScripts | ad827a8aafaae4a50970c9df11b674f4472eb371 | [
"MIT"
] | null | null | null | ExifExtractor.py | MalwareJunkie/PythonScripts | ad827a8aafaae4a50970c9df11b674f4472eb371 | [
"MIT"
] | null | null | null | ExifExtractor.py | MalwareJunkie/PythonScripts | ad827a8aafaae4a50970c9df11b674f4472eb371 | [
"MIT"
] | null | null | null | # Tested with Python 3.6
# Install Pillow: pip install pillow
""" This script extracts exif data from JPEG images """
from PIL import Image
from PIL.ExifTags import TAGS
import sys
main()
| 23.304348 | 62 | 0.527052 |
b7f7a2d524260e395bf0b274a89d51e8f9652827 | 240 | py | Python | nbgrader/nbgraderformat/__init__.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | 2 | 2021-09-11T20:32:18.000Z | 2021-09-11T20:32:37.000Z | nbgrader/nbgraderformat/__init__.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | null | null | null | nbgrader/nbgraderformat/__init__.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | 1 | 2019-09-13T07:46:09.000Z | 2019-09-13T07:46:09.000Z | SCHEMA_VERSION = 2
from .common import ValidationError, SchemaMismatchError
from .v2 import MetadataValidatorV2 as MetadataValidator
from .v2 import read_v2 as read, write_v2 as write
from .v2 import reads_v2 as reads, writes_v2 as writes
| 34.285714 | 56 | 0.829167 |
b7f7e17dac70dc7137a4fbc2c1596760a4b65113 | 9,537 | py | Python | testFiles/test_script.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | null | null | null | testFiles/test_script.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | null | null | null | testFiles/test_script.py | Janga-Lab/Penguin-1 | f6162be3549c470416da0fab590ae7d04c74bfa5 | [
"MIT"
] | null | null | null | import h5py
from ont_fast5_api.conversion_tools import multi_to_single_fast5
from ont_fast5_api import fast5_interface
import SequenceGenerator.align as align
import SignalExtractor.Nanopolish as events
from testFiles.test_commands import *
import os, sys
import subprocess
#todo get basecall data
#test to check if required files are created
#create event info file for machine learning models
| 37.695652 | 165 | 0.567998 |
b7f84a7d5201859ed1a739cf1602952494964553 | 7,702 | py | Python | channels/italiaserie.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | channels/italiaserie.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | channels/italiaserie.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand-pureita.- XBMC Plugin
# Canale italiaserie
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import config
from core import servertools
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "italiaserie"
host = "https://italiaserie.org"
headers = [['Referer', host]]
# ==================================================================================================================================================
# ==================================================================================================================================================
# ==================================================================================================================================================
# ==================================================================================================================================================
# ==================================================================================================================================================
| 43.514124 | 149 | 0.531291 |
b7f8e6d0c8a700576343e9ec9966950fe6696251 | 629 | py | Python | setup.py | jeffleary00/greenery | cb5b5d037b6fd297463633d2d3315c722851161f | [
"MIT"
] | null | null | null | setup.py | jeffleary00/greenery | cb5b5d037b6fd297463633d2d3315c722851161f | [
"MIT"
] | null | null | null | setup.py | jeffleary00/greenery | cb5b5d037b6fd297463633d2d3315c722851161f | [
"MIT"
] | 1 | 2018-02-25T17:29:37.000Z | 2018-02-25T17:29:37.000Z | from setuptools import setup
setup(
    name='potnanny-api',
    version='0.2.6',
    packages=['potnanny_api'],
    # ship non-Python files declared in MANIFEST.in with the package
    include_package_data=True,
    description='Part of the Potnanny greenhouse controller application. Contains Flask REST API and basic web interface.',
    author='Jeff Leary',
    author_email='potnanny@gmail.com',
    url='https://github.com/jeffleary00/potnanny-api',
    # Runtime dependencies; note potnanny-core is pinned to an exact version.
    install_requires=[
        'requests',
        'passlib',
        'sqlalchemy',
        'marshmallow',
        'flask',
        'flask-restful',
        'flask-jwt-extended',
        'flask-wtf',
        'potnanny-core==0.2.9',
    ],
)
| 26.208333 | 123 | 0.616852 |
b7f8ec16e2bfb80be5a624728d6c0040fc0bbacb | 16,352 | py | Python | cpp-linux/Release/envcpp.py | thu-media/Comyco | 38cc0266b1c0a9f20e48a173d0157452cb411b85 | [
"BSD-2-Clause"
] | 40 | 2019-08-09T07:33:41.000Z | 2021-11-26T06:58:44.000Z | cpp-linux/Release/envcpp.py | ragnarkor/Comyco | 38cc0266b1c0a9f20e48a173d0157452cb411b85 | [
"BSD-2-Clause"
] | 9 | 2019-10-09T03:10:46.000Z | 2021-12-26T15:31:15.000Z | cpp-linux/Release/envcpp.py | ragnarkor/Comyco | 38cc0266b1c0a9f20e48a173d0157452cb411b85 | [
"BSD-2-Clause"
] | 12 | 2019-11-06T08:31:19.000Z | 2021-11-12T09:56:37.000Z | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
if __package__ or '.' in __name__:
from . import _envcpp
else:
import _envcpp
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
return wrapper
# Register SwigPyIterator in _envcpp:
_envcpp.SwigPyIterator_swigregister(SwigPyIterator)
# Register vectori in _envcpp:
_envcpp.vectori_swigregister(vectori)
# Register vectord in _envcpp:
_envcpp.vectord_swigregister(vectord)
# Register vectors in _envcpp:
_envcpp.vectors_swigregister(vectors)
# Register Environment in _envcpp:
_envcpp.Environment_swigregister(Environment)
| 31.446154 | 145 | 0.707131 |
b7f8f59fb0fb637edfdf3e834168a1ea050cd659 | 3,912 | py | Python | eda_rf.py | lel23/Student-Performance-Prediction | 93f850d299f6e6ad88a90e606f494fcd931e56b6 | [
"MIT"
] | 1 | 2021-11-27T01:55:44.000Z | 2021-11-27T01:55:44.000Z | eda_rf.py | lel23/Student-Performance-Prediction | 93f850d299f6e6ad88a90e606f494fcd931e56b6 | [
"MIT"
] | null | null | null | eda_rf.py | lel23/Student-Performance-Prediction | 93f850d299f6e6ad88a90e606f494fcd931e56b6 | [
"MIT"
] | 1 | 2021-12-13T15:46:43.000Z | 2021-12-13T15:46:43.000Z | """
Final Project
EDA
"""
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.utils import resample
from mlxtend.plotting import heatmap
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import SelectFromModel
import sys
from sklearn.model_selection import train_test_split
from collections import Counter
# Load the (pre-edited) UCI student performance dataset.
df = pd.read_csv('student-mat-edited.csv')

# Binary-encode the two-valued categorical columns.
df['school'] = df['school'].replace(['GP', 'MS'], [1, 0])
df['sex'] = df['sex'].replace(['M', 'F'], [1, 0])
df['address'] = df['address'].replace(['U', 'R'], [1, 0])
df['famsize'] = df['famsize'].replace(['GT3', 'LE3'], [1, 0])
df['Pstatus'] = df['Pstatus'].replace(['T', 'A'], [1, 0])
df = df.replace(to_replace={'yes':1, 'no':0})
# One-hot encode the multi-valued categorical columns.
df = pd.get_dummies(df, prefix= ['Mjob', 'Fjob', 'reason', 'guardian'])

#code from: https://stackoverflow.com/questions/46168450/replace-a-specific-range-of-values-in-a-pandas-dataframe
#convert the scores to integers representing the letter grade range specified in the paper. higher the number, the higher the grade
df['scores'] = df[['G1', 'G2', 'G3']].mean(axis=1)
df['scores'] = np.where(df['scores'].between(0, 10), 0, df['scores'])
df['scores'] = np.where(df['scores'].between(10, 12), 1, df['scores'])
df['scores'] = np.where(df['scores'].between(12, 14), 2, df['scores'])
df['scores'] = np.where(df['scores'].between(14, 16), 3, df['scores'])
df['scores'] = np.where(df['scores'].between(16, 21), 4, df['scores'])
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement and behaves identically here.
df['scores'] = df['scores'].astype(int)
# Drop the raw grade columns (and row with label 1 — presumably a bad
# record; confirm against the source data).
df = df.drop(index=1, columns=['G1', 'G2', 'G3'])

#separate into features and target
X = df[[i for i in list(df.columns) if i != 'scores']]
y = df['scores']

# fixing class imbalance
#https://machinelearningmastery.com/multi-class-imbalanced-classification/
oversample = SMOTE(random_state=0)
X, y = oversample.fit_resample(X, y)

# splitting training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)

# min-max scaling
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
X_test_norm = mms.transform(X_test)

# standardizing the data
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)

# Random Forest Feature Selection: rank features by impurity importance.
feat_labels = X.columns
forest = RandomForestClassifier(n_estimators=500, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))

plt.title('Feature Importance')
plt.bar(range(X_train.shape[1]), importances[indices], align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.savefig("rf_selection.png")
plt.show()

# Keep only features whose importance exceeds the threshold.
sfm = SelectFromModel(forest, threshold=0.04, prefit=True)
X_selected = sfm.transform(X_train)
print('Number of features that meet this threshold', 'criterion:', X_selected.shape[1])

# Print the features that met the threshold criterion for feature selection.
cols = []
for f in range(X_selected.shape[1]):
    cols.append(feat_labels[indices[f]])
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))

# Correlation heatmap over the selected features plus the target.
cols.append("scores")
cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols, figsize=(10, 8))
plt.savefig("corr_matrix.png")
plt.show()
| 35.563636 | 238 | 0.707311 |
b7f97933fc0d2a4db780092adb873088bd108cdc | 3,771 | py | Python | feastruct/fea/utils.py | geosharma/feastruct | 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | [
"MIT"
] | 37 | 2018-11-08T12:51:53.000Z | 2022-02-01T19:40:48.000Z | feastruct/fea/utils.py | geosharma/feastruct | 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | [
"MIT"
] | 2 | 2018-11-01T12:39:24.000Z | 2022-01-23T01:26:47.000Z | feastruct/fea/utils.py | geosharma/feastruct | 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | [
"MIT"
] | 12 | 2019-04-09T04:14:02.000Z | 2022-01-08T14:04:32.000Z | import numpy as np
def gauss_points(el_type, n):
    """Returns the Gaussian weights and locations for *n* point Gaussian integration of a finite
    element. Refer to xxx for a list of the element types.

    :param string el_type: String describing the element type
    :param int n: Number of Gauss points

    :returns: The integration weights *(n x 1)* and an *(n x i)* matrix consisting of the values of
        the *i* shape functions for *n* Gauss points
    :rtype: tuple(list[float], :class:`numpy.ndarray`)

    :raises ValueError: If the element type or point count is unsupported
        (the original silently hit an UnboundLocalError instead).
    """
    if el_type == 'Tri6':
        # one point gaussian integration
        if n == 1:
            weights = [1]
            gps = np.array([[1.0 / 3, 1.0 / 3, 1.0 / 3]])
        # three point gaussian integration
        elif n == 3:
            weights = [1.0 / 3, 1.0 / 3, 1.0 / 3]
            gps = np.array([
                [2.0 / 3, 1.0 / 6, 1.0 / 6],
                [1.0 / 6, 2.0 / 3, 1.0 / 6],
                [1.0 / 6, 1.0 / 6, 2.0 / 3]
            ])
        # six point gaussian integration
        elif n == 6:
            g1 = 1.0 / 18 * (8 - np.sqrt(10) + np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
            g2 = 1.0 / 18 * (8 - np.sqrt(10) - np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
            w1 = (620 + np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720
            w2 = (620 - np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720

            weights = [w2, w2, w2, w1, w1, w1]
            gps = np.array([
                [1 - 2 * g2, g2, g2],
                [g2, 1 - 2 * g2, g2],
                [g2, g2, 1 - 2 * g2],
                [g1, g1, 1 - 2 * g1],
                [1 - 2 * g1, g1, g1],
                [g1, 1 - 2 * g1, g1]
            ])
        else:
            raise ValueError(f"Unsupported number of Gauss points for Tri6: {n}")
        return (weights, gps)
    raise ValueError(f"Unsupported element type: {el_type}")
def shape_function(el_type, coords, gp):
    """Evaluate shape functions, their cartesian derivatives and the Jacobian
    determinant for a finite element at one Gauss point.

    :param string el_type: String describing the element type
    :param coords: Global coordinates of the element nodes
    :type coords: :class:`numpy.ndarray`
    :param gp: Isoparametric location of the Gauss point
    :type gp: :class:`numpy.ndarray`

    :returns: Shape function values *N(i)*, their derivatives *B(i,j)* with
        respect to the j-th global direction, and the Jacobian determinant *j*
    :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`, float)
    """
    if el_type == 'Tri6':
        # unpack the three area (isoparametric) coordinates of the point
        eta, xi, zeta = gp[0], gp[1], gp[2]

        # quadratic triangle shape functions evaluated at the point
        N = np.array([
            eta * (2 * eta - 1),
            xi * (2 * xi - 1),
            zeta * (2 * zeta - 1),
            4 * eta * xi,
            4 * xi * zeta,
            4 * eta * zeta,
        ])

        # derivatives of the shape functions w.r.t. the isoparametric coords
        dN_iso = np.array([
            [4 * eta - 1, 0, 0, 4 * xi, 0, 4 * zeta],
            [0, 4 * xi - 1, 0, 4 * eta, 4 * zeta, 0],
            [0, 0, 4 * zeta - 1, 0, 4 * xi, 4 * eta],
        ])

        # assemble the Jacobian: a row of ones over the mapped derivatives
        jacobian = np.vstack((
            np.ones((1, 3)),
            coords @ dN_iso.T,
        ))

        # determinant of the mapping (factor 0.5 from the area-coordinate map)
        j = 0.5 * np.linalg.det(jacobian)

        # projection onto the two cartesian directions
        P = np.linalg.inv(jacobian) @ np.array([[0, 0], [1, 0], [0, 1]])

        # shape function derivatives w.r.t. the cartesian coordinates
        B = (dN_iso.T @ P).T

    return (N, B, j)
| 35.914286 | 99 | 0.515248 |
b7f9ec5f1030d590ec1e3d249bcbd427149dded0 | 1,447 | py | Python | webscrap.py | ircykk/webscrap | b43d2a1075dbe6c6644391c3b79785375b207559 | [
"MIT"
] | null | null | null | webscrap.py | ircykk/webscrap | b43d2a1075dbe6c6644391c3b79785375b207559 | [
"MIT"
] | 2 | 2021-03-31T19:16:56.000Z | 2021-12-13T20:19:00.000Z | webscrap.py | ircykk/webscrap | b43d2a1075dbe6c6644391c3b79785375b207559 | [
"MIT"
] | null | null | null | import requests
import time
import argparse
import sys
import os
from bs4 import BeautifulSoup
from urllib.parse import urlparse
# Instantiate the command-line parser
parser = argparse.ArgumentParser(description='URL scrapper')
parser.add_argument('--url', help='Root URL page')
parser.add_argument('--limit', type=int, default=1000, help='Limit urls to scrape')
parser.add_argument('--output', default='output.csv', help='Path to output file')
args = parser.parse_args()

urls = []          # every URL discovered so far (fetch_urls appends here — confirm)
urls_visited = []  # URLs whose pages have already been fetched

if is_url(args.url) != True:
    print('Invalid root URL [--url]')
    sys.exit(1)

# Seed the crawl with the root page.
fetch_urls(args.url)
urls_visited.append(args.url)

for url in urls:
    if len(urls) > args.limit:
        break
    print_progress(len(urls), args.limit)
    if url not in urls_visited:
        urls_visited.append(url)
        fetch_urls(url)

# Save output. Opening with 'w' truncates any previous file; the original
# os.remove() + append crashed with FileNotFoundError on the first run when
# no old output file existed.
with open(args.output, 'w') as output:
    for url in urls:
        output.write(url + '\n')
b7fa464b97651a98f542160b4536fc5d2f36512c | 3,035 | py | Python | lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift | effce9bea894f990703cc047157e3f30d53d9365 | [
"MIT"
] | 2 | 2022-01-19T02:39:43.000Z | 2022-02-07T01:58:17.000Z | lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift | effce9bea894f990703cc047157e3f30d53d9365 | [
"MIT"
] | null | null | null | lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift | effce9bea894f990703cc047157e3f30d53d9365 | [
"MIT"
] | null | null | null | from shift_oelint_parser.cls_item import Variable
from shift_oelint_adv.cls_rule import Rule
from shift_oelint_parser.helper_files import get_scr_components
from shift_oelint_parser.parser import INLINE_BLOCK
| 41.013514 | 93 | 0.45832 |
b7fab4376dcf24e3dbd079130cdac6cf32133a5b | 1,084 | py | Python | verba/apps/auth/backends.py | nhsuk/verba | c0354ae2012a046e7f7cc7482e293737de9d28bc | [
"MIT"
] | null | null | null | verba/apps/auth/backends.py | nhsuk/verba | c0354ae2012a046e7f7cc7482e293737de9d28bc | [
"MIT"
] | 2 | 2016-08-11T09:30:41.000Z | 2016-08-11T15:04:08.000Z | verba/apps/auth/backends.py | nhsuk/verba | c0354ae2012a046e7f7cc7482e293737de9d28bc | [
"MIT"
] | 1 | 2021-04-11T07:41:27.000Z | 2021-04-11T07:41:27.000Z | from github import User as GitHubUser
from github.auth import get_token
from github.exceptions import AuthValidationError
from . import get_user_model
| 28.526316 | 77 | 0.612546 |
b7fb6f9d3e04e66224e9cdb811584decc5862d2f | 798 | py | Python | examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 | becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | [
"MIT"
] | null | null | null | examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 | becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | [
"MIT"
] | null | null | null | examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 | becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | [
"MIT"
] | null | null | null | import time
import board
import busio
import digitalio
from adafruit_apds9960.apds9960 import APDS9960
from adafruit_apds9960 import colorutility
# Set up the I2C bus and create the sensor driver.
i2c = busio.I2C(board.SCL, board.SDA)
# Interrupt pin of the APDS9960 (created here but not read in this loop).
int_pin = digitalio.DigitalInOut(board.A2)
apds = APDS9960(i2c)
# Enable the color/light sensing engine.
apds.enable_color = True
while True:
    # Poll until the sensor reports a completed color measurement.
    while not apds.color_data_ready:
        time.sleep(0.005)
    # Read the raw red/green/blue/clear channel counts and print them.
    r, g, b, c = apds.color_data
    print("red: ", r)
    print("green: ", g)
    print("blue: ", b)
    print("clear: ", c)
    # Derived quantities from the helper module.
    print("color temp {}".format(colorutility.calculate_color_temperature(r, g, b)))
    print("light lux {}".format(colorutility.calculate_lux(r, g, b)))
    time.sleep(0.5)
b7fbdc11c64c416322347545771908c98a2d730b | 158 | py | Python | abc/abc205/abc205b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc205/abc205b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc205/abc205b.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N, *A = map(int, open(0).read().split())
A.sort()
for i in range(N):
if i == A[i] - 1:
continue
print('No')
break
else:
print('Yes')
| 14.363636 | 40 | 0.487342 |
b7fc5371e78fe759e9cfc9ac2a197cc1a24c7ba9 | 1,114 | py | Python | CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | [
"BSD-3-Clause"
] | 1 | 2021-08-02T23:23:39.000Z | 2021-08-02T23:23:39.000Z | CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | [
"BSD-3-Clause"
] | null | null | null | CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC | ce26ba9a38cbd401cd405150eeed23b805007724 | [
"BSD-3-Clause"
] | 2 | 2021-08-02T23:23:40.000Z | 2022-02-26T12:39:30.000Z | from behave import *
from hamcrest import assert_that, is_not, greater_than
import numpy as np
import nibabel as nib
import rpy2.robjects as robjects
from rpy2.robjects.numpy2ri import numpy2ri
from rpy2.robjects.packages import importr
robjects.conversion.py2ri = numpy2ri
from os import path as op
import sys
curfile = op.abspath(__file__)
testpath = op.dirname(op.dirname(op.dirname(curfile)))
rpath = op.join(testpath, "R")
pypath = op.dirname(testpath)
sys.path.append(pypath)
from cwas import *
from utils import *
def custom_corrcoef(X, Y=None):
    """Correlate each column of *X* with each column of *Y*.

    Each column represents a variable, with the rows containing the
    observations.

    :param X: Observation matrix of shape (n_obs, n_x_vars).
    :param Y: Optional observation matrix of shape (n_obs, n_y_vars);
        defaults to *X*, yielding the auto-correlation matrix.
    :returns: Correlation matrix of shape (n_x_vars, n_y_vars).
    :raises ValueError: If X and Y do not have the same number of rows.
    """
    if Y is None:
        Y = X

    if X.shape[0] != Y.shape[0]:
        # ValueError is more precise than the original bare Exception and is
        # still caught by any caller handling Exception.
        raise ValueError("X and Y must have the same number of rows.")

    # astype(float) copies, so the caller's arrays are never mutated below.
    X = X.astype(float)
    Y = Y.astype(float)

    # Center each column on its mean.
    X -= X.mean(axis=0)[np.newaxis, ...]
    Y -= Y.mean(axis=0)

    # Column-wise sums of squares (unnormalized variances).
    xx = np.sum(X**2, axis=0)
    yy = np.sum(Y**2, axis=0)

    # Pearson correlation: covariance over the product of standard deviations.
    r = np.dot(X.T, Y) / np.sqrt(np.multiply.outer(xx, yy))

    return r
| 25.906977 | 87 | 0.684022 |
b7fcfd8dcf5ce827a8535f6ece099e74d61fb49d | 15,109 | py | Python | Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab | 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
from Analyzer.TransitionProperties import ProcessTransitionProperties
from tkinter import *
from tkinter import messagebox, ttk, filedialog
# from tkFileDialog import *
import uniaxanalysis.getproperties as getprops
from uniaxanalysis.plotdata import DataPlotter
from uniaxanalysis.saveproperties import write_props_csv
from exvivoframes import *
from matplotlib import pyplot as plt
import time
'''
The GUI for uniax data analysis of soft tissue.
inputs:
- Dimensions file - a file with format: sample name, width, thickness and initial distance
- directory - Folder with raw uniax data files in csv format with format: time, distance, force
To Do:
- polymorphic method for handling input data (variable names to get) <done>
- control when line for manual control shows up <done>
- test rdp for finding linear region - done (check implementation)
- fix point picking on plot so that can work in desceding order of x value - <done>
- tick boxes for properties <done>
- config file
- scroll bar for large data sets <done>
Bugs:
- work out bug in the 2nd order gaussian - done
- work out bug in the display for automatic linear find
- destroy instance of toolbar on graph create
- destroy instance of plot everytime
'''
if __name__ == '__main__':
main()
| 38.347716 | 170 | 0.581243 |
b7fdfc063cfae7dcf94caa90899dd03c0a4da68d | 8,028 | py | Python | cats/cats.py | BrandtH22/CAT-admin-tool | f58f76e5b3af5484089652616c17c669c4adebb7 | [
"Apache-2.0"
] | 1 | 2022-03-22T21:59:15.000Z | 2022-03-22T21:59:15.000Z | cats/cats.py | BrandtH22/CAT-admin-tool | f58f76e5b3af5484089652616c17c669c4adebb7 | [
"Apache-2.0"
] | null | null | null | cats/cats.py | BrandtH22/CAT-admin-tool | f58f76e5b3af5484089652616c17c669c4adebb7 | [
"Apache-2.0"
] | null | null | null | import click
import aiohttp
import asyncio
import re
import json
from typing import Optional, Tuple, Iterable, Union, List
from blspy import G2Element, AugSchemeMPL
from chia.cmds.wallet_funcs import get_wallet
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from chia.util.ints import uint16
from chia.util.byte_types import hexstr_to_bytes
from chia.types.blockchain_format.program import Program
from clvm_tools.clvmc import compile_clvm_text
from clvm_tools.binutils import assemble
from chia.types.spend_bundle import SpendBundle
from chia.wallet.cc_wallet.cc_utils import (
construct_cc_puzzle,
CC_MOD,
SpendableCC,
unsigned_spend_bundle_for_spendable_ccs,
)
from chia.util.bech32m import decode_puzzle_hash
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
# The clvm loaders in this library automatically search for includable files in the directory './include'
def append_include(search_paths: Iterable[str]) -> List[str]:
    """Return *search_paths* as a new list with "./include" appended.

    A falsy *search_paths* (None or an empty collection) yields just
    ["./include"].
    """
    base = list(search_paths) if search_paths else []
    return base + ["./include"]
def parse_program(program: Union[str, Program], include: Iterable = []) -> Program:
    """Coerce *program* into a compiled ``Program``.

    Accepted forms, in detection order:
      * an already-built ``Program`` (returned unchanged),
      * raw CLVM source text (contains ``(``),
      * a hex byte string (no ``.``, so it cannot be a file path),
      * a path to a file holding Chialisp, CLVM source, or serialized CLVM.

    NOTE(review): the mutable default ``include=[]`` is never mutated here
    (``append_include`` copies it), but a tuple default would be safer.
    """
    if isinstance(program, Program):
        return program
    else:
        if "(" in program:  # If it's raw clvm
            prog = Program.to(assemble(program))
        elif "." not in program:  # If it's a byte string
            prog = Program.from_bytes(hexstr_to_bytes(program))
        else:  # If it's a file
            with open(program, "r") as file:
                filestring: str = file.read()
                if "(" in filestring:  # If it's not compiled
                    # TODO: This should probably be more robust
                    if re.compile(r"\(mod\s").search(filestring):  # If it's Chialisp
                        prog = Program.to(
                            compile_clvm_text(filestring, append_include(include))
                        )
                    else:  # If it's CLVM
                        prog = Program.to(assemble(filestring))
                else:  # If it's serialized CLVM
                    prog = Program.from_bytes(hexstr_to_bytes(filestring))
        return prog
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
if __name__ == "__main__":
main()
| 29.733333 | 114 | 0.646736 |
b7ff6526e37679ba17f2e315aceade4303222790 | 1,997 | py | Python | tagging/tag_net.py | zhuzhutingru123/Semantics-AssistedVideoCaptioning | 28c7b3fa57964f734f0fb38ecb89c9e8e21e5aaf | [
"MIT"
] | 55 | 2019-09-23T12:21:47.000Z | 2022-03-29T19:50:57.000Z | tagging/tag_net.py | zhuzhutingru123/Semantics-AssistedVideoCaptioning | 28c7b3fa57964f734f0fb38ecb89c9e8e21e5aaf | [
"MIT"
] | 13 | 2019-10-02T05:10:03.000Z | 2021-11-03T11:33:32.000Z | tagging/tag_net.py | WingsBrokenAngel/Semantics-AssistedVideoCaptioning | 409ca8b5be336d8957f3345825c8815a3070af19 | [
"MIT"
] | 15 | 2019-09-20T07:10:47.000Z | 2022-03-11T09:05:18.000Z | # -*- coding: utf-8 -*-
# Author: Haoran Chen
# Date: 2019-4-28
import tensorflow as tf
from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer
from tensorflow.nn import dropout
import numpy as np
# Model/training constants for the tagging network.
# n_z / n_y are layer sizes — presumably input feature and output tag
# dimensionalities; confirm against the model definition.
n_z = 3584
n_y = 300
# Dataset paths — left as None here; must be set before use.
MSVD_PATH = None
MSRVTT_PATH = None
MSVD_GT_PATH = None
MSRVTT_GT_PATH = None
max_epochs = 1000  # training epoch budget
lr = 0.0002  # learning rate
batch_size = 128
keep_prob = 1.0  # dropout keep probability (1.0 = dropout disabled)
# NOTE(review): this reassignment makes the earlier `batch_size = 128`
# dead code — the effective batch size is 64. Confirm which was intended.
batch_size = 64
| 33.847458 | 101 | 0.613921 |
b7ffe90a656352b24d635be78e2f3b9924c3cd33 | 1,625 | py | Python | example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 | 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | [
"Apache-2.0"
] | null | null | null | example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 | 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | [
"Apache-2.0"
] | null | null | null | example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 | 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from cloudmesh import mongo
from flask import request
from flask_pymongo import PyMongo
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from .file import upload
| 36.931818 | 87 | 0.647385 |
4d0003163267427736e0367162b90a4c31a4952a | 18,450 | py | Python | Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | 2 | 2022-01-20T20:20:04.000Z | 2022-02-21T12:33:37.000Z | Dark_Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | null | null | null | Dark_Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | 3 | 2022-01-19T16:25:37.000Z | 2022-03-22T13:25:00.000Z | """
Explore raw composites based on indices from predicted testing data and
showing all the different OHC levels for OBSERVATIONS
Author : Zachary M. Labe
Date : 21 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
import calc_Utilities as UT
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_dataFunctions as df
import calc_Stats as dSS
from netCDF4 import Dataset
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
### User-editable run settings for the observational composite analysis.
modelGCMs = ['CESM2le']
dataset_obs = 'ERA5'
allDataLabels = modelGCMs
monthlychoiceq = ['annual']
variables = ['T2M']
### Predictor fields composited below (SST plus three OHC depth levels).
vari_predict = ['SST','OHC100','OHC300','OHC700']
reg_name = 'SMILEGlobe'
level = 'surface'
###############################################################################
###############################################################################
randomalso = False
timeper = 'hiatus'
shuffletype = 'GAUSS'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
### window == 0 leaves the running-standard-deviation options disabled.
window = 0
if window == 0:
    rm_standard_dev = False
    ravel_modelens = False
    ravelmodeltime = False
else:
    rm_standard_dev = True
    ravelmodeltime = False
    ravel_modelens = True
yearsall = np.arange(1979+window,2099+1,1)
yearsobs = np.arange(1979+window,2020+1,1)
###############################################################################
###############################################################################
numOfEns = 40
lentime = len(yearsall)
###############################################################################
###############################################################################
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
###############################################################################
###############################################################################
### Remove ensemble mean
rm_ensemble_mean = True
###############################################################################
###############################################################################
### Accuracy for composites
### `accurate` selects which label/prediction combinations enter the
### composites built later in this script:
###   True     -> correct hiatus predictions only (pred==1 and actual==1)
###   False    -> every hiatus prediction (pred==1)
###   'WRONG'  -> false-positive hiatus predictions (pred==1, actual==0)
###   'HIATUS' -> all actual hiatus years (actual==1)
accurate = True
if accurate == True:
    typemodel = 'correcthiatus_obs'
elif accurate == False:
    typemodel = 'extrahiatus_obs'
elif accurate == 'WRONG':
    typemodel = 'wronghiatus_obs'
elif accurate == 'HIATUS':
    typemodel = 'allhiatus_obs'
###############################################################################
###############################################################################
### Call functions
trendlength = 10
AGWstart = 1990
years_newmodel = np.arange(AGWstart,yearsall[-1]-8,1)
years_newobs = np.arange(AGWstart,yearsobs[-1]-8,1)
vv = 0
mo = 0
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
### NOTE(review): hard-coded absolute user path — adjust per machine.
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Obs/'
saveData = monthlychoice + '_' + variq + '_' + reg_name + '_' + dataset_obs
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
### Function to read in predictor variables (SST/OHC)
###############################################################################
###############################################################################
### Loop through to read all the variables
### First pass: build the "onset" composite — for each predictor field,
### average the standardized anomaly maps over the selected hiatus years.
### Output grid is (predictor, lat, lon) = (4, 92, 144).
ohcHIATUS = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
    ### Function to read in predictor variables (SST/OHC)
    models_var = []
    for i in range(len(modelGCMs)):
        # OHC-prefixed fields come from the OHC dataset, everything else
        # (SST here) from ERA5.
        if vari_predict[vvv][:3] == 'OHC':
            obs_predict = 'OHC'
        else:
            obs_predict = 'ERA5'
        obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
        ### Save predictor
        models_var.append(obsq_var)
    models_var = np.asarray(models_var).squeeze()
    ### Remove ensemble mean (observations: remove the linear trend)
    if rm_ensemble_mean == True:
        models_var = dSS.remove_trend_obs(models_var,'surface')
        print('\n*Removed observational linear trend*')
    ### Standardize (z-score each grid point over the observational years)
    models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
    meanvar = np.nanmean(models_varravel,axis=0)
    stdvar = np.nanstd(models_varravel,axis=0)
    modelsstd_varravel = (models_varravel-meanvar)/stdvar
    models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
    ### Slice for number of years (keep years from AGWstart onward)
    yearsq_m = np.where((yearsobs >= AGWstart))[0]
    models_slice = models_var[yearsq_m,:,:]
    ### ANN hyperparameters — used in this script only to reconstruct the
    ### file name of the saved model's label files below.
    if rm_ensemble_mean == False:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [20,20]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.05
        actFun = 'relu'
        fractWeight = 0.5
    elif rm_ensemble_mean == True:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [30,30]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.5
        actFun = 'relu'
        fractWeight = 0.5
    else:
        print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
        sys.exit()
    ### Naming conventions for files
    ### NOTE(review): 'OHC100' is hard-coded here, so the ANN labels read
    ### below always come from the OHC100-trained model regardless of which
    ### predictor field is being composited — confirm this is intended.
    directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
    savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
    if(rm_ensemble_mean==True):
        savename = savename + '_EnsembleMeanRemoved'
    ### Directories to save files
    directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
    ###########################################################################
    ###########################################################################
    ###########################################################################
    ### Read in data for testing predictions and actual hiatuses
    actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
    predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
    ### Reshape arrays for [ensemble,year]
    act_re = actual_test
    pre_re = predict_test
    ### Slice ensembles for testing data
    ohcready = models_slice[:,:,:].squeeze()
    ### Pick all hiatuses (selection mode controlled by `accurate`)
    if accurate == True: ### correct predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 1):
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == False: ### picks all hiatus predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if pre_re[yr] == 1:
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == 'WRONG': ### picks hiatus but is wrong
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 0):
                ohc_allenscomp.append(ohcready[yr,:,:])
    elif accurate == 'HIATUS': ### accurate climate change
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (act_re[yr] == 1):
                ohc_allenscomp.append(ohcready[yr,:,:])
    else:
        print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
        sys.exit()
    ### Composite across all years to get hiatuses
    ohcHIATUS[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
###############################################################################
###############################################################################
### Loop through to read all the variables
### Second pass: same selection as above, but each selected year contributes
### the mean over the lagged window [yr+lag1, yr+lag2) instead of year yr —
### i.e. a (lag2-lag1)-year composite following the hiatus onset.
### NOTE(review): this duplicates the first loop almost verbatim; a shared
### helper parameterized by the lag window would remove the copy-paste.
lag1 = 3
lag2 = 7
lag = lag2-lag1
ohcHIATUSlag = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
    ### Function to read in predictor variables (SST/OHC)
    models_var = []
    for i in range(len(modelGCMs)):
        if vari_predict[vvv][:3] == 'OHC':
            obs_predict = 'OHC'
        else:
            obs_predict = 'ERA5'
        obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
        ### Save predictor
        models_var.append(obsq_var)
    models_var = np.asarray(models_var).squeeze()
    ### Remove ensemble mean (observations: remove the linear trend)
    if rm_ensemble_mean == True:
        models_var = dSS.remove_trend_obs(models_var,'surface')
        print('\n*Removed observational linear trend*')
    ### Standardize (z-score each grid point over the observational years)
    models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
    meanvar = np.nanmean(models_varravel,axis=0)
    stdvar = np.nanstd(models_varravel,axis=0)
    modelsstd_varravel = (models_varravel-meanvar)/stdvar
    models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
    ### Slice for number of years
    yearsq_m = np.where((yearsobs >= AGWstart))[0]
    models_slice = models_var[yearsq_m,:,:]
    ### ANN hyperparameters — used here only to reconstruct the label-file name.
    if rm_ensemble_mean == False:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [20,20]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.05
        actFun = 'relu'
        fractWeight = 0.5
    elif rm_ensemble_mean == True:
        variq = 'T2M'
        fac = 0.7
        random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
        random_network_seed = 87750
        hidden = [30,30]
        n_epochs = 500
        batch_size = 128
        lr_here = 0.001
        ridgePenalty = 0.5
        actFun = 'relu'
        fractWeight = 0.5
    else:
        print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
        sys.exit()
    ### Naming conventions for files
    ### NOTE(review): 'OHC100' hard-coded — labels always come from the
    ### OHC100-trained model regardless of the composited field.
    directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
    savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
    if(rm_ensemble_mean==True):
        savename = savename + '_EnsembleMeanRemoved'
    ### Directories to save files
    directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
    ###########################################################################
    ###########################################################################
    ###########################################################################
    ### Read in data for testing predictions and actual hiatuses
    actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
    predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
    ### Reshape arrays for [ensemble,year]
    act_re = actual_test
    pre_re = predict_test
    ### Slice ensembles for testing data
    ohcready = models_slice[:,:,:].squeeze()
    ### Pick all hiatuses — each hit appends the lagged-window mean map
    if accurate == True: ### correct predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 1):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == False: ### picks all hiatus predictions
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if pre_re[yr] == 1:
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == 'WRONG': ### picks hiatus but is wrong
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (pre_re[yr]) == 1 and (act_re[yr] == 0):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    elif accurate == 'HIATUS': ### accurate climate change
        ohc_allenscomp = []
        for yr in range(ohcready.shape[0]):
            if (act_re[yr] == 1):
                ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
    else:
        print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
        sys.exit()
    ### Composite across all years to get hiatuses
    ohcHIATUSlag[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
### Composite all for plotting: onset maps (rows a-d) then lagged maps (e-h)
ohc_allcomp = np.append(ohcHIATUS,ohcHIATUSlag,axis=0)
###############################################################################
###############################################################################
### Plot subplot of observations
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
### plotloc maps panel index -> subplot slot (onset in left column,
### lagged composites in right column).
plotloc = [1,3,5,7,2,4,6,8]
if rm_ensemble_mean == False:
    limit = np.arange(-1.5,1.51,0.02)
    barlim = np.round(np.arange(-1.5,1.6,0.5),2)
elif rm_ensemble_mean == True:
    limit = np.arange(-1.5,1.6,0.02)
    barlim = np.round(np.arange(-1.5,1.6,0.5),2)
cmap = cmocean.cm.balance
label = r'\textbf{[ HIATUS COMPOSITE ]}'
fig = plt.figure(figsize=(8,10))
###############################################################################
for ppp in range(ohc_allcomp.shape[0]):
    ax1 = plt.subplot(ohc_allcomp.shape[0]//2,2,plotloc[ppp])
    m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
    m.drawcoastlines(color='darkgrey',linewidth=0.27)
    ### Variable
    varn = ohc_allcomp[ppp]
    # Build the plotting mesh once; shift longitudes to [-180, 180].
    if ppp == 0:
        lons = np.where(lons >180,lons-360,lons)
        x, y = np.meshgrid(lons,lats)
    circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
                               linewidth=0.7)
    circle.set_clip_on(False)
    cs1 = m.contourf(x,y,varn,limit,extend='both',latlon=True)
    cs1.set_cmap(cmap)
    m.fillcontinents(color='dimgrey',lake_color='dimgrey')
    ax1.annotate(r'\textbf{[%s]}' % letters[ppp],xy=(0,0),xytext=(0.95,0.93),
                 textcoords='axes fraction',color='k',fontsize=10,
                 rotation=0,ha='center',va='center')
    if ppp < 4:
        ax1.annotate(r'\textbf{%s}' % vari_predict[ppp],xy=(0,0),xytext=(-0.08,0.5),
                     textcoords='axes fraction',color='dimgrey',fontsize=20,
                     rotation=90,ha='center',va='center')
    if ppp == 0:
        plt.title(r'\textbf{Onset}',fontsize=15,color='k')
    if ppp == 4:
        plt.title(r'\textbf{%s-Year Composite}' % lag,fontsize=15,color='k')
###############################################################################
### Shared horizontal colorbar beneath the panel grid.
cbar_ax1 = fig.add_axes([0.38,0.05,0.3,0.02])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
                     extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=6,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=4)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(bottom=0.08,wspace=0.01)
if rm_ensemble_mean == True:
    plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png' % (lag,accurate,accurate),dpi=300)
else:
    plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' % (lag,accurate,accurate),dpi=300)
4d0095e3df86b0354c6a7f3fe8432d1caf5ff121 | 3,807 | py | Python | osnexus_flocker_driver/osnexusdriver.py | OSNEXUS/flocker-driver | 22a6ecf57c6841359df82657659f8e945b206f1b | [
"Apache-2.0"
] | 2 | 2016-04-29T22:38:05.000Z | 2016-04-29T22:39:06.000Z | osnexus_flocker_driver/osnexusdriver.py | OSNEXUS/flocker-driver | 22a6ecf57c6841359df82657659f8e945b206f1b | [
"Apache-2.0"
] | null | null | null | osnexus_flocker_driver/osnexusdriver.py | OSNEXUS/flocker-driver | 22a6ecf57c6841359df82657659f8e945b206f1b | [
"Apache-2.0"
] | 2 | 2016-05-08T07:39:12.000Z | 2019-07-05T18:35:12.000Z | # Copyright 2016 OSNEXUS Corporation
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
from zope.interface import implementer
from flocker.node.agents.blockdevice import (
AlreadyAttachedVolume, IBlockDeviceAPI, IProfiledBlockDeviceAPI,
BlockDeviceVolume, UnknownVolume, UnattachedVolume
)
from osnexusutil import osnexusAPI
import logging
from eliot import Message, Logger
#_logger = Logger()
| 36.961165 | 133 | 0.727607 |
4d009e96e973b11eba741f0ee1dbc7d7ed84b7ed | 2,629 | py | Python | rescan-script.py | fivepiece/electrum-personal-server | dae6eb3954f3916e13aa88969a5b6ac65a488a13 | [
"MIT"
] | null | null | null | rescan-script.py | fivepiece/electrum-personal-server | dae6eb3954f3916e13aa88969a5b6ac65a488a13 | [
"MIT"
] | null | null | null | rescan-script.py | fivepiece/electrum-personal-server | dae6eb3954f3916e13aa88969a5b6ac65a488a13 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
from configparser import ConfigParser, NoSectionError, NoOptionError
from electrumpersonalserver.jsonrpc import JsonRpc, JsonRpcError
from datetime import datetime
import server
main()
| 36.013699 | 79 | 0.63218 |
4d01262d0ab1840560717880a8567c3e85b8f930 | 1,082 | py | Python | tests/application/register/test_views.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | 1 | 2021-10-06T13:48:36.000Z | 2021-10-06T13:48:36.000Z | tests/application/register/test_views.py | AlexKouzy/ethnicity-facts-and-figures-publisher | 18ab2495a8633f585e18e607c7f75daa564a053d | [
"MIT"
] | 116 | 2018-11-02T17:20:47.000Z | 2022-02-09T11:06:22.000Z | tests/application/register/test_views.py | racedisparityaudit/rd_cms | a12f0e3f5461cc41eed0077ed02e11efafc5dd76 | [
"MIT"
] | 2 | 2018-11-09T16:47:35.000Z | 2020-04-09T13:06:48.000Z | from bs4 import BeautifulSoup
from flask import url_for
from application.utils import generate_token
from application.auth.models import TypeOfUser
from tests.models import UserFactory
| 38.642857 | 115 | 0.756932 |
4d014fe4ec193e53774cf70e289d81ecdf7c7e43 | 1,205 | py | Python | setup.py | OriHoch/ckan-cloud-operator | 125c3eb10f843ac62fc85659e756bd1d9620eae7 | [
"MIT"
] | null | null | null | setup.py | OriHoch/ckan-cloud-operator | 125c3eb10f843ac62fc85659e756bd1d9620eae7 | [
"MIT"
] | null | null | null | setup.py | OriHoch/ckan-cloud-operator | 125c3eb10f843ac62fc85659e756bd1d9620eae7 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from os import path
from time import time
# Project root (directory containing this setup.py).
# NOTE(review): `here` appears unused below — confirm before removing.
here = path.abspath(path.dirname(__file__))

if path.exists("VERSION.txt"):
    # this file can be written by CI tools (e.g. Travis)
    with open("VERSION.txt") as version_file:
        version = version_file.read().strip().strip("v")
else:
    # Fallback for local/dev builds: the current Unix timestamp, so every
    # build gets a unique, monotonically increasing version string.
    version = str(time())

setup(
    name='ckan_cloud_operator',
    version=version,
    description='''CKAN Cloud Kubernetes operator''',
    url='https://github.com/datopian/ckan-cloud-operator',
    author='''Viderum''',
    license='MIT',
    packages=find_packages(exclude=['examples', 'tests', '.tox']),
    install_requires=[
        'httpagentparser',
        'boto3',
        'coverage',
        'psycopg2',
        # 'pyyaml<5.2,>=3.10',
        'kubernetes',
        'click',
        'toml',
        # 'dataflows>=0.0.37',
        # 'dataflows-shell>=0.0.8',
        # 'jupyterlab',
        'awscli',
        'urllib3<1.25',
        'ruamel.yaml<1',
        'requests==2.21',
        # 'python-dateutil<2.8.1',
        'botocore',
    ],
    # Console entry point: the `ckan-cloud-operator` CLI command.
    entry_points={
        'console_scripts': [
            'ckan-cloud-operator = ckan_cloud_operator.cli:main',
        ]
    },
)
| 25.638298 | 66 | 0.575104 |
4d01db8b99d5d581962d295f65f32a07a2a32b59 | 652 | py | Python | extension/magic/activate.py | ianpreston/oh-my-py | 17e37974c203cb28aa2de340c6ac66143c16bd4e | [
"Unlicense",
"MIT"
] | 3 | 2016-04-10T20:08:57.000Z | 2021-12-05T19:03:37.000Z | extension/magic/activate.py | ianpreston/oh-my-py | 17e37974c203cb28aa2de340c6ac66143c16bd4e | [
"Unlicense",
"MIT"
] | null | null | null | extension/magic/activate.py | ianpreston/oh-my-py | 17e37974c203cb28aa2de340c6ac66143c16bd4e | [
"Unlicense",
"MIT"
] | null | null | null | import os
import os.path
def activate(ipython, venv):
    """
    Activate the virtualenv at ``venv`` by executing its
    ``bin/activate_this.py`` inside the current interpreter.

    Parameters
    ----------
    ipython : object
        The IPython shell instance (unused; kept for the magic-command
        calling convention).
    venv : str
        Path to the virtualenv root directory.
    """
    venv = os.path.abspath(venv)
    venv_activate = os.path.join(venv, 'bin', 'activate_this.py')
    if not os.path.exists(venv_activate):
        print('Not a virtualenv: {}'.format(venv))
        return

    # activate_this.py doesn't set VIRTUAL_ENV, so we must set it here
    os.environ['VIRTUAL_ENV'] = venv
    os.putenv('VIRTUAL_ENV', venv)

    # `execfile` only exists on Python 2; reading the file and exec'ing a
    # compiled code object works on both Python 2 and 3.
    with open(venv_activate) as f:
        code = compile(f.read(), venv_activate, 'exec')
    exec(code, {'__file__': venv_activate})
    print('Activated: {}'.format(venv))
| 25.076923 | 70 | 0.662577 |
4d034751cf7a5ae250a1f9a85e64ff78986aa837 | 4,201 | py | Python | storage/__init__.py | daqbroker/daqbrokerServer | e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33 | [
"MIT"
] | null | null | null | storage/__init__.py | daqbroker/daqbrokerServer | e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33 | [
"MIT"
] | null | null | null | storage/__init__.py | daqbroker/daqbrokerServer | e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33 | [
"MIT"
] | null | null | null | import base64
import os
import threading
from pathlib import Path
#from sqlitedict import SqliteDict
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from daqbrokerServer.web.utils import hash_password
from daqbrokerServer.storage.server_schema import ServerBase, User, Connection
from daqbrokerServer.storage.contextual_session import session_open
# ###### THIS CREATES THE LOCAL STRUCTURE NECESSARY TO HOLD LOCAL DATABASES #######
# if not os.path.isdir(db_folder):
# os.mkdir(db_folder)
# # Initialise the local settings database
# local_url = "sqlite+pysqlite:///" + str(db_folder / "settings.sqlite")
# local_engine = create_engine(local_url)
# #################################################################################
# # This should create the mappings necessary on the local database
# Base.metadata.reflect(local_engine, extend_existing= True, autoload_replace= False)
# Base.metadata.create_all(local_engine, checkfirst= True)
# #This starts a session - probably not ideal, should consider using scoped session
# #LocalSession = scoped_session(sessionmaker(bind=local_engine))
# Session = sessionmaker(bind=local_engine)
# session = Session()
# Experimenting a class that will handle the folder definition of the session for the server class
# ######## THIS IS VERY DANGEROUS - IT SHOULD BE A PROMPT CREATED WHEN INSTALLING THE LIBRARY
# query = session.query(User).filter(User.id == 0)
# if not query.count() > 0:
# pwd = "admin"
# password = hash_password(pwd)
# user = User(id= 0, type= 3, email= "mail", username= "admin", password= password)
# ##########################################################################################
# ##### THIS SHOULD LOOK FOR RECORDS OF LOCAL DATABASE, CREATES IF IT DOES NOT EXIST #######
# query2 = session.query(Connection).filter(Connection.id == 0)
# if not query2.count() > 0:
# connection = Connection(id= 0, type= "sqlite+pysqlite", hostname= "local", username= "admin", password= base64.b64encode(b"admin"), port=0)
# ##########################################################################################
# #Actually adding the objects - if one does not exist the other will most likely not exist too
# if (not query.count() > 0) or (not query2.count() > 0):
# connection.users.append(user)
# session.add(user)
# session.add(connection)
# session.commit()
| 40.009524 | 143 | 0.653416 |
4d03f7e180eeb633a961138f2a85fdbfb2a84df1 | 1,786 | py | Python | tempest/api/queuing/test_queues.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | [
"Apache-2.0"
] | null | null | null | tempest/api/queuing/test_queues.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | [
"Apache-2.0"
] | null | null | null | tempest/api/queuing/test_queues.py | NetApp/tempest | dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from tempest.api.queuing import base
from tempest.common.utils import data_utils
from tempest import test
LOG = logging.getLogger(__name__)
| 29.278689 | 69 | 0.702688 |
4d04229e05bd8f6f6995b6ba536b1ed9096df15a | 478 | py | Python | checkin/tests.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 10 | 2017-11-25T01:47:20.000Z | 2020-03-24T18:28:24.000Z | checkin/tests.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 319 | 2017-11-16T09:56:03.000Z | 2022-03-28T00:24:37.000Z | checkin/tests.py | MAKENTNU/web | 7a5b512bf4c087d1561cdb623d7df4b3d04811a2 | [
"MIT"
] | 6 | 2017-11-12T14:04:08.000Z | 2021-03-10T09:41:18.000Z | from django.test import TestCase
from django_hosts import reverse
from util.test_utils import Get, assert_requesting_paths_succeeds
| 29.875 | 65 | 0.709205 |
4d04bfd380e253ed326e19219946bfffe57dc0dc | 10,757 | py | Python | tests/gdata_tests/live_client_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/live_client_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | tests/gdata_tests/live_client_test.py | lqc/google-data-api | b720582a472d627a0853d02e51e13dbce4cfe6ae | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import unittest
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
# TODO: switch to using v2 atom data once it is available.
import atom
import gdata.test_config as conf
conf.options.register_option(conf.BLOG_ID_OPTION)
# Utility methods.
# The Atom XML namespace.
ATOM = 'http://www.w3.org/2005/Atom'
# URL used as the scheme for a blog post tag.
TAG = 'http://www.blogger.com/atom/ns#'
# Namespace for Google Data API elements.
GD = 'http://schemas.google.com/g/2005'
WORK_REL = 'http://schemas.google.com/g/2005#work'
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| 35.50165 | 78 | 0.685972 |
4d054d1c9024db142794eb18e583cbea3e61dd43 | 125 | py | Python | apps/work_order/admin.py | joewen85/devops_study | 6bbfbac7e70f295ef6068393bd9cf7d418ab4417 | [
"Apache-2.0"
] | null | null | null | apps/work_order/admin.py | joewen85/devops_study | 6bbfbac7e70f295ef6068393bd9cf7d418ab4417 | [
"Apache-2.0"
] | null | null | null | apps/work_order/admin.py | joewen85/devops_study | 6bbfbac7e70f295ef6068393bd9cf7d418ab4417 | [
"Apache-2.0"
] | 1 | 2020-10-28T09:12:47.000Z | 2020-10-28T09:12:47.000Z | from django.contrib import admin
# Register your models here.
from .models import WorkOrder
admin.site.register(WorkOrder)
| 17.857143 | 32 | 0.808 |
4d0941aea75adaa006d884337e5c4d550547f131 | 6,030 | py | Python | updates.py | knowledgetechnologyuhh/hipss | 518bf3e6a4d02e234cbe29506b9afda0a6ccb187 | [
"MIT"
] | null | null | null | updates.py | knowledgetechnologyuhh/hipss | 518bf3e6a4d02e234cbe29506b9afda0a6ccb187 | [
"MIT"
] | null | null | null | updates.py | knowledgetechnologyuhh/hipss | 518bf3e6a4d02e234cbe29506b9afda0a6ccb187 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.clip_grad import clip_grad_norm_
from mpi_utils.mpi_utils import sync_grads
| 42.167832 | 119 | 0.703814 |
4d098b2bde7f0fef38c7be3632c1ac962fd07aad | 125,107 | py | Python | spaghetti/network.py | gegen07/spaghetti | f10f9d016deeb8d4cdd63377304fc8e3b8492a0f | [
"BSD-3-Clause"
] | 182 | 2018-07-23T20:17:32.000Z | 2022-03-28T07:08:43.000Z | spaghetti/network.py | gegen07/spaghetti | f10f9d016deeb8d4cdd63377304fc8e3b8492a0f | [
"BSD-3-Clause"
] | 563 | 2017-04-14T23:39:21.000Z | 2022-02-12T20:34:21.000Z | spaghetti/network.py | gegen07/spaghetti | f10f9d016deeb8d4cdd63377304fc8e3b8492a0f | [
"BSD-3-Clause"
] | 51 | 2017-04-14T23:40:31.000Z | 2022-03-31T01:41:56.000Z | from collections import defaultdict, OrderedDict
from itertools import islice
import copy, os, pickle, warnings
import esda
import numpy
from .analysis import GlobalAutoK
from . import util
from libpysal import cg, examples, weights
from libpysal.common import requires
try:
from libpysal import open
except ImportError:
import libpysal
open = libpysal.io.open
__all__ = ["Network", "PointPattern", "GlobalAutoK"]
SAME_SEGMENT = (-0.1, -0.1)
dep_msg = (
"The next major release of pysal/spaghetti (2.0.0) will "
"drop support for all ``libpysal.cg`` geometries. This change "
"is a first step in refactoring ``spaghetti`` that is "
"expected to result in dramatically reduced runtimes for "
"network instantiation and operations. Users currently "
"requiring network and point pattern input as ``libpysal.cg`` "
"geometries should prepare for this simply by converting "
"to ``shapely`` geometries."
)
warnings.warn(f"{dep_msg}", FutureWarning)
def extract_component(net, component_id, weightings=None):
    """Extract a single component from a network object.
    Parameters
    ----------
    net : spaghetti.Network
        Full network object.
    component_id : int
        The ID of the desired network component.
    weightings : {dict, bool}
        See the ``weightings`` keyword argument in ``spaghetti.Network``.
    Returns
    -------
    cnet : spaghetti.Network
        The pruned network containing the component specified in
        ``component_id``.
    Notes
    -----
    Point patterns are not reassigned when extracting a component. Therefore,
    component extraction should be performed prior to snapping any point
    sets onto the network. Also, if the ``spaghetti.Network`` object
    has ``distance_matrix`` or ``network_trees`` attributes, they are
    deleted and must be computed again on the single component.
    Examples
    --------
    Instantiate a network object.
    >>> from libpysal import examples
    >>> import spaghetti
    >>> snow_net = examples.get_path("Soho_Network.shp")
    >>> ntw = spaghetti.Network(in_data=snow_net, extractgraph=False)
    The network is not fully connected.
    >>> ntw.network_fully_connected
    False
    Examine the number of network components.
    >>> ntw.network_n_components
    45
    Extract the longest component.
    >>> longest = spaghetti.extract_component(ntw, ntw.network_longest_component)
    >>> longest.network_n_components
    1
    >>> longest.network_component_lengths
    {0: 13508.169276875526}
    """
    def _reassign(attr, cid):
        """Reassign attribute ``attr`` on ``cnet`` (closure variable) so it
        describes only component ``cid``. Each branch computes, in lockstep,
        a list of attribute names (``attr``) and their new values (``_val``),
        which the loop at the bottom writes back with ``setattr``.
        """
        # set for each attribute(s)
        if attr == "_fully_connected":
            # a single extracted component is connected by definition
            _val = [True for objt in obj_type]
            attr = [objt + attr for objt in obj_type]
        elif attr == "_n_components":
            _val = [1 for objt in obj_type]
            attr = [objt + attr for objt in obj_type]
        elif attr in ["_longest_component", "_largest_component"]:
            # the only remaining component is trivially the longest/largest
            _val = [cid for objt in obj_type]
            attr = [objt + attr for objt in obj_type]
        elif attr == "vertex_list":
            # reassigns vertex list + network, graph component vertices
            supp = [objt + "_component_vertices" for objt in obj_type]
            _val = [getattr(cnet, supp[0])[cid]]
            _val += [{cid: getattr(cnet, s)[cid]} for s in supp]
            attr = [attr] + supp
        elif attr == "vertex_coords":
            # reassigns both vertex_coords and vertices
            supp = getattr(cnet, "vertex_list")
            _val = [{k: v for k, v in getattr(cnet, attr).items() if k in supp}]
            # ``vertices`` is the inverse mapping (coords -> vertex id)
            _val += [{v: k for k, v in _val[0].items()}]
            attr = [attr, "vertices"]
        elif attr == "_component_vertex_count":
            # reassigns both network and graph _component_vertex_count
            supp = len(getattr(cnet, "vertex_list"))
            _val = [{cid: supp} for objt in obj_type]
            attr = [objt + attr for objt in obj_type]
        elif attr == "adjacencylist":
            supp_adj = copy.deepcopy(list(getattr(cnet, attr).keys()))
            supp_vtx = getattr(cnet, "vertex_list")
            supp_rmv = [v for v in supp_adj if v not in supp_vtx]
            # side-effect comprehension: drop adjacency entries for
            # vertices that are not part of the extracted component
            [getattr(cnet, attr).pop(s) for s in supp_rmv]
            # early return — no attr/_val pair, so the setattr loop is skipped
            return
        elif attr == "_component_is_ring":
            # reassigns both network and graph _component_is_ring
            supp = [getattr(cnet, objt + attr) for objt in obj_type]
            _val = [{cid: s[cid]} for s in supp]
            attr = [objt + attr for objt in obj_type]
        elif attr == "non_articulation_points":
            supp_vtx = getattr(cnet, "vertex_list")
            _val = [[s for s in getattr(cnet, attr) if s in supp_vtx]]
            attr = [attr]
        elif attr == "_component2":
            # reassigns both network and graph _component2 attributes
            supp = [_n + "_component2" + _a]
            if hasgraph:
                supp += [_g + "_component2" + _e]
            _val = [{cid: getattr(cnet, s)[cid]} for s in supp]
            attr = supp
        elif attr == "arcs":
            # reassigns both arcs and edges
            c2 = "_component2"
            supp = [_n + c2 + _a]
            if hasgraph:
                supp += [_g + c2 + _e]
            _val = [getattr(cnet, s)[cid] for s in supp]
            attr = [attr]
            if hasgraph:
                attr += ["edges"]
        elif attr == "_component_labels":
            # reassigns both network and graph _component_labels
            supp = [len(getattr(cnet, o + "s")) for o in obj]
            _val = [numpy.array([cid] * s) for s in supp]
            attr = [objt + attr for objt in obj_type]
        elif attr == "_component_lengths":
            # reassigns both network and graph _component_lengths
            supp = [objt + attr for objt in obj_type]
            _val = [{cid: getattr(cnet, s)[cid]} for s in supp]
            attr = supp
        elif attr == "_lengths":
            # reassigns both arc and edge _lengths
            supp_name = [o + attr for o in obj]
            supp_lens = [getattr(cnet, s) for s in supp_name]
            supp_link = [getattr(cnet, o + "s") for o in obj]
            supp_ll = list(zip(supp_lens, supp_link))
            # keep only the lengths whose arc/edge survives in the component
            _val = [{k: v for k, v in l1.items() if k in l2} for l1, l2 in supp_ll]
            attr = supp_name
        # reassign attributes
        for a, av in zip(attr, _val):
            setattr(cnet, a, av)
    # provide warning (for now) if the network contains a point pattern
    if getattr(net, "pointpatterns"):
        msg = "There is a least one point pattern associated with the network."
        msg += " Component extraction should be performed prior to snapping"
        msg += " point patterns to the network object; failing to do so may"
        msg += " lead to unexpected results."
        warnings.warn(msg)
    # warn if distance-matrix / KD-tree attributes exist; they become stale
    # after extraction and are deleted so they must be recomputed
    dm, nt = "distance_matrix", "network_trees"
    if hasattr(net, dm) or hasattr(net, nt):
        msg = "Either one or both (%s, %s) attributes" % (dm, nt)
        msg += " are present and will be deleted. These must be"
        msg += " recalculated following component extraction."
        warnings.warn(msg)
        for attr in [dm, nt]:
            if hasattr(net, attr):
                _attr = getattr(net, attr)
                del _attr
    # make initial copy of the network
    cnet = copy.deepcopy(net)
    # set labels
    _n, _a, _g, _e = "network", "arc", "graph", "edge"
    obj_type = [_n]
    obj = [_a]
    hasgraph = False
    if hasattr(cnet, "w_graph"):
        # a graph-theoretic representation exists; mirror every update there
        obj_type += [_g]
        obj += [_e]
        hasgraph = True
    # attributes to reassign
    update_attributes = [
        "_fully_connected",
        "_n_components",
        "_longest_component",
        "_largest_component",
        "vertex_list",
        "vertex_coords",
        "_component_vertex_count",
        "adjacencylist",
        "_component_is_ring",
        "_component2",
        "arcs",
        "_component_lengths",
        "_lengths",
        "_component_labels",
    ]
    if hasgraph:
        update_attributes.append("non_articulation_points")
    # reassign attributes
    for attribute in update_attributes:
        _reassign(attribute, component_id)
    # recreate spatial weights
    cnet.w_network = cnet.contiguityweights(graph=False, weightings=weightings)
    if hasgraph:
        cnet.w_graph = cnet.contiguityweights(graph=True, weightings=weightings)
    return cnet
def spanning_tree(net, method="sort", maximum=False, silence_warnings=True):
    """Extract a minimum or maximum spanning tree from a network.
    Parameters
    ----------
    net : spaghetti.Network
        Instance of a network object.
    method : str
        Method for determining spanning tree. Currently, the only
        supported method is 'sort', which sorts the network arcs
        by length prior to building intermediary networks and checking
        for cycles within the tree/subtrees. Future methods may
        include linear programming approachs, etc.
    maximum : bool
        When ``True`` a maximum spanning tree is created. When ``False``
        a minimum spanning tree is created. Default is ``False``.
    silence_warnings : bool
        Suppress warnings (e.g. for multiple connected components) raised
        while constructing the intermediary networks. Default is ``True``
        due to the nature of constructing a minimum spanning tree.
    Returns
    -------
    net : spaghetti.Network
        Pruned instance of the network object.
    Raises
    ------
    ValueError
        If ``method`` is not a supported spanning-tree method.
    Notes
    -----
    For in-depth background and details see
    :cite:`GrahamHell_1985`,
    :cite:`AhujaRavindraK`, and
    :cite:`Okabe2012`.
    See also
    --------
    networkx.algorithms.tree.mst
    scipy.sparse.csgraph.minimum_spanning_tree
    Examples
    --------
    Create a network instance.
    >>> from libpysal import cg
    >>> import spaghetti
    >>> p00 = cg.Point((0,0))
    >>> lines = [cg.Chain([p00, cg.Point((0,3)), cg.Point((4,0)), p00])]
    >>> ntw = spaghetti.Network(in_data=lines)
    Extract the minimum spanning tree.
    >>> minst_net = spaghetti.spanning_tree(ntw)
    >>> min_len = sum(minst_net.arc_lengths.values())
    >>> min_len
    7.0
    Extract the maximum spanning tree.
    >>> maxst_net = spaghetti.spanning_tree(ntw, maximum=True)
    >>> max_len = sum(maxst_net.arc_lengths.values())
    >>> max_len
    9.0
    >>> max_len > min_len
    True
    """
    # propagate the warning preference to every network constructed here
    weights_kws = {"silence_warnings": silence_warnings}
    # do not extract graph object while testing for cycles
    net_kws = {"extractgraph": False, "weights_kws": weights_kws}
    # BUG FIX: a cycle-free network is already a spanning tree. Previously
    # this case fell through to the constructor below and raised a
    # NameError because the local ``spanning_tree`` was never assigned.
    if not util.network_has_cycle(net.adjacencylist):
        return net
    if method.lower() == "sort":
        spanning_tree = mst_weighted_sort(net, maximum, net_kws)
    else:
        msg = "'%s' not a valid method for minimum spanning tree creation"
        raise ValueError(msg % method)
    # instantiate the spanning tree as a network object
    net = Network(in_data=spanning_tree, weights_kws=weights_kws)
    return net
def mst_weighted_sort(net, maximum, net_kws):
    """Build a minimum or maximum spanning tree by length-sorted arc insertion.
    Arcs are visited in order of length (descending when ``maximum`` is
    ``True``) and an arc is kept only when adding it does not close a
    cycle, following the method in Chapter 3, Section 4.3 of
    :cite:`Okabe2012`.
    Parameters
    ----------
    net : spaghetti.Network
        See ``spanning_tree()``.
    maximum : bool
        See ``spanning_tree()``.
    net_kws : dict
        Keywords arguments for instaniating a ``spaghetti.Network``.
    Returns
    -------
    tree_chains : list
        All networks arcs that are members of the spanning tree.
    """
    # arc IDs ordered by arc length: ascending for a minimum tree,
    # descending for a maximum tree
    ordered_arcs = sorted(
        net.arc_lengths, key=net.arc_lengths.get, reverse=maximum
    )
    # chains accepted into the spanning tree so far
    tree_chains = []
    for arc_id in ordered_arcs:
        # spatial (libpysal.cg.Chain) representation of the candidate arc
        candidate = util.chain_constr(net.vertex_coords, [arc_id])
        # trial network composed of the current tree plus the candidate
        trial_net = Network(in_data=tree_chains + candidate, **net_kws)
        # keep the candidate only when it does not introduce a cycle
        if not util.network_has_cycle(trial_net.adjacencylist):
            tree_chains += candidate
    return tree_chains
def regular_lattice(bounds, nh, nv=None, exterior=False):
    """Generate a regular lattice of line segments
    (`libpysal.cg.Chain objects <https://pysal.org/libpysal/generated/libpysal.cg.Chain.html#libpysal.cg.Chain>`_).
    Parameters
    ----------
    bounds : {tuple, list}
        Area bounds in the form - <minx,miny,maxx,maxy>.
    nh : int
        The number of internal horizontal lines of the lattice.
    nv : int
        The number of internal vertical lines of the lattice. Defaults to
        ``nh`` if left as None.
    exterior : bool
        Flag for including the outer bounding box segments. Default is False.
    Returns
    -------
    lattice : list
        The ``libpysal.cg.Chain`` objects forming a regular lattice.
    Raises
    ------
    RuntimeError
        If ``bounds`` does not contain exactly four elements.
    TypeError
        If ``nh`` or ``nv`` cannot be converted to an integer.
    Notes
    -----
    The ``nh`` and ``nv`` parameters do not include the external
    line segments. For example, setting ``nh=3, nv=2, exterior=True``
    will result in 5 horizontal line sets and 4 vertical line sets.
    Examples
    --------
    Create a 5x5 regular lattice with an exterior
    >>> import spaghetti
    >>> lattice = spaghetti.regular_lattice((0,0,4,4), 3, exterior=True)
    >>> lattice[0].vertices
    [(0.0, 0.0), (1.0, 0.0)]
    Create a 5x5 regular lattice without an exterior
    >>> lattice = spaghetti.regular_lattice((0,0,5,5), 3, exterior=False)
    >>> lattice[-1].vertices
    [(3.75, 3.75), (3.75, 5.0)]
    Create a 7x9 regular lattice with an exterior from the
    bounds of ``streets.shp``.
    >>> path = libpysal.examples.get_path("streets.shp")
    >>> shp = libpysal.io.open(path)
    >>> lattice = spaghetti.regular_lattice(shp.bbox, 5, nv=7, exterior=True)
    >>> lattice[0].vertices
    [(723414.3683108028, 875929.0396895551), (724286.1381211297, 875929.0396895551)]
    """
    # check for bounds validity
    if len(bounds) != 4:
        bounds_len = len(bounds)
        msg = "The 'bounds' parameter is %s elements " % bounds_len
        msg += "but should be exactly 4 - <minx,miny,maxx,maxy>."
        raise RuntimeError(msg)
    # BUG FIX: test explicitly for None so a legitimate ``nv=0`` is honored;
    # the previous falsy check (``if not nv``) silently replaced 0 with ``nh``
    if nv is None:
        nv = nh
    try:
        nh, nv = int(nh), int(nv)
    # BUG FIX: ``int()`` raises ValueError for non-numeric strings and
    # TypeError for non-numeric objects; catch both so the descriptive
    # message is always produced, and chain the original cause
    except (TypeError, ValueError) as err:
        nlines_types = type(nh), type(nv)
        msg = "The 'nh' and 'nv' parameters (%s, %s) " % nlines_types
        msg += "could not be converted to integers."
        raise TypeError(msg) from err
    # bounding box line lengths
    len_h, len_v = bounds[2] - bounds[0], bounds[3] - bounds[1]
    # horizontal and vertical increments
    incr_h, incr_v = len_h / float(nh + 1), len_v / float(nv + 1)
    # define the horizontal and vertical space
    space_h = [incr_h * slot for slot in range(nv + 2)]
    space_v = [incr_v * slot for slot in range(nh + 2)]
    # create vertical and horizontal lines
    lines_h = util.build_chains(space_h, space_v, exterior, bounds)
    lines_v = util.build_chains(space_h, space_v, exterior, bounds, h=False)
    # combine into one list
    lattice = lines_h + lines_v
    return lattice
| 35.714245 | 146 | 0.588632 |
4d09a5a4cc57e4e453dca3ac3e67a8ff83298706 | 340 | py | Python | tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py | Sohamkayal4103/mlflow | 4e444efdf73c710644ee039b44fa36a31d716f69 | [
"Apache-2.0"
] | 1 | 2022-01-11T02:51:17.000Z | 2022-01-11T02:51:17.000Z | tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py | Sohamkayal4103/mlflow | 4e444efdf73c710644ee039b44fa36a31d716f69 | [
"Apache-2.0"
] | null | null | null | tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py | Sohamkayal4103/mlflow | 4e444efdf73c710644ee039b44fa36a31d716f69 | [
"Apache-2.0"
] | 2 | 2019-05-11T08:13:38.000Z | 2019-05-14T13:33:54.000Z | from mlflow.tracking.default_experiment.abstract_context import DefaultExperimentProvider
| 28.333333 | 89 | 0.791176 |
4d09ec45c4e1965510df15bcf08b297cda5ab9d9 | 1,097 | py | Python | ac_loss_plot.py | atul799/CarND-Semantic-Segmentation | dbec928d3ba9cc68f3de9bbb7707df85131c1d5c | [
"MIT"
] | null | null | null | ac_loss_plot.py | atul799/CarND-Semantic-Segmentation | dbec928d3ba9cc68f3de9bbb7707df85131c1d5c | [
"MIT"
] | null | null | null | ac_loss_plot.py | atul799/CarND-Semantic-Segmentation | dbec928d3ba9cc68f3de9bbb7707df85131c1d5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
plot acc loss
@author: atpandey
"""
#%%
import matplotlib.pyplot as plt
#%%
# Path to the training log: a text file with a header row followed by
# comma-separated records of <index>,<loss>,<accuracy>.
ff='./to_laptop/trg_file.txt'
with open(ff,'r') as trgf:
    # parsed columns from the log file
    listidx=[]
    listloss=[]
    listacc=[]
    # line counter, used to skip the header row (line 0)
    ctr=0
    for line in trgf:
        if(ctr>0):
            ll=line.split(',')
            listidx.append(ll[0])
            listloss.append(ll[1])
            listacc.append(ll[2])
        #listf.append(line)
        ctr +=1
# NOTE(review): the parsed values are kept as strings (never cast to float),
# so matplotlib will plot them as categorical values — confirm intended.
#for i in range(len(listidx)):
#    print("idx: {}, loss: {}, acc: {}".format(listidx[i],listloss[i],listacc[i]))
# Make a figure with two stacked subplots: loss on top, accuracy below
fig = plt.figure()
plt.subplots_adjust(top = 0.99, bottom=0.05, hspace=0.5, wspace=0.4)
# The axes
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
# plots: blue circle markers for loss, green for accuracy
ax1.plot(listloss,'bo-',label='loss')
ax2.plot(listacc,'go-',label='accuracy')
ax1.set_xlabel('training idx')
ax1.set_ylabel('Loss')
ax1.set_title('loss data set')
ax1.legend()
ax2.set_xlabel('training idx')
ax2.set_ylabel('accuracy')
ax2.set_title('accuracydata set')
ax2.legend()
plt.show()
plt.savefig('./outputs/loss_accuracy.png') | 18.913793 | 82 | 0.606199 |
4d0a6ad7788dddfb228aeaaea80d6d51b9e09fa7 | 8,611 | py | Python | VA_multiples/src/main.py | brown9804/Modelos_Probabilisticos- | 8ddc6afbe4da5975af9eb5dc946ff19daa1171bc | [
"Apache-2.0"
] | null | null | null | VA_multiples/src/main.py | brown9804/Modelos_Probabilisticos- | 8ddc6afbe4da5975af9eb5dc946ff19daa1171bc | [
"Apache-2.0"
] | null | null | null | VA_multiples/src/main.py | brown9804/Modelos_Probabilisticos- | 8ddc6afbe4da5975af9eb5dc946ff19daa1171bc | [
"Apache-2.0"
] | null | null | null | ##--------------------------------Main file------------------------------------
##
## Copyright (C) 2020 by Belinda Brown Ramrez (belindabrownr04@gmail.com)
## June, 2020
## timna.brown@ucr.ac.cr
##-----------------------------------------------------------------------------
# Variables aleatorias mltiples
# Se consideran dos bases de datos las cuales contienen los descrito
# a continuacin:
# 1. ****** Registro de la frecuencia relativa de dos variables aleatorias
# conjuntas en forma de tabla: xy.csv
# 2. ****** Pares (x, y) y su probabilidad asociada: xyp.csv
# Recordando que variable aleatoria es una funcin determinista.
#### **************** Algoritmo **************** ####
#******************************************************
# IMPORTANDO PAQUETES
#******************************************************
# Es importante considerar que notas son necesarias pero si
# fueron usadas durante el desarrollo de la tarea por diversas
# razones por lo cual se mantiene dentro del algortimo en forma
# comentario.
# from __future__ import division
# from pylab import *
# from sklearn import *
# from sklearn.preprocessing import PolynomialFeatures
# import math
# import decimal
# import pandas as pd
# from scipy.stats import norm
# from scipy.stats import rayleigh
# import csv
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import axes3d
from numpy import *
import numpy as np
from matplotlib import cm
import scipy.stats as stats
from scipy.optimize import curve_fit
#******************************************************
# DEFINICIONES
#******************************************************
#******************************************************
# OBTENIENDO VALORES
# DE LOS CSV
#******************************************************
# Load the joint relative-frequency table (rows: X values, columns: Y values)
# and the (x, y, p) records with their associated probabilities.
# NOTE(review): absolute, machine-specific paths — confirm before running.
data = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv", index_col=0)
data_xyp = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv")
#******************************************************
# BEST-FIT CURVES FOR THE
# MARGINAL DENSITY FUNCTIONS
# OF X AND Y
#******************************************************
# The marginal values of both X and Y are required.
# Summing across every column per row gives the marginal probability of X.
marg_value_x = [n for n in data.sum(axis=1, numeric_only=True)]
# Summing down every row per column gives the marginal probability of Y.
marg_value_y = [n for n in data.sum(axis=0, numeric_only=True)]
print("\nValor marginal de X: ", marg_value_x)
print("\nValor marginal de Y: ", marg_value_y)
# Fit a Gaussian model to each marginal over its value range.
# NOTE(review): ``ajuste_curva`` and ``distribucion_normal`` are not defined
# in this excerpt — presumably declared earlier in the module; confirm.
x_curva_modelo, x_mu, x_sigma = ajuste_curva(marg_value_x, 5, 15, distribucion_normal, "Datos que pertenencen a X","Datos_de_X", "Modelos de X(x)", "Modelado_X(x)")
y_curva_modelo, y_mu, y_sigma = ajuste_curva(marg_value_y, 5, 25, distribucion_normal, "Datos que pertenencen a Y","Datos_de_Y", "Modelos de Y(y)", "Modelado_Y(y)")
#******************************************************
# JOINT DENSITY FUNCTION
# OF X AND Y
#******************************************************
# Evaluate the fitted normal model along each marginal's modeled domain.
probabi_conjuntaX = distribucion_normal(x_curva_modelo,x_mu,x_sigma)
probabi_conjuntaY = distribucion_normal(y_curva_modelo,y_mu,y_sigma)
#******************************************************
# CORRELATION, COVARIANCE,
# PEARSON CORRELATION COEFFICIENT
# AND THEIR MEANING
#******************************************************
###### OBTENIDOS CON XY.CSV
# The previously computed marginals are needed to obtain the expected
# values E[X] and E[Y]. The ranges below are inclusive: [x0, x1].
e_x = valor_esperado(marg_value_x,5,15, "X")
e_y = valor_esperado(marg_value_y,5,25, "Y")
multi_valor_esperados = e_x*e_y
# E[X]*E[Y] is computed here.
print("\n\nEl valor de E[X]E[Y] es de: ", multi_valor_esperados)
###### OBTENIDOS CON XYP.CSV
# The first row of the file holds the column labels x, y, p.
todos_mu_sum = data_xyp.x * data_xyp.y * data_xyp.p
# The sum of x*y*p over all records is E[XY], i.e. the correlation.
correlacion = todos_mu_sum.sum()
# Covariance = correlation (E[XY]) minus the product of the expected
# values, E[X]E[Y].
covarianza = correlacion - multi_valor_esperados
# Pearson's correlation coefficient divides the covariance by the product
# of the standard deviations reported by the fitted models:
#   SigmaDatos_de_X = 3.2994428707078436
#   SigmaDatos_de_Y = 6.0269377486808775
# NOTE(review): these sigmas are hard-coded from a previous run's output;
# ideally reuse ``x_sigma`` and ``y_sigma`` computed above — confirm.
coef_pearson = covarianza/(3.2994428707078436*6.0269377486808775)
print("\nEl resultado de la correlacin es de: ", correlacion)
print("\nEl resultado de la covarianza es de: ",covarianza)
print("\nDe acuerdo a los datos obtenidos y considerando todo sus decimales se tiene que el coeficiente de Pearson es de: ", coef_pearson)
#******************************************************
# 2D PLOTS OF THE MARGINAL
# DENSITY FUNCTIONS
# &
# 3D PLOT OF THE JOINT
# DENSITY FUNCTION
#******************************************************
# The full model parameters computed above are used for plotting.
# NOTE(review): ``grafica_en2d`` / ``grafica_en3d`` are not defined in this
# excerpt — presumably declared earlier in the module; confirm.
distribucion_de_x = grafica_en2d(x_mu, x_sigma, 100,"Distribucion_de_X")
distribucion_de_y = grafica_en2d(y_mu, y_sigma, 100,"Distribucion_de_Y")
dis_cojun3d = grafica_en3d(x_curva_modelo, y_curva_modelo, probabi_conjuntaX, probabi_conjuntaY, "Distribucion_en_3D")
| 46.048128 | 164 | 0.652537 |
4d0a9eaef2e9a5554500cb97127b08aa78c0807c | 7,527 | py | Python | official/mnist/mnist.py | TuKJet/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | official/mnist/mnist.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | official/mnist/mnist.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 1,474 | 2018-02-01T04:33:18.000Z | 2022-03-08T07:02:20.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import dataset
def model_fn(features, labels, mode, params):
    """The ``model_fn`` for creating a ``tf.estimator.Estimator``.

    Builds the CNN defined by ``Model`` and returns the ``EstimatorSpec``
    appropriate for the requested mode (PREDICT, TRAIN, or EVAL).
    """
    network = Model(params['data_format'])
    # a serving input receiver may wrap the image tensor in a dict
    image = features['image'] if isinstance(features, dict) else features
    if mode == tf.estimator.ModeKeys.PREDICT:
        logits = network(image, training=False)
        predictions = {
            'classes': tf.argmax(logits, axis=1),
            'probabilities': tf.nn.softmax(logits),
        }
        export_outputs = {
            'classify': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs=export_outputs)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        logits = network(image, training=True)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
        accuracy = tf.metrics.accuracy(
            labels=tf.argmax(labels, axis=1), predictions=tf.argmax(logits, axis=1))
        # Expose the running accuracy under the stable name 'train_accuracy'
        # so a LoggingTensorHook can pick it up; mirror it to TensorBoard.
        tf.identity(accuracy[1], name='train_accuracy')
        tf.summary.scalar('train_accuracy', accuracy[1])
        train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        logits = network(image, training=False)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
        eval_accuracy = tf.metrics.accuracy(
            labels=tf.argmax(labels, axis=1),
            predictions=tf.argmax(logits, axis=1))
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={'accuracy': eval_accuracy})
if __name__ == '__main__':
  # Command-line interface for training/evaluating the MNIST model.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Number of images to process in a batch')
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/mnist_data',
      help='Path to directory containing the MNIST dataset')
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/mnist_model',
      help='The directory where the model will be stored.')
  parser.add_argument(
      '--train_epochs', type=int, default=40, help='Number of epochs to train.')
  parser.add_argument(
      '--data_format',
      type=str,
      default=None,
      choices=['channels_first', 'channels_last'],
      help='A flag to override the data format used in the model. channels_first '
      'provides a performance boost on GPU but is not always compatible '
      'with CPU. If left unspecified, the data format will be chosen '
      'automatically based on whether TensorFlow was built for CPU or GPU.')
  parser.add_argument(
      '--export_dir',
      type=str,
      help='The directory where the exported SavedModel will be stored.')
  tf.logging.set_verbosity(tf.logging.INFO)
  # FLAGS is module-global so the entry point can read the parsed options;
  # unparsed args are forwarded to tf.app.run.
  FLAGS, unparsed = parser.parse_known_args()
  # NOTE(review): ``main`` is expected to be defined earlier in this module
  # (not visible in this excerpt) — confirm it exists before running.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.1875 | 82 | 0.689385 |
4d0b5e5a16eda393441922d1c3ec56983303e265 | 523 | py | Python | pep_92.py | sayantan3/project-euler | 9b856c84a0b174754819ed15f86eb0f30181e94e | [
"MIT"
] | null | null | null | pep_92.py | sayantan3/project-euler | 9b856c84a0b174754819ed15f86eb0f30181e94e | [
"MIT"
] | null | null | null | pep_92.py | sayantan3/project-euler | 9b856c84a0b174754819ed15f86eb0f30181e94e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
TERMINALS = (1, 89)
sq_sum = [sum(int(c)**2 for c in str(i)) for i in range(1000)]
if __name__ == "__main__":
print(calculate())
| 18.678571 | 62 | 0.565966 |
4d0e5f2a06efaa32ab6853b48bd163c479f22bbd | 467 | py | Python | Visualization/ConstrainedOpt.py | zhijieW94/SAGNet | 017b58853cb51d50851a5a3728b3205d235ff889 | [
"MIT"
] | 25 | 2019-09-15T09:10:17.000Z | 2021-04-08T07:44:16.000Z | Visualization/ConstrainedOpt.py | zhijieW-94/SAGNet | 017b58853cb51d50851a5a3728b3205d235ff889 | [
"MIT"
] | 9 | 2019-11-16T07:06:08.000Z | 2021-03-07T09:14:32.000Z | Visualization/ConstrainedOpt.py | zhijieW94/SAGNet | 017b58853cb51d50851a5a3728b3205d235ff889 | [
"MIT"
] | 7 | 2019-09-25T18:07:54.000Z | 2021-12-21T08:41:47.000Z | from PyQt5.QtCore import * | 24.578947 | 55 | 0.631692 |