hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
672ca5a86d4634cb29b428fe498eec5d2e6591d7 | 17,041 | py | Python | clustering.py | t20100/ccCluster | 9645d80dcfe579c23b3d52e8d536a39d469b184a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | clustering.py | t20100/ccCluster | 9645d80dcfe579c23b3d52e8d536a39d469b184a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | clustering.py | t20100/ccCluster | 9645d80dcfe579c23b3d52e8d536a39d469b184a | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from __future__ import print_function
__author__ = "Gianluca Santoni"
__copyright__ = "Copyright 20150-2019"
__credits__ = ["Gianluca Santoni, Alexander Popov"]
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Gianluca Santoni"
__email__ = "gianluca.santoni@esrf.fr"
__status__ = "Beta"
from scipy.cluster import hierarchy
import scipy
import matplotlib.pyplot as plt
import os
import numpy as np
import subprocess
import collections
import operator
import stat
import json
import random
if __name__== '__main__':
main()
| 38.123043 | 146 | 0.589813 |
672fde99dcb82eabf8b0425ec9a63d4e04194da7 | 9,992 | py | Python | wrappaconda.py | nckz/wrappaconda | 43203be36f2de17fdf8fe77c151c5628bd98321f | [
"BSD-2-Clause"
] | null | null | null | wrappaconda.py | nckz/wrappaconda | 43203be36f2de17fdf8fe77c151c5628bd98321f | [
"BSD-2-Clause"
] | null | null | null | wrappaconda.py | nckz/wrappaconda | 43203be36f2de17fdf8fe77c151c5628bd98321f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Author: Nick Zwart
# Date: 2015oct31
from __future__ import print_function
import os
import sys
import stat
import errno
import shutil
import optparse
import traceback
import subprocess
wrappaconda_name_string = 'Wr[App]-A-Conda'
if __name__ == '__main__':
main()
| 39.650794 | 236 | 0.618995 |
673174539407b646c8c0d2d08573c676c84a2fa0 | 557 | py | Python | watchtower/wallet/wallet.py | paytaca/watchtower-py | a9a4fb83ba4a9a15379efdd41bb91546821b4be8 | [
"MIT"
] | null | null | null | watchtower/wallet/wallet.py | paytaca/watchtower-py | a9a4fb83ba4a9a15379efdd41bb91546821b4be8 | [
"MIT"
] | null | null | null | watchtower/wallet/wallet.py | paytaca/watchtower-py | a9a4fb83ba4a9a15379efdd41bb91546821b4be8 | [
"MIT"
] | null | null | null | import requests
| 26.52381 | 66 | 0.606822 |
6733155cbc1b3ee12cbd1d7e111f38daa85f1326 | 858 | py | Python | test/unit/test_finalize.py | phated/binaryen | 50e66800dc28d67ea1cc88172f459df1ca96507d | [
"Apache-2.0"
] | 5,871 | 2015-11-13T19:06:43.000Z | 2022-03-31T17:40:21.000Z | test/unit/test_finalize.py | sthagen/binaryen | ce592cbdc8e58f36e7f39a3bd24b403f43adae34 | [
"Apache-2.0"
] | 2,743 | 2015-11-13T03:46:49.000Z | 2022-03-31T20:27:05.000Z | test/unit/test_finalize.py | sthagen/binaryen | ce592cbdc8e58f36e7f39a3bd24b403f43adae34 | [
"Apache-2.0"
] | 626 | 2015-11-23T08:00:11.000Z | 2022-03-17T01:58:18.000Z | from scripts.test import shared
from . import utils
| 33 | 79 | 0.637529 |
67351c5ed22ca30713ae796c8d4fe75b64c848ee | 6,206 | py | Python | mc/tools/TreeWidget.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2021-04-29T05:36:44.000Z | 2021-04-29T05:36:44.000Z | mc/tools/TreeWidget.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2020-03-28T17:43:18.000Z | 2020-03-28T17:43:18.000Z | mc/tools/TreeWidget.py | zy-sunshine/falkon-pyqt5 | bc2b60aa21c9b136439bd57a11f391d68c736f99 | [
"MIT"
] | 1 | 2021-01-15T20:09:24.000Z | 2021-01-15T20:09:24.000Z | from PyQt5.QtWidgets import QTreeWidget
from PyQt5.Qt import pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
from PyQt5.Qt import Qt
| 28.731481 | 75 | 0.586046 |
673572261f6221c9f0594203352cc527924c075f | 1,400 | py | Python | app/api/v2/models/sales.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
] | null | null | null | app/api/v2/models/sales.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
] | 21 | 2018-10-16T09:29:03.000Z | 2022-03-11T23:31:35.000Z | app/api/v2/models/sales.py | danuluma/dannstore | e5b59f08542c1cacdac60e380b5c2945195ba64a | [
"MIT"
] | null | null | null | import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_sale(sale):
    """Map a sale database row onto a JSON-serializable dictionary.

    :param sale: an indexable row where position 0 is the id, 1 the books,
        2 the total, 3 the creator id, 4 the creation timestamp and
        5 the attendant name
    :return: dict with the row's values; ``created_at`` is stringified
    """
    sale_id, books, total, created_by, created_at, attendant_name = sale[:6]
    return {
        "id": sale_id,
        "books": books,
        "total": total,
        "created_by": created_by,
        "attendant_name": attendant_name,
        "created_at": str(created_at),
    }
| 25.454545 | 89 | 0.547143 |
67366bf1792d0f436d2ce6181f326bfb3e3aea15 | 4,035 | py | Python | ubirch/linux/bleManager.py | ubirch/ubirch-ble-tool | 1399d018957e9a8424071296a71431c8ffa27e6f | [
"Apache-2.0"
] | 4 | 2018-07-20T16:35:52.000Z | 2020-11-12T13:38:58.000Z | ubirch/linux/bleManager.py | ubirch/ubirch-ble-tool | 1399d018957e9a8424071296a71431c8ffa27e6f | [
"Apache-2.0"
] | 1 | 2021-04-03T13:37:40.000Z | 2021-04-03T13:37:40.000Z | ubirch/linux/bleManager.py | ubirch/ubirch-ble-tool | 1399d018957e9a8424071296a71431c8ffa27e6f | [
"Apache-2.0"
] | null | null | null | from bleSuite import bleConnectionManager, bleServiceManager
from bluepy.btle import Scanner
from ubirch.linux.bleServiceManager import BLEServiceManager
| 34.487179 | 105 | 0.666419 |
67366ca8b5a32e45010c5e5c8a95158feb06f5b0 | 1,952 | py | Python | sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py | SidneyAn/config | d694cc5d79436ea7d6170881c23cbfc8441efc0f | [
"Apache-2.0"
] | null | null | null | sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py | SidneyAn/config | d694cc5d79436ea7d6170881c23cbfc8441efc0f | [
"Apache-2.0"
] | null | null | null | sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py | SidneyAn/config | d694cc5d79436ea7d6170881c23cbfc8441efc0f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['software_version', 'compatible_version',
'required_patches']
IMPORT_ATTRIBUTES = ['path_to_iso', 'path_to_sig', 'active']
| 26.378378 | 81 | 0.589139 |
6736c3bf19a38443467bf3214084087a92e23009 | 10,984 | py | Python | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | haihabi/model_optimization | 97372a9596378bb2287c59f1180b5059f741b2d6 | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | haihabi/model_optimization | 97372a9596378bb2287c59f1180b5059f741b2d6 | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | haihabi/model_optimization | 97372a9596378bb2287c59f1180b5059f741b2d6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from model_compression_toolkit.tpc_models.default_tp_model import get_op_quantization_configs
from model_compression_toolkit.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc
from tests.common_tests.helpers.generate_test_tp_model import generate_mixed_precision_test_tp_model
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as mct
from model_compression_toolkit.common.mixed_precision.kpi import KPI
from model_compression_toolkit.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common.user_info import UserInformation
from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
from tests.common_tests.helpers.tensors_compare import cosine_similarity
keras = tf.keras
layers = keras.layers
tp = mct.target_platform
| 47.549784 | 120 | 0.638929 |
67379ede0d1ebb11453ed5424da8aed4d1402f30 | 33,494 | py | Python | src/ansible_navigator/actions/run.py | NaincyKumariKnoldus/ansible-navigator | 2ac043aea4ce897f30df3c47c1444a5747c9446c | [
"Apache-2.0"
] | null | null | null | src/ansible_navigator/actions/run.py | NaincyKumariKnoldus/ansible-navigator | 2ac043aea4ce897f30df3c47c1444a5747c9446c | [
"Apache-2.0"
] | null | null | null | src/ansible_navigator/actions/run.py | NaincyKumariKnoldus/ansible-navigator | 2ac043aea4ce897f30df3c47c1444a5747c9446c | [
"Apache-2.0"
] | null | null | null | """:run
"""
import curses
import datetime
import json
import logging
import os
import re
import shlex
import shutil
import time
import uuid
from math import floor
from queue import Queue
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ..action_base import ActionBase
from ..action_defs import RunStdoutReturn
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..runner import CommandAsync
from ..steps import Step
from ..ui_framework import CursesLine
from ..ui_framework import CursesLinePart
from ..ui_framework import CursesLines
from ..ui_framework import Interaction
from ..ui_framework import dict_to_form
from ..ui_framework import form_to_dict
from ..ui_framework import nonblocking_notification
from ..ui_framework import warning_notification
from ..utils.functions import abs_user_path
from ..utils.functions import human_time
from ..utils.functions import remove_ansi
from ..utils.functions import round_half_up
from ..utils.serialize import json_dump
from . import _actions as actions
from . import run_action
# Mapping of task-result words (matched case-insensitively, anchored) to
# curses color numbers.  First match wins.
RESULT_TO_COLOR = [
    ("(?i)^failed$", 9),
    ("(?i)^ok$", 10),
    ("(?i)^ignored$", 13),
    ("(?i)^skipped$", 14),
    ("(?i)^in_progress$", 8),
]


def get_color(word):
    """Return the curses color number for a task-result word.

    Replaces the previous ``get_color = lambda ...`` assignment (PEP 8
    E731) with a plain, documented function; behavior is unchanged.

    :param word: A result word, e.g. ``ok`` or ``FAILED``
    :returns: The color for the first matching pattern in
        :data:`RESULT_TO_COLOR`, or 0 if no pattern matches
    """
    return next(
        (color for pattern, color in RESULT_TO_COLOR if re.match(pattern, word)),
        0,
    )
def color_menu(_colno: int, colname: str, entry: Dict[str, Any]) -> Tuple[int, int]:
    # pylint: disable=too-many-branches
    """Pick the curses color and decoration for one menu cell.

    :param _colno: The column number (unused)
    :param colname: The name of the column being rendered
    :param entry: The full menu row the cell belongs to
    :returns: Tuple of (color number, curses decoration attribute)
    """
    colval = entry[colname]
    color = 0
    decoration = 0
    # Rows carrying "__play_name" are play summaries; color reflects the
    # aggregate health of the play.
    if "__play_name" in entry:
        if not colval:
            # Empty cell: render dimmed.
            color = 8
        elif colname in ["__task_count", "__play_name", "__progress"]:
            # These columns take the overall play result: red on any
            # failure/unreachable, green if anything succeeded, else dim.
            failures = entry["__failed"] + entry["__unreachable"]
            if failures:
                color = 9
            elif entry["__ok"]:
                color = 10
            else:
                color = 8
        elif colname == "__changed":
            color = 11
        else:
            # Remaining columns are named after a result word (e.g. "__ok");
            # strip the "__" prefix and color by that word.
            color = get_color(colname[2:])
        # Embolden the progress cell once the play has finished.
        if colname == "__progress" and entry["__progress"].strip().lower() == "complete":
            decoration = curses.A_BOLD
    # Rows carrying "task" are individual task results; color by the
    # task's own result word.
    elif "task" in entry:
        if entry["__result"].lower() == "__in_progress":
            color = get_color(entry["__result"])
        elif colname in ["__result", "__host", "__number", "__task", "__task_action"]:
            color = get_color(entry["__result"])
        elif colname == "__changed":
            if colval is True:
                color = 11
            else:
                color = get_color(entry["__result"])
        elif colname == "__duration":
            color = 12
    return color, decoration
def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:
    """Create a three-line heading for a task result about to be shown.

    :param obj: The content going to be shown (expected to be a
        task-result dict when a heading applies)
    :param screen_w: The current screen width, used to pad each line
    :return: The heading lines, or ``None`` if *obj* is not a task result
    """
    if isinstance(obj, dict) and "task" in obj:
        # Line 1: the play name, padded with "*" like Ansible's stdout.
        detail = f"PLAY [{obj['play']}:{obj['__number']}] "
        stars = "*" * (screen_w - len(detail))
        line_1 = CursesLine(
            (CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
        )
        # Line 2: the task name, padded the same way.
        detail = f"TASK [{obj['task']}] "
        stars = "*" * (screen_w - len(detail))
        line_2 = CursesLine(
            (CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
        )
        # A changed task is always labelled CHANGED; otherwise color by the
        # result word via RESULT_TO_COLOR (0 if nothing matches).
        if obj["__changed"] is True:
            color = 11
            res = "CHANGED"
        else:
            color = next((x[1] for x in RESULT_TO_COLOR if re.match(x[0], obj["__result"])), 0)
            res = obj["__result"]
        # Flatten any multi-line task message onto a single line.
        if "res" in obj and "msg" in obj["res"]:
            msg = str(obj["res"]["msg"]).replace("\n", " ").replace("\r", "")
        else:
            msg = ""
        # Line 3: "<result>: [<host>] <message>", padded and underlined.
        string = f"{res}: [{obj['__host']}] {msg}"
        string = string + (" " * (screen_w - len(string) + 1))
        line_3 = CursesLine(
            (CursesLinePart(column=0, string=string, color=color, decoration=curses.A_UNDERLINE),),
        )
        return CursesLines((line_1, line_2, line_3))
    return None
def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:
"""when showing content, filter out some keys"""
return {k: v for k, v in obj.items() if not (k.startswith("_") or k.endswith("uuid"))}
PLAY_COLUMNS = [
"__play_name",
"__ok",
"__changed",
"__unreachable",
"__failed",
"__skipped",
"__ignored",
"__in_progress",
"__task_count",
"__progress",
]
TASK_LIST_COLUMNS = [
"__result",
"__host",
"__number",
"__changed",
"__task",
"__task_action",
"__duration",
]
| 37.091916 | 100 | 0.559921 |
6738c6913f593e8f3489b3d849753c160556f231 | 480 | py | Python | storagetest/pkgs/pts/__init__.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | storagetest/pkgs/pts/__init__.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | storagetest/pkgs/pts/__init__.py | liufeng-elva/storage-test2 | 5364cc00dbe71b106f1bb740bf391e6124788bf4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 13:37
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
"""
phoronix-test-suite: Main for Performance Test
===================
https://github.com/phoronix-test-suite/phoronix-test-suite
The Phoronix Test Suite is the most comprehensive testing and
benchmarking platform available for Linux, Solaris, macOS, Windows,
and BSD operating systems.
"""
if __name__ == '__main__':
pass
| 22.857143 | 68 | 0.683333 |
673a1a8a7022fbc7e3838045a6969aad19ff37aa | 8,279 | py | Python | owlbot.py | rahul2393/python-spanner | 86d33905269accabfc6d68dae0f2b78bec96026a | [
"Apache-2.0"
] | null | null | null | owlbot.py | rahul2393/python-spanner | 86d33905269accabfc6d68dae0f2b78bec96026a | [
"Apache-2.0"
] | null | null | null | owlbot.py | rahul2393/python-spanner | 86d33905269accabfc6d68dae0f2b78bec96026a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
from pathlib import Path
from typing import List, Optional
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
def get_staging_dirs(
    # This is a customized version of the s.get_staging_dirs() function
    # from synthtool to cater for copying 3 different folders from
    # googleapis-gen:
    # spanner, spanner/admin/instance and spanner/admin/database.
    # Source:
    # https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280
    default_version: Optional[str] = None,
    sub_directory: Optional[str] = None,
) -> List[Path]:
    """Returns the list of directories, one per version, copied from
    https://github.com/googleapis/googleapis-gen. Will return in lexical sorting
    order with the exception of the default_version which will be last (if specified).

    Args:
        default_version (str): the default version of the API. The directory for this version
            will be the last item in the returned list if specified.
        sub_directory (str): if a `sub_directory` is provided, only the directories within the
            specified `sub_directory` will be returned.

    Returns: the empty list if no file were copied.
    """
    staging = Path("owl-bot-staging")
    if sub_directory:
        staging /= sub_directory
    if staging.is_dir():
        # Collect the subdirectories of the staging directory.
        versions = [v.name for v in staging.iterdir() if v.is_dir()]
        # Reorder the versions so the default version always comes last.
        versions = [v for v in versions if v != default_version]
        versions.sort()
        if default_version is not None:
            versions += [default_version]
        dirs = [staging / v for v in versions]
        # Register each directory with synthtool so later s.move()/s.replace()
        # calls accept paths inside it.
        for dir in dirs:
            s._tracked_paths.add(dir)
        return dirs
    else:
        return []
spanner_default_version = "v1"
spanner_admin_instance_default_version = "v1"
spanner_admin_database_default_version = "v1"
for library in get_staging_dirs(spanner_default_version, "spanner"):
# Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
r""".
Attributes:""",
r""".\n
Attributes:""",
)
# Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
r""".
Attributes:""",
r""".\n
Attributes:""",
)
# Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
"""\n ==.*?==\n""",
":",
)
# Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
"""\n --.*?--\n""",
":",
)
s.move(
library,
excludes=[
"google/cloud/spanner/**",
"*.*",
"docs/index.rst",
"google/cloud/spanner_v1/__init__.py",
],
)
for library in get_staging_dirs(
spanner_admin_instance_default_version, "spanner_admin_instance"
):
s.move(
library,
excludes=["google/cloud/spanner_admin_instance/**", "*.*", "docs/index.rst"],
)
for library in get_staging_dirs(
spanner_admin_database_default_version, "spanner_admin_database"
):
s.move(
library,
excludes=["google/cloud/spanner_admin_database/**", "*.*", "docs/index.rst"],
)
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
microgenerator=True, samples=True, cov_level=99, split_system_tests=True,
)
s.move(templated_files,
excludes=[
".coveragerc",
".github/workflows", # exclude gh actions as credentials are needed for tests
]
)
# Ensure CI runs on a new instance each time
s.replace(
".kokoro/build.sh",
"# Remove old nox",
"""\
# Set up creating a new instance for each system test run
export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
# Remove old nox""",
)
# Update samples folder in CONTRIBUTING.rst
s.replace("CONTRIBUTING.rst", "samples/snippets", "samples/samples")
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples()
# ----------------------------------------------------------------------------
# Customize noxfile.py
# ----------------------------------------------------------------------------
open_telemetry_test = """
# XXX Work around Kokoro image's older pip, which borks the OT install.
session.run("pip", "install", "--upgrade", "pip")
session.install("-e", ".[tracing]", "-c", constraints_path)
# XXX: Dump installed versions to debug OT issue
session.run("pip", "list")
# Run py.test against the unit tests with OpenTelemetry.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
"""
place_before(
"noxfile.py",
"@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)",
open_telemetry_test,
escape="()",
)
skip_tests_if_env_var_not_set = """# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get(
"SPANNER_EMULATOR_HOST", ""
):
session.skip(
"Credentials or emulator host must be set via environment variable"
)
"""
place_before(
"noxfile.py",
"# Install pyopenssl for mTLS testing.",
skip_tests_if_env_var_not_set,
escape="()",
)
s.replace(
"noxfile.py",
"""f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google",
"--cov=tests/unit",""",
"""\"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",""",
)
s.replace(
"noxfile.py",
r"""session.install\("-e", "."\)""",
"""session.install("-e", ".[tracing]")""",
)
s.replace(
"noxfile.py",
r"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install\("mock", "pytest", "google-cloud-testutils", "-c", constraints_path\)
session.install\("-e", ".", "-c", constraints_path\)""",
"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".[tracing]", "-c", constraints_path)""",
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| 32.214008 | 125 | 0.613721 |
673a564ceef3de9745d7d4bb80242204d7ba623d | 1,843 | py | Python | k_means.py | sokrutu/imagemean | 680bab26a1841cd8d4e03beba020709a5cb434a2 | [
"MIT"
] | null | null | null | k_means.py | sokrutu/imagemean | 680bab26a1841cd8d4e03beba020709a5cb434a2 | [
"MIT"
] | null | null | null | k_means.py | sokrutu/imagemean | 680bab26a1841cd8d4e03beba020709a5cb434a2 | [
"MIT"
] | null | null | null | from random import randint
def k_means(data, K):
    """k-means clustering of D-dimensional points.

    Centroids are initialised uniformly at random in [0, 255] per
    coordinate, so the input is assumed to be RGB-like data in that range.

    :param data: NxD array of numbers
    :param K: The number of clusters
    :return: Tuple of cluster means (KxD array) and cluster assignments
        (length-N list with values from 0 to K-1)
    """
    N = len(data)
    D = len(data[0])
    # Random initial centroids.  NOTE(review): hard-coded to 3 coordinates
    # (RGB); presumably D == 3 is expected here -- confirm with callers.
    means = [[randint(0, 255), randint(0, 255), randint(0, 255)] for _ in range(K)]
    assignments = [None] * N
    changed = True
    while changed:
        # Snapshot the current centroids.  A shallow copy is essential: the
        # original `old_means = means` aliased the same list, so the
        # convergence check below always compared the list with itself and
        # the loop terminated after a single iteration.
        old_means = list(means)
        # Assignment step: attach every point to its nearest centroid.
        for n in range(N):
            # Was initialised to 442.0 (just above the max RGB distance,
            # sqrt(3 * 255**2) ~= 441.7); inf behaves identically for RGB
            # input, avoids shadowing the builtin `min`, and generalizes.
            best_dist = float("inf")
            index = -1
            for k in range(K):
                temp = __distance(data[n], means[k], D)
                if temp <= best_dist:
                    best_dist = temp
                    index = k
            assignments[n] = index
        # Update step: recompute each centroid as the mean of its members.
        # (Direct comparison replaces the original O(N^2) `i in indices`
        # membership scan.)
        for k in range(K):
            members = [x for i, x in enumerate(data) if assignments[i] == k]
            # NOTE(review): `members` may be empty if a cluster loses all of
            # its points; behavior then depends on __mean -- confirm.
            means[k] = __mean(members, D)
        # Converged once no centroid moved during this iteration.
        changed = False
        for k in range(K):
            if old_means[k] != means[k]:
                changed = True
                break
    return (means, assignments)
| 25.597222 | 101 | 0.511666 |
673ab82d9ec7dbd59a48086985188478a17a2fc5 | 756 | py | Python | contrib/analysis_server/src/analysis_server/__init__.py | Kenneth-T-Moore/OpenMDAO-Framework | 76e0ebbd6f424a03b547ff7b6039dea73d8d44dc | [
"Apache-2.0"
] | 3 | 2015-06-02T00:36:28.000Z | 2018-11-03T00:35:21.000Z | contrib/analysis_server/src/analysis_server/__init__.py | JustinSGray/OpenMDAO-Framework | 7ebd7fda0b10fbe8a86ae938dc4f135396dd9759 | [
"Apache-2.0"
] | null | null | null | contrib/analysis_server/src/analysis_server/__init__.py | JustinSGray/OpenMDAO-Framework | 7ebd7fda0b10fbe8a86ae938dc4f135396dd9759 | [
"Apache-2.0"
] | 1 | 2020-07-15T02:45:54.000Z | 2020-07-15T02:45:54.000Z | """
Support for interacting with ModelCenter via the AnalysisServer protocol.
Client-mode access to an AnalysisServer is provided by the 'client', 'factory',
and 'proxy' modules. Server-mode access by ModelCenter is provided by the
'server' and 'wrapper' modules.
An extension to the protocol allows 'eggs' to pe 'published': the egg is sent
to the server and made part of the server's set of supported components.
"""
from __future__ import absolute_import
from .client import Client
from .factory import ASFactory
from .server import Server, start_server, stop_server, DEFAULT_PORT
from .stream import Stream
from .units import have_translation, get_translation, set_translation
from .publish import publish_class, publish_object, publish_egg
| 36 | 79 | 0.797619 |
673b17b5d8b3ab21d7358bca547447f1eb5fad33 | 24,476 | py | Python | 3rd party/YOLO_network.py | isaiasfsilva/ROLO | 6612007e35edb73dac734e7a4dac2cd4c1dca6c1 | [
"Apache-2.0"
] | 962 | 2016-07-22T01:36:20.000Z | 2022-03-30T01:34:35.000Z | 3rd party/YOLO_network.py | isaiasfsilva/ROLO | 6612007e35edb73dac734e7a4dac2cd4c1dca6c1 | [
"Apache-2.0"
] | 57 | 2016-08-12T15:33:31.000Z | 2022-01-29T19:16:01.000Z | 3rd party/YOLO_network.py | isaiasfsilva/ROLO | 6612007e35edb73dac734e7a4dac2cd4c1dca6c1 | [
"Apache-2.0"
] | 342 | 2016-07-22T01:36:26.000Z | 2022-02-26T23:00:25.000Z | import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
'''----------------------------------------main-----------------------------------------------------'''
if __name__=='__main__':
main(sys.argv)
| 35.6793 | 209 | 0.664774 |
673cf80cda7d6f2ddfed4ffa2f717379b2c4aa55 | 3,146 | py | Python | pipenv/cmdparse.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | 23 | 2017-01-20T01:18:31.000Z | 2017-01-20T17:25:11.000Z | pipenv/cmdparse.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | 1 | 2017-01-20T05:13:58.000Z | 2017-01-20T05:13:58.000Z | pipenv/cmdparse.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | [
"MIT"
] | null | null | null | import itertools
import re
import shlex
def cmdify(self):
    """Encode this command into a single cmd-executable string.

    Re-implements CreateProcess's quoting logic to turn the command and
    its argument list into one string for the shell to interpret:

    * All double quotes are escaped with a backslash.
    * Existing backslashes before a quote are doubled, so they are all
      escaped properly.
    * Backslashes elsewhere are left as-is; cmd will interpret them
      literally.

    The result is then quoted into a pair of double quotes to be grouped.

    An argument is intentionally not quoted if it does not contain foul
    characters, for compatibility with Windows built-in commands that
    don't work well with quotes, e.g. everything with `echo`, and
    DOS-style (forward slash) switches.  Foul characters are whitespace,
    carets (pypa/pipenv#3307), and parentheses in the command itself
    (pypa/pipenv#3168).

    Carets are essentially "lossy" when parsed: in cmd.exe,
    `echo "foo^bar"` and `echo foo^^bar` produce different output yet
    both parse as `foo^bar`, so there is no sensible way to tell what
    was actually passed in.  This implementation assumes the quoted
    variation (the first) since it is easier to implement, and arguably
    the more common case.

    The intended use of this function is to pre-process an argument list
    before passing it into ``subprocess.Popen(..., shell=True)``.

    See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
    """
    # The command may additionally need quoting for parentheses; each
    # argument only for whitespace and carets.
    pieces = [_quote_if_contains(self.command, r"[\s^()]")]
    for argument in self.args:
        pieces.append(_quote_if_contains(argument, r"[\s^]"))
    return " ".join(pieces)
| 30.543689 | 96 | 0.62206 |
673d6da7ddbe2f62dc10d702de83d4dd27b4df32 | 1,059 | py | Python | msph/clients/ms_online.py | CultCornholio/solenya | 583cb5f36825808c7cdc2de03f565723a32ae8d3 | [
"MIT"
] | 11 | 2021-09-01T05:04:08.000Z | 2022-02-17T01:09:58.000Z | msph/clients/ms_online.py | CultCornholio/solenya | 583cb5f36825808c7cdc2de03f565723a32ae8d3 | [
"MIT"
] | null | null | null | msph/clients/ms_online.py | CultCornholio/solenya | 583cb5f36825808c7cdc2de03f565723a32ae8d3 | [
"MIT"
] | 2 | 2021-09-08T19:12:53.000Z | 2021-10-05T17:52:11.000Z | from .framework import Client, Resource
from . import constants as const
client = Client(
base_url='https://login.microsoftonline.com',
base_headers={
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
'Content-Type': 'application/x-www-form-urlencoded',
}
)
| 32.090909 | 110 | 0.686497 |
673f2e75107755cce6965c485de6141329c56f72 | 1,868 | py | Python | warn/platforms/job_center/cache.py | anikasikka/warn-scraper | 13efac478ac06982bf68ce67e15db976ac07f101 | [
"Apache-2.0"
] | 12 | 2022-01-18T20:04:41.000Z | 2022-03-24T21:26:31.000Z | warn/platforms/job_center/cache.py | anikasikka/warn-scraper | 13efac478ac06982bf68ce67e15db976ac07f101 | [
"Apache-2.0"
] | 163 | 2022-01-14T19:30:23.000Z | 2022-03-31T23:48:48.000Z | warn/platforms/job_center/cache.py | anikasikka/warn-scraper | 13efac478ac06982bf68ce67e15db976ac07f101 | [
"Apache-2.0"
] | 4 | 2022-01-19T20:40:13.000Z | 2022-02-22T21:36:34.000Z | import logging
import re
from warn.cache import Cache as BaseCache
from .urls import urls
logger = logging.getLogger(__name__)
| 34.592593 | 83 | 0.579764 |
673f39d965787c5f1eaa35294c38eb2b5dda219c | 7,312 | py | Python | ebcli/core/abstractcontroller.py | senstb/aws-elastic-beanstalk-cli | ef27ae50e8be34ccbe29bc6dc421323bddc3f485 | [
"Apache-2.0"
] | 110 | 2020-01-15T22:58:46.000Z | 2022-03-27T20:47:33.000Z | ebcli/core/abstractcontroller.py | senstb/aws-elastic-beanstalk-cli | ef27ae50e8be34ccbe29bc6dc421323bddc3f485 | [
"Apache-2.0"
] | 89 | 2020-01-15T23:18:34.000Z | 2022-03-31T21:56:05.000Z | ebcli/core/abstractcontroller.py | senstb/aws-elastic-beanstalk-cli | ef27ae50e8be34ccbe29bc6dc421323bddc3f485 | [
"Apache-2.0"
] | 50 | 2020-01-15T22:58:53.000Z | 2022-02-11T17:39:28.000Z | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
import os
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
| 33.085973 | 101 | 0.603665 |
673f86c193b95f2ceb11fd09422584819f2d7221 | 346 | py | Python | python/speaktest.py | kyle-cook/templates | f1047a8c31a42507acbd7a27e66db0825be811a6 | [
"MIT"
] | null | null | null | python/speaktest.py | kyle-cook/templates | f1047a8c31a42507acbd7a27e66db0825be811a6 | [
"MIT"
] | null | null | null | python/speaktest.py | kyle-cook/templates | f1047a8c31a42507acbd7a27e66db0825be811a6 | [
"MIT"
] | null | null | null | import unittest
import speak
if __name__ == "__main__":
unittest.main()
| 21.625 | 64 | 0.66474 |
674032fc8a912ba3dd53e6c5a60619d54e34cbd4 | 482 | py | Python | c2f_loop.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | c2f_loop.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | c2f_loop.py | devopsprosiva/python | 07311d7597c0895554efe8013b57f218a0f11bb5 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
import sys
temperatures=[10,-20,-289,100]
for temp in temperatures:
file = open('temperatures.txt','a+')
if temp > -273.15:
temp_output = c2f(temp)
file.write(str(temp_output))
file.write("\n")
file.close()
| 22.952381 | 91 | 0.620332 |
67409afcdfe55eae6e448c076e01c6ac7a7788be | 2,285 | py | Python | problems/eggs/services/confirm_min_throws_server.py | giuliagalvan/TAlight | 3471ea9c7f13ade595ae579db0713135da849f13 | [
"MIT"
] | null | null | null | problems/eggs/services/confirm_min_throws_server.py | giuliagalvan/TAlight | 3471ea9c7f13ade595ae579db0713135da849f13 | [
"MIT"
] | null | null | null | problems/eggs/services/confirm_min_throws_server.py | giuliagalvan/TAlight | 3471ea9c7f13ade595ae579db0713135da849f13 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# METADATA OF THIS TAL_SERVICE:
problem="eggs"
service="confirm_min_throws"
args_list = [
('min',int),
('n_eggs',int),
('n_floors',int),
('lang',str),
('ISATTY',bool),
]
from sys import stderr, exit, argv
from random import randrange
from math import inf as IMPOSSIBLE
from multilanguage import Env, Lang, TALcolors
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
# INITIALIZATION: one DP row per available egg. Column 0 (zero floors) costs
# 0 launches; IMPOSSIBLE / None mark entries not yet computed.
table = [ [0] + [IMPOSSIBLE] * ENV['n_floors'] ]
for u in range(ENV['n_eggs']):
    table.append([0] + [None] * ENV['n_floors'])
# INDUCTIVE STEP: the min-max recursion with nature playing against us.
# table[u][f] = min over the first launch floor of 1 + the worse of the two
# outcomes: the egg survives (f - first_launch_floor floors left, u eggs)
# or it breaks (first_launch_floor - 1 floors left, u - 1 eggs).
for u in range(1,1+ENV['n_eggs']):
    for f in range(1,1+ENV['n_floors']):
        table[u][f] = IMPOSSIBLE
        for first_launch_floor in range(1,1+f):
            table[u][f] = min(table[u][f],1+max(table[u][f-first_launch_floor],table[u-1][first_launch_floor-1]))
# table[n_eggs][n_floors] is the optimal worst-case number of launches;
# compare it with the user's claimed minimum and report the verdict.
if table[ENV['n_eggs']][ENV['n_floors']] < ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
if table[ENV['n_eggs']][ENV['n_floors']] > ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then no policy guarantees you to find out the truth within {ENV['min']} launches in every possible scenario (aka, whathever the truth is).")
if table[ENV['n_eggs']][ENV['n_floors']] == ENV['min']:
    print(f"Yes! Indeed, {ENV['min']} is the smallest possible natural B such that, when you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']}, still there exists a policy that guarantees you to find out the truth within B launches in every possible scenario.")
exit(0)
67416b98862ed94f8c8dd26ec4773d955430f943 | 460 | py | Python | pylox/error_reporting.py | hculpan/pylox | a5bde624f289115575e9e01bd171b6271c2e899a | [
"MIT"
] | 1 | 2018-05-18T08:16:02.000Z | 2018-05-18T08:16:02.000Z | pylox/error_reporting.py | hculpan/pylox | a5bde624f289115575e9e01bd171b6271c2e899a | [
"MIT"
] | null | null | null | pylox/error_reporting.py | hculpan/pylox | a5bde624f289115575e9e01bd171b6271c2e899a | [
"MIT"
] | null | null | null | errorFound = False
| 19.166667 | 73 | 0.630435 |
674628d16822f8d4efcc764dcb583fc1ae5fb351 | 86 | py | Python | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | deec37503ab0c5d0bcca1a035f7909f7ce8ef653 | [
"MIT"
] | null | null | null | #$ header variable x :: int
#$ acc parallel private(idx)
#$ omp parallel private(idx)
| 21.5 | 28 | 0.697674 |
6746ba919e9bbb1f397db2429492049488882aa8 | 1,361 | py | Python | server/admin.py | allisto/allistic-server | 848edb71b4709ad0734b83a43de4ac8c58e88fdf | [
"Apache-2.0"
] | 5 | 2019-03-04T08:28:08.000Z | 2019-03-05T05:55:55.000Z | server/admin.py | allisto/allistic-server | 848edb71b4709ad0734b83a43de4ac8c58e88fdf | [
"Apache-2.0"
] | 7 | 2019-03-03T19:45:02.000Z | 2021-03-18T21:26:08.000Z | server/admin.py | allisto/allistic-server | 848edb71b4709ad0734b83a43de4ac8c58e88fdf | [
"Apache-2.0"
] | 1 | 2019-03-01T11:15:07.000Z | 2019-03-01T11:15:07.000Z | from django.contrib import admin
from .models import Doctor, ConsultationTime, Medicine, Allergy, Child, Parent
admin.site.site_header = "Allisto - We Do Good"
admin.site.register(ConsultationTime)
| 30.931818 | 87 | 0.702425 |
67470f3c7a77e0bc298ea17e0cb678c91fe2570a | 4,067 | py | Python | backend/ir/ir.py | zengljnwpu/yaspc | 5e85efb5fb8bee02471814b10e950dfb5b04c5d5 | [
"MIT"
] | null | null | null | backend/ir/ir.py | zengljnwpu/yaspc | 5e85efb5fb8bee02471814b10e950dfb5b04c5d5 | [
"MIT"
] | null | null | null | backend/ir/ir.py | zengljnwpu/yaspc | 5e85efb5fb8bee02471814b10e950dfb5b04c5d5 | [
"MIT"
] | null | null | null |
from backend.entity.entity import DefinedFuntion
from backend.ir.dumper import Dumper
from backend.ir.stmt import Assign
from backend.ir.stmt import Return
from backend.ir.expr import Bin
from backend.ir.expr import Call
from backend.entity.scope import *
# This class were used to import IR from json text
| 27.856164 | 113 | 0.588149 |
67475ec9e070602cd855d1d0690b385ad1b9adb8 | 10,060 | py | Python | forest/benchmarking/tests/test_superoperator_transformations.py | stjordanis/forest-benchmarking | f9ad9701c2d253de1a0c922d7220ed7de75ac685 | [
"Apache-2.0"
] | 40 | 2019-01-25T18:35:24.000Z | 2022-03-13T11:21:18.000Z | forest/benchmarking/tests/test_superoperator_transformations.py | stjordanis/forest-benchmarking | f9ad9701c2d253de1a0c922d7220ed7de75ac685 | [
"Apache-2.0"
] | 140 | 2019-01-25T20:09:02.000Z | 2022-03-12T01:08:01.000Z | forest/benchmarking/tests/test_superoperator_transformations.py | stjordanis/forest-benchmarking | f9ad9701c2d253de1a0c922d7220ed7de75ac685 | [
"Apache-2.0"
] | 22 | 2019-02-01T13:18:35.000Z | 2022-01-12T15:03:13.000Z | import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by hand calculations found in the docs we check conversion
# between one qubit channels with one Kraus operator (Hadamard) and two
# Kraus operators (the amplitude damping channel). Additionally we check
# a few two qubit channel conversions to get additional confidence.
# Hadamard gate expressed in four superoperator representations; values
# taken from the hand calculations referenced in the module docstring.
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
                           [0, 1, 0, 1],
                           [0, 0, 0, 0],
                           [0, 1, 0, 1]])

HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
                             [0, 0, 0, 1],
                             [0, 0, -1, 0],
                             [0, 1, 0, 0]])

HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
                             [1, -1, 1, -1],
                             [1, 1, -1, -1],
                             [1, -1, -1, 1]])

HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [-1, -1, -1, 1]])

# Single Qubit Pauli Channel
# Pauli twirled Amplitude damping channel
# I \otimes Z channel or gate (two qubits)
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
# Superoperator of I (x) Z is diagonal in the two-qubit Pauli basis.
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])

# one and zero state as a density matrix
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])

# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)

# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
          np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
674979db2e403ec19a4fc12df3f2a373c9172b77 | 86 | py | Python | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | [
"MIT"
] | 1 | 2021-06-22T22:14:16.000Z | 2021-06-22T22:14:16.000Z | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | [
"MIT"
] | null | null | null | OIL/__init__.py | vjdad4m/OIL | a664fe213723fe354796245632f58f31583bcba0 | [
"MIT"
] | null | null | null | import OIL.color
import OIL.label
import OIL.parser
import OIL.tools
import OIL.errors | 17.2 | 17 | 0.837209 |
6749e169faceb4050a87041472715faed2d19901 | 2,866 | py | Python | lib/spack/spack/cmd/load.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-03-05T10:54:32.000Z | 2021-03-05T14:14:52.000Z | lib/spack/spack/cmd/load.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | lib/spack/spack/cmd/load.py | padamson/spack | d3f67a48552691b4846ccc4a10f76740b154090c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-07-19T20:31:27.000Z | 2021-07-19T21:14:14.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.store
import spack.user_environment as uenv
import spack.util.environment
description = "add package to the user environment"
section = "user environment"
level = "short"
def setup_parser(subparser):
    """Parser is only constructed so that this prints a nice help
    message with -h. """
    arguments.add_common_arguments(
        subparser, ['recurse_dependencies', 'installed_specs'])

    # The shell flags are mutually exclusive: only one output dialect
    # can be requested at a time.
    shell_group = subparser.add_mutually_exclusive_group()
    for shell_name in ('sh', 'csh', 'fish'):
        shell_group.add_argument(
            '--{0}'.format(shell_name),
            action='store_const', dest='shell', const=shell_name,
            help="print {0} commands to load the package".format(shell_name))

    subparser.add_argument(
        '--first', dest='load_first', action='store_true', default=False,
        help="load the first match if multiple packages match the spec")
    subparser.add_argument(
        '--only', dest='things_to_load',
        default='package,dependencies',
        choices=['package', 'dependencies'],
        help="""select whether to load the package and its dependencies
the default is to load the package and all dependencies
alternatively one can decide to load only the package or only
the dependencies"""
    )
| 34.119048 | 79 | 0.664689 |
674ba1aa522d2bf108faa75b0291c6fcbe497e66 | 1,680 | py | Python | poisson_image_editing.py | zishun/Poisson-EVA2019 | de3dd88f4046f63575d02c9395b26a4b1d0b6258 | [
"BSD-3-Clause"
] | null | null | null | poisson_image_editing.py | zishun/Poisson-EVA2019 | de3dd88f4046f63575d02c9395b26a4b1d0b6258 | [
"BSD-3-Clause"
] | null | null | null | poisson_image_editing.py | zishun/Poisson-EVA2019 | de3dd88f4046f63575d02c9395b26a4b1d0b6258 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
if __name__ == '__main__':
    # Poisson image editing demo: paste a foreground image into a background
    # wherever the mask is non-zero.
    folder = './data/pie/'
    # Non-zero mask pixels become NaN, marking the region to be blended.
    mask = imageio.imread(folder+'mask.png')[:, :, 0].astype(np.float32)
    background = imageio.imread(folder+'mona.png')[:, :, :3]/255
    foreground = imageio.imread(folder+'gine.png')[:, :, :3]/255
    mask[mask > 0] = np.nan
    # Cached index <-> subscript conversion tables for the masked region.
    ind2sub_fn = folder+'ind2sub.npy'
    sub2ind_fn = folder+'sub2ind.npy'
    # NOTE(review): ind_sub_conversion() and pie() are assumed to be defined
    # elsewhere in this module -- confirm against the full source file.
    ind_sub_conversion(mask, ind2sub_fn, sub2ind_fn)
    FDMC = FiniteDifferenceMatrixConstruction(ind2sub_fn, sub2ind_fn)
    result = pie(FDMC, background, foreground)
    imageio.imwrite(folder+'result.png', result)
| 32.941176 | 72 | 0.671429 |
674c93e05bb72036422e17078331287c9f481a64 | 10,343 | py | Python | mindsdb/api/http/initialize.py | mindsdb/main | 2c7c09a756c17a47f2ff4a38bf45203d706240ee | [
"MIT"
] | 261 | 2018-09-28T02:32:17.000Z | 2018-12-10T06:30:54.000Z | mindsdb/api/http/initialize.py | mindsdb/main | 2c7c09a756c17a47f2ff4a38bf45203d706240ee | [
"MIT"
] | 27 | 2018-09-26T08:49:11.000Z | 2018-12-10T14:42:52.000Z | mindsdb/api/http/initialize.py | mindsdb/main | 2c7c09a756c17a47f2ff4a38bf45203d706240ee | [
"MIT"
] | 46 | 2018-10-06T10:11:18.000Z | 2018-12-10T04:02:17.000Z | from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
def update_static():
''' Update Scout files basing on compatible-config.json content.
Files will be downloaded and updated if new version of GUI > current.
Current GUI version stored in static/version.txt.
'''
config = Config()
log = get_log('http')
static_path = Path(config['paths']['static'])
last_gui_version_lv = get_last_compatible_gui_version()
current_gui_version_lv = get_current_gui_version()
if last_gui_version_lv is False:
return False
if current_gui_version_lv is not None:
if current_gui_version_lv >= last_gui_version_lv:
return True
log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
success = download_gui(temp_dir, last_gui_version_lv.vstring)
if success is False:
shutil.rmtree(temp_dir)
return False
temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
shutil.rmtree(temp_dir_for_rm)
shutil.copytree(str(static_path), temp_dir_for_rm)
shutil.rmtree(str(static_path))
shutil.copytree(temp_dir, str(static_path))
shutil.rmtree(temp_dir_for_rm)
log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
return True
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
"""Open webbrowser with url when http service is started.
If some error then do nothing.
"""
init_static_thread.join()
inject_telemetry_to_static(static_folder)
logger = get_log('http')
try:
is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
pid=pid, port=port)
if is_http_active:
webbrowser.open(url)
except Exception as e:
logger.error(f'Failed to open {url} in webbrowser with exception {e}')
logger.error(traceback.format_exc())
session.close()
| 34.591973 | 178 | 0.667601 |
674df0520020cb5c060d141941c47d1d5a1e8c48 | 9,686 | py | Python | pyrocov/io.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 22 | 2021-09-14T04:33:11.000Z | 2022-02-01T21:33:05.000Z | pyrocov/io.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 7 | 2021-11-02T13:48:35.000Z | 2022-03-23T18:08:35.000Z | pyrocov/io.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 6 | 2021-09-18T01:06:51.000Z | 2022-01-10T02:22:06.000Z | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import functools
import io
import logging
import math
import re
import sys
import torch
import torch.multiprocessing as mp
from Bio import AlignIO
from Bio.Phylo.NewickIO import Parser
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from .phylo import Phylogeny
logger = logging.getLogger(__name__)
FILE_FORMATS = {
"nex": "nexus",
"nexus": "nexus",
"fasta": "fasta",
"xml": "beast",
}
def count_nexus_trees(filename):
    """
    Counts the number of trees in a nexus file.
    """
    # In "count" format the reader yields one unit per tree, so the total
    # is simply the running sum over the generator.
    total = 0
    for tree_flag in read_nexus_trees(filename, format="count"):
        total += tree_flag
    return total
def stack_nexus_trees(filename, *, max_num_trees=math.inf, processes=0):
    """
    Loads a batch of trees from a nexus file.
    """
    # Read the trees in "torch" format, then stack them into one Phylogeny.
    loaded_trees = read_nexus_trees(
        filename,
        format="torch",
        max_num_trees=max_num_trees,
        processes=processes,
    )
    return Phylogeny.stack(loaded_trees)
def read_newick_tree(filename):
    """
    Parse a single newick tree and convert to a ``Phylogeny``.
    """
    # The whole file is expected to hold one newick string.
    with open(filename) as handle:
        newick_string = handle.read().strip()
    bio_tree = next(Parser.from_string(newick_string).parse())
    return Phylogeny.from_bio_phylo(bio_tree)
def read_alignment(
    filename, format=None, *, max_taxa=math.inf, max_characters=math.inf
):
    """
    Reads a single alignment file to a torch tensor of probabilites.

    :param str filename: Name of input file.
    :param str format: Optional input format, e.g. "nexus" or "fasta".
    :param int max_taxa: Optional number of taxa for truncation.
    :param int max_characters: Optional number of characters for truncation.
    :rtype: torch.Tensor
    :returns: A float tensor of shape ``(num_sequences, num_characters,
        num_bases)`` that is normalized along its rightmost dimension. Note
        that ``num_bases`` is 5 = 4 + 1, where the final base denotes a gap or
        indel.
    """
    # Load a Bio.Align.MultipleSeqAlignment object.
    logger.info("Loading data from %s", filename)
    if format is None:
        # Infer the format from the file extension.
        suffix = filename.split(".")[-1].lower()
        format = FILE_FORMATS.get(suffix)
        if format is None:
            raise ValueError("Please specify a file format, e.g. 'nexus' or 'fasta'")
    # BUGFIX: the dispatch below used to be an ``elif`` chain hanging off the
    # ``format is None`` branch, so an auto-detected format fell through
    # without ever assigning ``alignment`` (NameError later on).
    if format == "nexus":
        alignment = _read_alignment_nexus(filename)
    elif format == "beast":
        alignment = _read_alignment_beast(filename)
    else:
        alignment = AlignIO.read(filename, format)

    # Convert to a single torch.Tensor, optionally truncating taxa/characters.
    num_taxa = min(len(alignment), max_taxa)
    if num_taxa < len(alignment):
        alignment = alignment[:num_taxa]
    num_characters = min(len(alignment[0]), max_characters)
    if num_characters < len(alignment[0]):
        alignment = alignment[:, :num_characters]
    logger.info(f"parsing {num_taxa} taxa x {num_characters} characters")
    codebook = _get_codebook()
    # Default every position to the uniform distribution over the 5 "bases"
    # (A, C, G, T, gap), i.e. "missing".
    probs = torch.full((num_taxa, num_characters, 5), 1 / 5)
    for i in range(num_taxa):
        seq = alignment[i].seq
        if not VALID_CODES.issuperset(seq):
            raise ValueError(f"Invalid characters: {set(seq) - VALID_CODES}")
        # Replace gaps at ends with missing: leading/trailing runs of the
        # first/last character (if it is a gap or N) keep the uniform prior.
        beg, end = 0, probs.size(1)
        if seq[0] in "-.N":
            seq, old = seq.lstrip(seq[0]), seq
            beg += len(old) - len(seq)
        if seq[-1] in "-.N":
            seq, old = seq.rstrip(seq[-1]), seq
            end -= len(old) - len(seq)
        # Vectorized per-character lookup: the codebook is indexed by ordinal.
        probs[i, beg:end] = codebook[list(map(ord, seq))]
    assert torch.isfinite(probs).all()
    return probs
# Per-character probability rows over the 5 "bases" [A, C, G, T, gap],
# following the IUPAC nucleotide ambiguity codes.
# See https://www.bioinformatics.org/sms/iupac.html
NUCLEOTIDE_CODES = {
    # [ A, C, G, T, gap]
    "?": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "n": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "A": [1 / 1, 0.0, 0.0, 0.0, 0.0],  # adenine
    "C": [0.0, 1 / 1, 0.0, 0.0, 0.0],  # cytosine
    "G": [0.0, 0.0, 1 / 1, 0.0, 0.0],  # guanine
    "T": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # thymine
    "U": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # uracil
    "R": [1 / 2, 0.0, 1 / 2, 0.0, 0.0],  # A or G
    "Y": [0.0, 1 / 2, 0.0, 1 / 2, 0.0],  # C or T
    "S": [0.0, 1 / 2, 1 / 2, 0.0, 0.0],  # C or G
    "W": [1 / 2, 0.0, 0.0, 1 / 2, 0.0],  # A or T
    "K": [0.0, 0.0, 1 / 2, 1 / 2, 0.0],  # G or T
    "M": [1 / 2, 1 / 2, 0.0, 0.0, 0.0],  # A or C
    "B": [0.0, 1 / 3, 1 / 3, 1 / 3, 0.0],  # not A
    "D": [1 / 3, 0.0, 1 / 3, 1 / 3, 0.0],  # not C
    "H": [1 / 3, 1 / 3, 0.0, 1 / 3, 0.0],  # not G
    "V": [1 / 3, 1 / 3, 1 / 3, 0.0, 0.0],  # not T
    "N": [1 / 4, 1 / 4, 1 / 4, 1 / 4, 0.0],  # any base, but not a gap
    "-": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
    ".": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
}
# All characters accepted by the alignment reader.
VALID_CODES = set(NUCLEOTIDE_CODES)

# Inverse mapping: a set of concrete bases -> its IUPAC ambiguity code.
AMBIGUOUS_CODES = {
    frozenset("AG"): "R",
    frozenset("CT"): "Y",
    frozenset("CG"): "S",
    frozenset("AT"): "W",
    frozenset("GT"): "K",
    frozenset("AC"): "M",
    frozenset("CGT"): "B",
    frozenset("AGT"): "D",
    frozenset("ACT"): "H",
    frozenset("ACG"): "V",
    frozenset("ACGT"): "N",
}
# 6 two-base + 4 three-base + 1 four-base ambiguity codes.
assert len(AMBIGUOUS_CODES) == 6 + 4 + 1
| 31.044872 | 88 | 0.574024 |
674dfe34110c0256d54ed4a145016c108d5fa7fa | 1,439 | py | Python | core.py | mistifiedwarrior/house_price_prediction | c935650130ea6464f948706d057af6f044abbff6 | [
"MIT"
] | null | null | null | core.py | mistifiedwarrior/house_price_prediction | c935650130ea6464f948706d057af6f044abbff6 | [
"MIT"
] | null | null | null | core.py | mistifiedwarrior/house_price_prediction | c935650130ea6464f948706d057af6f044abbff6 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
| 31.977778 | 100 | 0.648367 |
674e48cd30f8211b37cb1b97721c2c716552aabd | 605 | py | Python | Python/bank-robbers.py | JaredLGillespie/CodinGame | 7e14078673300f66d56c8af4f66d9bf5d2229fa6 | [
"MIT"
] | 1 | 2020-01-05T17:44:57.000Z | 2020-01-05T17:44:57.000Z | Python/bank-robbers.py | JaredLGillespie/CodinGame | 7e14078673300f66d56c8af4f66d9bf5d2229fa6 | [
"MIT"
] | null | null | null | Python/bank-robbers.py | JaredLGillespie/CodinGame | 7e14078673300f66d56c8af4f66d9bf5d2229fa6 | [
"MIT"
] | 2 | 2020-09-27T16:02:53.000Z | 2021-11-24T09:08:59.000Z | # https://www.codingame.com/training/easy/bank-robbers
from heapq import *
solution()
| 20.862069 | 66 | 0.609917 |
674e497c1af4728fb031faf7f24fbf2bf5bd7b4b | 576 | py | Python | 14Django/day04/BookManager/introduction1.py | HaoZhang95/PythonAndMachineLearning | b897224b8a0e6a5734f408df8c24846a98c553bf | [
"MIT"
] | 937 | 2019-05-08T08:46:25.000Z | 2022-03-31T12:56:07.000Z | 14Django/day04/BookManager/introduction1.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 47 | 2019-09-17T10:06:02.000Z | 2022-03-11T23:46:52.000Z | 14Django/day04/BookManager/introduction1.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 354 | 2019-05-10T02:15:26.000Z | 2022-03-30T05:52:57.000Z | """
{{ }}
{% %}
{% |, Book.id | add: 1 <= 2 id+12
|: %} 2
{% if book.name|length > 4 %} |name.length
{{ book.pub_date|date:'Ymj' }}
"""
"""
CSRF
()
CSRF
"""
"""
session
session
""" | 24 | 85 | 0.670139 |
674eb289511fbd351f416105eb842fadb81a491d | 291 | py | Python | gammapy/maps/__init__.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/maps/__init__.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:55:46.000Z | 2020-10-29T19:55:46.000Z | gammapy/maps/__init__.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sky maps."""
from .base import *
from .geom import *
from .hpx import *
from .hpxnd import *
from .hpxsparse import *
from .hpxmap import *
from .wcs import *
from .wcsnd import *
from .wcsmap import *
from .sparse import *
| 22.384615 | 63 | 0.71134 |
674ebc40e603703da0b0ddbc5fe2fad3846b9a69 | 3,305 | py | Python | lhotse/dataset/sampling/utils.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-31T10:38:51.000Z | 2022-03-30T05:22:52.000Z | lhotse/dataset/sampling/utils.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-27T23:25:12.000Z | 2022-03-31T22:16:05.000Z | lhotse/dataset/sampling/utils.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 66 | 2020-11-01T06:08:08.000Z | 2022-03-29T02:03:07.000Z | import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
    sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
    """
    Function for finding 'pessimistic' batches, i.e. batches that have the highest potential
    to blow up the GPU memory during training. We will fully iterate the sampler and record
    the most risky batches under several criteria:
    - single longest cut
    - single longest supervision
    - largest batch cuts duration
    - largest batch supervisions duration
    - max num cuts
    - max num supervisions

    .. note: It is up to the users to convert the sampled CutSets into actual batches and test them
        by running forward and backward passes with their model.

    Example of how this function can be used with a PyTorch model
    and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`::

        sampler = SingleCutSampler(cuts, max_duration=300)
        dataset = K2SpeechRecognitionDataset()
        batches, scores = find_pessimistic_batches(sampler)
        for reason, cuts in batches.items():
            try:
                batch = dataset[cuts]
                outputs = model(batch)
                loss = loss_fn(outputs)
                loss.backward()
            except:
                print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}")
                raise

    :param sampler: An instance of a Lhotse :class:`.CutSampler`.
    :param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`.
        Indicates which position in the tuple we should look up for the CutSet.
    :return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.:
        ``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
    """
    # One scoring function per risk criterion; a larger score means a
    # riskier (more memory-hungry) batch.
    criteria = {
        "single_longest_cut": lambda cuts: max(c.duration for c in cuts),
        "single_longest_supervision": lambda cuts: max(
            sum(s.duration for s in c.supervisions) for c in cuts
        ),
        "largest_batch_cuts_duration": lambda cuts: sum(c.duration for c in cuts),
        "largest_batch_supervisions_duration": lambda cuts: sum(
            s.duration for c in cuts for s in c.supervisions
        ),
        "max_num_cuts": len,
        "max_num_supervisions": lambda cuts: sum(
            1 for c in cuts for _ in c.supervisions
        ),
    }
    try:
        # Pull the first batch eagerly to seed the running maxima; the
        # for-loop below continues consuming the *same* iterator.
        sampler = iter(sampler)
        first_batch = next(sampler)
        if isinstance(first_batch, tuple):
            first_batch = first_batch[batch_tuple_index]
    except StopIteration:
        warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
        return {}, {}

    # Running winners: the batch and its score currently maximizing each criterion.
    top_batches = {k: first_batch for k in criteria}
    top_values = {k: fn(first_batch) for k, fn in criteria.items()}
    for batch in sampler:
        if isinstance(batch, tuple):
            batch = batch[batch_tuple_index]
        for crit, fn in criteria.items():
            val = fn(batch)
            if val > top_values[crit]:
                top_values[crit] = val
                top_batches[crit] = batch
    return top_batches, top_values
| 39.345238 | 108 | 0.644781 |
674f2806f73a13483671e5b0ce4735f88b2f1c4f | 606 | py | Python | book/migrations/0010_auto_20170603_1441.py | pyprism/Hiren-Mail-Notify | 324583a2edd25da5d2077914a79da291e00c743e | [
"MIT"
] | null | null | null | book/migrations/0010_auto_20170603_1441.py | pyprism/Hiren-Mail-Notify | 324583a2edd25da5d2077914a79da291e00c743e | [
"MIT"
] | 144 | 2015-10-18T17:19:03.000Z | 2021-06-27T07:05:56.000Z | book/migrations/0010_auto_20170603_1441.py | pyprism/Hiren-Mail-Notify | 324583a2edd25da5d2077914a79da291e00c743e | [
"MIT"
] | 1 | 2015-10-18T17:04:39.000Z | 2015-10-18T17:04:39.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 08:41
from __future__ import unicode_literals
from django.db import migrations, models
| 23.307692 | 64 | 0.587459 |
674faa0b694ce161c45416e214ad1d35c7eb77fc | 1,218 | py | Python | contrib/ComparisonStatistics/Test/test_1.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | contrib/ComparisonStatistics/Test/test_1.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | contrib/ComparisonStatistics/Test/test_1.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | #!/usr/bin/env python
import ComparisonStatistics
import cdutil
import os,sys
# NOTE: this is a Python 2 script (print statements below).
# Reference dataset: 'tas' from the DNM model run in the cdutil sample data.
ref = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_dnm-95a.xml')
Ref=cdutil.VariableConditioner(ref)
Ref.var='tas'
Ref.id='reference'
# Test dataset: 'tas' from the CCSR model run.
tst = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_ccsr-95a.xml')
Tst=cdutil.VariableConditioner(tst)
Tst.var='tas'
Tst.id='test'
# Final Grid: a 10x10 degree target grid (36 longitudes from 0, 18
# latitudes from -85) onto which both datasets are mapped.
FG=cdutil.WeightedGridMaker()
FG.longitude.n=36
FG.longitude.first=0.
FG.longitude.delta=10.
FG.latitude.n=18
FG.latitude.first=-85.
FG.latitude.delta=10.
# Now the compall thing: build the comparison object from test, reference
# and the shared grid maker.
c=ComparisonStatistics.ComparisonStatistics(Tst,Ref,weightedGridMaker=FG)
c.fracmin=.5
c.minyr=3
icall=19
# Let's force the indices to be the same time span on both datasets.
c.variableConditioner1.cdmsKeywords['time']=('1979','1982','co')
c.variableConditioner2.cdmsKeywords['time']=slice(0,36)
print "Before computing:"
print c.variableConditioner1
(test,tfr),(ref,reffrc) = c.compute()
print "Test:",test
# Retrieve the rank for the time_domain 19 (monthly space time)
rank=c.rank(time_domain=19)
print 'Result for Rank:',rank
c.write('tmp.nc',comments='A simple example')
| 24.36 | 91 | 0.728243 |
674fc8fb47108fcde4353966aaff882285b50e79 | 1,087 | py | Python | mathipy/functions/linearithmic.py | BatiDyDx/maths-tools-python | e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50 | [
"MIT"
] | 1 | 2021-02-02T02:58:38.000Z | 2021-02-02T02:58:38.000Z | mathipy/functions/linearithmic.py | BatiDyDx/maths-tools-python | e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50 | [
"MIT"
] | null | null | null | mathipy/functions/linearithmic.py | BatiDyDx/maths-tools-python | e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50 | [
"MIT"
] | null | null | null | import math
import numpy as np
from mathipy.math import calculus | 30.194444 | 92 | 0.554738 |
674feabbfb04fd43b656a2ee09e804a9db0cc338 | 11,479 | py | Python | pivot_based_eccv2018/misc/expander/disambiguate.py | gujiuxiang/unpaired_im2text_iccv19 | cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2 | [
"MIT"
] | 18 | 2019-11-01T13:50:03.000Z | 2022-03-14T03:07:34.000Z | pivot_based_eccv2018/misc/expander/disambiguate.py | gujiuxiang/unpaired_im2text_iccv19 | cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2 | [
"MIT"
] | 7 | 2020-01-03T13:53:26.000Z | 2021-03-25T22:55:52.000Z | pivot_based_eccv2018/misc/expander/disambiguate.py | gujiuxiang/unpaired_im2text_iccv19 | cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2 | [
"MIT"
] | 3 | 2019-09-16T02:03:59.000Z | 2021-06-12T07:03:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the necessary functions to load a text-corpus from
NLTK, contract all possible sentences, applying POS-tags to the
contracted sentences and compare that with the original text.
The information about which contraction+pos-tag pair gets expanded to
which full form will be saved in a dictionary for use in expander.py
"""
__author__ = "Yannick Couzini"
# standard library imports
import pprint
import yaml
# third-party library imports
import nltk
# local library imports
import utils
# increase the allowed ram size that the models can use
# nltk.internals.config_java(options='-xmx2G')
def _find_sub_list(sublist, full_list):
    """Find every occurrence of ``sublist`` within ``full_list``.

    Args:
        sublist: list of words to look for.
        full_list: list of words to search through.

    Returns:
        A list of ``(first_index, last_index)`` tuples, both bounds
        inclusive, one per occurrence of ``sublist`` in ``full_list``.
    """
    matches = []
    window = len(sublist)
    for start, token in enumerate(full_list):
        # Cheap first-word comparison before matching the whole window.
        if token != sublist[0]:
            continue
        if full_list[start:start + window] == sublist:
            matches.append((start, start + window - 1))
    return matches
def _invert_contractions_dict():
    """Build and return the inverse of the contractions dictionary.

    Reads ``contractions.yaml`` (mapping contraction -> list of possible
    expansions) and returns a dict mapping each expansion back to its
    contraction.  Contractions with a single, unambiguous expansion are
    skipped because they need no disambiguation.  If two contractions
    share an expansion, the one read last wins and a warning is printed.
    """
    with open("contractions.yaml", "r") as stream:
        # load the dictionary containing all the contractions;
        # safe_load avoids executing arbitrary YAML tags (yaml.load
        # without an explicit Loader is deprecated/unsafe) and is
        # sufficient for this plain-mapping data file.
        contractions = yaml.safe_load(stream)
    # invert the dictionary for quicker finding of contractions
    expansions = dict()
    for key, value in contractions.items():
        if len(value) == 1:
            # unambiguous contraction -- nothing to disambiguate
            continue
        for expansion in value:
            if expansion in expansions:
                print("WARNING: As an contraction to {}, {} is replaced with"
                      " {}.".format(expansion,
                                    expansions[expansion],
                                    key))
            expansions[expansion] = key
    return expansions
def write_dictionary(pos_model,
                     sent_lst,
                     add_tags=0,
                     use_ner=False,
                     ner_args=None):
    """Build the disambiguation dictionary and write it to disk.

    Contract every sentence in ``sent_lst`` (via ``_contract_sentences``),
    POS-tag the contracted sentence, and record which
    (contraction words + POS tags) combination was expanded from which
    long form.  The resulting mapping is dumped to ``disambiguations.yaml``.

    Args:
        pos_model: an instance of StanfordPOSTagger.
        sent_lst: a list of sentences, each itself a list of single words.
        add_tags: number of extra POS tags taken from after the relevant
            contraction; more tags disambiguate further but (of course)
            spread out the data.
        use_ner: whether to use named-entity recognition for a potential
            increase in accuracy, at an obvious performance cost.
        ner_args: a list ``[StanfordNERTagger, tag_string]``; only needs
            to be supplied if ``use_ner`` is True.

    Returns:
        None, but writes a ``disambiguations.yaml`` file with
        disambiguations for the ambiguous contractions in
        ``contractions.yaml``.

    Raises:
        ValueError: if ``use_ner`` is True but no NER model is supplied.
    """
    # pylint: disable=too-many-locals
    if use_ner and (ner_args is None):
        raise ValueError("The use_ner flag is True but no NER"
                         " model has been supplied!")
    expansions = _invert_contractions_dict()
    output_dict = dict()
    ambiguity_counter = 0
    # NOTE(review): ``_contract_sentences`` is defined elsewhere; from the
    # usage below each ``tuple_rslt`` appears to be
    # (start_index, contracted_words, full_contracted_sentence) -- confirm.
    for tuple_rslt in _contract_sentences(expansions,
                                          sent_lst,
                                          use_ner=use_ner,
                                          ner_args=ner_args):
        # pos tag the sentence
        if use_ner:
            # first replace the NER tag with "it" so the POS tagger sees
            # a plausible token instead of the placeholder
            pos_sent = [word.replace(ner_args[1], "it") for word
                        in tuple_rslt[2]]
            # tag the sentence
            pos_sent = pos_model.tag(pos_sent)
            # and replace it with the tag again, keeping the assigned POS
            pos_sent = [(tuple_rslt[2][i], word_pos[1]) for i, word_pos
                        in enumerate(pos_sent)]
        else:
            pos_sent = pos_model.tag(tuple_rslt[2])
        # extract the pos tags on the contracted part
        contr_word_pos = pos_sent[tuple_rslt[0]:(tuple_rslt[0] +
                                                 len(tuple_rslt[1]))]
        if add_tags == 0:
            contr_pos = tuple(contr_word_pos)
        else:
            # NOTE(review): this slice starts at len(tuple_rslt[1]),
            # ignoring where the contraction actually sits in the
            # sentence; tags "after the contraction" would start at
            # tuple_rslt[0] + len(tuple_rslt[1]).  Possible offset bug --
            # verify the intended behaviour before changing.
            add_pos_list = pos_sent[len(tuple_rslt[1]):(len(tuple_rslt[1]) +
                                                        add_tags)]
            add_pos = [pos_word[1] for pos_word in add_pos_list]
            contr_pos = tuple(contr_word_pos + add_pos)
        # write a dictionary entry connecting the (words, pos) of the
        # contraction to the expanded part
        word = ' '.join(tuple_rslt[1])
        if contr_pos not in output_dict:
            # first time this POS signature is seen at all
            output_dict[contr_pos] = dict()
            output_dict[contr_pos][word] = 1
            # keep track of the progress
            print("\n\n ---- \n\n")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
        elif word in output_dict[contr_pos].keys():
            # check whether the entry is already there
            output_dict[contr_pos][word] += 1
            continue
        else:
            # if the combination of pos tags with words already occured
            # once then a list has to be made. Ideally this case doesn't
            # occur
            ambiguity_counter += 1
            output_dict[contr_pos][word] = 1
            print("\n\n ---- \n\n")
            print("AMBIGUITY ADDED!")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
    with open("disambiguations.yaml", "w") as stream:
        yaml.dump(output_dict, stream)
if __name__ == '__main__':
    # if you call this function directly just build the disambiguation
    # dictionary.
    # load a corpus that has the form of list of sentences which is
    # split up into a list of words
    SENT_LST = nltk.corpus.brown.sents()
    SENT_LST += nltk.corpus.gutenberg.sents()
    SENT_LST += nltk.corpus.reuters.sents()
    SENT_LST += nltk.corpus.inaugural.sents()
    POS_MODEL = utils.load_stanford('pos')
    # NOTE(review): the NER model is loaded and passed in ner_args even
    # though use_ner=False below, so it is effectively unused here --
    # confirm whether loading it can be skipped.
    NER_MODEL = utils.load_stanford('ner')
    # add_tags=1: include one POS tag following the contraction to help
    # disambiguate (see write_dictionary).
    write_dictionary(POS_MODEL,
                     SENT_LST,
                     add_tags=1,
                     use_ner=False,
                     ner_args=[NER_MODEL, "<NE>"])
675069879b1d492d1df7599b3ec43ea76978d06f | 1,881 | py | Python | setup.py | baye0630/paperai | 717f6c5a6652d6bc1bdb70d4a248a4751f820ddb | [
"Apache-2.0"
] | null | null | null | setup.py | baye0630/paperai | 717f6c5a6652d6bc1bdb70d4a248a4751f820ddb | [
"Apache-2.0"
] | null | null | null | setup.py | baye0630/paperai | 717f6c5a6652d6bc1bdb70d4a248a4751f820ddb | [
"Apache-2.0"
] | null | null | null | # pylint: disable = C0111
from setuptools import find_packages, setup
setup(name="paperai",
# version="1.5.0",
# author="NeuML",
# description="AI-powered literature discovery and review engine for medical/scientific papers",
# long_description=DESCRIPTION,
# long_description_content_type="text/markdown",
# url="https://github.com/neuml/paperai",
# project_urls={
# "Documentation": "https://github.com/neuml/paperai",
# "Issue Tracker": "https://github.com/neuml/paperai/issues",
# "Source Code": "https://github.com/neuml/paperai",
# },
# C:\Users\sxm\Desktop\paperai
# project_urls={
# "Documentation": "C:\\Users\\sxm\\Desktop\\paperai",
# "Source Code": "C:\\Users\\sxm\\Desktop\\paperai",
#},
license="Apache 2.0: C:\\Users\\sxm\\Desktop\\paperai\\LICENSE",
packages=find_packages(where="C:\\Users\\sxm\\Desktop\\paperai\\src\\python"),
package_dir={"": "src\\python"},
keywords="search embedding machine-learning nlp covid-19 medical scientific papers",
python_requires=">=3.6",
entry_points={
"console_scripts": [
"paperai = paperai.shell:main",
],
},
install_requires=[
"html2text>=2020.1.16",
# "mdv>=1.7.4",
"networkx>=2.4",
"PyYAML>=5.3",
"regex>=2020.5.14",
"txtai>=1.4.0",
"txtmarker>=1.0.0"
],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Text Processing :: Indexing",
"Topic :: Utilities"
]) | 36.882353 | 102 | 0.569378 |
6751ed6431d090ba5f0d7abc986bd5b1a678af78 | 3,295 | py | Python | hit_analysis/image/cut_reconstruction.py | credo-science/credo-classify | 1cc5e00a4df36c4069c0d0fbc19f579780b79ca5 | [
"MIT"
] | null | null | null | hit_analysis/image/cut_reconstruction.py | credo-science/credo-classify | 1cc5e00a4df36c4069c0d0fbc19f579780b79ca5 | [
"MIT"
] | 8 | 2021-03-30T12:52:01.000Z | 2022-03-12T00:19:45.000Z | hit_analysis/image/cut_reconstruction.py | credo-science/credo-classify | 1cc5e00a4df36c4069c0d0fbc19f579780b79ca5 | [
"MIT"
] | 1 | 2020-06-12T13:29:34.000Z | 2020-06-12T13:29:34.000Z | from io import BytesIO
from typing import List, Dict
from PIL import Image
from hit_analysis.commons.config import Config
from hit_analysis.commons.consts import IMAGE, CROP_X, CROP_Y, CROP_SIZE, FRAME_DECODED, CLASSIFIED, CLASS_ARTIFACT, ORIG_IMAGE
def do_reconstruct(detections: List[dict], config: Config) -> None:
"""
Reconstruction the fill by black cropped frame in CREDO Detector app v2.
The detection[x]['frame_decoded'] will be replaced by new value, old value will be stored in detection[x]['frame_decoded_orig'].
No any changes when count of detections is less or equal 1
:param detections: should be sorted by detection_id
:param config: config object
"""
if len(detections) <= 1:
return
sp = [str(detections[0].get('device_id')), str(detections[0].get('timestamp'))]
image = Image.new('RGBA', (detections[0].get('width'), detections[0].get('height')), (0, 0, 0))
edge = 'no_edge'
for d in detections:
if d.get('edge'):
edge = 'edge'
for d in reversed(detections):
append_to_frame(image, d)
config.store_png(['recostruct', edge, *sp, 'orig'], d.get('id'), d.get(IMAGE))
for d in detections:
replace_from_frame(image, d)
config.store_png(['recostruct', edge, *sp], d.get('id'), d.get(IMAGE))
if config.out_dir:
image.save('%s/recostruct/%s/%s/frame.png' % (config.out_dir, edge, "/".join(sp)))
def check_all_artifacts(detections: List[dict]) -> bool:
"""
Check if all detections is just classified as artifacts
:param detections: list of detections to check
:return: True - all detections is artifacts
"""
for d in detections:
if d.get(CLASSIFIED) != CLASS_ARTIFACT:
return False
return True
def filter_unclassified(by_timestamp: Dict[int, List[dict]]) -> List[int]:
"""
Filter detections with one or more unclassified as artifact.
:param by_timestamp: detections grouped by timestamp
:return: list of filtered timestamp keys
"""
ret = []
for timestamp, detections in by_timestamp.items():
if not check_all_artifacts(detections):
ret.append(timestamp)
return ret
| 35.815217 | 132 | 0.643703 |
67525ed3e9b1efee9050769baa49e34f54d058e4 | 7,215 | py | Python | tests/st/fallback/control_flow/test_fallback_010_if_in_if.py | httpsgithu/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | 1 | 2022-02-23T09:13:43.000Z | 2022-02-23T09:13:43.000Z | tests/st/fallback/control_flow/test_fallback_010_if_in_if.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | tests/st/fallback/control_flow/test_fallback_010_if_in_if.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback control flow if in if scenario"""
import pytest
import numpy as np
from mindspore import Tensor, ms_function, context
context.set_context(mode=context.GRAPH_MODE)
| 27.43346 | 78 | 0.595981 |
6756dc638ee04975afad0eae2f92936de0c1062f | 42,937 | py | Python | EPOpt/SpectrumAnalysis.py | ruixueqingyang/GPOEO | 8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c | [
"MIT"
] | 5 | 2021-09-01T18:04:18.000Z | 2022-02-25T04:48:21.000Z | EPOpt/SpectrumAnalysis.py | ruixueqingyang/GPOEO | 8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c | [
"MIT"
] | null | null | null | EPOpt/SpectrumAnalysis.py | ruixueqingyang/GPOEO | 8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from scipy.fftpack import fft, fftshift, ifft
from scipy.fftpack import fftfreq
from scipy.signal import find_peaks
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import pickle
import warnings
import sys
from scipy.signal.filter_design import maxflat
warnings.filterwarnings("ignore")
MAPEMax = 1e4
MAPEStdMax = 1e4
FigCount = 0
TLowBoundBase = 0.4
TLowBound = TLowBoundBase
TRound = 6
SetTLowBound = False
GrpFigCount = 0
#
# wfr 20210107
# wfr 20210130 N , N
# PctRange , PctRange N
# wfr 20210126 , ,
# T MAPE(Mean Absolute Percentage Error)
# arraySample
# arrayTimeStamp
# T
# , ,
# wfr 20210108
# wfr 20210116
# wfr 20201230
| 42.851297 | 153 | 0.613038 |
6757319350181b82afbdb20fa5b589436eb598b6 | 3,623 | py | Python | slippy/core/tests/test_materials.py | KDriesen/slippy | 816723fe6ab9f5ed26b14b4fe0f66423649b85e6 | [
"MIT"
] | 12 | 2020-12-06T15:30:06.000Z | 2021-12-14T06:37:15.000Z | slippy/core/tests/test_materials.py | KDriesen/slippy | 816723fe6ab9f5ed26b14b4fe0f66423649b85e6 | [
"MIT"
] | null | null | null | slippy/core/tests/test_materials.py | KDriesen/slippy | 816723fe6ab9f5ed26b14b4fe0f66423649b85e6 | [
"MIT"
] | 5 | 2021-03-18T05:53:11.000Z | 2022-02-16T15:18:43.000Z | import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material you need to add the properties that it will be tested with to the material_parameters dict,
the key should be the name of the class (what ever it is declared as after the class key word).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement from loads method
The third will be used with the loads from displacement method to ensure that the methods are inverses of each other
If there is a limit the applicability of the displacements from loads method (such as for a perfectly plastic material
the _max_load key word should be set in the second dict.
For more complex behaviour please also implement your own tests
"""
# Per-class test parameters, keyed by the material class name (per the
# module docstring).  Each value is a 3-tuple of keyword-argument dicts:
#   1. unpacked to instantiate the class,
#   2. used with the displacement-from-loads method,
#   3. used with the loads-from-displacement method (round-trip check).
# An optional '_max_load' key in the second dict bounds applicability.
material_parameters = {
    'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
                {'grid_spacing': 0.01, 'simple': True},
                {'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
    'Rigid': ({}, {}, {})
}
# NOTE(review): presumably material classes excluded from the generic
# parameter-driven tests; the tests using this list are outside this
# chunk -- confirm.
exceptions = [core.Rigid]
| 43.650602 | 118 | 0.666299 |
675790d51afdb63e5ecaf1442d2db56ff733f532 | 2,602 | py | Python | python/dash_tools/restore_from_bup.py | Dash-Industry-Forum/media-tools | 66be01ce09c8998d47d05729e0721857b2517017 | [
"BSD-3-Clause"
] | 60 | 2017-01-02T07:44:17.000Z | 2022-03-29T07:39:53.000Z | media-tools/python/dash_tools/restore_from_bup.py | roolrz/ABR-Alg-Implementation | 02ba8fbc804eeabeae1dcd51d359c6b0a2dc7566 | [
"MIT"
] | 4 | 2018-03-23T07:56:21.000Z | 2021-11-22T06:45:12.000Z | media-tools/python/dash_tools/restore_from_bup.py | roolrz/ABR-Alg-Implementation | 02ba8fbc804eeabeae1dcd51d359c6b0a2dc7566 | [
"MIT"
] | 36 | 2016-08-04T14:28:30.000Z | 2022-03-20T09:41:17.000Z | #!/usr/bin/env python
"""Restore files with ending BACKUP_ENDING to original files."""
# The copyright in this software is being made available under the BSD License,
# included below. This software may be subject to other third party and contributor
# rights, including patent rights, and no such rights are granted under this license.
#
# Copyright (c) 2016, Dash Industry Forum.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of Dash Industry Forum nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from backup_handler import BACKUP_ENDING
def main():
    """Restore every ``*<BACKUP_ENDING>`` file given on the command line.

    Each backup file replaces its original (its name with the backup
    suffix stripped); any existing original is removed first.  Arguments
    that do not carry the backup suffix are silently ignored.
    """
    from optparse import OptionParser
    parser = OptionParser()
    # pylint: disable=unused-variable
    (options, args) = parser.parse_args()
    if len(args) < 1:
        # parser.error() prints the message and exits the process, so no
        # explicit sys.exit() is needed (the old one was unreachable).
        parser.error("Wrong number of arguments")
    for file_name in args:
        if not file_name.endswith(BACKUP_ENDING):
            # not a backup file; leave it alone
            continue
        old_name = file_name[:-len(BACKUP_ENDING)]
        print("moving %s to %s" % (file_name, old_name))
        # os.rename fails on some platforms if the target exists, so
        # remove any existing original first.
        if os.path.exists(old_name):
            os.unlink(old_name)
        os.rename(file_name, old_name)

if __name__ == "__main__":
    main()
67588c7659b325ae0aa6ae1b1ce63ec6f84fa51d | 4,851 | py | Python | src/opendr/simulation/human_model_generation/utilities/joint_extractor.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 3 | 2021-06-24T01:54:25.000Z | 2021-12-12T16:21:24.000Z | src/opendr/simulation/human_model_generation/utilities/joint_extractor.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 79 | 2021-06-23T10:40:10.000Z | 2021-12-16T07:59:42.000Z | src/opendr/simulation/human_model_generation/utilities/joint_extractor.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 5 | 2021-07-04T07:38:50.000Z | 2021-12-12T16:18:47.000Z | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import numpy as np
import sklearn.preprocessing
| 46.644231 | 119 | 0.520305 |
6758d510a825ee1d3b5115d43a4e119fa4dab901 | 956 | py | Python | bluebottle/donations/migrations/0009_auto_20190130_1140.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | bluebottle/donations/migrations/0009_auto_20190130_1140.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | bluebottle/donations/migrations/0009_auto_20190130_1140.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-30 10:40
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
| 31.866667 | 179 | 0.66318 |
675926d38ebca3605bde9778baaa7d1ff647176f | 95 | py | Python | pickle_storage/tests/__init__.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | pickle_storage/tests/__init__.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | pickle_storage/tests/__init__.py | PyUnchained/pickle_storage | c0a978701ae59a9feeb3e14026ff0b2353b2e7f5 | [
"MIT"
] | null | null | null | # import os
# os.environ.setdefault('PICKLE_STORAGE_SETTINGS', 'pickle_storage.tests.settings') | 47.5 | 83 | 0.810526 |
6759d2fab349039ee4a85d50f2f8ff9d4646da91 | 6,592 | py | Python | src/config.py | NicolasSommer/valuenet | 1ce7e56956b378a8f281e9f9919e6aa98516a9d9 | [
"Apache-2.0"
] | null | null | null | src/config.py | NicolasSommer/valuenet | 1ce7e56956b378a8f281e9f9919e6aa98516a9d9 | [
"Apache-2.0"
] | null | null | null | src/config.py | NicolasSommer/valuenet | 1ce7e56956b378a8f281e9f9919e6aa98516a9d9 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
| 40.944099 | 104 | 0.715564 |
675abb614add4be960125080b494d7201adec0de | 2,352 | py | Python | aqg/utils/summarizer.py | Sicaida/Automatic_Question_Generation | a228c166d40103a194e1daa23ff37f73c9488a5d | [
"MIT"
] | 134 | 2018-04-04T19:06:09.000Z | 2022-02-24T03:24:36.000Z | aqg/utils/summarizer.py | Sicaida/Automatic_Question_Generation | a228c166d40103a194e1daa23ff37f73c9488a5d | [
"MIT"
] | 22 | 2018-09-20T07:17:11.000Z | 2022-03-11T23:45:15.000Z | aqg/utils/summarizer.py | sagarparikh2013/Automatic-Question-Generation-NLP | 6a2cf5d90e47980676f57c67f2ed73be6f8d7fed | [
"MIT"
] | 50 | 2018-07-09T16:29:15.000Z | 2021-12-20T11:37:33.000Z | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
#from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
# t = TextSummarizer()
# t.summarize_from_file("obama_short.txt")
# pdf = pdfgeneration()
# pdf.generate_pdf_summarizer("summarizer_output2.txt")
| 31.783784 | 79 | 0.676446 |
675aeab4c1e2b9cf3c2dce4e2188f947ea6ee089 | 50 | py | Python | tests/__init__.py | AdamRuddGH/super_json_normalize | 4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8 | [
"MIT"
] | 2 | 2021-10-03T02:43:41.000Z | 2021-10-04T10:15:20.000Z | tests/__init__.py | AdamRuddGH/super_json_normalize | 4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8 | [
"MIT"
] | null | null | null | tests/__init__.py | AdamRuddGH/super_json_normalize | 4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8 | [
"MIT"
] | null | null | null | """Unit test package for super_json_normalize."""
| 25 | 49 | 0.76 |
675c672977a46ec740f1a913b376d847b4aabb59 | 4,212 | py | Python | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | [
"BSD-2-Clause"
] | null | null | null | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | [
"BSD-2-Clause"
] | null | null | null | examples/turbulent_condensate/run.py | chrisjbillington/parpde | 4f882cbbb9ad6c57814e4422e9ba063fa27886a0 | [
"BSD-2-Clause"
] | null | null | null | # An example of a turbulent BEC in a harmonic trap. The groundstate is found
# and then some vortices randomly printed about with a phase printing. Some
# evolution in imaginary time is then performed to smooth things out before
# evolving the BEC in time.
# Run with 'mpirun -n <N CPUs> python run_example.py'
from __future__ import division, print_function
import sys
# sys.path.insert(0, '../..') # The location of the modules we need to import
import numpy as np
from parPDE import Simulator2D, LAPLACIAN
from parPDE.BEC2D import BEC2D
def get_number_and_trap(rhomax, R):
    """Return the 2D atom number and trap frequency for a Thomas-Fermi cloud.

    Given the desired peak density ``rhomax`` and radius ``R`` of a
    single-component condensate in the Thomas-Fermi approximation,
    compute the 2D normalisation constant (atom number) and the harmonic
    trap frequency that produce them.
    """
    norm_constant = pi * rhomax * R**2 / 2
    trap_frequency = np.sqrt(2 * g * rhomax / (m * R**2))
    return norm_constant, trap_frequency
# Constants (SI units):
pi = np.pi
hbar = 1.054571726e-34                       # Reduced Planck's constant
a_0 = 5.29177209e-11                         # Bohr radius
u = 1.660539e-27                             # unified atomic mass unit
m = 86.909180*u                              # 87Rb atomic mass
a = 98.98*a_0                                # 87Rb |2,2> scattering length
g = 4*pi*hbar**2*a/m                         # 87Rb self interaction constant
rhomax = 2.5e14 * 1e6                        # Desired peak condensate density
R = 7.5e-6                                   # Desired condensate radius
mu = g * rhomax                              # Approximate chemical potential for desired max density
                                             # (assuming all population is in in mF=+1 or mF=-1)
N_2D, omega = get_number_and_trap(rhomax, R) # 2D normalisation constant and trap frequency
                                             # required for specified radius and peak density
# Space:
nx_global = ny_global = 256
x_max_global = y_max_global = 10e-6
simulator = Simulator2D(-x_max_global, x_max_global, -y_max_global, y_max_global, nx_global, ny_global,
                        periodic_x=True, periodic_y=True, operator_order=6)
bec2d = BEC2D(simulator, natural_units=False, use_ffts=True)
# Coordinate arrays and grid spacings provided by the simulator.
x = simulator.x
y = simulator.y
dx = simulator.dx
dy = simulator.dy
r2 = x**2.0 + y**2.0
r = np.sqrt(r2)
# A Harmonic trap:
V = 0.5 * m * omega**2 * R**2.0 * (r/R)**2
# Characteristic timescales used to pick integration steps/durations below.
dispersion_timescale = dx**2 * m / (pi * hbar)
chemical_potential_timescale = 2*pi*hbar/mu
potential_timescale = 2*pi*hbar/V.max()      # NOTE(review): appears unused in this script -- confirm
# Kinetic term -hbar^2/(2m) * Laplacian, built from parPDE's LAPLACIAN.
K = -hbar**2/(2*m)*LAPLACIAN
def H(t, psi):
    """Hamiltonian terms for the single-component wavefunction ``psi``.

    Returns the kinetic term ``K`` (an OperatorSum instance), the linear
    local potential, and the nonlinear (interaction) local term, in that
    order.  The time argument ``t`` is unused but required by the
    evolver's expected Hamiltonian signature.
    """
    density = abs(psi)**2
    return K, V, g * density
if __name__ == '__main__':
    # The initial Thomas-Fermi guess:
    psi = rhomax * (1 - (x**2 + y**2) / R**2)
    # Clip the negative (unphysical) density outside the cloud, then take
    # the square root to get the wavefunction amplitude.
    psi[psi < 0] = 0
    psi = np.sqrt(psi)
    # Find the groundstate:
    psi = bec2d.find_groundstate(H, mu, psi, relaxation_parameter=1.7, convergence=1e-13,
                                 output_interval=100, output_directory='groundstate', convergence_check_interval=10)
    # psi is real so far, convert it to complex:
    psi = np.array(psi, dtype=complex)
    # Print some vortices, seeding the pseudorandom number generator so that
    # MPI processes all agree on where the vortices are:
    np.random.seed(42)
    for i in range(30):
        # Random circulation sign and position, normally distributed about
        # the trap centre with the cloud radius as the scale.
        sign = np.sign(np.random.normal())
        x_vortex = np.random.normal(0, scale=R)
        y_vortex = np.random.normal(0, scale=R)
        # NOTE(review): arctan2(x - y_vortex, y - x_vortex) pairs x with
        # y_vortex and y with x_vortex, so the phase singularity sits at
        # (y_vortex, x_vortex).  Statistically equivalent here, but confirm
        # the argument order is intentional.
        psi[:] *= np.exp(sign * 1j*np.arctan2(x - y_vortex, y - x_vortex))
    # Smooth it a bit in imaginary time:
    psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=chemical_potential_timescale,
                       H=H, psi=psi, mu=mu, method='rk4', imaginary_time=True,
                       output_interval=100, output_directory='smoothing')
    # And evolve it in time for 10ms:
    psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=10e-3,
                       H=H, psi=psi, mu=mu, method='rk4', imaginary_time=False,
                       output_interval=100, output_directory='evolution')
| 39.364486 | 116 | 0.626068 |
675c80c6427e7f597a41119f9db761e49256c6ca | 3,918 | py | Python | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | [
"MIT"
] | null | null | null | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | [
"MIT"
] | null | null | null | src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 35f3d28faab2aa80f2332499f5e7ab19b040eabe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
# CLI help strings, keyed by option name.
helps = {
    'show' : 'show the results figure',
}
from sfepy import data_dir
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
                    action="store_true", dest='show',
                    default=False, help=helps['show'])
options = parser.parse_args()
# Load the mesh and build the finite-element domain for the fluid region.
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
# First-order approximations: 3-component velocity and scalar pressure.
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)
# Boundary (facet) regions selected by coordinate thresholds.
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
# NOTE(review): 'Outlet' selects x > -0.499, which covers nearly the whole
# domain; an outlet opposite the inlet would be x > 0.499 -- verify.
region_3 = domain.create_region(name='Outlet', select='vertices in (x > -0.499)', kind='facet')
# Essential (Dirichlet) BCs: zero velocity on the walls, unit x-velocity
# at the inlet.
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
# NOTE(review): ebc_4 is defined but not passed to pb.set_bcs() below, so
# the outlet condition is never applied -- confirm this is intentional.
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})
viscosity = Material(name='viscosity', value=1.25e-3)
# Unknown/test variable pairs for the mixed velocity-pressure formulation.
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')
integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)
# Weak-form terms: viscous diffusion, convection, and the two Stokes
# (pressure gradient / divergence) couplings.
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
              integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
              integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
              integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
              integral=integral_1, region=omega, u=variable_1, q=variable_4)
# Momentum balance and the incompressibility constraint.
eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])
ls = ScipyDirect({})
nls_status = IndexedStruct()
# Newton solver for the nonlinear (convective) system.
nls = Newton({'i_max' : 20, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 0.99999, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6}, lin_solver=ls, status=nls_status)
pb = Problem('Navier-Stokes', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc_1, ebc_2, ebc_3]))
pb.set_solver(nls)
status = IndexedStruct()
state = pb.solve(status=status, save_results=True)
# Save the solution for inspection and visualise it.
out = state.create_output_dict()
pb.save_state('Navier_Stokes.vtk', out=out)
view = Viewer('Navier_Stokes.vtk')
view(rel_scaling=2,
is_scalar_bar=True, is_wireframe=True) | 41.242105 | 234 | 0.70342 |
675d0fb6b3b1a21973d25abe79cafce5b94844f8 | 4,063 | py | Python | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | [
"MIT"
] | null | null | null | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | [
"MIT"
] | 10 | 2018-11-12T21:53:56.000Z | 2019-04-27T06:24:13.000Z | nd_customization/api/lab_test.py | libermatic/nd_customization | 4ee14c661651b09ef16aaf64952ceedc67bb602d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import now, cint
from functools import partial
from toolz import compose
# Given lab-test rows, keep only the sub-section rows (is_subsection == 1)
# and map each to its label, preferring "test_event" over "particulars".
# NOTE(review): toolz.compose applies right-to-left, so the filter runs
# before the map; under Python 3 the result is a lazy map object --
# presumably callers materialise it (verify).
_get_subsections = compose(
    partial(map, lambda x: x.get("test_event") or x.get("particulars")),
    partial(filter, lambda x: cint(x.is_subsection) == 1),
)
| 36.603604 | 87 | 0.628846 |
675e10e80e4d185d1bb67fc4f8ca4f7d8148f472 | 2,283 | py | Python | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | [
"MIT"
] | 1 | 2020-02-24T04:09:25.000Z | 2020-02-24T04:09:25.000Z | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | [
"MIT"
] | null | null | null | build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 476d1f0e0c505a60acadde13397b2787f49bd7dc | [
"MIT"
] | 1 | 2020-07-15T05:03:18.000Z | 2020-07-15T05:03:18.000Z | #!/usr/bin/env python3
from scripts.workflow import get_app_name, is_name_valid
from scripts.workflow import get_args, is_args_valid
from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile
from scripts.manual import print_manual
from scripts.messages import empty_name, success_msg, failure_msg
import sys
# Read the requested app name and the remaining CLI flags.
app_name = get_app_name()
args = get_args()
args.remove(app_name)

if not is_name_valid(app_name):
    # Either the user asked for help, or the name is unusable.
    if app_name == '-h' or app_name == '--help':
        print_manual()
    else:
        print('Please choose another app name')
        failure_msg(app_name)
elif not is_args_valid(args):
    # The name is fine but at least one flag is unknown.
    print('Unknown argument detected! Please check the help section\n')
    print_manual()
    failure_msg(app_name)
else:
    # Scaffold the project directory first, then apply each flag.
    create_dir(app_name)

    debugger_mode = '-d' in args or '--debugger' in args
    if debugger_mode:
        print("- Debugger mode on")
        print(" |__ added debug=True")
    else:
        print("- Debugger mode off")

    import_css_js = '-cj' in args or '--css-js' in args
    if import_css_js:
        create_static_folder(app_name)
        print("- Css and Js mode on")
        print(" |__ import static/stylesheet/style.css")
        print(" |__ import static/js/app.css")
    else:
        print("- Css and Js mode off")

    use_docker = '-dc' in args or '--docker-container' in args
    if use_docker:
        print("- Docker mode on")
        print(' |__ cd %s' % app_name)
        print(' |__ \"docker-compose up -d\" to start app')
    else:
        print("- Docker mode off")

    # Emit the template folder, the app entry point and (optionally) the
    # Docker scaffolding, then report success.
    create_templates_folder(app_name, import_css_js)
    create_app(app_name, debugger_mode)
    if use_docker:
        create_dockerfile(app_name)
    success_msg(app_name)
675e7374895c08103fdfc9d9f90f2f45da303fe7 | 2,960 | py | Python | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | [
"Apache-2.0"
] | null | null | null | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | [
"Apache-2.0"
] | 11 | 2020-01-23T16:45:07.000Z | 2020-02-08T16:53:22.000Z | stacker/assembler.py | unrahul/stacker | f94e9e6ad9351fd8fa94bef4ae0c4ed0afc8305d | [
"Apache-2.0"
] | 2 | 2020-01-29T18:18:20.000Z | 2020-01-29T19:55:25.000Z | import os
from pathlib import Path
from jinja2 import Template
import parser
from utils import write_to_file
from utils import mkdir_p
# Initialise the local `parser` module once at import time and keep a
# module-level handle on the parsed stack specification; its structure
# (spec["stack"]["dlrs"][...]) is consumed by generate_dockerfile() below.
parser.init()
# parse and assign to vars
spec = parser.spec
def _concat(slice: str) -> str:
"""helper to concatenate each template slice."""
return "{}\n".format(slice)
def slices_filename_content_hash() -> dict:
    """Map each Dockerfile-slice filename to its text content.

    Reads every entry of the ``slices`` directory that sits next to this
    module and returns a ``{filename: content}`` dict.

    The original built the path as ``Path.cwd().joinpath(<absolute path>)``;
    joining an absolute path simply yields that absolute path, so the cwd
    detour was a no-op and is dropped here.
    """
    # resolve() follows symlinks, matching os.path.realpath(__file__).
    slices_dir = Path(__file__).resolve().parent / "slices"
    return {entry.name: entry.read_text() for entry in slices_dir.iterdir()}
def concat_slices(component: str = "tensorflow", flavor: str = "mkl") -> str:
    """Concatenate the Dockerfile slices needed for *component*/*flavor*.

    Always starts from the base OS slice; for the mkl flavor of
    tensorflow or pytorch, the framework slice and the horovod slice are
    appended.  Returns the assembled Dockerfile text.
    """
    docker_slices = slices_filename_content_hash()
    names = ["os.dockerfile"]
    if component == "tensorflow" and flavor == "mkl":
        names += ["tensorflow.dockerfile", "horovod.dockerfile"]
    if component == "pytorch" and flavor == "mkl":
        names += ["pytorch.dockerfile", "horovod.dockerfile"]
    # Join once instead of repeated string +=; the original additionally
    # wrapped the final string in "".join(<str>), which is a no-op.
    return "".join(_concat(docker_slices[name]) for name in names)
def generate_dockerfile(os: str, framework: str, file_name: str = "Dockerfile"):
    """Render and write one Dockerfile for an OS/framework pair.

    NOTE: the ``os`` parameter holds an OS *name* ("ubuntu"/"clearlinux")
    and shadows the ``os`` module inside this function; the module is not
    used here, so the shadowing is harmless.
    """
    # All version/package info for this OS lives under one spec subtree.
    cfg = spec["stack"]["dlrs"][os]
    installer = "apt-get install -y" if os == "ubuntu" else "swupd bundle-add"
    template_values = {
        "os": "{}:{}".format(os, cfg["version"]),
        "pkg_install": "{} {}".format(installer, " ".join(cfg["os_pkgs"])),
        "tf_version": cfg["tensorflow"]["mkl"]["version"],
        "hvd_version": cfg["horovod"]["version"],
        "torch_version": cfg["pytorch"]["mkl"]["version"],
    }
    rendered = insert_template_values(concat_slices(framework), template_values)
    write_to_file(file_name, rendered)
def generate_all_dockerfiles(generate: bool = True, build: bool = False) -> None:
    """Generate Dockerfiles for every framework/OS combination.

    Writes ``./dockerfiles/<os>/<framework>/Dockerfile`` for each pair.
    The *build* step is not implemented yet.
    """
    if generate:
        output_root = "./dockerfiles"
        for fw in ["pytorch", "tensorflow"]:
            for distro in ["ubuntu", "clearlinux"]:
                target_dir = mkdir_p(os.path.join(output_root, distro, fw))
                target_file = os.path.join(target_dir, "Dockerfile")
                generate_dockerfile(distro, fw, target_file)
    if build:
        # TODO(unrahul): build the generated docker images
        pass
| 33.636364 | 82 | 0.65777 |
675e9236debc2ccf610756e1ddfa6942ba31102c | 621 | py | Python | akshare/fx/cons.py | PKUuu/akshare | 03967312b6c8afdec32e081fb23ae5916b674936 | [
"MIT"
] | 1 | 2020-05-14T13:20:48.000Z | 2020-05-14T13:20:48.000Z | akshare/fx/cons.py | 13767849/akshare | 5b7e4daaa80b1ccaf3f5a980a1205848e2e8570d | [
"MIT"
] | null | null | null | akshare/fx/cons.py | 13767849/akshare | 5b7e4daaa80b1ccaf3f5a980a1205848e2e8570d | [
"MIT"
] | 2 | 2020-09-23T08:50:14.000Z | 2020-09-28T09:57:07.000Z | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/10/20 10:58
contact: jindaxiang@163.com
desc:
"""
# Browser-like User-Agent header sent with requests to chinamoney.com.cn.
SHORT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36'
}
# ChinaMoney FX JSON endpoints: spot quotes, swap quotes, currency pairs.
FX_SPOT_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sp-quot.json"
FX_SWAP_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sw-quot.json"
FX_PAIR_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/cpair-quot.json"
# Request payload skeleton for the spot endpoint; the semantics of "t"
# are defined by the caller — confirm against the request code.
SPOT_PAYLOAD = {
    "t": {}
}
| 29.571429 | 134 | 0.698873 |
675ffb2c535d8805575601fc596c61d52191a22a | 1,283 | py | Python | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | [
"BSD-3-Clause"
] | null | null | null | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | [
"BSD-3-Clause"
] | null | null | null | entropylab/tests/test_issue_204.py | qguyk/entropy | e43077026c83fe84de022cf8636b2c9d42f1d330 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T11:47:31.000Z | 2022-03-29T11:47:31.000Z | import os
from datetime import datetime
import pytest
from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph
| 26.729167 | 85 | 0.681216 |
67607806f4f757a440672ca409795cb6fc24a8c8 | 97 | py | Python | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | [
"Apache-2.0"
] | 1 | 2022-02-11T09:39:08.000Z | 2022-02-11T09:39:08.000Z | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | [
"Apache-2.0"
] | null | null | null | src/__init__.py | PY-GZKY/fconversion | f1da069ac258444c8a6b2a5fe77d0e1295a0d4e4 | [
"Apache-2.0"
] | null | null | null | from .file_core import FileEngine
from src.utils.utils import *
from .version import __version__
| 24.25 | 33 | 0.824742 |
676261a506ad81b93b8c0f929316b27e9a10621d | 169 | py | Python | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | [
"MIT"
] | null | null | null | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | [
"MIT"
] | null | null | null | app/app/calc.py | benning55/recipe-app-api | a63366c7bb576fefbc755fe873731d2edf3e74d2 | [
"MIT"
] | null | null | null | #
# def add(x, y):
# """
# Add Number Together
# """
# return x+y
#
#
# def subtract(x, y):
# """
# Subtract x from y
# """
# return x-y
| 12.071429 | 25 | 0.402367 |
676411e3c65abd02fa317570d558db02833381e4 | 7,673 | py | Python | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | [
"MIT"
] | 7 | 2020-01-10T09:34:52.000Z | 2020-01-27T13:51:12.000Z | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | [
"MIT"
] | 6 | 2020-01-26T20:38:07.000Z | 2022-02-10T12:12:53.000Z | open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 1d3ebcfe7b46315e91618f540ef1c95b4e20d9af | [
"MIT"
] | 4 | 2020-01-27T16:44:31.000Z | 2021-02-11T16:52:26.000Z | from open_anafi.models import Indicator, IndicatorParameter, IndicatorLibelle
from open_anafi.serializers import IndicatorSerializer
from .frame_tools import FrameTools
from open_anafi.lib import parsing_tools
from open_anafi.lib.ply.parsing_classes import Indic
import re
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
| 37.985149 | 129 | 0.639776 |
67656a05cc2aa8785f99e903c16b411d139ad81d | 3,576 | py | Python | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | 3 | 2015-09-24T23:12:57.000Z | 2021-04-12T07:07:01.000Z | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | null | null | null | src/python/commands/LikeImpl.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | 1 | 2015-11-23T10:35:43.000Z | 2015-11-23T10:35:43.000Z | import os,sys,math,random
from phycas import *
from MCMCManager import LikelihoodCore
from phycas.utilities.PhycasCommand import *
from phycas.readnexus import NexusReader
from phycas.utilities.CommonFunctions import CommonFunctions
| 39.733333 | 131 | 0.576622 |
67659e478a5e5c7c61b17fe40c449153891a0e5c | 291 | py | Python | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | [
"MIT"
] | null | null | null | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | [
"MIT"
] | null | null | null | app/models.py | dangger/awesome-flask-todo | 8eb2ec5357a028a76015035940d6f7844623ff98 | [
"MIT"
] | null | null | null | from app import db
import datetime
from flask_mongoengine.wtf import model_form
# WTForms form class auto-generated from the Todo document model
# (Todo is presumably the MongoEngine document defined above — out of
# view in this chunk; confirm).
TodoForm = model_form(Todo)
| 26.454545 | 60 | 0.766323 |
6767a8053401b419268988cde796fcad2ed726b3 | 157 | py | Python | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | [
"MIT"
] | null | null | null | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | [
"MIT"
] | null | null | null | Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 8b52a618e67c80d66518ef91c1d4596a2bfddc22 | [
"MIT"
] | null | null | null | dia = int(input('Dia = '))
# Restore the mojibake-mangled Portuguese prompts: 'Mês' and 'Você' had
# lost their accented characters ('Ms = ', 'Voc nasceu') in the source.
mes = str(input('Mês = '))
ano = int(input('Ano = '))
# Echo the full birth date back for confirmation.
print('Você nasceu no dia {} de {} de {}. Correto?' .format(dia, mes, ano))
| 31.4 | 75 | 0.56051 |
6767ec882d17e62fa49469a5e7630e14c022c42d | 16,089 | py | Python | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | [
"BSD-2-Clause"
] | 31 | 2015-03-28T09:43:53.000Z | 2022-02-27T18:20:06.000Z | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | [
"BSD-2-Clause"
] | 24 | 2015-01-16T03:00:33.000Z | 2022-02-08T00:06:05.000Z | firebirdsql/services.py | dand-oss/pyfirebirdsql | 1b8148f8937929cdd74774fef2611dd55ea6a757 | [
"BSD-2-Clause"
] | 21 | 2015-01-15T23:00:26.000Z | 2020-11-04T08:30:13.000Z | ##############################################################################
# Copyright (c) 2009-2021, Hajime Nakagami<nakagami@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
from firebirdsql.consts import * # noqa
from firebirdsql.utils import * # noqa
from firebirdsql.fbcore import Connection
| 38.675481 | 85 | 0.604139 |
6768a012fa3b71acafcce223de6b3ec16122e616 | 763 | py | Python | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | [
"MIT"
] | 2 | 2021-03-02T15:37:01.000Z | 2021-04-21T10:45:32.000Z | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | [
"MIT"
] | 1 | 2021-02-28T20:26:04.000Z | 2021-03-01T17:55:55.000Z | source/utils/converters.py | GoBoopADog/maelstrom | fce79fa964578dfee5d7beb4ec440deec5f8f25d | [
"MIT"
] | 4 | 2021-02-28T04:08:03.000Z | 2021-09-05T17:16:44.000Z | from discord.ext import commands
from typing import Union
from types import ModuleType
from .context import Context
| 30.52 | 77 | 0.655308 |
6769164b195db417c53c603f5e118948e48af7f8 | 8,230 | py | Python | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | [
"MIT"
] | null | null | null | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | [
"MIT"
] | null | null | null | credstuffer/db/creator.py | bierschi/credstuffer | 1a37aef30654028885d0d2caa456f38f58af4def | [
"MIT"
] | 1 | 2020-10-05T12:10:32.000Z | 2020-10-05T12:10:32.000Z | import logging
from credstuffer.db.connector import DBConnector
from credstuffer.exceptions import DBCreatorError
| 31.776062 | 116 | 0.615188 |
67692e8a3e167b8004f399714ed1c11e30cf9ebb | 897 | py | Python | src/poetry/core/masonry/builder.py | DavidVujic/poetry-core | d7b5572aabc762f138e4d15f461f13a28c8258d6 | [
"MIT"
] | null | null | null | src/poetry/core/masonry/builder.py | DavidVujic/poetry-core | d7b5572aabc762f138e4d15f461f13a28c8258d6 | [
"MIT"
] | null | null | null | src/poetry/core/masonry/builder.py | DavidVujic/poetry-core | d7b5572aabc762f138e4d15f461f13a28c8258d6 | [
"MIT"
] | null | null | null | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from poetry.core.poetry import Poetry
| 27.181818 | 76 | 0.630992 |
676b193319b9f06972fcafcb462e36e367c9d59d | 659 | py | Python | migrations/versions/429d596c43a7_users_country.py | bilginfurkan/Anonimce | 7d73c13ae8d5c873b6863878370ad83ec9ee5acc | [
"Apache-2.0"
] | 2 | 2021-02-15T12:56:58.000Z | 2021-02-21T12:38:47.000Z | migrations/versions/429d596c43a7_users_country.py | bilginfurkan/Anonimce | 7d73c13ae8d5c873b6863878370ad83ec9ee5acc | [
"Apache-2.0"
] | null | null | null | migrations/versions/429d596c43a7_users_country.py | bilginfurkan/Anonimce | 7d73c13ae8d5c873b6863878370ad83ec9ee5acc | [
"Apache-2.0"
] | null | null | null | """users.country
Revision ID: 429d596c43a7
Revises: 77e0c0edaa04
Create Date: 2020-10-23 21:26:55.598146
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '429d596c43a7'       # identifier of this migration
down_revision = '77e0c0edaa04'  # migration this one directly follows
branch_labels = None            # no named branches
depends_on = None               # no cross-branch dependencies
| 22.724138 | 84 | 0.691958 |
676bce0736ccad204cb3cef87d200632b75f487f | 4,535 | py | Python | tweet_processor.py | cristynhoward/connectfour | a6727cbe47696a0a3dd278a3929d81dc6e158999 | [
"MIT"
] | 1 | 2018-06-28T09:45:59.000Z | 2018-06-28T09:45:59.000Z | tweet_processor.py | cristynhoward/connectfour | a6727cbe47696a0a3dd278a3929d81dc6e158999 | [
"MIT"
] | null | null | null | tweet_processor.py | cristynhoward/connectfour | a6727cbe47696a0a3dd278a3929d81dc6e158999 | [
"MIT"
] | null | null | null | """ Module for processing mentions of the bot via the Twitter API.
"""
from ConnectFourGame import *
from databasehelpers import *
from helpers import *
from minimax import *
def process_mentions():
    """Scan recent @-mentions of the bot and dispatch each one.

    Walks the mentions timeline newest-first, stopping at the first tweet
    already handled in a previous run (checkpoint via get_read_since()).
    Each newer mention either starts a game (top-level tweet) or is
    treated as a move on an existing game thread (reply).
    """
    api = get_twitter_api()
    first = True                   # still looking at the newest mention?
    since_id = get_read_since()    # newest tweet id handled by a prior run
    newest_tweet_id = None
    # limit_handled() presumably waits out Twitter rate limits — confirm.
    for tweet in limit_handled(tweepy.Cursor(api.mentions_timeline).items()):
        if int(tweet.id_str) <= int(since_id):  # already processed earlier
            if first is True:
                # Nothing newer than the stored checkpoint.
                log("No new mentions to process.")
            else:
                # Batch done: advance the checkpoint to the newest id seen.
                log("Processed mentions from " + str(since_id) + " to " + str(newest_tweet_id) + ".")
                set_read_since(newest_tweet_id)
            return
        if first is True:  # remember the newest id for the next checkpoint
            newest_tweet_id = tweet.id_str
            first = False
        if tweet.in_reply_to_status_id is None:  # top-level mention: maybe a new game
            result_newgame = try_newgame(tweet)
            if result_newgame is not None:
                record_outgoing_tweet(result_newgame)
        else:  # reply: maybe a move on an open game thread
            doc = get_active_game(str(tweet.in_reply_to_status_id))
            if doc is not None:
                result_game = try_playturn(tweet, doc)
                if result_game is not None:
                    record_outgoing_tweet(result_game)
                    remove_active_game(str(tweet.in_reply_to_status_id))
def try_newgame(tweet):
    """ Process a single attempted new game.

    :param tweet: The tweet to be processed as new game.
    :type tweet: Tweepy.Status, dict
    :return: The resulting new game, or None if no new game made.
    :rtype: None, ConnectFourGame
    """
    if tweet.in_reply_to_status_id is not None:
        return None  # replies belong to existing game threads, not new games
    words = tweet.text.split(" ")
    # Guard the word indexing: the original raised IndexError on mentions
    # with fewer than two (or, below, three) words.
    if len(words) < 2 or words[1] != "new":  # second word must be 'new'
        return None
    user1 = tweet.user.screen_name
    # TWO PLAYER GAME: a second mentioned account becomes the opponent.
    if len(tweet.entities[u'user_mentions']) > 1:
        user2 = tweet.entities[u'user_mentions'][1][u'screen_name']
        newgame = ConnectFourGame.new_game(get_next_game_id(), user1, user2, int(tweet.id_str))
        log("Created two player game: " + newgame.game_to_string())
        return newgame
    # ONE PLAYER GAME against the minimax AI (sentinel name with a
    # leading space, matched elsewhere in this module).
    if len(words) > 2 and words[2] == "singleplayer":
        user2 = " mimimax_ai_alpha"
        newgame = ConnectFourGame.new_game(get_next_game_id(), user1, user2, int(tweet.id_str))
        newgame.play_turn(int(tweet.id_str), minimax(newgame, 3))
        log("Created one player game: " + newgame.game_to_string())
        return newgame
    return None
def try_playturn(tweet, doc):
    """ Process a single tweet as an attempted move on an open game.

    :param tweet: The tweet to be processed as an attempted move on an open game.
    :type tweet: Tweepy.Status, dict
    :param doc: The database item storing the game onto which the turn is played.
    :type doc: dict
    :return: The resulting game after the move is played, or None if move not played.
    :rtype: ConnectFourGame, None
    """
    game = ConnectFourGame.game_from_string(doc["game"])
    # Work out whose turn it is.
    active_user = game.user2
    if game.user1_is_playing == 1:
        active_user = game.user1
    # Which word of the tweet holds the column: in single-player (or
    # self-play) games the opponent is never @-mentioned, so the column
    # appears one word earlier.
    move_index = 2
    if game.user1 == game.user2 or game.user2 == " mimimax_ai_alpha":
        move_index = 1
    tweet_text = tweet.text.split(" ")
    if len(tweet_text) >= move_index + 1:
        column_played = tweet_text[move_index]
        # Only columns 1..7 are legal on a Connect Four board.
        if any(column_played == s for s in ["1", "2", "3", "4", "5", "6", "7"]):
            # NOTE(review): `&` is the non-short-circuiting bitwise AND, so
            # game.can_play() runs even when the sender check fails; for two
            # booleans the result is still correct.
            if (tweet.user.screen_name == active_user) & game.can_play(int(column_played)):
                # PLAY TURN
                game.play_turn(int(tweet.id_str), int(column_played))
                log(active_user + " played a " + column_played + " resulting in game: " + game.game_to_string())
                # In a single-player game the AI answers immediately.
                if game.user2 == ' mimimax_ai_alpha':
                    ai_move = minimax(game, 3)
                    game.play_turn(int(tweet.id_str), ai_move)
                    log("mimimax_ai_v1 played a " + str(ai_move) + " resulting in game: " + game.game_to_string())
                return game
    # Any failed check falls through and implicitly returns None.
# Script entry point: poll mentions once and process them.
if __name__ == '__main__':
    process_mentions()
| 39.780702 | 114 | 0.613892 |
676c11480ace0b3ea4cda5237879a07a2c1fe362 | 3,448 | py | Python | code/seasonality.py | geangohn/RecSys | f53d0322fed414caa820cebf23bef5a0a9237517 | [
"MIT"
] | 2 | 2019-07-22T09:42:25.000Z | 2021-03-31T09:29:29.000Z | code/seasonality.py | geangohn/RecSys | f53d0322fed414caa820cebf23bef5a0a9237517 | [
"MIT"
] | null | null | null | code/seasonality.py | geangohn/RecSys | f53d0322fed414caa820cebf23bef5a0a9237517 | [
"MIT"
] | null | null | null | import pandas as pd
| 74.956522 | 120 | 0.690255 |
676d15fe9000290c81a06864a2972f44722d480f | 1,729 | py | Python | discord_api/applications.py | tuna2134/discord-api.py | 0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb | [
"MIT"
] | 10 | 2021-11-30T06:22:20.000Z | 2021-12-16T00:36:14.000Z | discord_api/applications.py | tuna2134/discord-api.py | 0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb | [
"MIT"
] | 5 | 2021-12-03T10:21:15.000Z | 2022-01-18T11:08:48.000Z | discord_api/applications.py | tuna2134/discord-api.py | 0e5e9f469d852f81e6fc0b561c54a78ea6fe8fcb | [
"MIT"
] | 3 | 2021-12-10T08:34:28.000Z | 2022-01-21T11:59:46.000Z | from .command import Command, ApiCommand
| 28.816667 | 72 | 0.520532 |
676d466a108d99b100b2c3a5a8c5c61b4428733b | 280 | py | Python | SinglePackage/tests/test_single.py | CJosephides/PythonApplicationStructures | b82385f7a35f3097eac08011d24d9d1429cee171 | [
"RSA-MD"
] | 1 | 2019-02-05T11:45:11.000Z | 2019-02-05T11:45:11.000Z | SinglePackage/tests/test_single.py | CJosephides/PythonApplicationStructures | b82385f7a35f3097eac08011d24d9d1429cee171 | [
"RSA-MD"
] | null | null | null | SinglePackage/tests/test_single.py | CJosephides/PythonApplicationStructures | b82385f7a35f3097eac08011d24d9d1429cee171 | [
"RSA-MD"
] | null | null | null | from unittest import TestCase, main
from single_package.single import Single
# Allow running this test module directly via unittest's CLI runner.
if __name__ == "__main__":
    main()
| 17.5 | 50 | 0.692857 |
676d7655b19bd0498b46ef17e54ab70538bcef0d | 1,563 | py | Python | tests/spot/sub_account/test_sub_account_deposit_address.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 512 | 2021-06-15T08:52:44.000Z | 2022-03-31T09:49:53.000Z | tests/spot/sub_account/test_sub_account_deposit_address.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 75 | 2021-06-20T13:49:50.000Z | 2022-03-30T02:45:31.000Z | tests/spot/sub_account/test_sub_account_deposit_address.py | Banging12/binance-connector-python | dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b | [
"MIT"
] | 156 | 2021-06-18T11:56:36.000Z | 2022-03-29T16:34:22.000Z | import responses
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.lib.utils import encoded_string
from binance.error import ParameterRequiredError
# Shared fixtures for the tests below.
mock_item = {"key_1": "value_1", "key_2": "value_2"}  # canned response payload
key = random_str()     # random API key; its value is irrelevant to these tests
secret = random_str()  # random API secret
# Fully-populated request parameters; each test overrides one field with
# an empty value to trigger client-side parameter validation.
params = {
    "email": "alice@test.com",
    "coin": "BNB",
    "network": "BNB",
    "recvWindow": 1000,
}
def test_sub_account_deposit_address_without_email():
    """Tests the API endpoint to get deposit address without email"""
    # Local `params` deliberately shadows the module-level fixture, with
    # an empty email to provoke the required-parameter check.
    params = {"email": "", "coin": "BNB", "network": "BNB", "recvWindow": 1000}
    client = Client(key, secret)
    # `sure`-style assertion: the call must raise ParameterRequiredError.
    client.sub_account_deposit_address.when.called_with(**params).should.throw(
        ParameterRequiredError
    )
def test_sub_account_deposit_address_without_coin():
    """Tests the API endpoint to get deposit address without coin"""
    # Same shape as the module-level fixture but with an empty coin to
    # provoke the required-parameter check.
    params = {
        "email": "alice@test.com",
        "coin": "",
        "network": "BNB",
        "recvWindow": 1000,
    }
    client = Client(key, secret)
    # `sure`-style assertion: the call must raise ParameterRequiredError.
    client.sub_account_deposit_address.when.called_with(**params).should.throw(
        ParameterRequiredError
    )
| 26.05 | 79 | 0.690339 |
676e003414de3f2f5ddecf2d26540316287d4189 | 6,232 | py | Python | tools/telemetry/telemetry/results/page_test_results.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-01-25T09:58:49.000Z | 2020-01-25T09:58:49.000Z | tools/telemetry/telemetry/results/page_test_results.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/telemetry/telemetry/results/page_test_results.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-04T06:34:36.000Z | 2020-11-04T06:34:36.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import traceback
from telemetry import value as value_module
from telemetry.results import page_run
from telemetry.results import progress_reporter as progress_reporter_module
from telemetry.value import failure
from telemetry.value import skip
def DidRunPage(self, page, discard_run=False): # pylint: disable=W0613
  """Finish the page run started by a prior WillRunPage call.

  Args:
    page: The current page under test.
    discard_run: Whether to discard the entire run and all of its
        associated results.
  """
  assert self._current_page_run, 'Did not call WillRunPage.'
  self._progress_reporter.DidRunPage(self)
  if not discard_run:
    self._all_page_runs.append(self._current_page_run)
  # No run is in progress any more; WillRunPage must precede the next one.
  self._current_page_run = None
def WillAttemptPageRun(self, attempt_count, max_attempts):
  """To be called when a single attempt on a page run is starting.

  This is called between WillRunPage and DidRunPage and can be
  called multiple times, one for each attempt.

  Args:
    attempt_count: The current attempt number, start at 1
        (attempt_count == 1 for the first attempt, 2 for second
        attempt, and so on).
    max_attempts: Maximum number of page run attempts before failing.
  """
  self._progress_reporter.WillAttemptPageRun(
      self, attempt_count, max_attempts)
  # Clear any values from previous attempts for this page run.
  self._current_page_run.ClearValues()
| 33.869565 | 75 | 0.72914 |
676fd905727818efa8eda82566b5e796e9f06ce8 | 11,273 | py | Python | src/utils/gradcam.py | xmuyzz/IVContrast | f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c | [
"MIT"
] | 3 | 2022-02-23T09:05:45.000Z | 2022-02-23T20:18:18.000Z | src/utils/gradcam.py | xmuyzz/IVContrast | f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c | [
"MIT"
] | null | null | null | src/utils/gradcam.py | xmuyzz/IVContrast | f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c | [
"MIT"
] | null | null | null | from tensorflow.keras.models import Model
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
import tensorflow as tf
import os
#---------------------------------------------------------------------------------
# get data
#---------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# find last conv layer
#-----------------------------------------------------------------------------------
#----------------------------------------------------------------------------------
# calculate gradient class actiavtion map
#----------------------------------------------------------------------------------
def compute_heatmap(model, saved_model, image, pred_index, last_conv_layer):
    """
    construct our gradient model by supplying (1) the inputs
    to our pre-trained model, (2) the output of the (presumably)
    final 4D layer in the network, and (3) the output of the
    softmax activations from the model
    """
    # Returns a Grad-CAM heatmap (numpy array, values in [0, 1]) for
    # `image` taken from layer `last_conv_layer`.
    # NOTE(review): `saved_model` and `pred_index` are currently unused —
    # the per-class channel selection below is commented out, so the
    # gradient is taken w.r.t. the full prediction tensor.
    gradModel = Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer).output, model.output]
    )
    # record operations for automatic differentiation
    with tf.GradientTape() as tape:
        """
        cast the image tensor to a float-32 data type, pass the
        image through the gradient model, and grab the loss
        associated with the specific class index
        """
        print(pred_index)  # debug output, kept intentionally
        inputs = tf.cast(image, tf.float32)
        print(image.shape)
        last_conv_layer_output, preds = gradModel(inputs)
        print(preds)
        print(preds.shape)
        # class_channel = preds[:, pred_index]
        class_channel = preds
    # use automatic differentiation to compute the gradients
    grads = tape.gradient(class_channel, last_conv_layer_output)
    """
    This is a vector where each entry is the mean intensity of the gradient
    over a specific feature map channel
    """
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    """
    We multiply each channel in the feature map array
    by "how important this channel is" with regard to the top predicted class
    then sum all the channels to obtain the heatmap class activation
    """
    # Drop the batch dimension, then weight each feature-map channel by
    # its pooled gradient and sum over channels.
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    # For visualization purpose, we will also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    heatmap = heatmap.numpy()
    return heatmap
#------------------------------------------------------------------------------------
# save gradcam heat map
#-----------------------------------------------------------------------------------
# jet_heatmap0.save(os.path.join(save_dir, fn2))
# jet_heatmap.save(os.path.join(save_dir, fn3))
# img0.save(os.path.join(save_dir, fn4))
if __name__ == '__main__':
    # --- Input/output locations (site-specific absolute paths). ---
    train_img_dir = '/media/bhkann/HN_RES1/HN_CONTRAST/train_img_dir'
    val_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val'
    test_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test'
    exval_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/exval'
    val_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val/gradcam'
    test_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
    # NOTE(review): exval_gradcam_dir points at the *test* gradcam dir — typo?
    exval_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
    pro_data_dir = '/home/bhkann/zezhong/git_repo/IV-Contrast-CNN-Project/pro_data'
    model_dir = '/mnt/aertslab/USERS/Zezhong/contrast_detection/model'
    # --- Run configuration. ---
    input_channel = 3
    re_size = (192, 192)
    i = 72
    crop = True
    alpha = 0.9  # heatmap overlay opacity
    saved_model = 'ResNet_2021_07_18_06_28_40'
    show_network = False
    conv_n = 'conv5'
    run_type = 'val'
    #---------------------------------------------------------
    # run main function
    #--------------------------------------------------------
    if run_type == 'val':
        save_dir = val_save_dir
    elif run_type == 'test':
        save_dir = test_save_dir
    ## load model and find conv layers
    model = load_model(os.path.join(model_dir, saved_model))
    # model.summary()
    list_i = [100, 105, 110, 115, 120, 125]  # sample indices to visualize
    for i in list_i:
        # data() is defined elsewhere in this project; presumably returns
        # the image tensor, ground truth, predicted index/score and the
        # case ID — confirm against its definition.
        image, label, pred_index, y_pred, ID = data(
            input_channel=input_channel,
            i=i,
            val_save_dir=val_save_dir,
            test_save_dir=test_save_dir
        )
        conv_list = ['conv2', 'conv3', 'conv4', 'conv5']
        conv_list = ['conv4']  # NOTE: overrides the line above — only conv4 runs
        for conv_n in conv_list:
            # Map the ResNet stage name to a concrete layer name.
            if conv_n == 'conv2':
                last_conv_layer = 'conv2_block3_1_conv'
            elif conv_n == 'conv3':
                last_conv_layer = 'conv3_block4_1_conv'
            elif conv_n == 'conv4':
                last_conv_layer = 'conv4_block6_1_conv'
            elif conv_n == 'conv5':
                last_conv_layer = 'conv5_block3_out'
            heatmap = compute_heatmap(
                model=model,
                saved_model=saved_model,
                image=image,
                pred_index=pred_index,
                last_conv_layer=last_conv_layer
            )
            # Overlay the heatmap on the input image and save it to disk.
            save_gradcam(
                image=image,
                heatmap=heatmap,
                val_gradcam_dir=val_gradcam_dir,
                test_gradcam_dir=test_gradcam_dir,
                alpha=alpha,
                i=i
            )
            print('label:', label)
            print('ID:', ID)
            print('y_pred:', y_pred)
            print('prediction:', pred_index)
            print('conv layer:', conv_n)
# if last_conv_layer is None:
# last_conv_layer = find_target_layer(
# model=model,
# saved_model=saved_model
# )
# print(last_conv_layer)
#
# if show_network == True:
# for idx in range(len(model.layers)):
# print(model.get_layer(index = idx).name)
# # compute the guided gradients
# castConvOutputs = tf.cast(convOutputs > 0, "float32")
# castGrads = tf.cast(grads > 0, "float32")
# guidedGrads = castConvOutputs * castGrads * grads
# # the convolution and guided gradients have a batch dimension
# # (which we don't need) so let's grab the volume itself and
# # discard the batch
# convOutputs = convOutputs[0]
# guidedGrads = guidedGrads[0]
#
# # compute the average of the gradient values, and using them
# # as weights, compute the ponderation of the filters with
# # respect to the weights
# weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
# cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)
#
# # grab the spatial dimensions of the input image and resize
# # the output class activation map to match the input image
# # dimensions
## (w, h) = (image.shape[2], image.shape[1])
## heatmap = cv2.resize(cam.numpy(), (w, h))
# heatmap = cv2.resize(heatmap.numpy(), (64, 64))
# # normalize the heatmap such that all values lie in the range
## # [0, 1], scale the resulting values to the range [0, 255],
## # and then convert to an unsigned 8-bit integer
# numer = heatmap - np.min(heatmap)
# eps = 1e-8
# denom = (heatmap.max() - heatmap.min()) + eps
# heatmap = numer / denom
# heatmap = (heatmap * 255).astype("uint8")
# colormap=cv2.COLORMAP_VIRIDIS
# heatmap = cv2.applyColorMap(heatmap, colormap)
# print('heatmap shape:', heatmap.shape)
## img = image[:, :, :, 0]
## print('img shape:', img.shape)
# img = image.reshape((64, 64, 3))
# print(img.shape)
# output = cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)
#
#
# return heatmap, output
| 37.327815 | 86 | 0.593808 |
677019eb7c18145cccb4dc9a2d50f339eddc7e89 | 5,038 | py | Python | start.py | xylovedd/yangyang | 4cb99491c0f046da9a39f7c916e0c85cb473c002 | [
"Apache-2.0"
] | 20 | 2019-11-14T02:53:53.000Z | 2022-03-26T02:44:04.000Z | start.py | janlle/12306 | 73b1d5423492013447ebdbbfcc6f1fe3a719ee0b | [
"Apache-2.0"
] | 9 | 2019-11-17T09:16:37.000Z | 2022-03-12T00:07:14.000Z | start.py | xylovedd/yangyang | 4cb99491c0f046da9a39f7c916e0c85cb473c002 | [
"Apache-2.0"
] | 7 | 2019-12-05T09:26:09.000Z | 2020-11-15T15:13:16.000Z | # coding:utf-8
"""
start rob task good luck!
> python start.py
"""
import datetime
import time
from sys import version_info
import threadpool
import ticket_config as config
from config.stations import check_station_exists
from train.login import Login
from train.order import Order
from train.ticket import Ticket
from util.app_util import current_date, validate_date_str, current_hour, current_timestamp, datetime_str_timestamp, \
validate_time_str
from util.logger import Logger
log = Logger('INFO')
if __name__ == '__main__':
if version_info.major != 3 or version_info.minor != 6:
log.error("Python3.6")
# Checking config information
if not validate_date_str(config.DATE):
log.error('')
exit(0)
today = datetime.datetime.strptime(current_date(), '%Y-%m-%d')
depart_day = datetime.datetime.strptime(config.DATE, '%Y-%m-%d')
difference = (depart_day - today).days
if difference > 29 or difference < 0:
log.error('12306')
exit(0)
if not check_station_exists(config.FROM_STATION) or not check_station_exists(config.TO_STATION):
log.error('')
exit(0)
if config.SELL_TIME != '':
if not validate_time_str(config.SELL_TIME):
log.error('')
exit(0)
login = Login()
while True:
hour = current_hour()
if hour > 22 or hour < 6:
time.sleep(1.5)
continue
else:
login.login()
order = Order(None)
if not order.search_unfinished_order():
break
count = 0
# Sell time
if config.SELL_TIME != '':
start_time = datetime_str_timestamp(config.DATE + ' ' + config.SELL_TIME)
log.info('Waiting for sell ticket...')
while True:
current_time = current_timestamp() + 2505600
if start_time - current_time < 0:
break
log.info('Starting...')
while True:
ticket_list = Ticket.search_stack(from_station=config.FROM_STATION, to_station=config.TO_STATION,
train_date=config.DATE)
# Filter unable ticket
ticket_list = list(filter(lambda x: x.sell_time == '', ticket_list))
if len(ticket_list) < 1:
log.info('')
continue
count += 1
if config.SEAT_TYPE:
ticket_list = [i for i in ticket_list if i.train_no in config.TRAINS_NO]
Ticket.show_tickets(ticket_list)
seat_level_all = [([0] * len(config.TRAINS_NO)) for i in range(len(config.SEAT_TYPE))]
for j, ticket in enumerate(ticket_list):
ticket_seat = ticket.get_seat_level(config.SEAT_TYPE)
for i, seat in enumerate(ticket_seat):
seat_level_all[i][j] = seat
# Choose a ticket that you can order
usable_ticket = {}
for i in seat_level_all:
for j in i:
train_no = j['train_no']
usable = j['usable']
seat_type = j['type']
if usable == '--' or usable == 'no' or usable == '*':
usable = 0
elif usable == 'yes':
usable = 21
usable = int(usable)
if usable > 0:
usable_ticket = {'train_no': train_no, 'type': seat_type, 'seat_count': usable}
break
else:
continue
break
if usable_ticket:
order_ticket = None
for ticket in ticket_list:
if ticket.train_no == usable_ticket['train_no']:
order_ticket = ticket
break
order_ticket.seat_type = usable_ticket['type']
order_ticket.seat_count = usable_ticket['seat_count']
order = Order(order_ticket)
order.submit()
log.info(order)
log.info('...')
order.order_callback()
break
else:
log.warning(': {}'.format(count))
time.sleep(1)
break
| 37.044118 | 117 | 0.502382 |
6770f980c35e8599c5cad58c26a50fad3654f206 | 2,769 | py | Python | frameworks/PHP/cakephp/setup.py | idlewan/FrameworkBenchmarks | f187ec69752f369d84ef5a262efaef85c3a6a5ab | [
"BSD-3-Clause"
] | null | null | null | frameworks/PHP/cakephp/setup.py | idlewan/FrameworkBenchmarks | f187ec69752f369d84ef5a262efaef85c3a6a5ab | [
"BSD-3-Clause"
] | null | null | null | frameworks/PHP/cakephp/setup.py | idlewan/FrameworkBenchmarks | f187ec69752f369d84ef5a262efaef85c3a6a5ab | [
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import os
import setup_util
from os.path import expanduser
| 57.6875 | 186 | 0.717226 |
67719e766692980e9b9fa0f337632160d3b1343e | 624 | py | Python | Functions/parsetool.py | AlessandroChen/KindleHelper | 7b102fec44e80585ba7a4b425429f11f0c2ca4e1 | [
"Apache-2.0"
] | 19 | 2019-02-23T02:17:28.000Z | 2022-03-17T16:27:10.000Z | Functions/parsetool.py | AlessandroChen/KindleHelper | 7b102fec44e80585ba7a4b425429f11f0c2ca4e1 | [
"Apache-2.0"
] | 1 | 2019-05-05T09:11:22.000Z | 2019-06-15T04:48:29.000Z | Functions/parsetool.py | AlessandroChen/KindleHelper | 7b102fec44e80585ba7a4b425429f11f0c2ca4e1 | [
"Apache-2.0"
] | 3 | 2019-06-09T01:53:48.000Z | 2019-09-09T07:04:51.000Z | import os, stat
| 26 | 65 | 0.464744 |
6772f47be90751a8ab2cbacfba1c7b99baa2b64a | 102 | py | Python | caiman/models.py | Rockstreet/usman_min | c15145a444cbc913a1349b69dffc0b8a45e38dbb | [
"MIT"
] | null | null | null | caiman/models.py | Rockstreet/usman_min | c15145a444cbc913a1349b69dffc0b8a45e38dbb | [
"MIT"
] | null | null | null | caiman/models.py | Rockstreet/usman_min | c15145a444cbc913a1349b69dffc0b8a45e38dbb | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext
| 10.2 | 65 | 0.784314 |
677367dc85c6f920d38d59e7cc33a0e5eafc5a8c | 6,987 | py | Python | Code/utils.py | minna-ust/SemanticMapGeneration | ab50ed853552713d4d4447b4c1d44e0b8f147318 | [
"BSD-3-Clause"
] | 8 | 2020-01-15T02:49:35.000Z | 2021-11-26T08:29:50.000Z | Code/utils.py | Hezip/SemanticMapGeneration | 98920045c1da5812f6691e6eb75bcc3413406035 | [
"BSD-3-Clause"
] | null | null | null | Code/utils.py | Hezip/SemanticMapGeneration | 98920045c1da5812f6691e6eb75bcc3413406035 | [
"BSD-3-Clause"
] | 6 | 2020-03-05T06:40:24.000Z | 2022-02-16T04:56:38.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common utility functions
Created on Sun May 27 16:37:42 2018
@author: chen
"""
import math
import cv2
import os
from imutils import paths
import numpy as np
import scipy.ndimage
def rotate_cooridinate(cooridinate_og,rotate_angle,rotate_center):
"""
calculate the coordinates after rotation
"""
rotate_angle = rotate_angle*(math.pi/180)
rotated_x = (cooridinate_og[0]-rotate_center[0])*math.cos(rotate_angle)\
-(cooridinate_og[1]-rotate_center[1])*math.sin(rotate_angle)+rotate_center[0]
rotated_y = (cooridinate_og[0]-rotate_center[0])*math.sin(rotate_angle)\
+(cooridinate_og[1]-rotate_center[1])*math.cos(rotate_angle)+rotate_center[1]
rotated_coordinate = np.array([rotated_x,rotated_y])
rotated_coordinate = np.round(rotated_coordinate).astype(np.int)
return rotated_coordinate
def mkdir(path):
"""
create new folder automatically
"""
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
def load_data(path):
"""
load data from specified folder
"""
print("[INFO] loading images...")
imgs = []
# grab the image paths and randomly shuffle them
imagePaths = sorted(list(paths.list_images(path)))
for imagePath in imagePaths:
# load the image, pre-process it, and store it in the data list
image = cv2.imread(imagePath,cv2.IMREAD_GRAYSCALE)
imgs.append(image)
return imgs
def normfun(x,sigma):
"""
function of normal distribution
"""
mu = 45
pdf = np.exp(-((x - mu)**2)/(2*sigma**2)) / (sigma * np.sqrt(2*np.pi))
return pdf
def calc_box(box,x_gap,y_gap,rotate_angle,center):
"""
calculate the size of the required surrounding environment for doorway segmentation
box: four corners' coordinates of doorway
x_gap: remained space in the vertical way
y_gap: remained space in the horizontal way
"""
door_box = np.array([box[0][::-1]+[y_gap,x_gap],box[1][::-1]+[y_gap,-x_gap],
box[2][::-1]-[y_gap,x_gap],box[3][::-1]-[y_gap,-x_gap]])
rotated_box = []
for coordinate in door_box:
box_coordinate = rotate_cooridinate(coordinate,rotate_angle,center)
rotated_box.append(box_coordinate)
rotated_box = np.array(rotated_box)
box = [np.min(rotated_box[:,0]),np.min(rotated_box[:,1]),np.max(rotated_box[:,0]),np.max(rotated_box[:,1])]
return box
def calc_IoU(candidateBound, groundTruthBounds):
"""
calculate the intersection over union
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
carea = (cx2 - cx1) * (cy2 - cy1)
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
ious = area / (carea + garea - area)
return ious
def overlapp(candidateBound, groundTruthBounds):
"""
calculate the proportion of prediction to groundtruth
"""
cx1 = candidateBound[0]
cy1 = candidateBound[1]
cx2 = candidateBound[2]
cy2 = candidateBound[3]
gx1 = groundTruthBounds[:,0]
gy1 = groundTruthBounds[:,1]
gx2 = groundTruthBounds[:,2]
gy2 = groundTruthBounds[:,3]
garea = (gx2 - gx1) * (gy2 - gy1)
x1 = np.maximum(cx1, gx1)
y1 = np.maximum(cy1, gy1)
x2 = np.minimum(cx2, gx2)
y2 = np.minimum(cy2, gy2)
w = np.maximum(0, x2 - x1)
h = np.maximum(0, y2 - y1)
area = w * h
reious = area / garea
return reious
def calc_corner(door_center,door_size,door_depth,side):
"""
calculate the corners' coordinates from the centroid, size and depth of doorway
door_corners_inside is a list of coordinates of corners close to the corridor
door_corners_outside is a list of coordinates of corners close to the room
"""
door_corners_inside = [door_center-np.array([np.int(door_size/2),0]),
door_center+np.array([door_size-np.int(door_size/2),0])]
door_corners_outside = [x-np.array([0,np.power(-1,side)*door_depth[side]])
for x in door_corners_inside]
door_corners_outside = np.array(door_corners_outside)
return door_corners_inside,door_corners_outside
def draw_door(mask,complete_map,door,door_depth,side):
"""
label the doorway on the mask and add some error inside the doorway region
"""
door_size = abs(door[1,0]-door[0,0])
door_area_inside = door+np.array([0,np.power(-1,side)*door_depth[side]])
# label the doorway on the mask
cv2.rectangle(mask,tuple(door[0][::-1]),tuple(door_area_inside[1][::-1]),255,-1)
# add a small point to emulate the error in the doorway region
if door_size>20:
if np.random.randint(4)==0:
if side ==0:
pt_center = [np.random.randint(door[0,0]+4,door[1,0]-3),np.random.randint(door[0,1],door_area_inside[0,1])]
else:
pt_center = [np.random.randint(door[0,0]+3,door[1,0]-2),np.random.randint(door_area_inside[0,1],door[0,1])]
cv2.circle(complete_map,tuple(pt_center[::-1]),np.random.choice([1,2,3]),0,-1)
return door_size
def room_division(room_space,num_room):
"""
assign the lengths of rooms according to the length of corridor and number of rooms
room_space: coordinates of corridor's side
num_room: the number of rooms on one side
rooms: a list of the coordinates belonging to different rooms
rooms_corners: a list of only the top and bottom cooridnates of different rooms
"""
rooms = []
rooms_corners=[]
a = num_room
thickness = np.random.randint(2,5)
length = room_space.shape[0]-(num_room-1)*thickness
start_point = 0
for i in range(num_room-1):
room_size = np.random.randint(length/(a+0.7),length/(a-0.7))
room = room_space[start_point:start_point+room_size,:]
rooms.append(room)
start_point +=room_size+thickness
room = room_space[start_point:,:]
rooms.append(room)
rooms = [room.astype(np.int) for room in rooms]
for x in rooms:
rooms_corner = np.concatenate((x[0,:][np.newaxis,:],x[-1,:][np.newaxis,:]),axis = 0)
rooms_corners.append(rooms_corner)
return rooms,rooms_corners
def calc_gradient(gmap):
"""
calculate the gradient of image to find the contour
"""
kernel = np.array([[1,1,1],[1,-8,1],[1,1,1]])
img = gmap.astype(np.int16)
gradient = scipy.ndimage.correlate(img,kernel,mode = 'constant',cval =127)
return gradient
| 32.347222 | 123 | 0.640046 |
6773e2cae4ca1a7fe539b33cf15047934bd21fc6 | 1,225 | py | Python | py_git/working_with_github/main.py | gabrieldemarmiesse/my_work_environment | 6175afbee154d0108992259633a1c89e560fd12f | [
"MIT"
] | 1 | 2021-02-27T19:34:43.000Z | 2021-02-27T19:34:43.000Z | py_git/working_with_github/main.py | gabrieldemarmiesse/my_work_environment | 6175afbee154d0108992259633a1c89e560fd12f | [
"MIT"
] | null | null | null | py_git/working_with_github/main.py | gabrieldemarmiesse/my_work_environment | 6175afbee154d0108992259633a1c89e560fd12f | [
"MIT"
] | null | null | null | import os
import sys
from subprocess import CalledProcessError
from working_with_github.utils import run
| 26.630435 | 80 | 0.646531 |
67752967909d812410a7c0a4e3e611d417d432d0 | 4,144 | py | Python | main.py | Lee-Kevin/Danboard | 28b4b0ecada4f29a7106bb3af38f608c0bd681b2 | [
"MIT"
] | null | null | null | main.py | Lee-Kevin/Danboard | 28b4b0ecada4f29a7106bb3af38f608c0bd681b2 | [
"MIT"
] | null | null | null | main.py | Lee-Kevin/Danboard | 28b4b0ecada4f29a7106bb3af38f608c0bd681b2 | [
"MIT"
] | null | null | null | import logging
import time
import re
import serial
from threading import Thread, Event
from respeaker import Microphone
from respeaker import BingSpeechAPI
from respeaker import PixelRing,pixel_ring
BING_KEY = '95e4fe8b3a324389be4595bd1813121c'
ser = serial.Serial('/dev/ttyS1',115200,timeout=0)
data=[0xAA,0x01,0x64,0x55]
data1=[0xAA,0x01,0x00,0x55]
data2=[0xAA,0x01,0x00,0x55,0xAA,0x00,0x00,0x55]
data3=[0xAA,0x01,0x64,0x55,0xAA,0x00,0x64,0x55]
lefthand = [0xAA,0x00,0x32,0x55]
righthand = [0xAA,0x01,0x32,0x55]
nodhead = [0xAA,0x02,0x32,0x55]
shakehead = [0xAA,0x03,0x32,0x55]
wakeup = [0xAA,0x02,0x64,0x55,0xAA,0x03,0x64,0x55]
origin = [lefthand,righthand,nodhead,shakehead]
if __name__ == '__main__':
main() | 30.925373 | 129 | 0.516651 |
677586a1690b5ab7c02ad679b07e602f0cadd49c | 1,063 | py | Python | apis/vote_message/account_voteCredit.py | DerWalundDieKatze/Yumekui | cb3174103ced7474ce6d1abd774b399557dcaf4f | [
"Apache-2.0"
] | null | null | null | apis/vote_message/account_voteCredit.py | DerWalundDieKatze/Yumekui | cb3174103ced7474ce6d1abd774b399557dcaf4f | [
"Apache-2.0"
] | null | null | null | apis/vote_message/account_voteCredit.py | DerWalundDieKatze/Yumekui | cb3174103ced7474ce6d1abd774b399557dcaf4f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
'''
@author: caroline
@license: (C) Copyright 2019-2022, Node Supply Chain Manager Corporation Limited.
@contact: caroline.fang.cc@gmail.com
@software: pycharm
@file: account_voteCredit.py
@time: 2020/1/8 11:23
@desc:
'''
from apis.API import request_Api
def voteCredit(api_name, params):
'''
curl -H "Content-Type: application/json" -X post --data '{"jsonrpc":"2.0","method":"account_voteCredit","params":["0x300fc5a14e578be28c64627c0e7e321771c58cd4","0x0ad472fd967eb77fb6e36ec40901790065155d5e","0xf4240","0x110","0x30000"],"id":1}' http://127.0.0.1:15645
:param api_name:
:param params:fromto gas
:return:hash
'''
try:
result = request_Api(api_name, params)
print("api{}".format(result))
except Exception as e:
print("api:{}".format(e))
if __name__ == '__main__':
api_name = "account_voteCredit"
params = ["0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c", "0xef32f718642426fba949b42e3aff6c56fe08b23c", "0xf4240", "0x110", "0x30000"]
voteCredit(api_name, params) | 31.264706 | 265 | 0.74318 |
67761a50a32aba1e5e8aa2095f886f17d951b648 | 1,582 | py | Python | src/pla.py | socofels/ML_base_alg | 2f84a2a35b0217d31cbcd39a881ab5eb2eff1772 | [
"MIT"
] | null | null | null | src/pla.py | socofels/ML_base_alg | 2f84a2a35b0217d31cbcd39a881ab5eb2eff1772 | [
"MIT"
] | null | null | null | src/pla.py | socofels/ML_base_alg | 2f84a2a35b0217d31cbcd39a881ab5eb2eff1772 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
#
np.random.seed(3)
shape = (100, 2)
x = (np.random.random((shape[0], shape[1])) * 100).astype(int)
x = np.c_[np.ones((shape[0], 1)), x]
w = np.array([-5, -2, 2])
w = w.reshape(-1, 1)
y = np.dot(x, w)
pos_index = np.where(y > 10)[0]
neg_index = np.where(y < 10)[0]
plt.scatter(x[:, 1][pos_index], x[:, 2][pos_index], marker="P")
plt.scatter(x[:, 1][neg_index], x[:, 2][neg_index], marker=0)
plt.show()
best_w = pla(x, y,100)
print(best_w)
| 28.763636 | 98 | 0.506953 |
6776496cc3fbe1aa360c8eaeeea056808934a9e1 | 5,974 | py | Python | pupa/scrape/vote_event.py | azban/pupa | 158378e19bcc322796aa4fb766784cbd4fd08413 | [
"BSD-3-Clause"
] | 62 | 2015-01-08T05:46:46.000Z | 2022-01-31T03:27:14.000Z | pupa/scrape/vote_event.py | azban/pupa | 158378e19bcc322796aa4fb766784cbd4fd08413 | [
"BSD-3-Clause"
] | 199 | 2015-01-10T03:19:37.000Z | 2021-05-21T20:34:58.000Z | pupa/scrape/vote_event.py | azban/pupa | 158378e19bcc322796aa4fb766784cbd4fd08413 | [
"BSD-3-Clause"
] | 35 | 2015-03-09T19:41:42.000Z | 2021-06-22T20:01:35.000Z | from ..utils import _make_pseudo_id
from .base import BaseModel, cleanup_list, SourceMixin
from .bill import Bill
from .popolo import pseudo_organization
from .schemas.vote_event import schema
from pupa.exceptions import ScrapeValueError
import re
| 37.810127 | 99 | 0.610311 |
6776771ca007095afc605ceffe189d17a91d3508 | 2,472 | py | Python | Q/questionnaire/models/models_publications.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | Q/questionnaire/models/models_publications.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | 477 | 2015-01-07T18:22:27.000Z | 2017-07-17T15:05:48.000Z | Q/questionnaire/models/models_publications.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | ####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.db import models
from django.conf import settings
import os
from Q.questionnaire import APP_LABEL, q_logger
from Q.questionnaire.q_fields import QVersionField
from Q.questionnaire.q_utils import EnumeratedType, EnumeratedTypeList
from Q.questionnaire.q_constants import *
###################
# local constants #
###################
PUBLICATION_UPLOAD_DIR = "publications"
PUBLICATION_UPLOAD_PATH = os.path.join(APP_LABEL, PUBLICATION_UPLOAD_DIR)
QPublicationFormats = EnumeratedTypeList([
QPublicactionFormat("CIM2_XML", "CIM2 XML"),
])
####################
# the actual class #
####################
| 29.783133 | 137 | 0.666667 |
67779dcfb1a4b8df315b4a6173872f0c4446530e | 3,902 | py | Python | tests/management/commands/test_create_command.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | tests/management/commands/test_create_command.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | tests/management/commands/test_create_command.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import shutil
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from six import StringIO
try:
from unittest.mock import patch
except ImportError:
from mock import patch
| 41.073684 | 163 | 0.686315 |
6777f51fd9e946ab36c26ec73ae09aa80a69635c | 4,032 | py | Python | pca.py | mghaffarynia/PCA | 4f6a041b56bcba0d772c696dc83500b83fbc0215 | [
"Apache-2.0"
] | null | null | null | pca.py | mghaffarynia/PCA | 4f6a041b56bcba0d772c696dc83500b83fbc0215 | [
"Apache-2.0"
] | null | null | null | pca.py | mghaffarynia/PCA | 4f6a041b56bcba0d772c696dc83500b83fbc0215 | [
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from cvxopt import matrix
from cvxopt import solvers
import math
main() | 33.6 | 82 | 0.640377 |
6778560530351b13b5aa71d380046a6c4d5f1c9f | 307 | py | Python | pyadds/__init__.py | wabu/pyadds | a09ac4ca89a809fecffe4e9f63b29b20df7c2872 | [
"MIT"
] | null | null | null | pyadds/__init__.py | wabu/pyadds | a09ac4ca89a809fecffe4e9f63b29b20df7c2872 | [
"MIT"
] | null | null | null | pyadds/__init__.py | wabu/pyadds | a09ac4ca89a809fecffe4e9f63b29b20df7c2872 | [
"MIT"
] | null | null | null |
Anything = AnythingType()
| 17.055556 | 34 | 0.602606 |
6778c22f5231a134154a3cc716c3a2ed3620a01a | 626 | py | Python | lookup.py | apinkney97/IP2Location-Python | 5841dcdaf826f7f0ef3e26e91524319552f4c7f8 | [
"MIT"
] | 90 | 2015-01-21T01:15:56.000Z | 2022-02-25T05:12:16.000Z | lookup.py | Guantum/IP2Location-Python | dfa5710cd527ddbd446bbd2206242de6c62758fc | [
"MIT"
] | 17 | 2015-11-09T12:48:44.000Z | 2022-03-21T00:29:00.000Z | lookup.py | Guantum/IP2Location-Python | dfa5710cd527ddbd446bbd2206242de6c62758fc | [
"MIT"
] | 36 | 2016-01-12T11:33:56.000Z | 2021-10-02T12:34:39.000Z | import os, IP2Location, sys, ipaddress
# database = IP2Location.IP2Location(os.path.join("data", "IPV6-COUNTRY.BIN"), "SHARED_MEMORY")
database = IP2Location.IP2Location(os.path.join("data", "IPV6-COUNTRY.BIN"))
try:
ip = sys.argv[1]
if ip == '' :
print ('You cannot enter an empty IP address.')
sys.exit(1)
else:
try:
ipaddress.ip_address(ip)
except ValueError:
print ('Invalid IP address')
sys.exit(1)
rec = database.get_all(ip)
print (rec)
except IndexError:
print ("Please enter an IP address to continue.")
database.close() | 25.04 | 95 | 0.618211 |
677a3f9b4fdf1b1623975d077e5ac1590631e821 | 1,927 | py | Python | ADTs/ADT_of_staff.py | hitachinsk/DataStructure | 91214dd56d9c0493458e8a36af27a46b0a2fdc03 | [
"MIT"
] | null | null | null | ADTs/ADT_of_staff.py | hitachinsk/DataStructure | 91214dd56d9c0493458e8a36af27a46b0a2fdc03 | [
"MIT"
] | null | null | null | ADTs/ADT_of_staff.py | hitachinsk/DataStructure | 91214dd56d9c0493458e8a36af27a46b0a2fdc03 | [
"MIT"
] | null | null | null | import ADT_of_person as AP
import datetime as dm
#ADT Staff()
# Staff(self, str name, str sex, tuple birthday, tuple entey_date, int salary, str position)
# name(self)
# sex(self)
# en_year(self)
# salary(self)
# set_salary(self, new_salary)
# position(self)
# set_position(self, new_position)
# birthday(self)
# detail(self)
| 28.338235 | 95 | 0.570317 |
677a7514628e1106435199d272ca3cc1956ae53f | 5,734 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/tests/completion_integration/test_handlers.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Test signal handlers for completion.
"""
from datetime import datetime
from unittest.mock import patch
import ddt
import pytest
from completion import handlers
from completion.models import BlockCompletion
from completion.test_utils import CompletionSetUpMixin
from django.test import TestCase
from pytz import utc
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from lms.djangoapps.grades.api import signals as grades_signals
from openedx.core.djangolib.testing.utils import skip_unless_lms
| 34.751515 | 109 | 0.678758 |
677b8b180da6f57636a31d49b5e83be1a6466cab | 907 | py | Python | objects/moving_wall.py | krzysztofarendt/ballroom | 7e99d14278e71be873edaf415e7253e87bc81724 | [
"MIT"
] | null | null | null | objects/moving_wall.py | krzysztofarendt/ballroom | 7e99d14278e71be873edaf415e7253e87bc81724 | [
"MIT"
] | 1 | 2020-04-05T16:46:16.000Z | 2020-04-05T16:46:16.000Z | objects/moving_wall.py | krzysztofarendt/ballroom | 7e99d14278e71be873edaf415e7253e87bc81724 | [
"MIT"
] | null | null | null | from typing import Tuple
import pygame
import numpy as np
from .wall import Wall
| 25.194444 | 67 | 0.566703 |
677b969f256bb511f2d6671783f23985dd593352 | 1,962 | py | Python | src/example/4.Color_sensor/color_sensor.light_up.py | rundhall/ESP-LEGO-SPIKE-Simulator | dc83b895ff2aac5cf2fe576d0ba98426fea60827 | [
"MIT"
] | null | null | null | src/example/4.Color_sensor/color_sensor.light_up.py | rundhall/ESP-LEGO-SPIKE-Simulator | dc83b895ff2aac5cf2fe576d0ba98426fea60827 | [
"MIT"
] | null | null | null | src/example/4.Color_sensor/color_sensor.light_up.py | rundhall/ESP-LEGO-SPIKE-Simulator | dc83b895ff2aac5cf2fe576d0ba98426fea60827 | [
"MIT"
] | null | null | null | light_up(light_1, light_2, light_3)
Sets the brightness of the individual lights on the Color Sensor.
This causes the Color Sensor to change modes, which can affect your program in unexpected ways. For example, the Color Sensor can't read colors when it's in light up mode.
Parameters
light_1
The desired brightness of light 1.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
light_2
The desired brightness of light 2.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
light_3
The desired brightness of light 3.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
Errors
TypeError
light_1, light_2, or light_3 is not an integer.
RuntimeError
The sensor has been disconnected from the Port.
Example
light_up(light_1, light_2, light_3)
Sets the brightness of the individual lights on the Color Sensor.
This causes the Color Sensor to change modes, which can affect your program in unexpected ways. For example, the Color Sensor can't read colors when it's in light up mode.
Parameters
light_1
The desired brightness of light 1.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
light_2
The desired brightness of light 2.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
light_3
The desired brightness of light 3.
Type
:
integer (a positive or negative whole number, including 0)
Values
:
0 to 100% ("0" is off, and "100" is full brightness.)
Default
:
100%
Errors
TypeError
light_1, light_2, or light_3 is not an integer.
RuntimeError
The sensor has been disconnected from the Port.
Example | 22.044944 | 171 | 0.761468 |
677c27e42ac69be12805363f7ae3e1fa6d495b1b | 7,711 | py | Python | utils.py | gbene/pydip | e16647c46611f597910a10651b38cd62191a9eaf | [
"MIT"
] | null | null | null | utils.py | gbene/pydip | e16647c46611f597910a10651b38cd62191a9eaf | [
"MIT"
] | null | null | null | utils.py | gbene/pydip | e16647c46611f597910a10651b38cd62191a9eaf | [
"MIT"
] | null | null | null | '''
Script by: Gabriele Bendetti
date: 25/06/2021
Utilities functions. This file is used to have a more organized main script. It contains:
+ Random plane orientation generator that can be used to practice plane attitude interpretation
+ Random fold generator
+ Plotter
+ Data converter from pandas dataframe to dict following the format used in plane_plot
'''
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
import obspy.imaging.beachball as bb
import mplstereonet as mpl
# Convert CSV in dictionary with valid format such as {nset:{dd:[..],d:[..]},..}
| 36.372642 | 436 | 0.727143 |
677ca1e5c9f7d3101dacf177a4ff6c8f860424e0 | 3,574 | py | Python | debug/free_transition_vi_lofar_dr2_realdata.py | Joshuaalbert/bayes_filter | 2997d60d8cf07f875e42c0b5f07944e9ab7e9d33 | [
"Apache-2.0"
] | null | null | null | debug/free_transition_vi_lofar_dr2_realdata.py | Joshuaalbert/bayes_filter | 2997d60d8cf07f875e42c0b5f07944e9ab7e9d33 | [
"Apache-2.0"
] | 3 | 2019-02-21T16:00:53.000Z | 2020-03-31T01:33:00.000Z | debug/free_transition_vi_lofar_dr2_realdata.py | Joshuaalbert/bayes_filter | 2997d60d8cf07f875e42c0b5f07944e9ab7e9d33 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import os
from bayes_filter import logging
from bayes_filter.filters import FreeTransitionVariationalBayes
from bayes_filter.feeds import DatapackFeed, IndexFeed
from bayes_filter.misc import make_example_datapack, maybe_create_posterior_solsets, get_screen_directions
from bayes_filter.datapack import DataPack, _load_array_file
import numpy as np
if __name__ == '__main__':
output_folder = os.path.join(os.path.abspath('test_filter_vi_P126+65'), 'run15')
os.makedirs(output_folder, exist_ok=True)
# datapack = make_example_datapack(5, 10, 2, name=os.path.join(output_folder, 'test_data.h5'), gain_noise=0.3,
# index_n=1, obs_type='DTEC', clobber=True,
# kernel_hyperparams={'variance': 3.5 ** 2, 'lengthscales': 15., 'a': 250.,
# 'b': 100., 'timescale': 50.})
datapack = DataPack('/net/lofar1/data1/albert/imaging/data/P126+65_compact_raw/P126+65_full_compact_raw.h5')
datapack.current_solset = 'sol000'
actual_antenna_labels, _ = datapack.antennas
antenna_labels, antennas = _load_array_file(DataPack.lofar_array)
antennas = np.stack([antennas[list(antenna_labels).index(a.astype(antenna_labels.dtype)),:] for a in actual_antenna_labels],axis=0)
datapack.set_antennas(antenna_labels, antennas)
patch_names, _ = datapack.directions
_, screen_directions = datapack.get_directions(patch_names)
screen_directions = get_screen_directions('/home/albert/ftp/image.pybdsm.srl.fits', max_N=None)
maybe_create_posterior_solsets(datapack, 'sol000', posterior_name='posterior', screen_directions=screen_directions)
# config = tf.ConfigProto(allow_soft_placement = True)
sess = tf.Session(graph=tf.Graph())#,config=config)
# from tensorflow.python import debug as tf_debug
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
with sess:
with tf.device('/device:CPU:0'):
logging.info("Setting up the index and datapack feeds.")
datapack_feed = DatapackFeed(datapack,
selection={'ant': list(range(1,7,2)) + list(range(45, 62, 1)),'dir':None, 'pol':slice(0,1,1), 'time':slice(0,None,1)},
solset='sol000',
postieror_name='posterior',
index_n=1)
logging.info("Setting up the filter.")
free_transition = FreeTransitionVariationalBayes(datapack_feed=datapack_feed, output_folder=output_folder)
free_transition.init_filter()
filter_op = free_transition.filter(
parallel_iterations=10,
kernel_params={'resolution': 4, 'fed_kernel': 'M52', 'obs_type': 'DTEC'},
num_parallel_filters=10,
solver_params=dict(iters=200,
learning_rate=0.1,
gamma=0.3,
stop_patience=6),
num_mcmc_param_samples_learn=50,
num_mcmc_param_samples_infer=100,
minibatch_size=None,
y_sigma=0.1)
logging.info("Initializing the filter")
sess.run(free_transition.initializer)
# print(sess.run([free_transition.full_block_size, free_transition.datapack_feed.time_feed.slice_size, free_transition.datapack_feed.index_feed.step]))
logging.info("Running the filter")
sess.run(filter_op)
| 56.730159 | 159 | 0.640179 |
677d0d25d6f511de2789f723ba24d4b56d61d93f | 13,237 | py | Python | train.py | Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch | 2ab7f218c11854f75b3fbb626f257672baaf7572 | [
"MIT"
] | null | null | null | train.py | Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch | 2ab7f218c11854f75b3fbb626f257672baaf7572 | [
"MIT"
] | null | null | null | train.py | Aoi-hosizora/NER-BiLSTM-CRF-Affix-PyTorch | 2ab7f218c11854f75b3fbb626f257672baaf7572 | [
"MIT"
] | null | null | null | import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import pickle
import time
import torch
from torch import optim
from typing import Tuple, List, Dict
import dataset
from model import BiLSTM_CRF
import utils
if __name__ == '__main__':
main()
| 46.939716 | 212 | 0.64637 |