hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
55fc362ece90946015f4b5b227a527251bc8be9e | 1,463 | py | Python | geolocator.py | Kugeleis/TeslaInventoryChecker | 93b6e8e2885bf8e0c15942e940d5d5626754f7a8 | [
"MIT"
] | 7 | 2021-08-13T16:46:32.000Z | 2021-12-23T17:54:33.000Z | geolocator.py | Kugeleis/TeslaInventoryChecker | 93b6e8e2885bf8e0c15942e940d5d5626754f7a8 | [
"MIT"
] | null | null | null | geolocator.py | Kugeleis/TeslaInventoryChecker | 93b6e8e2885bf8e0c15942e940d5d5626754f7a8 | [
"MIT"
] | 5 | 2021-08-13T04:38:05.000Z | 2021-12-14T06:29:11.000Z | import http.client
import json
from types import SimpleNamespace | 28.686275 | 89 | 0.584416 |
55fd77fad6026ba26284584227c80ea384f74fc0 | 4,942 | py | Python | client/runTFpose.py | BamLubi/tf-pose_Client | 07032a8b7ba80f717e74f6c893fadc6e2faa6573 | [
"MIT"
] | 1 | 2022-03-21T18:02:05.000Z | 2022-03-21T18:02:05.000Z | client/runTFpose.py | BamLubi/tf-pose_Client | 07032a8b7ba80f717e74f6c893fadc6e2faa6573 | [
"MIT"
] | null | null | null | client/runTFpose.py | BamLubi/tf-pose_Client | 07032a8b7ba80f717e74f6c893fadc6e2faa6573 | [
"MIT"
] | null | null | null | import argparse
import cv2
import time
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
"""
tf-openpose
"""
if __name__ == '__main__':
    # Entry point: TFPOSE is defined elsewhere in the full module
    # (not visible in this excerpt).
    TFPOSE()
55fe127a3e15c5c409ac7dd672e540ee28e8d786 | 413 | py | Python | oldPython/driving_app.py | Awarua-/Can-I-Have-Your-Attention-COSC475-Research | 71b5140b988aa6512a7cf5b5b6d043e20fd02084 | [
"MIT"
] | null | null | null | oldPython/driving_app.py | Awarua-/Can-I-Have-Your-Attention-COSC475-Research | 71b5140b988aa6512a7cf5b5b6d043e20fd02084 | [
"MIT"
] | null | null | null | oldPython/driving_app.py | Awarua-/Can-I-Have-Your-Attention-COSC475-Research | 71b5140b988aa6512a7cf5b5b6d043e20fd02084 | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.uix.label import Label
from kivy.core.window import Window
if __name__ == "__main__":
    # Launch the Kivy application.  DrivingApp is defined elsewhere in the
    # full module (not visible in this excerpt); Kivy apps are normally
    # started via an *instance*'s run() -- verify against the original file.
    DrivingApp.run()
| 21.736842 | 56 | 0.653753 |
55fe69df7aecb356db95a682b17146dfaf4521ce | 3,103 | py | Python | api/src/opentrons/calibration_storage/helpers.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | 1 | 2022-03-17T20:38:04.000Z | 2022-03-17T20:38:04.000Z | api/src/opentrons/calibration_storage/helpers.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | null | null | null | api/src/opentrons/calibration_storage/helpers.py | faliester/opentrons | e945d0f72fed39b0f68c0b30b7afd1981644184f | [
"Apache-2.0"
] | null | null | null | """ opentrons.calibration_storage.helpers: various miscellaneous
functions
This module has functions that you can import to save robot or
labware calibration to its designated file location.
"""
import json
from typing import Union, List, Dict, TYPE_CHECKING
from dataclasses import is_dataclass, asdict
from hashlib import sha256
from . import types as local_types
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
DictionaryFactoryType = Union[List, Dict]


def dict_filter_none(data: DictionaryFactoryType) -> Dict:
    """
    Helper function to drop items whose *value* is None from a dataclass
    before saving to file.

    :param data: an iterable of (key, value) pairs, such as the item list
        handed to a ``dataclasses.asdict``/``astuple`` dict factory
    :returns: a dict containing only the pairs whose value is not None
    """
    # Note: the original docstring said "None keys", but the filter is on
    # the *value* element of each pair.
    return {key: value for key, value in data if value is not None}
def hash_labware_def(labware_def: 'LabwareDefinition') -> str:
    """
    Helper function that takes in a labware definition and returns a
    hash of the key elements of that definition, to serve as a unique
    identifier.

    :param labware_def: Full labware definition
    :returns: sha256 hex digest string
    """
    # Remove keys that do not affect a run, so purely cosmetic changes
    # (display metadata, branding, well groupings) keep the same hash.
    blocklist = {'metadata', 'brand', 'groups'}
    def_no_metadata = {
        k: v for k, v in labware_def.items() if k not in blocklist}
    # Serialize canonically (sorted keys, no whitespace) so logically equal
    # definitions always produce the same digest regardless of key order.
    sorted_def_str = json.dumps(
        def_no_metadata, sort_keys=True, separators=(',', ':'))
    return sha256(sorted_def_str.encode('utf-8')).hexdigest()
def details_from_uri(uri: str, delimiter='/') -> local_types.UriDetails:
    """
    Unpack a labware URI into its namespace, load name and version.

    A falsy value (empty string, None) is treated as a bare load name
    with an empty namespace and a version of 1.
    """
    if not uri:
        # Callers sometimes pass just a load name (or an empty string)
        # instead of a full URI.
        return local_types.UriDetails(
            namespace='', load_name=uri, version=1)
    parts = uri.split(delimiter)
    return local_types.UriDetails(
        namespace=parts[0], load_name=parts[1], version=int(parts[2]))
def uri_from_details(namespace: str, load_name: str,
                     version: Union[str, int],
                     delimiter='/') -> str:
    """ Build a labware URI from its component parts.

    A labware URI is a string that uniquely specifies a labware definition.

    :returns str: The URI.
    """
    return delimiter.join((namespace, load_name, str(version)))
def uri_from_definition(definition: 'LabwareDefinition', delimiter='/') -> str:
    """ Build a labware URI from its definition.

    A labware URI is a string that uniquely specifies a labware definition.

    :param definition: the labware definition to build the URI for
    :param delimiter: separator placed between the URI components
    :returns str: The URI.
    """
    # Forward the delimiter: previously a caller-supplied delimiter was
    # silently ignored and the default '/' was always used.
    return uri_from_details(definition['namespace'],
                            definition['parameters']['loadName'],
                            definition['version'],
                            delimiter=delimiter)
| 32.663158 | 79 | 0.684821 |
55fe802b2df8f3e2a5853155117ec23bac4176ca | 3,264 | py | Python | scripts/OpenRobotPyxl.py | coder-cell/robotframework-openpyxl | abc839755a1e8c0208065e9c9568d7df732a6792 | [
"MIT"
] | null | null | null | scripts/OpenRobotPyxl.py | coder-cell/robotframework-openpyxl | abc839755a1e8c0208065e9c9568d7df732a6792 | [
"MIT"
] | null | null | null | scripts/OpenRobotPyxl.py | coder-cell/robotframework-openpyxl | abc839755a1e8c0208065e9c9568d7df732a6792 | [
"MIT"
] | null | null | null | import openpyxl
from robot.api.deco import keyword, library
from robot.api import logger
| 32.969697 | 75 | 0.636336 |
55fec657248ea9359324a70a7e7e0fc53b322616 | 1,852 | py | Python | club/urls.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | 1 | 2018-04-10T11:47:16.000Z | 2018-04-10T11:47:16.000Z | club/urls.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | null | null | null | club/urls.py | NSYT0607/DONGKEY | 83f926f22a10a28895c9ad71038c9a27d200e231 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'club'
urlpatterns = [
path('create/', views.create_club, name='create_club'),
path('update/<int:club_pk>', views.update_club, name='update_club'),
path('read_admin_club/<str:club>/<int:ctg_pk>/', views.read_admin_club, name='read_admin_club_ctg'),
path('<int:pk>/', views.ClubView.as_view(), name='club_view'),
path('read_admin_club/<str:club>/', views.read_admin_club, name='read_admin_club'),
path('read_non_admin_club/<str:club>/<int:ctg_pk>/', views.read_non_admin_club, name='read_non_admin_club_ctg'),
path('read_non_admin_club/<str:club>/', views.read_non_admin_club, name='read_non_admin_club'),
path('apply/<str:club>/', views.apply_club, name='apply_club'),
path('admit/<int:club>/<int:pk>/', views.admit, name='admit'),
path('update_is_admin/<int:club_pk>/<int:user_pk>/', views.update_is_admin, name='update_is_admin'),
path('manage/<int:club_pk>/', views.manage_member, name='manage_member'),
path('member_list/<int:club_pk>/non_admin', views.member_list_for_non_admin,
name='member_list_for_non_admin'),
path('create/club/rule/<str:club>/', views.create_club_rule, name='create_club_rule'),
path('read/admin_club/apply_list/<str:club>/', views.read_apply_list, name='read_apply_list'),
path('read/admin_club/rule/<str:club>/', views.read_admin_club_rule, name='read_admin_club_rule'),
path('read/non_admin_club/rule/<str:club>/', views.read_non_admin_club_rule, name='read_non_admin_club_rule'),
path('update/club/rule/<str:club>/<int:rule_pk>/', views.update_club_rule, name='update_club_rule'),
path('delete/club/rule/<str:club>/<int:rule_pk>/', views.delete_club_rule, name='delete_club_rule'),
path('exit_club/<int:club_pk>/<int:user_pk>/', views.exit_club, name='exit_club'),
]
| 52.914286 | 116 | 0.721922 |
55feec79a1027ecfba7881baf9cccd2719790498 | 1,270 | py | Python | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | 2 | 2020-12-18T21:42:05.000Z | 2020-12-21T06:07:33.000Z | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | null | null | null | interview_kickstart/01_sorting_algorithms/class_discussed_problems/python/0215_kth_largest_element_in_an_array.py | mrinalini-m/data_structures_and_algorithms | f9bebcca8002064e26ba5b46e47b8abedac39c3e | [
"MIT"
] | 2 | 2020-07-04T20:30:19.000Z | 2021-08-31T08:32:36.000Z | from random import randint
from typing import List
print(Solution().findKthLargest([4, 1, 2, 11], 2))
| 27.021277 | 90 | 0.574803 |
55ff7e57e726077e74bb90a288c442b6922782cb | 3,033 | py | Python | termpixels/util.py | loganzartman/termpixels | 4353cc0eb9f6947cd5bb8286322a8afea597d741 | [
"MIT"
] | 17 | 2019-04-11T20:05:13.000Z | 2022-03-08T22:26:44.000Z | termpixels/util.py | loganzartman/termpixels | 4353cc0eb9f6947cd5bb8286322a8afea597d741 | [
"MIT"
] | 14 | 2019-05-16T19:26:58.000Z | 2020-10-27T09:35:02.000Z | termpixels/util.py | loganzartman/termpixels | 4353cc0eb9f6947cd5bb8286322a8afea597d741 | [
"MIT"
] | 1 | 2020-12-09T16:39:44.000Z | 2020-12-09T16:39:44.000Z | from unicodedata import east_asian_width, category
from functools import lru_cache
import re
def corners_to_box(x0, y0, x1, y1):
    """Normalize two corner points into an (x, y, width, height) box.

    The corners may be given in any order; width and height count both
    corner cells (inclusive).
    """
    left, right = sorted((x0, x1))
    top, bottom = sorted((y0, y1))
    return left, top, right - left + 1, bottom - top + 1
# not sure how to determine how ambiguous characters will be rendered
# Module-level flag: whether East-Asian-"ambiguous"-width characters are
# treated as occupying two terminal cells instead of one.
_ambiguous_is_wide = False

def set_ambiguous_is_wide(is_wide):
    """ set whether ambiguous characters are considered to be wide """
    global _ambiguous_is_wide
    if _ambiguous_is_wide != is_wide:
        _ambiguous_is_wide = is_wide
        # Cached per-character widths become stale when the flag flips;
        # terminal_char_len is presumably lru_cache-decorated elsewhere in
        # this module (not visible in this excerpt) -- confirm.
        terminal_char_len.cache_clear()
def terminal_len(s):
    """Return the total width of *s* in terminal cells."""
    # Sum the per-character cell widths.
    return sum(terminal_char_len(ch) for ch in s)
def terminal_printable(ch):
    """Return True if *ch* should be considered printable in a terminal."""
    # Unicode general categories whose first letter is "C" (Cc, Cf, Cs,
    # Co, Cn) are control/format/surrogate/private-use/unassigned.
    return category(ch)[0] != "C"
_newline_regex = re.compile(r"\r\n|\r|\n")


def splitlines_print(s):
    """Split *s* at every newline sequence (\\r\\n, \\r or \\n).

    Unlike ``str.splitlines()`` this keeps all empty lines: a trailing
    newline yields a trailing empty string, and an empty input yields
    ``[""]``.
    """
    return _newline_regex.split(s)
def wrap_text(text, line_len, *, tab_size=4, word_sep=re.compile(r"\s+|\W"),
              break_word=False, hyphen="", newline="\n"):
    """ returns a terminal-line-wrapped version of text

    :param text: the text to wrap
    :param line_len: maximum line width, measured in terminal cells
    :param tab_size: number of spaces each tab character expands to
    :param break_word: if True, a word that does not fit may be broken
        mid-word (with ``hyphen`` appended at the break point)
    :param hyphen: string appended where a word is broken across lines
    :param newline: string inserted between wrapped lines
    """
    text = text.replace("\t", " " * tab_size)
    hl = terminal_len(hyphen)  # cell width reserved for the hyphen
    buf = []                   # output fragments, joined at the end
    i = 0                      # scan position in text
    col = 0                    # current column (in cells) on the line
    while i < len(text):
        # Split off the next word (up to the next separator match) and
        # the separator run that follows it.
        match = word_sep.search(text, i)
        word = text[i:]
        sep = ""
        if match:
            word = text[i:match.start()]
            sep = match.group(0)
            i = match.end()
        else:
            i = len(text)
        # handle wrappable/breakable words
        wl = terminal_len(word)
        while col + wl > line_len:
            # Break mid-word when allowed and there is room before the
            # hyphen, or unconditionally when the line is empty (a word
            # wider than a whole line would otherwise never fit).
            if break_word and col < line_len - hl or col == 0:
                while col + terminal_char_len(word[0]) <= line_len - hl:
                    buf.append(word[0])
                    col += terminal_char_len(word[0])
                    word = word[1:]
                buf.append(hyphen)
            buf.append(newline)
            col = 0
            wl = terminal_len(word)
        buf.append(word)
        col += wl
        # handle truncatable separators
        sl = terminal_len(sep)
        if col + sl > line_len:
            # Emit as much of the separator as fits on this line; the
            # remainder is discarded at the line break.
            while col + terminal_char_len(sep[0]) <= line_len:
                buf.append(sep[0])
                col += terminal_char_len(sep[0])
                sep = sep[1:]
            buf.append(newline)
            col = 0
        else:
            buf.append(sep)
            col += sl
    return "".join(buf)
| 32.265957 | 76 | 0.574019 |
55ffa154fe658f0af46cbd92f080b7eac5967357 | 303 | py | Python | json.py | AbhijithGanesh/Flask-HTTP-Server | 78f6c6985e6ffd9f4f70738771d6fcdb802964cc | [
"BSD-3-Clause"
] | null | null | null | json.py | AbhijithGanesh/Flask-HTTP-Server | 78f6c6985e6ffd9f4f70738771d6fcdb802964cc | [
"BSD-3-Clause"
] | null | null | null | json.py | AbhijithGanesh/Flask-HTTP-Server | 78f6c6985e6ffd9f4f70738771d6fcdb802964cc | [
"BSD-3-Clause"
] | null | null | null | import json
'''
READ THE DATABASE README before operating
'''
File = r'''YOUR FILE'''
# Open for *reading*: append mode ('a') cannot be read from, and
# json.load() requires the open file object as its argument.
with open(File, 'r') as fileObj:
    data = json.load(fileObj)
'''
YOUR DATA LOGIC GOES IN HERE
Once the data is changed, to write it to your JSON file use the following command.
'''
# Write the (possibly modified) data back out.  json.dump() takes the
# data object first and then an open, writable file object -- not the
# builtin `object` and not a path string.
with open(File, 'w') as fileObj:
    json.dump(data, fileObj)
3600f4551fc329b671400ff96e43cfab6f75ddb4 | 3,128 | py | Python | slash/hooks.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | slash/hooks.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | slash/hooks.py | omergertel/slash | 7dd5710a05822bbbaadc6c6517cefcbaa6397eab | [
"BSD-3-Clause"
] | null | null | null | import gossip
from .conf import config
from .utils.deprecation import deprecated
# Hook declarations for the slash test framework.  `_define` is a helper
# defined earlier in this module (not visible in this excerpt); it appears
# to register a named hook on gossip's 'slash' group -- confirm.
_define('session_start', doc="Called right after session starts")
_define('session_end', doc="Called right before the session ends, regardless of the reason for termination")
_define('after_session_start', doc="Second entry point for session start, useful for plugins relying on other plugins' session_start routine")
_define('test_interrupt', doc="Called when a test is interrupted by a KeyboardInterrupt or other similar means")
_define('test_start', doc="Called right after a test starts")
_define('test_end', doc="Called right before a test ends, regardless of the reason for termination")
_define('test_success', doc="Called on test success")
_define('test_error', doc="Called on test error")
_define('test_failure', doc="Called on test failure")
_define('test_skip', doc="Called on test skip", arg_names=("reason",))
_define('result_summary', doc="Called at the end of the execution, when printing results")
_define('exception_caught_before_debugger',
        doc="Called whenever an exception is caught, but a debugger hasn't been entered yet")
_define('exception_caught_after_debugger',
        doc="Called whenever an exception is caught, and a debugger has already been run")
# Configure the 'slash' gossip group itself.
_slash_group = gossip.get_group('slash')
# NOTE(review): strict mode presumably rejects undeclared hooks, and
# RaiseDefer presumably defers handler exceptions until all handlers have
# run -- verify both against the gossip library documentation.
_slash_group.set_strict()
_slash_group.set_exception_policy(gossip.RaiseDefer())
| 34.373626 | 142 | 0.741368 |
36011f50763e2763762534e112d2a7cea6f3af2e | 65 | py | Python | experiments/archived/20210203/bag_model/models/__init__.py | fxnnxc/text_summarization | b8c8a5f491bc44622203602941c1514b2e006fe3 | [
"Apache-2.0"
] | 5 | 2020-10-14T02:30:44.000Z | 2021-05-06T12:48:28.000Z | experiments/archived/20210119/bag_model/models/__init__.py | fxnnxc/text_summarization | b8c8a5f491bc44622203602941c1514b2e006fe3 | [
"Apache-2.0"
] | 2 | 2020-12-19T05:59:31.000Z | 2020-12-22T11:05:31.000Z | experiments/archived/20210203/bag_model/models/__init__.py | fxnnxc/text_summarization | b8c8a5f491bc44622203602941c1514b2e006fe3 | [
"Apache-2.0"
] | null | null | null | from .hub_interface import * # noqa
from .model import * # noqa | 32.5 | 36 | 0.707692 |
360246393544aa24389fdcd4c6b8786fa1b242b5 | 232 | py | Python | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | null | null | null | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task3_w1.py | MingjunGeng/Code-Knowledge | 5b376f6b3ff9e7fa0ab41c7b57e3a80313fa0daa | [
"MIT"
] | 1 | 2022-03-18T04:52:10.000Z | 2022-03-18T04:52:10.000Z | #!/usr/bin/python3
# --- 001 > U5W2P1_Task3_w1
if __name__ == "__main__":
    # Demo driver: `solution` is defined elsewhere in the full module
    # (not visible in this excerpt).
    print('----------start------------')
    i = 12
    print(solution( i ))
    print('------------end------------')
| 19.333333 | 40 | 0.465517 |
3603655d64ea26fd4eb5614d884927de08638bdc | 30,296 | py | Python | plugins/modules/oci_sch_service_connector.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_sch_service_connector.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_sch_service_connector.py | A7rMtWE57x/oci-ansible-collection | 80548243a085cd53fd5dddaa8135b5cb43612c66 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block: marks this as a community-supported
# module whose interface is still in "preview" status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_sch_service_connector
short_description: Manage a ServiceConnector resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a ServiceConnector resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new service connector in the specified compartment.
A service connector is a logically defined flow for moving data from
a source service to a destination service in Oracle Cloud Infrastructure.
For general information about service connectors, see
L(Service Connector Hub Overview,https://docs.cloud.oracle.com/iaas/service-connector-hub/using/index.htm).
- For purposes of access control, you must provide the
L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where
you want the service connector to reside. Notice that the service connector
doesn't have to be in the same compartment as the source or target services.
For information about access control and compartments, see
L(Overview of the IAM Service,https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm).
- After you send your request, the new service connector's state is temporarily
CREATING. When the state changes to ACTIVE, data begins transferring from the
source service to the target service. For instructions on deactivating and
activating service connectors, see
L(To activate or deactivate a service connector,https://docs.cloud.oracle.com/iaas/service-connector-hub/using/index.htm).
- "This resource has the following action operations in the M(oci_service_connector_actions) module: activate, deactivate."
version_added: "2.9"
author: Oracle (@oracle)
options:
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
Avoid entering confidential information.
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the
comparment to create the service connector in.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
description:
description:
- The description of the resource. Avoid entering confidential information.
- This parameter is updatable.
type: str
source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
kind:
description:
- The type descriminator.
type: str
choices:
- "logging"
required: true
log_sources:
description:
- The resources affected by this work request.
type: list
required: true
suboptions:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the log
source.
type: str
required: true
log_group_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log group.
type: str
log_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log.
type: str
tasks:
description:
- The list of tasks.
- This parameter is updatable.
type: list
suboptions:
kind:
description:
- The type descriminator.
type: str
choices:
- "logRule"
required: true
condition:
description:
- A filter or mask to limit the source used in the flow defined by the service connector.
type: str
required: true
target:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
kind:
description:
- The type descriminator.
type: str
choices:
- "notifications"
- "objectStorage"
- "monitoring"
- "functions"
- "streaming"
required: true
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic.
- Required when kind is 'notifications'
type: str
namespace:
description:
- The namespace.
- Applicable when kind is 'objectStorage'
type: str
bucket_name:
description:
- The name of the bucket. Avoid entering confidential information.
- Required when kind is 'objectStorage'
type: str
object_name_prefix:
description:
- The prefix of the objects. Avoid entering confidential information.
- Applicable when kind is 'objectStorage'
type: str
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
- Required when kind is 'monitoring'
type: str
metric_namespace:
description:
- The namespace of the metric.
- "Example: `oci_computeagent`"
- Required when kind is 'monitoring'
type: str
metric:
description:
- The name of the metric.
- "Example: `CpuUtilization`"
- Required when kind is 'monitoring'
type: str
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the function.
- Required when kind is 'functions'
type: str
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream.
- Required when kind is 'streaming'
type: str
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
service_connector_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the ServiceConnector.
- Use I(state=present) to create or update a ServiceConnector.
- Use I(state=absent) to delete a ServiceConnector.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create service_connector
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
source:
kind: logging
log_sources:
- compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
target:
kind: notifications
- name: Update service_connector using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
description: description_example
source:
kind: logging
log_sources:
- compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
tasks:
- kind: logRule
condition: condition_example
target:
kind: notifications
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update service_connector
oci_sch_service_connector:
display_name: display_name_example
description: description_example
service_connector_id: ocid1.serviceconnector.oc1..xxxxxxEXAMPLExxxxxx
- name: Delete service_connector
oci_sch_service_connector:
service_connector_id: ocid1.serviceconnector.oc1..xxxxxxEXAMPLExxxxxx
state: absent
- name: Delete service_connector using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_sch_service_connector:
display_name: display_name_example
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
state: absent
"""
RETURN = """
service_connector:
description:
- Details of the ServiceConnector resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the service connector.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
Avoid entering confidential information.
returned: on success
type: string
sample: display_name_example
description:
description:
- The description of the resource. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the service connector.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- "The date and time when the service connector was created.
Format is defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2020-01-25T21:10:29.600Z
time_updated:
description:
- "The date and time when the service connector was updated.
Format is defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
Example: `2020-01-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2020-01-25T21:10:29.600Z
lifecycle_state:
description:
- The current state of the service connector.
returned: on success
type: string
sample: CREATING
lifecyle_details:
description:
- A message describing the current state in more detail.
For example, the message might provide actionable
information for a resource in a `FAILED` state.
returned: on success
type: string
sample: lifecyle_details_example
source:
description:
- ""
returned: on success
type: complex
contains:
kind:
description:
- The type descriminator.
returned: on success
type: string
sample: logging
log_sources:
description:
- The resources affected by this work request.
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the log
source.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
log_group_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log group.
returned: on success
type: string
sample: ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx
log_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the log.
returned: on success
type: string
sample: ocid1.log.oc1..xxxxxxEXAMPLExxxxxx
tasks:
description:
- The list of tasks.
returned: on success
type: complex
contains:
kind:
description:
- The type descriminator.
returned: on success
type: string
sample: logRule
condition:
description:
- A filter or mask to limit the source used in the flow defined by the service connector.
returned: on success
type: string
sample: condition_example
target:
description:
- ""
returned: on success
type: complex
contains:
kind:
description:
- The type descriminator.
returned: on success
type: string
sample: notifications
topic_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the topic.
returned: on success
type: string
sample: ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx
namespace:
description:
- The namespace.
returned: on success
type: string
sample: namespace_example
bucket_name:
description:
- The name of the bucket. Avoid entering confidential information.
returned: on success
type: string
sample: bucket_name_example
object_name_prefix:
description:
- The prefix of the objects. Avoid entering confidential information.
returned: on success
type: string
sample: object_name_prefix_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the metric.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
metric_namespace:
description:
- The namespace of the metric.
- "Example: `oci_computeagent`"
returned: on success
type: string
sample: oci_computeagent
metric:
description:
- The name of the metric.
- "Example: `CpuUtilization`"
returned: on success
type: string
sample: CpuUtilization
function_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the function.
returned: on success
type: string
sample: ocid1.function.oc1..xxxxxxEXAMPLExxxxxx
stream_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the stream.
returned: on success
type: string
sample: ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is
predefined and scoped to namespaces.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
Example: `{orcl-cloud: {free-tier-retain: true}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2020-01-25T21:10:29.600Z",
"time_updated": "2020-01-25T21:10:29.600Z",
"lifecycle_state": "CREATING",
"lifecyle_details": "lifecyle_details_example",
"source": {
"kind": "logging",
"log_sources": [{
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"log_group_id": "ocid1.loggroup.oc1..xxxxxxEXAMPLExxxxxx",
"log_id": "ocid1.log.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"tasks": [{
"kind": "logRule",
"condition": "condition_example"
}],
"target": {
"kind": "notifications",
"topic_id": "ocid1.topic.oc1..xxxxxxEXAMPLExxxxxx",
"namespace": "namespace_example",
"bucket_name": "bucket_name_example",
"object_name_prefix": "object_name_prefix_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"metric_namespace": "oci_computeagent",
"metric": "CpuUtilization",
"function_id": "ocid1.function.oc1..xxxxxxEXAMPLExxxxxx",
"stream_id": "ocid1.stream.oc1..xxxxxxEXAMPLExxxxxx"
},
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.sch import ServiceConnectorClient
from oci.sch.models import CreateServiceConnectorDetails
from oci.sch.models import UpdateServiceConnectorDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
ServiceConnectorHelperCustom = get_custom_class("ServiceConnectorHelperCustom")
if __name__ == "__main__":
main()
| 41.219048 | 159 | 0.568887 |
360379edca40aaeb8a9f20994bc3b04375f6c37f | 210 | py | Python | Kattis/fallingapart.py | ruidazeng/online-judge | 6bdf8bbf1af885637dab474d0ccb58aff22a0933 | [
"MIT"
] | null | null | null | Kattis/fallingapart.py | ruidazeng/online-judge | 6bdf8bbf1af885637dab474d0ccb58aff22a0933 | [
"MIT"
] | null | null | null | Kattis/fallingapart.py | ruidazeng/online-judge | 6bdf8bbf1af885637dab474d0ccb58aff22a0933 | [
"MIT"
] | 1 | 2020-06-22T21:07:24.000Z | 2020-06-22T21:07:24.000Z | n = int(input())
# Pieces are taken largest-first; Alice takes turns 0, 2, 4, ... and Bob the rest.
pieces = sorted((int(tok) for tok in input().split()), reverse=True)
alice = 0
bob = 0
for turn, piece in enumerate(pieces[:n]):
    if turn % 2 == 0:
        alice += piece
    else:
        bob += piece
print(alice, bob)
3604769fe194e0541eba00a227334b835b8009c4 | 3,515 | py | Python | ffnn/rbf.py | RaoulMa/NeuralNets | f49072ac88686f753f9b5815d6cc5e71d536c3d2 | [
"MIT"
] | 1 | 2017-12-03T11:06:33.000Z | 2017-12-03T11:06:33.000Z | ffnn/rbf.py | RaoulMa/BasicNeuralNets | f49072ac88686f753f9b5815d6cc5e71d536c3d2 | [
"MIT"
] | null | null | null | ffnn/rbf.py | RaoulMa/BasicNeuralNets | f49072ac88686f753f9b5815d6cc5e71d536c3d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Choose a set of data points as weights and calculate RBF nodes for the
first layer. Those are then used as inputs for a one-layer perceptron, which gives the
output
"""
import numpy as np
import pcn
| 30.301724 | 132 | 0.588905 |
36056f0439b548a97fafa104e15d32abf2f73d7b | 836 | py | Python | Bot/config.py | faelbreseghello/Monsters-Bot | 9432cf05451ff36c3282a2d6873577e94239e724 | [
"MIT"
] | 7 | 2020-07-13T22:31:00.000Z | 2021-01-11T20:17:41.000Z | Bot/config.py | faelbreseghello/Monsters-Bot | 9432cf05451ff36c3282a2d6873577e94239e724 | [
"MIT"
] | 1 | 2020-08-19T18:58:07.000Z | 2020-08-19T18:58:07.000Z | Bot/config.py | faelbreseghello/Monsters-Bot | 9432cf05451ff36c3282a2d6873577e94239e724 | [
"MIT"
] | 1 | 2021-01-11T21:36:08.000Z | 2021-01-11T21:36:08.000Z | import datetime
import os

# General
# Read the bot token from disk. A context manager closes the handle right
# away; the old code kept the file object open for the process lifetime.
with open('../Token.txt', 'r') as _token_file:
    Token = _token_file.read()
prefix = '*'  # the command prefix
lang = 'en-us'  # 'en-us' or 'pt-br'
memes = os.listdir('../Assets/monsters_memes')  # meme image file names (memes db load)
banchannel = None  # the channel that will be used to ban messages

# Minigame setup
gamechannel = None  # You can set here or with the command "*setup"
gameinterval = 3600  # interval between the sessions, in seconds  # TEMP VALUE
winnerPoints = 3  # points awarded to whoever wins the minigame
valid = False
end_day = 30  # day of the month the minigame ends - checked at start time

# log file path
logpath = '../logs'

# Language import: pulls the localized strings for the selected language.
if lang == 'en-us':
    from en_us import *
elif lang == 'pt-br':
    from pt_br import *
else:
    raise Exception(f'There are no lang option called {lang}')
| 26.967742 | 82 | 0.685407 |
3605823cc24094c58501be0321e78ef090f4367d | 11,294 | py | Python | postscripts/_city_transformer_postscripts.py | yasahi-hpc/CityTransformer | b285525d860b4cd522a30823351ecd3cb74dcdf3 | [
"MIT"
] | null | null | null | postscripts/_city_transformer_postscripts.py | yasahi-hpc/CityTransformer | b285525d860b4cd522a30823351ecd3cb74dcdf3 | [
"MIT"
] | null | null | null | postscripts/_city_transformer_postscripts.py | yasahi-hpc/CityTransformer | b285525d860b4cd522a30823351ecd3cb74dcdf3 | [
"MIT"
] | null | null | null | """
Convert data and then visualize
Data Manupulation
1. Save metrics for validation and test data
Save figures
1. Loss curve
2. plume dispersion and errors
3. metrics
"""
import pathlib
import numpy as np
import xarray as xr
from numpy import ma
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.style
from matplotlib.colors import LogNorm
from ._base_postscript import _BasePostscripts
from .metrics import get_metric
| 43.775194 | 190 | 0.573579 |
3606767125c21d0e6b93352716d5f01b3c40e053 | 664 | py | Python | OrangeInstaller/OrangeInstaller/Testing.py | mcolombo87/OrangeInstaller | 31486ed532409f08d3b22cd7fdb05f209e3fc3e8 | [
"Apache-2.0"
] | 3 | 2017-04-08T13:52:22.000Z | 2018-10-31T20:17:20.000Z | OrangeInstaller/OrangeInstaller/Testing.py | mcolombo87/OrangeInstaller | 31486ed532409f08d3b22cd7fdb05f209e3fc3e8 | [
"Apache-2.0"
] | 46 | 2017-03-16T10:20:11.000Z | 2018-11-16T15:54:38.000Z | OrangeInstaller/OrangeInstaller/Testing.py | mcolombo87/OrangeInstaller | 31486ed532409f08d3b22cd7fdb05f209e3fc3e8 | [
"Apache-2.0"
] | 1 | 2018-08-12T01:10:41.000Z | 2018-08-12T01:10:41.000Z | from Functions import functions, systemTools
import unittest
import sys
if __name__ == '__main__':
unittest.main() | 31.619048 | 125 | 0.641566 |
36067e37b136228914619d3370100e13fb6c3ddf | 61,464 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/plugins/module_utils/network/iosxr/argspec/bgp_global/bgp_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the
# cli_rm_builder.
#
# Manually editing this file is not advised.
#
# To update the argspec make the desired changes
# in the module docstring and re-run
# cli_rm_builder.
#
#############################################
"""
The arg spec for the iosxr_bgp_global module
"""
| 49.647819 | 82 | 0.149974 |
36081a586f2b7afca6efc6de5e1d5480c80b61dc | 7,039 | py | Python | quince/ui/components/game_frame.py | DnrkasEFF/quince | 89b5699a63642fd1ed172b566670b4dd8a2f8e18 | [
"MIT"
] | null | null | null | quince/ui/components/game_frame.py | DnrkasEFF/quince | 89b5699a63642fd1ed172b566670b4dd8a2f8e18 | [
"MIT"
] | null | null | null | quince/ui/components/game_frame.py | DnrkasEFF/quince | 89b5699a63642fd1ed172b566670b4dd8a2f8e18 | [
"MIT"
] | null | null | null | """
The primary frame containing the content for the entire game
"""
import tkinter as tk
import random as random
from quince.utility import is_valid_pickup
from quince.ronda import Ronda
from quince.ui.components.opponents.opponent_frame \
import OpponentFrameHorizontal, OpponentFrameVertical
from quince.ui.components.table.table import Table
from quince.ui.components.player.player_frame import PlayerFrame
| 37.844086 | 79 | 0.574513 |
360825b11a2ba8661131f351d015f5a8ff5ce829 | 263 | py | Python | Python_Projects/numeric/lossofsignificance.py | arifBurakDemiray/TheCodesThatIWrote | 17d7bc81c516ec97110d0749e9c19d5e6ef9fc88 | [
"MIT"
] | 1 | 2019-11-01T20:18:06.000Z | 2019-11-01T20:18:06.000Z | Python_Projects/numeric/lossofsignificance.py | arifBurakDemiray/TheCodesThatIWrote | 17d7bc81c516ec97110d0749e9c19d5e6ef9fc88 | [
"MIT"
] | null | null | null | Python_Projects/numeric/lossofsignificance.py | arifBurakDemiray/TheCodesThatIWrote | 17d7bc81c516ec97110d0749e9c19d5e6ef9fc88 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 13:35:33 2020
"""
# Demonstrate loss of significance: both printed expressions are algebraically
# equal, but as x shrinks the right-hand form subtracts two nearly equal
# quantities and loses precision.
x = 1e-1
for _ in range(25):
    print((2 * x) / (1 - (x ** 2)), "......", (1 / (1 + x)) - (1 / (1 - x)))
    x *= 1e-1
360a23f4d6f5c86eb8c653834fc1cf467b915bfa | 6,479 | py | Python | alphamind/model/treemodel.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | 1 | 2020-05-18T20:57:25.000Z | 2020-05-18T20:57:25.000Z | alphamind/model/treemodel.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | null | null | null | alphamind/model/treemodel.py | atefar2/alpha-mind | 66d839affb5d81d31d5cac7e5e224278e3f99a8b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2017-12-4
@author: cheng.li
"""
import arrow
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier as RandomForestClassifierImpl
from sklearn.ensemble import RandomForestRegressor as RandomForestRegressorImpl
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier as XGBClassifierImpl
from xgboost import XGBRegressor as XGBRegressorImpl
from alphamind.model.modelbase import create_model_base
| 36.60452 | 91 | 0.515666 |
360b21f79c3d1e633d2504158f0ac62516a639e7 | 666 | py | Python | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | [
"BSD-3-Clause"
] | null | null | null | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | [
"BSD-3-Clause"
] | null | null | null | bot/welcome_leave.py | Thorappan7/loki | 26bed530997907c93914d6ac42f4a2ad62dc365c | [
"BSD-3-Clause"
] | null | null | null | from pyrogram import Client as bot, filters, emoji
# Markdown template for a clickable Telegram mention: first placeholder is the
# link text, second is the numeric user id in the tg://user deep link.
MENTION = "[{}](tg://user?id={})"
# Welcome message template; the two placeholders are filled by the (stripped)
# message handlers -- TODO confirm argument order against the handler code.
text1="hi{} {} welcome to Group Chat"
# Username of the group chat the bot serves.
group ="jangobotz"
| 33.3 | 140 | 0.683183 |
360b7ea47f3ce200b5ccf6c834ad2ed52c42e4f9 | 2,079 | py | Python | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | script.deluge/resources/lib/basictypes/xmlgenerator.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | import locale
from xml.sax import saxutils
defaultEncoding = locale.getdefaultlocale()[-1]
| 32.484375 | 82 | 0.619529 |
360ce588463dab38c7d8f02e3de4947c05f44448 | 4,877 | py | Python | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | [
"MIT"
] | null | null | null | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | [
"MIT"
] | null | null | null | scrape.py | darenr/contemporary-art--rss-scraper | 92d66d18712e781e6e96980004a17f810568e652 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import codecs
import traceback
import sys
import requests
import requests_cache
import feedparser
import collections
from bs4 import BeautifulSoup
from urlparse import urlparse, urljoin
# Cache all HTTP responses in a local SQLite database so repeated runs within
# one day do not re-fetch the same feeds.
one_day = 60 * 60 * 24
requests_cache.install_cache(
    'rss_cache', backend='sqlite', expire_after=one_day)
headers = {
    'User-Agent': 'Mozilla/5.0'
}
if __name__ == "__main__":
    # sources.json lists the RSS feeds to scrape under the 'feeds' key.
    with codecs.open('sources.json', 'rb', 'utf-8') as f:
        sources = json.loads(f.read().encode('utf-8'))
    try:
        ingest_rows = []
        for feed in sources['feeds']:
            # process_feed is defined elsewhere in this file; presumably it
            # returns a list of scraped records for one feed -- TODO confirm.
            ingest_rows += process_feed(feed)
        print ' *', 'scraped %d records' % (len(ingest_rows))
    except Exception, e:
        # Best-effort script: log the traceback and fall through (Python 2 syntax).
        traceback.print_exc()
        print str(e)
| 29.029762 | 96 | 0.552184 |
360e9e36a16342872103b6bba5218132e5fe10ac | 3,102 | py | Python | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | [
"MIT"
] | null | null | null | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | [
"MIT"
] | null | null | null | src/main/admin_api/endpoint/table_endpoint.py | lemilliard/kibo-db | 2fa1832aa6a8457b428870491aaf64e399cca4d6 | [
"MIT"
] | null | null | null | from src.main.common.model import endpoint
| 40.815789 | 106 | 0.624758 |
360ffa9621191899023f1d394dd125777d985f49 | 10,326 | py | Python | tools/testbed_generator.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | null | null | null | tools/testbed_generator.py | vkolli/5.0_contrail-test | 1793f169a94100400a1b2fafbad21daf5aa4d48a | [
"Apache-2.0"
] | 1 | 2021-06-01T22:18:29.000Z | 2021-06-01T22:18:29.000Z | tools/testbed_generator.py | lmadhusudhanan/contrail-test | bd39ff19da06a20bd79af8c25e3cde07375577cf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import yaml
import json
import sys
import re
import argparse
from distutils.version import LooseVersion
from collections import defaultdict
discovery_port = '5998'
config_api_port = '8082'
analytics_api_port = '8081'
control_port = '8083'
dns_port = '8092'
agent_port = '8085'
if __name__ == "__main__":
main(sys.argv[1:])
| 43.56962 | 126 | 0.643521 |
36111dceb7e38307b2a633510d6f416394679b79 | 9,292 | py | Python | visualization/POF/data/Base2DReader.py | alvaro-budria/body2hands | 0eba438b4343604548120bdb03c7e1cb2b08bcd6 | [
"BSD-3-Clause"
] | 63 | 2021-05-14T02:55:16.000Z | 2022-03-13T01:51:12.000Z | visualization/POF/data/Base2DReader.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-06-24T09:59:41.000Z | 2021-12-31T08:15:20.000Z | visualization/POF/data/Base2DReader.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-05-17T03:33:28.000Z | 2022-02-17T02:30:44.000Z | import tensorflow as tf
from data.BaseReader import BaseReader
import numpy as np
| 57.006135 | 183 | 0.574365 |
361199dea80437ba6ce5df8eea417f22ea366fce | 301 | py | Python | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | [
"Apache-2.0"
] | null | null | null | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | [
"Apache-2.0"
] | null | null | null | api/indexer/tzprofiles_indexer/models.py | clehner/tzprofiles | e44497bccf28d2d75cfdfa0c417dbecc0f342c12 | [
"Apache-2.0"
] | null | null | null | from tortoise import Model, fields
| 23.153846 | 43 | 0.69103 |
3611a8921184c2a719ec2f7a6c28b90498243d94 | 6,006 | py | Python | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | [
"MIT"
] | null | null | null | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | [
"MIT"
] | null | null | null | pyscripts/Backups/wikipull.py | mrchaos10/AGRICULTURAL-DOMAIN-SPECIES-IDENTIFICATION-AND-SEMI-SUPERVISED-QUERYING-SYSTEM | 2697c806e4de565767efac276d58b3b3696e4893 | [
"MIT"
] | null | null | null | #api for extracting the results from wikidata
#https://www.wikidata.org/w/api.php?search=las&language=en&uselang=en&format=jsonfm&limit=25&action=wbsearchentities
# importing modules
import requests
from lxml import etree
import wikipedia
import sys
import re
import pickle
import numpy as np
import os
import sys
import pandas as pd
import seaborn as sns
import matplotlib as plt
from tensorflow import keras
from nltk.corpus import stopwords
from nltk.corpus import wordnet,words
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
SEARCHPAGE = str(sys.argv[1])
page=wikipedia.WikipediaPage(SEARCHPAGE)
content=page.content
content_list=content.split('.')
#for i in content_list:
# print(i)
pd.set_option('display.max_columns', None)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(precision=3)
sns.set(style="darkgrid")
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
max_words = 50
tokenize = keras.preprocessing.text.Tokenizer(num_words=max_words, char_level=False)
#defining the text fields from training set as train_text and similiarly test_text
train_text= pd.DataFrame({'words':content_list})
#print(train_text)
#print("################################### TRAINING DATASET DESCRIPTION ###############################################################")
#print(train_text.describe())
#remove unwanted from the questions
#query = 'What is Nahuatl word for tomato and how did Aztecs called tomato ?'
query=str(sys.argv[2])
stopperwords = ['what','where','when','who','which','whom','whose','why','how','?']
querywords = query.split()
resultwords = [word for word in querywords if word.lower() not in stopperwords]
result = ' '.join(resultwords)
result=result.replace('?','')
#print(result)
stop_words = set(stopwords.words('english'))
word_tokens = result.split(' ')
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
result=filtered_sentence
#print(result)
syn_result=[]
ant_result=[]
for res in result:
synonyms = []
antonyms = []
for syn in wordnet.synsets(res):
for l in syn.lemmas():
synonyms.append(l.name())
if l.antonyms():
antonyms.append(l.antonyms()[0].name())
syn_result.append(synonyms)
ant_result.append(antonyms)
#print(syn_result)
simil=[]
jaccard_simil=[]
for ind in train_text.index:
sentence=str(train_text['words'][ind])
stop_words = set(stopwords.words('english'))
word_tokens = re.sub(r"[^a-zA-Z0-9]+", ' ', sentence).split(' ')
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words and len(w)>=3 :
#print(w)
filtered_sentence.append(w)
#print(filtered_sentence)
X_set = {w for w in filtered_sentence}
Y_set = {w for w in result}
if len(filtered_sentence)>=1:
sim=similiarity(X_set,Y_set)
simil.append(sim)
jaccard_simil.append(jaccard_similarity(X_set,Y_set))
else:
simil.append(0)
jaccard_simil.append(0)
#str1=" ";str2=" "
#QA=[str1.join(filtered_sentence),str2.join(result)]
#print(QA)
#google_encoder_similiarity(QA)
#cosine similiarity of question with each sentence is found
#print(simil)
result_text= pd.DataFrame({'sentence':content_list,'cosine_similiarity':simil,'jaccard_similiarity':jaccard_simil})
#print(result_text)
result_text.to_csv('simils.csv')
#for visualization purposes
result_text.plot(x='sentence', y='cosine_similiarity')
result_text.plot(x='sentence', y='jaccard_similiarity')
max=result_text.max()
max_cos=max.cosine_similiarity
max_jac=max.jaccard_similiarity
filter1 = result_text['cosine_similiarity']==max_cos
filter2 = result_text['jaccard_similiarity']==max_jac
res_record=result_text.loc[(result_text['cosine_similiarity'] == max_cos) & (result_text['jaccard_similiarity']==max_jac)]
res_sent=res_record.sentence.item()
print(res_sent) | 32.290323 | 141 | 0.705295 |
3612229195c84fc7e099e8d1a5caa6236355676b | 327 | py | Python | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | [
"MIT"
] | 1 | 2022-02-24T09:29:53.000Z | 2022-02-24T09:29:53.000Z | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | [
"MIT"
] | null | null | null | alice.py | atamurad/coinflip | ded3877c808baae843b55c1cfa4685459ba71b29 | [
"MIT"
] | null | null | null | from Crypto.Util.number import getRandomRange
from sympy.ntheory.residue_ntheory import jacobi_symbol
# Alice's side of a coin-flip-by-telephone protocol: the peer supplies a
# modulus N, Alice commits to a secret x by publishing x^2 mod N, the peer
# guesses the Jacobi symbol (x/N), and the outcome depends on whether the
# guess matches.
N = int(input("N ? "))
# Secret value drawn from [2, N) (Crypto.Util.number random helper).
x = getRandomRange(2, N)
x2 = (x*x) % N  # commitment: the square of x modulo N
J = jacobi_symbol(x, N)  # the value the peer must guess
print(f"x2 = {x2}")
guess = int(input("j_guess ? "))
# Reveal x so the peer can verify the commitment, then announce the result.
print(f"x = {x}")
print("Outcome = Heads" if guess == J else "Outcome = Tails")
| 20.4375 | 61 | 0.663609 |
36134c0670c8fbaeb545400c9c8d63641cf7bd8e | 248 | py | Python | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | [
"MIT"
] | 1 | 2021-05-19T19:30:40.000Z | 2021-05-19T19:30:40.000Z | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | [
"MIT"
] | null | null | null | accounts/management/commands/run-stats.py | ChristianJStarr/Scratch-Bowling-Series-Website | 283c7b1b38ffce660464889de3f4dc8050b4008c | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from scoreboard.ranking import calculate_statistics
| 24.8 | 65 | 0.758065 |
36136b9058bdd45bb7644ba4b0f512b2d1902d42 | 796 | py | Python | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null | LeetCode/TwoSum.py | batumoglu/Python_Algorithms | f586f386693eaddb64d6a654a89af177fd0e838f | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
sol = Solution()
print(sol.twoSum([2,7,11,15], 9))
print(sol.twoSum([3,3], 6)) | 24.121212 | 38 | 0.442211 |
3613905669a706db1108a17ee990707e01f2f9a0 | 9,028 | py | Python | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | [
"BSD-2-Clause-FreeBSD"
] | 5 | 2019-07-26T22:19:53.000Z | 2021-03-04T12:44:35.000Z | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2021-02-17T23:30:48.000Z | 2021-11-29T18:33:05.000Z | src/rospy_crazyflie/crazyflie_server/crazyflie_control.py | JGSuw/rospy_crazyflie | 696aef900138c764419d33e2c8d44ca3f3e33fa1 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-04-24T19:00:31.000Z | 2019-04-24T19:00:31.000Z | """
Copyright (c) 2018, Joseph Sullivan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the <project name> project.
"""
import numpy as np
import pickle
import time
from cflib.crazyflie import Crazyflie
from cflib.positioning.motion_commander import MotionCommander
import rospy
import actionlib
from std_msgs.msg import UInt16
from geometry_msgs.msg import Vector3
from rospy_crazyflie.msg import *
from rospy_crazyflie.srv import *
from rospy_crazyflie.motion_commands import *
| 37.305785 | 85 | 0.620735 |
3613d5c133ef8f38bb7353d844f6628f9fe5e6c6 | 901 | py | Python | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | 1 | 2021-01-27T12:20:35.000Z | 2021-01-27T12:20:35.000Z | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | examples/imagenet_resnet50.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | import numpy as np
import time
from inaccel.keras.applications.resnet50 import decode_predictions, ResNet50
from inaccel.keras.preprocessing.image import ImageDataGenerator, load_img
# Benchmark: run ResNet50 (ImageNet weights) over a directory of images and
# report total duration and throughput.
model = ResNet50(weights='imagenet')
data = ImageDataGenerator(dtype='int8')
# class_mode=None: the generator yields only images, no labels.
images = data.flow_from_directory('imagenet/', target_size=(224, 224), class_mode=None, batch_size=64)
begin = time.monotonic()
preds = model.predict(images, workers=16)
end = time.monotonic()
print('Duration for', len(preds), 'images: %.3f sec' % (end - begin))
print('FPS: %.3f' % (len(preds) / (end - begin)))
# Sanity check on two known images: each is expanded to a batch of one and
# the two are stacked into a single batch before prediction.
dog = load_img('data/dog.jpg', target_size=(224, 224))
dog = np.expand_dims(dog, axis=0)
elephant = load_img('data/elephant.jpg', target_size=(224, 224))
elephant = np.expand_dims(elephant, axis=0)
images = np.vstack([dog, elephant])
preds = model.predict(images)
print('Predicted:', decode_predictions(preds, top=1))
| 30.033333 | 102 | 0.739179 |
3613f877b238035dadd508a419d964a6d0b3a50e | 1,084 | py | Python | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | [
"MIT"
] | null | null | null | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | [
"MIT"
] | null | null | null | api/permissions.py | letsdowork/yamdb_api | f493309dc52528d980463047d311d898714f3111 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission, SAFE_METHODS
from .models import User
| 31.882353 | 67 | 0.681734 |
3613fd30924745bd186e0751c87237612b35913e | 8,090 | py | Python | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | [
"MIT"
] | null | null | null | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | [
"MIT"
] | null | null | null | morphological_classifier/classifier.py | selflect11/morphological_classifier | 2ef3c3e1e894220238a36b633d4a164a14fe820f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from morphological_classifier.perceptron import AveragedPerceptron
from morphological_classifier.performance_metrics import PerformanceMetrics
from morphological_classifier.stats_plot import StatsPlotter
from morphological_classifier import constants, utils
import numpy as np
from collections import defaultdict
from sklearn import model_selection
import pickle
import random
| 38.341232 | 92 | 0.583931 |
361427d326c18b286127aad246549f8822f63a94 | 4,263 | py | Python | autoprover/evaluation/evaluation.py | nclab-admin/autoprover | 3fe5a0bb6132ae320461d538bb06c4f0fd604b27 | [
"MIT"
] | 1 | 2019-01-10T08:04:58.000Z | 2019-01-10T08:04:58.000Z | autoprover/evaluation/evaluation.py | nclab-admin/autoprover | 3fe5a0bb6132ae320461d538bb06c4f0fd604b27 | [
"MIT"
] | null | null | null | autoprover/evaluation/evaluation.py | nclab-admin/autoprover | 3fe5a0bb6132ae320461d538bb06c4f0fd604b27 | [
"MIT"
] | 1 | 2019-10-08T16:47:58.000Z | 2019-10-08T16:47:58.000Z | """evaluation function for chromosome
"""
import subprocess
from subprocess import PIPE, STDOUT
from autoprover.evaluation.coqstate import CoqState
def preprocess(theorem, chromosome):
    """Assemble a complete Coq script from a theorem and a chromosome.

    Args:
        theorem (list): strings containing the theorem statement and any
            pre-provided tactics.
        chromosome (list): tactic strings.

    Returns:
        bytes: UTF-8 encoded script terminated with ``Qed.``, ready to be
        piped into coqtop.
    """
    theorem_part = "\n".join(theorem).encode("utf-8") + b"\n"
    tactic_part = "\n".join(chromosome).encode("utf-8") + b"\n"
    return theorem_part + tactic_part + b"Qed."
def run_coqtop(script):
    """Pipe a Coq script into coqtop and capture its combined output.

    Args:
        script (bytes): a Coq script.

    Returns:
        str: everything coqtop wrote (stdout and stderr are interleaved
        because stderr is redirected to stdout).
    """
    proc = subprocess.Popen('coqtop', shell=False,
                            stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    # Feed the whole script at once and wait for coqtop to finish.
    stdout_data, _ = proc.communicate(input=script)
    return stdout_data.decode('utf-8')
def get_coq_states(result, proof, chromosome, threshold=-1):
    """Return the valid Coq states parsed from coqtop output.

    Useless steps (states identical to an earlier one) and error steps are
    filtered out; parsing stops early once the proof is complete or the
    error budget is exhausted.

    Args:
        result (string): plain-text output from coqtop.
        proof (Proof): Proof instance; supplies theorem_name, offset,
            pre_feed_tactic and the tactics helper.
        chromosome (list): the chromosome (tactic strings) that produced
            ``result``.
        threshold (int): number of error tactics tolerated before parsing
            stops; -1 ignores all errors (error_count can never reach it).

    Returns:
        list of CoqState
    """
    # the first chunk precedes the first prompt and carries no state
    splited_result = split_coqtop_result(result, proof.theorem_name)[1:]
    offset = proof.offset  # number of pre-fed tactics before the chromosome starts
    coq_states = []
    tactics_set = set()  # unrepeatable tactics already used once
    error_count = 0
    def check_overlap(coq_states, append_state):
        """If a state is equal to previous state, remove all element from that.
        """
        # Mutates coq_states in place: a state equal to an earlier one means
        # the tactics in between looped back to a known state, so everything
        # after the earlier occurrence is dropped instead of appending a
        # duplicate.
        for index, state in enumerate(coq_states):
            if state == append_state:
                del coq_states[index+1:]
                return
        coq_states.append(append_state)
    for (i, step) in enumerate(splited_result):
        if i < offset:
            # Steps produced by the pre-fed tactics are always kept as-is.
            coq_states.append(CoqState(step, proof.pre_feed_tactic[i]))
            continue
        # create a new state for this chromosome step
        if i == (len(splited_result)-1):
            # lastest step corresponds to the closing "Qed."
            state = CoqState(step, "Qed.")
        else:
            state = CoqState(step, chromosome[i-offset])
        if state.is_proof:
            # Proof complete: keep the final state and stop parsing.
            coq_states.append(state)
            break
        elif state.is_error_state or state == coq_states[-1]:
            # Error, or a tactic that changed nothing: count it, keep nothing.
            # NOTE(review): coq_states[-1] assumes at least one earlier state
            # exists (i.e. offset >= 1) -- confirm with callers.
            error_count += 1
        elif proof.tactics.is_unrepeatable(chromosome[i-offset]):
            if chromosome[i-offset] in tactics_set:
                # Second use of an unrepeatable tactic counts as an error,
                # but the state is still folded in via check_overlap.
                error_count += 1
                check_overlap(coq_states, state)
            else:
                tactics_set.add(chromosome[i-offset])
                check_overlap(coq_states, state)
        else:
            check_overlap(coq_states, state)
        if error_count == threshold:
            break
    return coq_states
def split_coqtop_result(result, theorem_name):
    """Cut coqtop output into per-step chunks.

    Every coqtop prompt starts with ``"<theorem_name> <"``; the output is
    split on that marker and the marker is re-attached to every chunk,
    including the text that precedes the first prompt.

    Args:
        result (str): the raw output of coqtop.

    Returns:
        list: one string per coqtop state.
    """
    marker = theorem_name + " <"
    return [marker + chunk for chunk in result.split(marker)]
def calculate_fitness(coq_states, limit_hyp=100, limit_goal=300):
    """Score a gene from its sequence of Coq states.

    The score is the sum of len(hypothesis) / len(goal) over well-sized
    states; states with an oversized hypothesis or goal contribute a
    saturating penalty instead.

    Args:
        coq_states (list): a list of CoqState.
        limit_hyp (int): hypothesis length above which a state is penalized.
        limit_goal (int): goal length above which a state is penalized.

    Returns:
        float: fitness of the gene; higher is better. An empty goal would be
        a bug and aborts the process after dumping the offending state.
    """
    fitness = 0.0
    for state in coq_states:
        n_hyp = len(state.hypothesis)
        n_goal = len(state.goal)
        if n_hyp > limit_hyp:
            # Oversized hypothesis: penalty approaches -1 as it grows.
            fitness -= n_hyp / (n_hyp + limit_hyp)
            print(state.hypothesis)
            continue
        if n_goal > limit_goal:
            # Oversized goal: same saturating penalty shape.
            fitness -= n_goal / (n_goal + limit_goal)
            continue
        try:
            fitness += n_hyp / n_goal
        except ZeroDivisionError:
            # Should be unreachable; dump the state and bail out.
            print(state.data)
            exit(1)
    return fitness
3616727077997c5d64715fd00bfc6be4f8ba4ad8 | 1,323 | py | Python | steapy/velocity_field.py | Sparsh-Sharma/SteaPy | d6f3bee7eb1385c83f65f345d466ef740db4ed3b | [
"MIT"
] | 1 | 2017-04-28T13:05:13.000Z | 2017-04-28T13:05:13.000Z | steapy/velocity_field.py | Sparsh-Sharma/SteaPy | d6f3bee7eb1385c83f65f345d466ef740db4ed3b | [
"MIT"
] | null | null | null | steapy/velocity_field.py | Sparsh-Sharma/SteaPy | d6f3bee7eb1385c83f65f345d466ef740db4ed3b | [
"MIT"
] | null | null | null | import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
from .integral import *
def get_velocity_field(panels, freestream, X, Y):
    """
    Computes the velocity field on a given 2D mesh.

    The field is the superposition of the uniform freestream and the
    velocity induced by every source panel.

    Parameters
    ---------
    panels: 1D array of Panel objects
        The source panels.
    freestream: Freestream object
        The freestream conditions.
    X: 2D Numpy array of floats
        x-coordinates of the mesh points.
    Y: 2D Numpy array of floats
        y-coordinate of the mesh points.

    Returns
    -------
    u: 2D Numpy array of floats
        x-component of the velocity vector field.
    v: 2D Numpy array of floats
        y-component of the velocity vector field.
    """
    # uniform-flow contribution, resolved along the x and y axes
    ones = numpy.ones_like(X, dtype=float)
    u = freestream.u_inf * math.cos(freestream.alpha) * ones
    v = freestream.u_inf * math.sin(freestream.alpha) * ones
    # superpose the velocity induced by each source panel
    vectorized_integral = numpy.vectorize(integral)
    for source_panel in panels:
        strength = source_panel.sigma / (2.0 * math.pi)
        u = u + strength * vectorized_integral(X, Y, source_panel, 1, 0)
        v = v + strength * vectorized_integral(X, Y, source_panel, 0, 1)
    return u, v
| 29.4 | 87 | 0.652305 |
3616ce719b349e94d2bd7c4da3e42707eb0de49d | 4,125 | py | Python | admin/hams_admin/container_manager.py | hku-systems/hams | 3a5720657252c650c9a6c5d9b674f7ea6153e557 | [
"Apache-2.0"
] | 6 | 2020-08-19T11:46:23.000Z | 2021-12-24T07:34:15.000Z | admin/hams_admin/container_manager.py | hku-systems/hams | 3a5720657252c650c9a6c5d9b674f7ea6153e557 | [
"Apache-2.0"
] | 1 | 2021-03-25T23:40:15.000Z | 2021-03-25T23:40:15.000Z | admin/hams_admin/container_manager.py | hku-systems/hams | 3a5720657252c650c9a6c5d9b674f7ea6153e557 | [
"Apache-2.0"
] | 2 | 2020-10-31T16:48:39.000Z | 2021-03-07T09:14:25.000Z | import abc
from .exceptions import HamsException
import logging
# Constants
# Internal port numbers used by the HAMS components, as named below.
HAMS_INTERNAL_QUERY_PORT = 1337
HAMS_INTERNAL_MANAGEMENT_PORT = 1338
HAMS_INTERNAL_RPC_PORT = 7000
HAMS_INTERNAL_METRIC_PORT = 1390
HAMS_INTERNAL_REDIS_PORT = 6379
# Docker label keys used to tag and discover HAMS-managed containers.
HAMS_DOCKER_LABEL = "ai.hams.container.label"
HAMS_NAME_LABEL = "ai.hams.name"
HAMS_MODEL_CONTAINER_LABEL = "ai.hams.model_container.label"
HAMS_QUERY_FRONTEND_CONTAINER_LABEL = "ai.hams.query_frontend.label"
HAMS_MGMT_FRONTEND_CONTAINER_LABEL = "ai.hams.management_frontend.label"
HAMS_QUERY_FRONTEND_ID_LABEL = "ai.hams.query_frontend.id"
# Sentinel image name marking a model that ships without its own container.
CONTAINERLESS_MODEL_IMAGE = "NO_CONTAINER"
# Label keys under which each service advertises its port.
HAMS_DOCKER_PORT_LABELS = {
    'redis': 'ai.hams.redis.port',
    'query_rest': 'ai.hams.query_frontend.query.port',
    'query_rpc': 'ai.hams.query_frontend.rpc.port',
    'management': 'ai.hams.management.port',
    'metric': 'ai.hams.metric.port'
}
HAMS_METRIC_CONFIG_LABEL = 'ai.hams.metric.config'
# NOTE: we use '_' as the delimiter because kubernetes allows the use
# '_' in labels but not in deployment names. We force model names and
# versions to be compliant with both limitations, so this gives us an extra
# character to use when creating labels.
_MODEL_CONTAINER_LABEL_DELIMITER = "_"
| 28.448276 | 137 | 0.701333 |
36170542f3bcc2d21452673199202e71e6245707 | 11,044 | py | Python | solidata_api/api/api_auth/endpoint_user_tokens.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 2 | 2019-12-17T22:27:53.000Z | 2020-06-22T12:47:37.000Z | solidata_api/api/api_auth/endpoint_user_tokens.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 13 | 2019-06-16T15:42:33.000Z | 2022-02-26T05:12:34.000Z | solidata_api/api/api_auth/endpoint_user_tokens.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 1 | 2019-12-17T22:27:58.000Z | 2019-12-17T22:27:58.000Z | # -*- encoding: utf-8 -*-
"""
endpoint_user_tokens.py
"""
from solidata_api.api import *
# from log_config import log, pformat
log.debug(">>> api_auth ... creating api endpoints for USER_TOKENS")
### create namespace
ns = Namespace('tokens', description='User : tokens freshening related endpoints')
### import models
from solidata_api._models.models_user import * #User_infos, AnonymousUser
model_user = User_infos(ns)
model_user_access = model_user.model_access
model_user_login_out = model_user.model_login_out
model_old_refresh_token = ExpiredRefreshToken(ns).model
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### ROUTES
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### cf : response codes : https://restfulapi.net/http-status-codes/
# cf : http://flask-jwt-extended.readthedocs.io/en/latest/refresh_tokens.html
"""
RESPONSE CODES
cf : https://restfulapi.net/http-status-codes/
200 (OK)
201 (Created)
202 (Accepted)
204 (No Content)
301 (Moved Permanently)
302 (Found)
303 (See Other)
304 (Not Modified)
307 (Temporary Redirect)
400 (Bad Request)
401 (Unauthorized)
403 (Forbidden)
404 (Not Found)
405 (Method Not Allowed)
406 (Not Acceptable)
412 (Precondition Failed)
415 (Unsupported Media Type)
500 (Internal Server Error)
501 (Not Implemented)
"""
# @ns.route('/new_refresh_token' )
# @ns.route('/new_refresh_token/', defaults={ 'old_refresh_token':'your_old_refresh_token' } )
| 30.174863 | 104 | 0.611554 |
361738fea8f68576a66d9ee50d5cd2a6da5685cc | 4,750 | py | Python | tektonbundle/tektonbundle.py | chmouel/tektonbundle | 6d44e47f9b6d5c2d1da4663f9c7bfcab50108074 | [
"MIT"
] | 3 | 2020-10-22T04:57:21.000Z | 2021-06-03T16:03:44.000Z | tektonbundle/tektonbundle.py | chmouel/tektonbundle | 6d44e47f9b6d5c2d1da4663f9c7bfcab50108074 | [
"MIT"
] | 3 | 2020-10-27T14:30:33.000Z | 2020-11-12T11:39:07.000Z | tektonbundle/tektonbundle.py | chmouel/tektonbundle | 6d44e47f9b6d5c2d1da4663f9c7bfcab50108074 | [
"MIT"
] | null | null | null | """Main module."""
import copy
import io
import logging
import re
from typing import Dict, List
import yaml
log = logging.getLogger(__name__)
TEKTON_TYPE = ("pipeline", "pipelinerun", "task", "taskrun", "condition")
def parse(yamlfiles: List[str], parameters: Dict[str, str],
          skip_inlining: List[str]) -> Dict[str, str]:
    """Parse a bunch of YAML files and bundle them into PipelineRuns.

    Every document is template-expanded with *parameters*, non-Kubernetes
    and non-Tekton documents are set aside, Pipeline/Task references are
    inlined (except names listed in *skip_inlining*), and all resulting
    PipelineRuns are serialized into a single YAML stream.

    Returns a dict with keys 'bundle' (the YAML text),
    'ignored_not_tekton' and 'ignored_not_k8' (lists of ignored docs).
    Raises TektonBundleError when no PipelineRun is found or a referenced
    Pipeline is missing.
    """
    # yaml_documents[kind][name] -> parsed document, kind in TEKTON_TYPE
    yaml_documents = {}  # type: Dict[str, Dict]
    results = []
    notkube_ignored = []
    nottekton_ignored = []
    for yaml_file in yamlfiles:
        for document in yaml.load_all(tpl_apply(yaml_file, parameters),
                                      Loader=yaml.Loader):
            # anything without apiVersion/kind is not a Kubernetes manifest
            if 'apiVersion' not in document or 'kind' not in document:
                notkube_ignored.append(
                    yaml.dump(
                        document,
                        Dumper=yaml.Dumper,
                    ))
                continue
            name = (document['metadata']['generateName']
                    if 'generateName' in document['metadata'].keys() else
                    document['metadata']['name'])
            kind = document['kind'].lower()
            if kind not in TEKTON_TYPE:
                nottekton_ignored.append(
                    yaml.dump(
                        document,
                        Dumper=yaml.Dumper,
                    ))
                continue
            yaml_documents.setdefault(kind, {})
            yaml_documents[kind][name] = document
    if 'pipelinerun' not in yaml_documents:
        raise TektonBundleError("We need at least a PipelineRun")
    # if we have pipeline (i.e: not embedded) then expand all tasksRef insides.
    if 'pipeline' in yaml_documents:
        for pipeline in yaml_documents['pipeline']:
            mpipe = copy.deepcopy(yaml_documents['pipeline'][pipeline])
            resolved = resolve_task(mpipe, pipeline, yaml_documents,
                                    skip_inlining)
            yaml_documents['pipeline'][pipeline] = copy.deepcopy(resolved)
    # For all pipelinerun expands the pipelineRef, keep it as is if it's a
    # pipelineSpec.
    for pipeline_run in yaml_documents['pipelinerun']:
        mpr = copy.deepcopy(yaml_documents['pipelinerun'][pipeline_run])
        if 'pipelineSpec' in mpr['spec']:
            mpr = resolve_task(mpr, pipeline_run, yaml_documents,
                               skip_inlining)
        elif 'pipelineRef' in mpr['spec']:
            refpipeline = mpr['spec']['pipelineRef']['name']
            if 'pipeline' not in yaml_documents or refpipeline not in yaml_documents[
                    'pipeline']:
                raise TektonBundleError(
                    f"PR: {pipeline_run} reference a Pipeline: {refpipeline} not in repository"
                )
            # replace the reference with the resolved pipeline's spec
            del mpr['spec']['pipelineRef']
            mpr['spec']['pipelineSpec'] = yaml_documents['pipeline'][
                refpipeline]['spec']
        # Adjust names with generateName if needed
        # TODO(chmou): make it optional, we maybe don't want to do this sometime
        if 'name' in mpr['metadata']:
            name = mpr['metadata']['name']
            mpr['metadata']['generateName'] = name + "-"
            del mpr['metadata']['name']
        results.append(mpr)
    ret = {
        'bundle':
        yaml.dump_all(results,
                      Dumper=yaml.Dumper,
                      default_flow_style=False,
                      allow_unicode=True),
        'ignored_not_tekton':
        nottekton_ignored,
        'ignored_not_k8':
        notkube_ignored
    }
    return ret
3617e8e260511cf8ba4c78d54d81b23de02b0480 | 2,385 | py | Python | Scripts/sims4communitylib/classes/time/common_alarm_handle.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 118 | 2019-08-31T04:33:18.000Z | 2022-03-28T21:12:14.000Z | Scripts/sims4communitylib/classes/time/common_alarm_handle.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 15 | 2019-12-05T01:29:46.000Z | 2022-02-18T17:13:46.000Z | Scripts/sims4communitylib/classes/time/common_alarm_handle.py | ColonolNutty/Sims4CommunityLibrary | 684f28dc3c7deb4d9fd520e21e63942b65a91d31 | [
"CC-BY-4.0"
] | 28 | 2019-09-07T04:11:05.000Z | 2022-02-07T18:31:40.000Z | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
import os
from sims4.commands import Command, CommandType, CheatOutput
from sims4communitylib.utils.common_time_utils import CommonTimeUtils
from typing import Any, Callable
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
if not ON_RTD:
from scheduling import Timeline
from alarms import AlarmHandle
from date_and_time import DateAndTime, TimeSpan
else:
# noinspection PyMissingOrEmptyDocstring
# noinspection PyMissingOrEmptyDocstring
# noinspection PyMissingOrEmptyDocstring
# noinspection PyMissingOrEmptyDocstring
if not ON_RTD:
| 33.125 | 176 | 0.704403 |
3617f1fcc07ed43dd799a0a44d4cb775cd1c7478 | 1,884 | py | Python | blackbook/migrations/0022_cleanup.py | bsiebens/blackbook | 636d1adc8966db158914abba43e360c6a0d23173 | [
"MIT"
] | 1 | 2021-05-10T19:15:48.000Z | 2021-05-10T19:15:48.000Z | blackbook/migrations/0022_cleanup.py | bsiebens/BlackBook | 636d1adc8966db158914abba43e360c6a0d23173 | [
"MIT"
] | 20 | 2020-12-27T15:56:24.000Z | 2021-09-22T18:25:02.000Z | blackbook/migrations/0022_cleanup.py | bsiebens/BlackBook | 636d1adc8966db158914abba43e360c6a0d23173 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-22 22:56
from django.db import migrations
| 25.459459 | 56 | 0.525478 |
36188c3a24365e2e84cb2983da3bc80cf1611d71 | 1,431 | py | Python | core/myauthbackend.py | devendraotari/HRMS_project | c6480903c2a8212c6698987e8ced96a114c4d7c7 | [
"BSD-2-Clause"
] | null | null | null | core/myauthbackend.py | devendraotari/HRMS_project | c6480903c2a8212c6698987e8ced96a114c4d7c7 | [
"BSD-2-Clause"
] | null | null | null | core/myauthbackend.py | devendraotari/HRMS_project | c6480903c2a8212c6698987e8ced96a114c4d7c7 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib.auth.backends import BaseBackend
from django.contrib.auth import get_user_model
| 40.885714 | 87 | 0.596785 |
3618b1890763a3badcdbdde17119e78da0fca799 | 1,655 | py | Python | apps/core/management/commands/update-banned-email.py | sparcs-kaist/sparcssso | 9aeedc02652dadacb44c6a4ba06901f6d2372223 | [
"MIT"
] | 18 | 2015-07-06T06:20:14.000Z | 2022-03-20T23:45:40.000Z | apps/core/management/commands/update-banned-email.py | sparcs-kaist/sparcssso | 9aeedc02652dadacb44c6a4ba06901f6d2372223 | [
"MIT"
] | 170 | 2015-07-07T08:42:03.000Z | 2022-03-24T17:31:17.000Z | apps/core/management/commands/update-banned-email.py | sparcs-kaist/sparcssso | 9aeedc02652dadacb44c6a4ba06901f6d2372223 | [
"MIT"
] | 11 | 2015-07-07T20:42:19.000Z | 2022-01-12T22:39:59.000Z | import requests
from django.core.management.base import BaseCommand, CommandError
from apps.core.models import EmailDomain
DATA_URL = (
'https://raw.githubusercontent.com/martenson/disposable-email-domains'
'/master/disposable_email_blacklist.conf'
)
| 30.648148 | 75 | 0.586103 |
361a68b0ba7eff6cb23d87bfa96dce0e03ec7a08 | 1,659 | py | Python | LeetCode/Python3/Math/1323. Maximum 69 Number.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | LeetCode/Python3/Math/1323. Maximum 69 Number.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | LeetCode/Python3/Math/1323. Maximum 69 Number.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # Given a positive integer num consisting only of digits 6 and 9.
# Return the maximum number you can get by changing at most one digit (6 becomes 9, and 9 becomes 6).
# Example 1:
# Input: num = 9669
# Output: 9969
# Explanation:
# Changing the first digit results in 6669.
# Changing the second digit results in 9969.
# Changing the third digit results in 9699.
# Changing the fourth digit results in 9666.
# The maximum number is 9969.
# Example 2:
# Input: num = 9996
# Output: 9999
# Explanation: Changing the last digit 6 to 9 results in the maximum number.
# Example 3:
# Input: num = 9999
# Output: 9999
# Explanation: It is better not to apply any change.
# Constraints:
# 1 <= num <= 10^4
# num's digits are 6 or 9.
# Hints:
# Convert the number in an array of its digits.
# Brute force on every digit to get the maximum number.
| 25.921875 | 101 | 0.57384 |
361c83b1b112f9b41fc07f6d3ac9327c01a72ef7 | 3,245 | py | Python | ticketing/userticket/createqrcode.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | null | null | null | ticketing/userticket/createqrcode.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | 1 | 2018-08-14T03:15:00.000Z | 2018-08-21T00:33:34.000Z | ticketing/userticket/createqrcode.py | autlamps/tessera-backend | 1d02e8e3651c1ad75bdf4e5d0e61765a2a6de0c2 | [
"MIT"
] | null | null | null | import base64
import rsa
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from ticketing.models import BalanceTicket, RideTicket
def creatertqrcode(self, rtticket: RideTicket):
    """Build a signed QR-code payload for a ride ticket.

    NOTE(review): this function takes ``self`` but is defined at module
    level -- it looks like a method extracted from a class.  ``self`` is
    expected to provide a ``__sign`` callable; confirm against the class
    these helpers originally belonged to.
    """
    uid = rtticket.qr_code
    type = 'r'  # ticket-type tag: 'r' marks a ride ticket (checked in verify())
    val = rtticket.initial_value
    name = rtticket.short_name
    return self.__sign(uid, type, val, name)
def __sign(self, uid, type, val, name):
    """Sign ``"uid.type.val.name"`` with the RSA private key.

    Returns ``"<payload>:<base64 signature>"`` and also stores the result
    on ``self.ID``.  NOTE(review): module-level function with a ``self``
    parameter -- presumably extracted from a class that holds ``private``
    (an rsa private key); confirm.
    """
    tosign = str(uid) + '.' + type + '.' + val + '.' + name
    # SHA-256 RSA signature, base64-encoded so it can be embedded in a QR code
    signed = base64.b64encode(rsa.sign(tosign.encode('UTF-8'),
                                       self.private, 'SHA-256'))
    toreturn = str(tosign) + ':' + str(signed.decode('UTF-8'))
    self.ID = toreturn
    return toreturn
def verify(self, qrcode):
    """Check a scanned QR code's RSA signature and look up its ticket.

    ``qrcode`` is ``"<payload>:<base64 signature>"`` as produced by
    ``__sign``, where the payload starts with ``"<uuid>.<type>"``.
    Returns ``{"ticket": ..., "type": 'b'|'r'}`` on success; raises
    VerifyFailedError when the signature check or the database lookup
    fails.  NOTE(review): when the type tag is neither 'b' nor 'r' the
    function falls through to the debug prints at the bottom and
    implicitly returns None -- confirm whether that is intended.
    """
    parts = qrcode.split(':')
    hash = base64.b64decode(parts[1])
    try:
        rsa.verify(parts[0].encode(), hash, self.public)
        print("Verified")
        # payload format: "<uuid>.<ticketType>...."
        user = parts[0].split(".")
        uuid = user[0]
        ticketType = user[1]
        if ticketType == "b":
            try:
                # balance tickets are keyed by qr_code_id
                ticket = BalanceTicket.objects.get(qr_code_id=uuid)
                return {"ticket": ticket, "type": ticketType}
            except ObjectDoesNotExist:
                raise VerifyFailedError()
        elif ticketType == "r":
            try:
                # ride tickets are keyed by qr_code
                ticket = RideTicket.objects.get(qr_code=uuid)
                return {"ticket": ticket, "type": ticketType}
            except ObjectDoesNotExist:
                raise VerifyFailedError()
    except rsa.VerificationError:
        print("Verification Error")
        raise VerifyFailedError
    # Create an error for better usability
    print("Hash 0 : " + parts[0])
    print("Hash 1 : " + parts[1])
361df2d9546970e2a42e2d2a91b1abc8fb87455f | 3,015 | py | Python | CollabMoodle.py | dantonbertuol/PyCollab | b36c968f5f1aabf1a322559854db24aa6691ac63 | [
"MIT"
] | null | null | null | CollabMoodle.py | dantonbertuol/PyCollab | b36c968f5f1aabf1a322559854db24aa6691ac63 | [
"MIT"
] | null | null | null | CollabMoodle.py | dantonbertuol/PyCollab | b36c968f5f1aabf1a322559854db24aa6691ac63 | [
"MIT"
] | null | null | null | import datetime
from webService import WebService
import Utilidades as ut
import sys
if __name__ == "__main__":
param = ut.mainMoodle(sys.argv[1:])
#param = 'moodle_plugin_sessions.txt', '', '2020-08-01 00:00:00,2020-12-31 00:00:00'
webService = WebService()
report = []
ret = 0
dates = param[2].split(",")
if param[0] != '' and param[1] == '':
print("Moodle Sesions...")
moodlSession = ut.leerUUID(param[0])
for sesion in moodlSession:
try:
nombre_session, date_session = webService.get_moodle_sesion_name(sesion)
except:
print('Erro WS')
nombre_session = None
if nombre_session == None or nombre_session == ' ':
print("Session name not found!")
else:
print(nombre_session)
try:
lista_grabaciones = webService.get_moodle_lista_grabaciones(nombre_session, dates, date_session)
except:
lista_grabaciones = None
if lista_grabaciones is None:
print("There's no recording for: " + nombre_session)
else:
for grabacion in lista_grabaciones:
try:
ret = ut.downloadrecording(grabacion['recording_id'],grabacion['recording_name'], dates)
except:
ret = 2
try:
if ret == 1:
report.append([grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 2:
report.append(
['Erro no download', grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
elif ret == 3:
if [grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']] in report:
print("EXISTE")
else:
report.append(
[grabacion['recording_id'], grabacion['recording_name'], grabacion['duration'],
grabacion['storageSize'], grabacion['created']])
except:
print("Nao foi possivel criar o relatorio")
if len(report) > 0:
try:
print(ut.crearReporteMoodle(report, dates))
except:
print("Nao foi possivel criar o relatorio")
else:
print('No recordings was found')
| 47.109375 | 125 | 0.469983 |
361df35a0da6b8703efd3e8c9fc20bd6344aa676 | 5,549 | py | Python | eva/views_data.py | aqutor/CE_Backend | 1265f7169aea0b6b8cff3fda742a8a5a295fe9ea | [
"MIT"
] | null | null | null | eva/views_data.py | aqutor/CE_Backend | 1265f7169aea0b6b8cff3fda742a8a5a295fe9ea | [
"MIT"
] | null | null | null | eva/views_data.py | aqutor/CE_Backend | 1265f7169aea0b6b8cff3fda742a8a5a295fe9ea | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework import status
from eva.serializers import WorkSerializer, PageSerializer, WordSerializer, RadicalSerializer
from eva.models import Work, Page, Word, Radical
from rest_framework.response import Response
from django.http import Http404
| 29.359788 | 93 | 0.548567 |
361ee510413d5ff2e8e4d3a5aa90b44d49e73ac2 | 1,447 | py | Python | program/appID3.py | trungvuong55555/FlaskAPI_ExpertSystem | 6f7a557fefd093e901070fe2ec363e0c2ed8ffa2 | [
"MIT"
] | null | null | null | program/appID3.py | trungvuong55555/FlaskAPI_ExpertSystem | 6f7a557fefd093e901070fe2ec363e0c2ed8ffa2 | [
"MIT"
] | null | null | null | program/appID3.py | trungvuong55555/FlaskAPI_ExpertSystem | 6f7a557fefd093e901070fe2ec363e0c2ed8ffa2 | [
"MIT"
] | null | null | null | from flask import Flask, request, render_template
import pickle
app = Flask(__name__)  # create the Flask application

# Load the trained ID3 model once at import time.  Using a context manager
# (instead of the previous bare open()) guarantees the pickle file handle
# is closed even if unpickling fails.
with open('modelID3.pkl', 'rb') as model_file:
    model = pickle.load(model_file)

if __name__ == "__main__":
    app.run(debug=True)
| 28.94 | 99 | 0.57982 |
361ef035e9cacacdf5098c184cd2ac1fe4e53da4 | 474 | py | Python | examples/deldup.py | rlan/pydmv | 97619bbd2732b2ad8e64c97fe862a84dc147af93 | [
"MIT"
] | null | null | null | examples/deldup.py | rlan/pydmv | 97619bbd2732b2ad8e64c97fe862a84dc147af93 | [
"MIT"
] | null | null | null | examples/deldup.py | rlan/pydmv | 97619bbd2732b2ad8e64c97fe862a84dc147af93 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os
import sys
import argparse
#Auto-import parent module
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import voc
#from pydmv import voc
# Command-line interface: a single positional argument, the VOC index file.
parser = argparse.ArgumentParser(description="Print VOC index file without duplicates")
parser.add_argument("file", help="Input index file")
args = parser.parse_args()
in_file = args.file
# Print each index the first time it appears; the set gives O(1) dedup checks.
my_bag = set()
for index in voc.stream(in_file):
    if index not in my_bag:
        print(index)
        my_bag.add(index)
| 21.545455 | 87 | 0.736287 |
362141754e09b014da8e86cb87845189f022576c | 448 | py | Python | home_work/App/views.py | jianghaiming0707/python1806homework | 2509f75794ac0ef8711cb1d1c2c4378408619a75 | [
"Apache-2.0"
] | 1 | 2018-06-28T01:01:35.000Z | 2018-06-28T01:01:35.000Z | home_work/App/views.py | jianghaiming0707/python1806homework | 2509f75794ac0ef8711cb1d1c2c4378408619a75 | [
"Apache-2.0"
] | 6 | 2018-06-25T04:50:23.000Z | 2018-07-03T10:24:08.000Z | home_work/App/views.py | jianghaiming0707/python1806homework | 2509f75794ac0ef8711cb1d1c2c4378408619a75 | [
"Apache-2.0"
] | 42 | 2018-06-19T09:48:04.000Z | 2019-09-15T01:20:06.000Z | from django.shortcuts import render
from django.http import HttpResponse
from App.models import *
# Create your views here. | 34.461538 | 67 | 0.747768 |
3621452a8a1c3599be31b149a9b725b8f48992db | 962 | py | Python | Xiaomi_8/day_start/show_screen.py | Lezaza/hotpoor_autoclick_xhs | 52eafad8cce59353a9de5bf6e488e8a2602e5536 | [
"Apache-2.0"
] | 1 | 2021-12-21T10:42:46.000Z | 2021-12-21T10:42:46.000Z | Xiaomi_8/day_start/show_screen.py | 2218084076/hotpoor_autoclick_xhs | a52446ba691ac19e43410a465dc63f940c0e444d | [
"Apache-2.0"
] | 2 | 2021-11-03T11:36:44.000Z | 2021-11-05T07:58:13.000Z | Xiaomi_8/day_start/show_screen.py | 2218084076/hotpoor_autoclick_xhs | a52446ba691ac19e43410a465dc63f940c0e444d | [
"Apache-2.0"
] | 1 | 2021-10-09T10:28:57.000Z | 2021-10-09T10:28:57.000Z | import os
import cv2
import time
path = "C:/Users/lenovo/Documents/Sites/github/hotpoor_autoclick_xhs/Xiaomi_8/day_start/hotpoor_autoclick_cache"
cache = "hotpoor_autoclick_cache/screen.png"
while True:
get_image()
print("get_image")
# load_image()
i1 = cv2.imread("%s/screen.png"%path)
scale_percent=40
w=int(i1.shape[1]*scale_percent/100)
h=int(i1.shape[0]*scale_percent/100)
dim=(w,h)
resized = cv2.resize(i1,dim,interpolation=cv2.INTER_AREA)
cv2.imshow("path", resized)
k = cv2.waitKey(1) | 30.0625 | 112 | 0.686071 |
3622cd97012a4b31faded8cb9b89d6c988e04256 | 3,359 | py | Python | hknweb/events/views/event_transactions/show_event.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | hknweb/events/views/event_transactions/show_event.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | hknweb/events/views/event_transactions/show_event.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator
from hknweb.utils import markdownify
from hknweb.utils import allow_public_access
from hknweb.events.constants import (
ACCESSLEVEL_TO_DESCRIPTION,
ATTR,
RSVPS_PER_PAGE,
)
from hknweb.events.models import Event, Rsvp, AttendanceForm
from hknweb.events.utils import format_url
from hknweb.utils import get_access_level
| 33.59 | 88 | 0.669842 |
36230cd6aca7407d1176980b4ef533beffe100f8 | 9,756 | py | Python | pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HPN-ICF-VOICE-IF-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HPN-ICF-VOICE-IF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-VOICE-IF-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:41:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
hpnicfVoice, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfVoice")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, Gauge32, NotificationType, MibIdentifier, ModuleIdentity, Counter32, IpAddress, iso, Counter64, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Gauge32", "NotificationType", "MibIdentifier", "ModuleIdentity", "Counter32", "IpAddress", "iso", "Counter64", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hpnicfVoiceInterface = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13))
hpnicfVoiceInterface.setRevisions(('2007-12-10 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpnicfVoiceInterface.setRevisionsDescriptions(('The initial version of this MIB file.',))
if mibBuilder.loadTexts: hpnicfVoiceInterface.setLastUpdated('200712101700Z')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setOrganization('')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setContactInfo('')
if mibBuilder.loadTexts: hpnicfVoiceInterface.setDescription('This MIB file is to provide the definition of the voice interface general configuration.')
hpnicfVoiceIfObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1))
hpnicfVoiceIfConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1), )
if mibBuilder.loadTexts: hpnicfVoiceIfConfigTable.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfConfigTable.setDescription('The table contains configurable parameters for both analog voice interface and digital voice interface.')
hpnicfVoiceIfConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpnicfVoiceIfConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfConfigEntry.setDescription('The entry of voice interface table.')
hpnicfVoiceIfCfgCngOn = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgCngOn.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgCngOn.setDescription('This object indicates whether the silence gaps should be filled with background noise. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgNonLinearSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgNonLinearSwitch.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgNonLinearSwitch.setDescription('This object expresses the nonlinear processing is enable or disable for the voice interface. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line. Currently, only digital voice subscriber lines can be set disabled.')
hpnicfVoiceIfCfgInputGain = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-140, 139))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgInputGain.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgInputGain.setDescription('This object indicates the amount of gain added to the receiver side of the voice interface. Unit is 0.1 db. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgOutputGain = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-140, 139))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgOutputGain.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgOutputGain.setDescription('This object indicates the amount of gain added to the send side of the voice interface. Unit is 0.1 db. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgEchoCancelSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelSwitch.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelSwitch.setDescription('This object indicates whether the echo cancellation is enabled. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgEchoCancelDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelDelay.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgEchoCancelDelay.setDescription("This object indicates the delay of the echo cancellation for the voice interface. This value couldn't be modified unless hpnicfVoiceIfCfgEchoCancelSwitch is enable. Unit is milliseconds. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line. The default value of this object is 32.")
hpnicfVoiceIfCfgTimerDialInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 300))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerDialInterval.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerDialInterval.setDescription('The interval, in seconds, between two dialing numbers. The default value of this object is 10 seconds. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 with loop-start or ground-start protocol voice subscriber line.')
hpnicfVoiceIfCfgTimerFirstDial = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 300))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerFirstDial.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgTimerFirstDial.setDescription('The period of time, in seconds, before dialing the first number. The default value of this object is 10 seconds. It is applicable to FXO, FXS subscriber lines and E1/T1 with loop-start or ground-start protocol voice subscriber line.')
hpnicfVoiceIfCfgPrivateline = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgPrivateline.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgPrivateline.setDescription('This object indicates the E.164 phone number for plar mode. It is applicable to FXO, FXS, E&M subscriber lines and E1/T1 voice subscriber line.')
hpnicfVoiceIfCfgRegTone = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 39, 13, 1, 1, 1, 10), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(2, 3), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVoiceIfCfgRegTone.setStatus('current')
if mibBuilder.loadTexts: hpnicfVoiceIfCfgRegTone.setDescription('This object uses 2 or 3 letter country code specify voice parameters of different countrys. This value will take effect on all voice interfaces of all cards on the device.')
mibBuilder.exportSymbols("HPN-ICF-VOICE-IF-MIB", hpnicfVoiceInterface=hpnicfVoiceInterface, hpnicfVoiceIfCfgEchoCancelDelay=hpnicfVoiceIfCfgEchoCancelDelay, hpnicfVoiceIfConfigEntry=hpnicfVoiceIfConfigEntry, PYSNMP_MODULE_ID=hpnicfVoiceInterface, hpnicfVoiceIfObjects=hpnicfVoiceIfObjects, hpnicfVoiceIfCfgNonLinearSwitch=hpnicfVoiceIfCfgNonLinearSwitch, hpnicfVoiceIfCfgTimerFirstDial=hpnicfVoiceIfCfgTimerFirstDial, hpnicfVoiceIfCfgPrivateline=hpnicfVoiceIfCfgPrivateline, hpnicfVoiceIfCfgInputGain=hpnicfVoiceIfCfgInputGain, hpnicfVoiceIfCfgRegTone=hpnicfVoiceIfCfgRegTone, hpnicfVoiceIfCfgTimerDialInterval=hpnicfVoiceIfCfgTimerDialInterval, hpnicfVoiceIfCfgCngOn=hpnicfVoiceIfCfgCngOn, hpnicfVoiceIfCfgEchoCancelSwitch=hpnicfVoiceIfCfgEchoCancelSwitch, hpnicfVoiceIfCfgOutputGain=hpnicfVoiceIfCfgOutputGain, hpnicfVoiceIfConfigTable=hpnicfVoiceIfConfigTable)
| 154.857143 | 863 | 0.791513 |
3624a7b0fa4de41698f562d63ac67b0fc5033a54 | 1,230 | py | Python | data_access_layer/abstract_classes/customer_dao.py | Alejandro-Fuste/python-bank-application | 46e44c830ab8c13fd64c08e2db4f743a7d1d35de | [
"MIT"
] | null | null | null | data_access_layer/abstract_classes/customer_dao.py | Alejandro-Fuste/python-bank-application | 46e44c830ab8c13fd64c08e2db4f743a7d1d35de | [
"MIT"
] | 15 | 2021-11-22T16:05:42.000Z | 2021-12-08T16:43:37.000Z | data_access_layer/abstract_classes/customer_dao.py | Alejandro-Fuste/python-bank-application | 46e44c830ab8c13fd64c08e2db4f743a7d1d35de | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from entities.customers import Customer
from typing import List
| 27.954545 | 103 | 0.68374 |
3624ec443278ac728598d1df9f161910bd3e69fe | 975 | py | Python | examples/make_sphere_graphic.py | itamar-dw/spherecluster | 7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768 | [
"MIT"
] | 186 | 2018-09-14T06:51:59.000Z | 2022-03-30T12:56:01.000Z | examples/make_sphere_graphic.py | itamar-dw/spherecluster | 7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768 | [
"MIT"
] | 20 | 2018-10-16T15:40:08.000Z | 2022-03-23T14:37:52.000Z | examples/make_sphere_graphic.py | itamar-dw/spherecluster | 7c9b81d8bb6c6c2a0c569c17093bf0b4550f2768 | [
"MIT"
] | 40 | 2018-09-13T21:05:50.000Z | 2022-03-09T16:05:53.000Z | import sys
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # NOQA
import seaborn  # NOQA
from spherecluster import sample_vMF
plt.ion()

# Draw n_clusters von Mises-Fisher samples whose mean directions are made
# orthonormal via a QR decomposition of a random Gaussian matrix.
n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]  # concentration parameter per cluster
num_points_per_class = 250

# Sample each cluster; store as (3, num_points_per_class) arrays.
Xs = []
for nn in range(n_clusters):
    new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
    Xs.append(new_X.T)

# Scatter the three clusters on the unit sphere.
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(
    1, 1, 1, aspect='equal', projection='3d',
    adjustable='box-forced', xlim=[-1.1, 1.1], ylim=[-1.1, 1.1],
    zlim=[-1.1, 1.1]
)
colors = ['b', 'r', 'g']
for nn in range(n_clusters):
    ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])
ax.set_aspect('equal')
plt.axis('off')
plt.show()
# Block until the user presses Enter so the interactive figure stays open.
# (Was `r_input()`, an undefined name left over from a Python 2
# `raw_input()` conversion, which raised NameError.)
input()
| 20.744681 | 70 | 0.644103 |
3626cc57d851fc7ca881f30af21ead100d822372 | 1,043 | py | Python | pointnet2/tf_ops/sampling/tf_sampling.py | ltriess/pointnet2_keras | 29be56161c8c772442b85b8fda300d10ff7fe7b3 | [
"MIT"
] | 2 | 2022-02-06T23:12:15.000Z | 2022-03-28T06:48:52.000Z | pointnet2/tf_ops/sampling/tf_sampling.py | ltriess/pointnet2_keras | 29be56161c8c772442b85b8fda300d10ff7fe7b3 | [
"MIT"
] | null | null | null | pointnet2/tf_ops/sampling/tf_sampling.py | ltriess/pointnet2_keras | 29be56161c8c772442b85b8fda300d10ff7fe7b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Furthest point sampling
Original author: Haoqiang Fan
Modified by Charles R. Qi
All Rights Reserved. 2017.
Modified by Larissa Triess (2020)
"""
import os
import sys
import tensorflow as tf
from tensorflow.python.framework import ops
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sampling_module = tf.load_op_library(os.path.join(BASE_DIR, "tf_sampling_so.so"))
def farthest_point_sample(k: int, points: tf.Tensor) -> tf.Tensor:
    """Select ``k`` points per cloud via iterative farthest point sampling.

    Thin Python wrapper around the custom CUDA op loaded from
    ``tf_sampling_so.so``.

    Arguments:
        k : int
            The number of points to sample from each cloud.
        points : tf.Tensor(shape=(batch_size, P1, 3), dtype=tf.float32)
            The input point clouds with P1 points given in xyz.

    Returns:
        indices : tf.Tensor(shape=(batch_size, k), dtype=tf.int32)
            The indices of the k sampled points within ``points``.
    """
    sample_op = sampling_module.farthest_point_sample
    return sample_op(points, k)
ops.NoGradient("FarthestPointSample")
| 25.439024 | 81 | 0.701822 |
3626d45d010076e81364291684b9ea5d2493fb6c | 561 | py | Python | gql/resolvers/mutations/scope.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | [
"Apache-2.0"
] | 4 | 2020-06-20T11:54:04.000Z | 2021-09-07T11:41:32.000Z | gql/resolvers/mutations/scope.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | [
"Apache-2.0"
] | null | null | null | gql/resolvers/mutations/scope.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | [
"Apache-2.0"
] | null | null | null | from ariadne import MutationType
from datetime import datetime as dt
from models.scope import Scope
from schemas.helpers.normalize import change_keys
from schemas.scope import ScopeCreate
mutations_resolvers = MutationType()
| 31.166667 | 78 | 0.773619 |
36288867b24d81ec55fecb507750b334c645d763 | 5,188 | py | Python | napari_subboxer/interactivity_utils.py | alisterburt/napari-subboxer | f450e72a5c1c64c527c4f999644f99f3109c36e8 | [
"BSD-3-Clause"
] | 3 | 2021-11-01T18:18:43.000Z | 2021-11-25T02:59:50.000Z | napari_subboxer/interactivity_utils.py | alisterburt/napari-subboxer | f450e72a5c1c64c527c4f999644f99f3109c36e8 | [
"BSD-3-Clause"
] | 1 | 2021-11-24T20:59:18.000Z | 2021-11-24T20:59:24.000Z | napari_subboxer/interactivity_utils.py | alisterburt/napari-subboxer | f450e72a5c1c64c527c4f999644f99f3109c36e8 | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional
import napari
import napari.layers
import numpy as np
from napari.utils.geometry import project_point_onto_plane
def point_in_bounding_box(point: np.ndarray, bounding_box: np.ndarray) -> bool:
    """Check whether an nD point lies strictly inside an nD bounding box.

    Parameters
    ----------
    point : np.ndarray
        (n,) array with the coordinates of the point to test.
    bounding_box : np.ndarray
        (2, n) array holding the minimum (row 0) and maximum (row 1)
        corners of the box, as returned by `Layer._extent_data`.

    Returns
    -------
    bool
        True when the point is strictly between both corners; points
        exactly on the boundary count as outside.
    """
    lower, upper = bounding_box
    inside = np.logical_and(point > lower, point < upper)
    return bool(inside.all())
def drag_data_to_projected_distance(
    start_position, end_position, view_direction, vector
):
    """Project a mouse-drag displacement onto one or more 3D vectors.

    The drag end point is first flattened onto a "pseudo-canvas" — the
    plane through the drag start point whose normal is the view
    direction — and the resulting in-plane drag vector is then dotted
    with each target vector.

    Parameters
    ----------
    start_position : np.ndarray
        Starting point of the drag vector in data coordinates.
    end_position : np.ndarray
        End point of the drag vector in data coordinates.
    view_direction : np.ndarray
        Normal of the plane onto which the drag vector is projected.
    vector : np.ndarray
        (3,) unit vector or (n, 3) array thereof onto which the drag
        vector is projected, in data coordinates.

    Returns
    -------
    projected_distance : (1, ) or (n, ) np.ndarray of float
    """
    target_vectors = np.atleast_2d(vector)
    drag_start = np.array(start_position)
    drag_end = np.array(end_position)

    # Flatten the drag end point onto the pseudo-canvas plane anchored at
    # the drag start point.
    end_on_canvas = project_point_onto_plane(
        drag_end, drag_start, view_direction
    )
    canvas_drag = np.squeeze(end_on_canvas - drag_start)

    # Dot the in-plane drag vector with every target vector at once.
    return np.einsum('j, ij -> i', canvas_drag, target_vectors).squeeze()
def rotation_matrices_to_align_vectors(a: np.ndarray, b: np.ndarray):
    """Build rotation matrices r such that ``r @ a = b``.

    Trig-free construction for normalised inputs, based on
    https://iquilezles.org/www/articles/noacos/noacos.htm — the matrix is
    assembled as ``k * (axis ⊗ axis) + cos(t) * I + [axis]_x`` where
    ``axis = a × b``, ``cos(t) = a · b`` and ``k = 1 / (1 + cos(t))``.
    Both ``a`` and ``b`` must be unit vectors.

    Parameters
    ----------
    a : np.ndarray
        (1 or n, 3) normalised vector(s) of length 3.
    b : np.ndarray
        (1 or n, 3) normalised vector(s) of length 3.

    Returns
    -------
    r : np.ndarray
        (3, 3) rotation matrix or (n, 3, 3) array thereof.
    """
    a = a.reshape(-1, 3)
    b = b.reshape(-1, 3)

    # Rotation axis and (co)sine of the angle between each vector pair.
    rot_axis = np.cross(a, b, axis=1)
    cos_t = np.einsum('ij, ij -> i', a, b)
    scale = 1 / (1 + cos_t)

    # Symmetric outer-product term: scale * (axis ⊗ axis).
    matrices = scale[:, None, None] * (
        rot_axis[:, :, None] * rot_axis[:, None, :]
    )
    # Diagonal cosine term.
    matrices[:, 0, 0] += cos_t
    matrices[:, 1, 1] += cos_t
    matrices[:, 2, 2] += cos_t
    # Skew-symmetric cross-product term [axis]_x.
    matrices[:, 0, 1] -= rot_axis[:, 2]
    matrices[:, 0, 2] += rot_axis[:, 1]
    matrices[:, 1, 0] += rot_axis[:, 2]
    matrices[:, 1, 2] -= rot_axis[:, 0]
    matrices[:, 2, 0] -= rot_axis[:, 1]
    matrices[:, 2, 1] += rot_axis[:, 0]
    return matrices.squeeze()
def theta2rotz(theta: np.ndarray) -> np.ndarray:
    """Build rotation matrices about the z-axis from angles in degrees.

        Rz = [[c(t), -s(t), 0],
              [s(t),  c(t), 0],
              [   0,     0, 1]]

    Accepts a scalar or array of angles; returns a (3, 3) matrix for a
    single angle or an (n, 3, 3) stack otherwise (via ``squeeze``).
    """
    radians = np.deg2rad(np.asarray(theta).reshape(-1))
    c = np.cos(radians)
    s = np.sin(radians)

    out = np.zeros((radians.shape[0], 3, 3), dtype=float)
    out[:, 0, 0] = c
    out[:, 1, 1] = c
    out[:, 0, 1] = -s
    out[:, 1, 0] = s
    out[:, 2, 2] = 1
    return out.squeeze()
3628f30f1da84eb0aeefd00f476c1a8932e5c523 | 1,245 | py | Python | src/attribute_generator.py | neutron101/cs231A-project | a147a3cc7de66c852dfc6b8cb9c65780c9d55d07 | [
"MIT"
] | null | null | null | src/attribute_generator.py | neutron101/cs231A-project | a147a3cc7de66c852dfc6b8cb9c65780c9d55d07 | [
"MIT"
] | null | null | null | src/attribute_generator.py | neutron101/cs231A-project | a147a3cc7de66c852dfc6b8cb9c65780c9d55d07 | [
"MIT"
] | null | null | null | import numpy as np
| 24.411765 | 144 | 0.702811 |
362a49ef92737d73a5b3be88d93c98a6d215ec47 | 6,265 | py | Python | theDarkArtsClass.py | biechuyangwang/UniversalAutomaticAnswer | 4c558396cc04b36224e9be4409f80f9654c4aa88 | [
"Apache-2.0"
] | 2 | 2021-12-11T19:11:59.000Z | 2021-12-24T19:32:12.000Z | theDarkArtsClass.py | biechuyangwang/UniversalAutomaticAnswer | 4c558396cc04b36224e9be4409f80f9654c4aa88 | [
"Apache-2.0"
] | null | null | null | theDarkArtsClass.py | biechuyangwang/UniversalAutomaticAnswer | 4c558396cc04b36224e9be4409f80f9654c4aa88 | [
"Apache-2.0"
] | null | null | null | #
import cv2
import sys
sys.path.append(r"C:\\Users\\SAT") #
from UniversalAutomaticAnswer.conf.confImp import get_yaml_file
from UniversalAutomaticAnswer.screen.screenImp import ScreenImp #
from UniversalAutomaticAnswer.ocr.ocrImp import OCRImp
from UniversalAutomaticAnswer.util.filter import filterQuestion, filterLine, filterPersonState
from paddleocr import PaddleOCR
#
conf_path = 'conf/conf.yml'
conf_data = get_yaml_file(conf_path)
# ocr
ocr = OCRImp(conf_data)
#
screen = ScreenImp(conf_data)
# left click
import win32api
import win32con
walk_coordinate = [[330,640],[1260,630],[740,550]] #
card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1185,830]] # ~ 1 2 3 4
# charms_coordinate = [[200,770,300,855],[630,700,676,777],[765,690,818,778],[910,700,960,775],[1060,700,1108,786],[556, 878,637, 922]] # states: steps 1 2 3 4 HP
# copy_coordinate = [[540,400,650,500],[980,345,1090,445],[1160,320,1260,420]]
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_charmsclass.png'
# img = cv2.imread(img_path)
# img_steps = img[770:855,200:300]
# img1 = img[700:800,600:700]
# img2 = img[690:778,765:818] # 850 716
# img3 = img[700:775,910:960]
# img4 = img[700:786,1060:1108]
# img5 = img[878:932,556:637] #
# walk_coordinate = [[850,716],[846,712],[854,720]]
# card_coordinate = [[522,820],[695,798],[838,821],[987,818],[1122,830]] # ~ 1 2 3 4
import matplotlib.pyplot as plt
# result = ocr.ocr(img, det=True, cls=True)
# print(result)
# plt.imshow(img)
# plt.show()
# """
count_steps = 0
epoch_num = 3
while True:
if epoch_num == 0:
break
import time
time.sleep(2)
win_rect, img= screen.get_screenshot()
# img_path = './img/harry_darkclass3.png' #
# img = cv2.imread(img_path)
# print(img.shape)
# img = img[875:920,1185:1300] # [1185, 875, 1300, 920]
# img = img[830:880, 1234:1414] # [1234,830,1414,880]
#
flag1 = is_start(img, '')
flag2 = is_start(img, '')
if flag1 or flag2: #
epoch_num -= 1
continue
#
img_continue = img[875:920,1185:1300]
result_continue = ocr.ocr(img_continue)
content_continue = ocr.ocr_content(result_continue)
content_continue = filterLine(content_continue)
if len(content_continue)>0 and content_continue[0] == '':
x, y = 1200, 890
left_click(win_rect[0]+x,win_rect[1]+y,2)
time.sleep(1)
continue
img_steps, img_1, img_2, img_3, img_4, img_5 = '-1', '15', '15', '15', '15', '11'
img_steps = img[800:850, 200:265]
img_1 = img[710:777, 615:665] # 1
img_2 = img[710:777, 770:820] # 2
img_3 = img[710:777, 920:970] # 3
img_4 = img[720:787, 1060:1110] # 4
img_nextcard = img[768:816, 1205:1246,::-1] #
img_5 = img[878:932,556:637] #
result_steps = ocr.ocr(img_steps)
result_1 = ocr.ocr(img_1)
result_2 = ocr.ocr(img_2)
result_3 = ocr.ocr(img_3)
result_4 = ocr.ocr(img_4)
result_nextcard = ocr.ocr(img_nextcard)
result_5 = ocr.ocr(img_5)
result_steps = ocr.ocr_content(result_steps)
result_steps = filterLine(result_steps)
result_1 = ocr.ocr_content(result_1)
result_1 = filterLine(result_1)
result_2 = ocr.ocr_content(result_2)
result_2 = filterLine(result_2)
result_3 = ocr.ocr_content(result_3)
result_3 = filterLine(result_3)
result_4 = ocr.ocr_content(result_4)
result_4 = filterLine(result_4)
result_5 = ocr.ocr_content(result_5)
result_5 = filterLine(result_5)
if (result_steps!=None) and len(result_steps) > 0 and result_steps[0].isdigit():
result_steps = int(result_steps[0][0][0])
else:
result_steps = 0
if (result_1!=None) and len(result_1) > 0 and result_1[0].isdigit():
result_1 = int(result_1[0][0][0])
else:
result_1 = 15
if (result_2!=None) and len(result_2) > 0 and result_2[0].isdigit():
result_2 = int(result_2[0][0][0])
else:
result_2 = 15
if (result_3!=None) and len(result_3) > 0 and result_3[0].isdigit():
result_3 = int(result_3[0][0][0])
else:
result_3 = 15
if (result_4!=None) and len(result_4) > 0 and result_4[0].isdigit():
result_4 = int(result_4[0][0][0])
else:
result_4 = 15
if (result_5!=None) and len(result_5) > 0 and result_5[0].isdigit():
result_5 = int(result_5[0][0][0])
else:
result_5 = -1
fee = [result_1,result_2,result_3,result_4]
idx = fee.index(min(fee))
import random
# idx = random.randint(0, 3)
# if fee[idx]>7:
# continue
walk_idx = random.randint(0, 2)
x_walk, y_walk = walk_coordinate[walk_idx][0], walk_coordinate[walk_idx][1]
x_0, y_0 = card_coordinate[0][0], card_coordinate[0][1] #
x, y = card_coordinate[idx+1][0], card_coordinate[idx+1][1]
if result_5 == -1 or result_5 > 5:
if count_steps % 3 == 0:
left_click(win_rect[0]+x_walk,win_rect[1]+y_walk,4) #
left_click(win_rect[0]+x_0,win_rect[1]+y_0,4) #
count_steps += 1
left_click(win_rect[0]+x,win_rect[1]+y,4) #
print('',result_steps)
print('1',result_1)
print('2',result_2)
print('3',result_3)
print('4',result_4)
print('',result_5)
print('', x, y)
# """
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
# cv2.imwrite('./img/harry_charmsclass.png',img) | 34.423077 | 162 | 0.65012 |
362bc4e36845077cd1de93becd4b863b9767b65f | 521 | py | Python | lt_104.py | fdzhonglin/trees | 9a13412a5c424560722abf2caac797540fa508e4 | [
"MIT"
] | null | null | null | lt_104.py | fdzhonglin/trees | 9a13412a5c424560722abf2caac797540fa508e4 | [
"MIT"
] | null | null | null | lt_104.py | fdzhonglin/trees | 9a13412a5c424560722abf2caac797540fa508e4 | [
"MIT"
] | null | null | null | # standard traversal problem | 30.647059 | 91 | 0.589251 |
362d167af1df22dfcc0fab4281874b494b14c018 | 826 | py | Python | src/anim.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-05-14T06:37:14.000Z | 2022-03-07T02:25:32.000Z | src/anim.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-10-08T19:41:18.000Z | 2021-04-08T04:40:50.000Z | src/anim.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | null | null | null | import pygame.mixer
from vec2d import Vec2d
from saveable import Saveable
| 25.030303 | 83 | 0.596852 |
362e01958a44c444693e75555e77973e632954c9 | 5,926 | py | Python | nevermined_compute_api/workflow_utils.py | nevermined-io/compute-api | c0d3b1875b3b95ffa78374ff89a4fefd0d3af598 | [
"Apache-2.0"
] | null | null | null | nevermined_compute_api/workflow_utils.py | nevermined-io/compute-api | c0d3b1875b3b95ffa78374ff89a4fefd0d3af598 | [
"Apache-2.0"
] | 3 | 2020-11-20T11:57:04.000Z | 2021-04-06T10:56:49.000Z | nevermined_compute_api/workflow_utils.py | nevermined-io/compute-api | c0d3b1875b3b95ffa78374ff89a4fefd0d3af598 | [
"Apache-2.0"
] | null | null | null | import os
from pathlib import Path
import json
from contracts_lib_py.utils import get_account
from common_utils_py.ddo.ddo import DDO
from nevermined_sdk_py import Nevermined, Config
import yaml
from configparser import ConfigParser
config_parser = ConfigParser()
configuration = config_parser.read('config.ini')
GROUP = config_parser.get('resources', 'group') # str | The custom resource's group name
VERSION = config_parser.get('resources', 'version') # str | The custom resource's version
NAMESPACE = config_parser.get('resources', 'namespace') # str | The custom resource's namespace
KEYFILE = json.loads(Path(os.getenv("PROVIDER_KEYFILE")).read_text())
def create_execution(service_agreement_id, workflow):
    """Creates the argo workflow template

    Args:
        service_agreement_id (str): The id of the service agreement being executed
        workflow (dict): The workflow submitted to the compute api

    Returns:
        dict: The workflow template filled by the compute api with all the parameters
    """
    ddo = DDO(dictionary=workflow)
    # Start from the bundled template and stamp in the configured custom
    # resource group/version and the target Kubernetes namespace.
    workflow_template = get_workflow_template()
    workflow_template['apiVersion'] = GROUP + '/' + VERSION
    workflow_template['metadata']['namespace'] = NAMESPACE
    # Append the run-specific parameters and label the workflow so it can
    # later be looked up by its service agreement id.
    workflow_template['spec']['arguments']['parameters'] += create_arguments(ddo)
    workflow_template["spec"]["workflowMetadata"]["labels"][
        "serviceAgreementId"] = service_agreement_id
    # Federated-learning coordinators use a dedicated entrypoint; every
    # other asset type runs the generic compute workflow.
    if ddo.metadata["main"]["type"] == "fl-coordinator":
        workflow_template["spec"]["entrypoint"] = "coordinator-workflow"
    else:
        workflow_template["spec"]["entrypoint"] = "compute-workflow"
    return workflow_template
def create_arguments(ddo):
    """Create the arguments that need to be added to the argo template.

    For non-coordinator workflows the transformation (algorithm) asset is
    resolved from the metadata service to obtain its entrypoint and
    container image; coordinator workflows leave those fields empty.

    Args:
        ddo (:py:class:`common_utils_py.ddo.ddo.DDO`): The workflow DDO.

    Returns:
        list: The list of arguments to be appended to the argo workflow
    """
    args = ''
    image = ''
    tag = ''
    if ddo.metadata["main"]["type"] != "fl-coordinator":
        workflow = ddo.metadata["main"]["workflow"]
        # NOTE(review): service endpoints are hard-coded to the Docker
        # bridge gateway (172.17.0.1) — presumably a local/dev setup.
        options = {
            "resources": {
                "metadata.url": "http://172.17.0.1:5000",
            },
            "keeper-contracts": {
                "keeper.url": "http://172.17.0.1:8545"
            }
        }
        config = Config(options_dict=options)
        nevermined = Nevermined(config)
        # TODO: Currently this only supports one stage
        transformation_did = workflow["stages"][0]["transformation"]["id"]
        transformation_ddo = nevermined.assets.resolve(transformation_did)
        transformation_metadata = transformation_ddo.get_service("metadata")
        # get args and container
        args = transformation_metadata.main["algorithm"]["entrypoint"]
        image = transformation_metadata.main["algorithm"]["requirements"]["container"]["image"]
        tag = transformation_metadata.main["algorithm"]["requirements"]["container"]["tag"]
    # Parameters consumed by the argo workflow template; names must match
    # the placeholders declared in argo-workflow.yaml.
    arguments = [
        {
            "name": "credentials",
            # remove white spaces
            "value": json.dumps(KEYFILE, separators=(",", ":"))
        },
        {
            "name": "password",
            "value": os.getenv("PROVIDER_PASSWORD")
        },
        {
            "name": "metadata_url",
            "value": "http://172.17.0.1:5000"
        },
        {
            "name": "gateway_url",
            "value": "http://172.17.0.1:8030"
        },
        {
            "name": "node",
            "value": "http://172.17.0.1:8545"
        },
        {
            "name": "secret_store_url",
            "value": "http://172.17.0.1:12001"
        },
        {
            "name": "workflow",
            # asset_id is "0x"-prefixed; strip it to build the DID.
            "value": f"did:nv:{ddo.asset_id[2:]}"
        },
        {
            "name": "verbose",
            "value": "false"
        },
        {
            "name": "transformation_container_image",
            "value": f"{image}:{tag}"
        },
        {
            "name": "transformation_arguments",
            "value": args
        }
    ]
    return arguments
def get_workflow_template():
    """Load the base Argo workflow template shipped next to this module.

    Returns:
        dict: The parsed contents of ``argo-workflow.yaml``.
    """
    template_path = Path(__file__).parent / "argo-workflow.yaml"
    return yaml.safe_load(template_path.read_text())
| 35.065089 | 99 | 0.602599 |
362f493d8462bb8006f529fc1fed6929dd628362 | 1,206 | py | Python | providers/scoop_mock_provider.py | prezesp/scoop-viewer | 115f413979ba2e4e766e334f0240082a9343e314 | [
"MIT"
] | 86 | 2018-07-17T14:21:05.000Z | 2022-03-29T03:00:40.000Z | providers/scoop_mock_provider.py | prezesp/scoop-viewer | 115f413979ba2e4e766e334f0240082a9343e314 | [
"MIT"
] | 16 | 2018-04-24T22:45:24.000Z | 2021-12-15T08:37:38.000Z | providers/scoop_mock_provider.py | prezesp/scoop-viewer | 115f413979ba2e4e766e334f0240082a9343e314 | [
"MIT"
] | 5 | 2018-03-28T18:24:52.000Z | 2022-01-08T11:28:31.000Z | """ Module to interact with scoop. """
from subprocess import Popen, PIPE # nosec
import os
| 32.594595 | 82 | 0.609453 |
362ff49962e9b464199213d8822138a4aa8efdf5 | 515 | py | Python | services/movies_streaming_converter/src/models/convertation.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | null | null | null | services/movies_streaming_converter/src/models/convertation.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | null | null | null | services/movies_streaming_converter/src/models/convertation.py | fuodorov/yacinema | 43ad869575fbaab7c7056229538638666aa87110 | [
"MIT"
] | 1 | 2021-09-30T09:49:40.000Z | 2021-09-30T09:49:40.000Z | import datetime
import uuid
from typing import Optional
from models.base import CustomBaseModel
| 21.458333 | 59 | 0.751456 |
36307c13abd4a232603e88d4d656fa8c1d5c6d39 | 3,965 | py | Python | flasc/circular_statistics.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 3 | 2022-01-23T19:33:32.000Z | 2022-03-14T10:29:36.000Z | flasc/circular_statistics.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 2 | 2022-03-02T20:45:30.000Z | 2022-03-22T18:49:24.000Z | flasc/circular_statistics.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 4 | 2022-02-17T18:40:36.000Z | 2022-03-24T05:44:31.000Z | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from floris.utilities import wrap_360
# def calc_wd_mean_radial_list(angles_array_list):
# if isinstance(angles_array_list, (pd.DataFrame, pd.Series)):
# array = np.array(angles_array_list)
# elif isinstance(angles_array_list, list):
# array = np.vstack(angles_array_list).T
# else:
# array = np.array(angles_array_list)
# # Use unit vectors to calculate the mean
# dir_x = np.cos(array * np.pi / 180.).sum(axis=1)
# dir_y = np.sin(array * np.pi / 180.).sum(axis=1)
# mean_dirs = np.arctan2(dir_y, dir_x)
# mean_out = wrap_360(mean_dirs * 180. / np.pi)
# return mean_out
def calculate_wd_statistics(angles_array_deg, axis=0,
                            calc_median_min_max_std=True):
    """Determine statistical properties of an array of wind directions.
    This includes the mean of the array, the median, the standard deviation,
    the minimum value and the maximum value.

    Args:
        angles_array_deg ([float/int]): Array of angles in degrees
        axis (int): Axis along which the statistics are computed.
            Defaults to 0.
        calc_median_min_max_std (bool): If False, only the circular mean
            is computed and returned. Defaults to True.

    Returns:
        mean_wd (float): Mean wind direction in [0, 360] deg
        median_wd (float): Median wind direction in [0, 360] deg
        std_wd (float): Standard deviation in deg
        min_wd (float): Minimum wind direction in [0, 360] deg
        max_wd (float): Maximum wind direction in [0, 360] deg
    """
    # Preprocessing: copy to a float array and wrap all angles to [0, 360).
    angles_array_deg = np.array(angles_array_deg, dtype=float)
    angles_array_deg = wrap_360(angles_array_deg)
    # Check for unique cases
    # Empty input: return NaN for every requested statistic.
    if angles_array_deg.shape[0] <= 0:
        if calc_median_min_max_std:
            return np.nan, np.nan, np.nan, np.nan, np.nan
        else:
            return np.nan
    # Degenerate input (all values identical): statistics are trivial.
    if np.unique(angles_array_deg).shape[0] == 1:
        mean_wd = angles_array_deg[0]
        if not calc_median_min_max_std:
            return mean_wd
        median_wd = angles_array_deg[0]
        std_wd = 0.0
        min_wd = angles_array_deg[0]
        max_wd = angles_array_deg[0]
        return mean_wd, median_wd, std_wd, min_wd, max_wd
    # Calculate the circular (radial) mean.
    # NOTE(review): calc_wd_mean_radial is defined elsewhere in this module.
    mean_wd = calc_wd_mean_radial(angles_array_deg, axis=axis)
    # Return if we dont need to calculate statistical properties
    if not calc_median_min_max_std:
        return mean_wd
    # Upsample mean_wd for next calculations: broadcast the per-slice mean
    # back to the shape of the input along `axis`.
    new_shape = list(mean_wd.shape)
    new_shape.insert(axis, 1)  # Add dimension at axis
    new_shape = tuple(new_shape)
    mean_wd_full = mean_wd.reshape(new_shape).repeat(
        angles_array_deg.shape[axis], axis=axis)
    # Copy angles_array_deg and wrap values around its mean value
    # NOTE(review): this is an alias, not a copy — safe only because the
    # input was already copied by np.array() at the top of the function.
    # Shifting values into (mean - 180, mean + 180] makes the linear
    # median/std/min/max meaningful for circular data.
    angles_wrp = angles_array_deg
    angles_wrp[angles_wrp > (mean_wd_full + 180.)] += -360.
    angles_wrp[angles_wrp < (mean_wd_full - 180.)] += 360.
    median_wd = wrap_360(np.nanmedian(angles_wrp, axis=axis))
    std_wd = np.nanstd(angles_wrp, axis=axis)
    min_wd = wrap_360(np.nanmin(angles_wrp, axis=axis))
    max_wd = wrap_360(np.nanmax(angles_wrp, axis=axis))
    return mean_wd, median_wd, std_wd, min_wd, max_wd
| 35.088496 | 79 | 0.682219 |
36323555756558519c34b677df24af6e2865a756 | 2,797 | py | Python | src/cltl/backend/source/pyaudio_source.py | leolani/cltl-backend | 4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4 | [
"MIT"
] | null | null | null | src/cltl/backend/source/pyaudio_source.py | leolani/cltl-backend | 4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4 | [
"MIT"
] | null | null | null | src/cltl/backend/source/pyaudio_source.py | leolani/cltl-backend | 4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4 | [
"MIT"
] | null | null | null | import logging
import uuid
from typing import Iterable
import numpy as np
import pyaudio
from cltl.backend.api.util import raw_frames_to_np
from cltl.backend.spi.audio import AudioSource
logger = logging.getLogger(__name__)
| 27.15534 | 98 | 0.620665 |
363237db275189c9b2a840bb149422bab3cd8c25 | 21,097 | py | Python | toolkit4nlp/optimizers.py | xv44586/toolkit4nlp | 0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd | [
"Apache-2.0"
] | 94 | 2020-07-16T03:07:59.000Z | 2022-03-13T08:06:30.000Z | toolkit4nlp/optimizers.py | xv44586/toolkit4nlp | 0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd | [
"Apache-2.0"
] | 14 | 2020-11-24T04:26:26.000Z | 2021-09-13T02:44:51.000Z | toolkit4nlp/optimizers.py | xv44586/toolkit4nlp | 0ca8c45efe4ad4c6dc20b47016a13326aadcd0bd | [
"Apache-2.0"
] | 17 | 2020-09-04T07:24:24.000Z | 2021-11-19T06:35:18.000Z | # -*- coding: utf-8 -*-
# @Date : 2020/7/6
# @Author : mingming.xu
# @Email : xv44586@gmail.com
# @File : optimizers.py
import re
import numpy as np
import tensorflow as tf
from keras.optimizers import *
from toolkit4nlp.backend import keras, K, is_tf_keras, piecewise_linear
from toolkit4nlp.utils import *
def export_to_custom_objects(extend_with_func):
return new_extend_with_func
# keras or tf.keras
if is_tf_keras:
extend_with_piecewise_linear_lr = extend_with_piecewise_linear_lr_tf2
extend_with_gradient_accumulation = extend_with_gradient_accumulation_tf2
extend_with_weight_decay = extend_with_weight_decay_tf2
AdaBelief = AdaBeliefTf
else:
Adam = keras.optimizers.Adam
custom_objects = {
'Adam': Adam,
'AdaBelief': AdaBelief,
}
keras.utils.get_custom_objects().update(custom_objects)
| 37.273852 | 118 | 0.593592 |
3632e12e345819f464e0f6feced15ba246770c00 | 5,832 | py | Python | quadpy/triangle/_laursen_gellert.py | dariusarnold/quadpy | 9dc7c1ebff99d15ae57ed9195cde94d97a599be8 | [
"MIT"
] | null | null | null | quadpy/triangle/_laursen_gellert.py | dariusarnold/quadpy | 9dc7c1ebff99d15ae57ed9195cde94d97a599be8 | [
"MIT"
] | null | null | null | quadpy/triangle/_laursen_gellert.py | dariusarnold/quadpy | 9dc7c1ebff99d15ae57ed9195cde94d97a599be8 | [
"MIT"
] | null | null | null | from sympy import Rational as frac
from ..helpers import article
from ._helpers import TriangleScheme, concat, s1, s2, s3
citation = article(
authors=["M.E. Laursen", "M. Gellert"],
title="Some criteria for numerically integrated matrices and quadrature formulas for triangles",
journal="International Journal for Numerical Methods in Engineering",
volume="12",
number="1",
year="1978",
pages="6776",
url="https://doi.org/10.1002/nme.1620120107",
)
| 32.043956 | 100 | 0.641632 |
36359877c7a4f6573f92718849e22bc0b0b933eb | 624 | py | Python | python2/examples/tutorial_threadednotifier.py | openEuler-BaseService/pyinotify | d6c8b832177945106901fb6c0cd5ae7d54df8247 | [
"MIT"
] | 1,509 | 2015-01-04T01:20:06.000Z | 2022-03-29T08:06:41.000Z | python2/examples/tutorial_threadednotifier.py | openEuler-BaseService/pyinotify | d6c8b832177945106901fb6c0cd5ae7d54df8247 | [
"MIT"
] | 98 | 2015-01-09T20:58:57.000Z | 2022-03-29T11:53:44.000Z | python2/examples/tutorial_threadednotifier.py | openEuler-BaseService/pyinotify | d6c8b832177945106901fb6c0cd5ae7d54df8247 | [
"MIT"
] | 333 | 2015-01-02T09:22:01.000Z | 2022-03-24T01:51:40.000Z | # ThreadedNotifier example from tutorial
#
# See: http://github.com/seb-m/pyinotify/wiki/Tutorial
#
import pyinotify
wm = pyinotify.WatchManager() # Watch Manager
mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE # watched events
#log.setLevel(10)
notifier = pyinotify.ThreadedNotifier(wm, EventHandler())
notifier.start()
wdd = wm.add_watch('/tmp', mask, rec=True)
wm.rm_watch(wdd.values())
notifier.stop()
| 24 | 66 | 0.735577 |
36360d07dd0f1e6bcc68b6986125359b768850eb | 885 | py | Python | VersionMonitorDeamonForPy/deamon/ZTest.py | xblia/Upgrade-service-for-java-application | 6118cb270daba5d6511f41a2b3f0784c5a444c17 | [
"Apache-2.0"
] | null | null | null | VersionMonitorDeamonForPy/deamon/ZTest.py | xblia/Upgrade-service-for-java-application | 6118cb270daba5d6511f41a2b3f0784c5a444c17 | [
"Apache-2.0"
] | null | null | null | VersionMonitorDeamonForPy/deamon/ZTest.py | xblia/Upgrade-service-for-java-application | 6118cb270daba5d6511f41a2b3f0784c5a444c17 | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
'''/*
* Copyright 2015 lixiaobo
*
* VersionUpgrade project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/'''
'''
Created on 20151230
@author: xiaobolx
'''
import os
if __name__ == '__main__':
    # One-off rename of a build artifact; paths are machine-specific and
    # will raise FileNotFoundError on any other machine.
    os.rename(r"D:\eclipse_workspace\VersionMonitorDeamonForPy\build\aaa", r"D:\eclipse_workspace\VersionMonitorDeamonForPy\build\exe.win32xxxx")
3636162e87cf5572646ae4d4770a37dc7c29083e | 9,158 | py | Python | ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py | andy-Chien/timda_dual_arm | 94170d8889218ea0dc4e6031dcbbf59b7e37e70c | [
"MIT"
] | 3 | 2020-02-17T12:56:22.000Z | 2020-09-30T11:17:03.000Z | ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py | andy-Chien/timda_dual_arm | 94170d8889218ea0dc4e6031dcbbf59b7e37e70c | [
"MIT"
] | 12 | 2019-05-14T12:24:02.000Z | 2020-03-24T14:00:48.000Z | ROBOTIS/DynamixelSDK/python/tests/protocol2_0/sync_read_write.py | andy-Chien/timda_dual_arm | 94170d8889218ea0dc4e6031dcbbf59b7e37e70c | [
"MIT"
] | 9 | 2021-02-01T08:20:53.000Z | 2021-09-17T05:52:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Author: Ryu Woon Jung (Leon)
#
# ********* Sync Read and Sync Write Example *********
#
#
# Available Dynamixel model on this example : All models using Protocol 2.0
# This example is tested with two Dynamixel PRO 54-200, and an USB2DYNAMIXEL
# Be sure that Dynamixel PRO properties are already set as %% ID : 1 / Baudnum : 1 (Baudrate : 57600)
#
import os
if os.name == 'nt':
import msvcrt
else:
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
from dynamixel_sdk import * # Uses Dynamixel SDK library
# Control table address
ADDR_PRO_TORQUE_ENABLE = 64 # Control table address is different in Dynamixel model
ADDR_PRO_GOAL_POSITION = 116
ADDR_PRO_PRESENT_POSITION = 132
# Data Byte Length
LEN_PRO_GOAL_POSITION = 4
LEN_PRO_PRESENT_POSITION = 4
# Protocol version
PROTOCOL_VERSION = 2.0 # See which protocol version is used in the Dynamixel
# Default setting
DXL1_ID = 1 # Dynamixel#1 ID : 1
DXL2_ID = 2 # Dynamixel#1 ID : 2
BAUDRATE = 57600 # Dynamixel default baudrate : 57600
DEVICENAME = '/dev/ttyUSB0' # Check which port is being used on your controller
# ex) Windows: "COM1" Linux: "/dev/ttyUSB0" Mac: "/dev/tty.usbserial-*"
TORQUE_ENABLE = 1 # Value for enabling the torque
TORQUE_DISABLE = 0 # Value for disabling the torque
DXL_MINIMUM_POSITION_VALUE = 100 # Dynamixel will rotate between this value
DXL_MAXIMUM_POSITION_VALUE = 4000 # and this value (note that the Dynamixel would not move when the position value is out of movable range. Check e-manual about the range of the Dynamixel you use.)
DXL_MOVING_STATUS_THRESHOLD = 20 # Dynamixel moving status threshold
index = 0
dxl_goal_position = [DXL_MINIMUM_POSITION_VALUE, DXL_MAXIMUM_POSITION_VALUE] # Goal position
# Initialize PortHandler instance
# Set the port path
# Get methods and members of PortHandlerLinux or PortHandlerWindows
portHandler = PortHandler(DEVICENAME)
# Initialize PacketHandler instance
# Set the protocol version
# Get methods and members of Protocol1PacketHandler or Protocol2PacketHandler
packetHandler = PacketHandler(PROTOCOL_VERSION)
# Initialize GroupSyncWrite instance (broadcast goal positions to both servos in one packet)
groupSyncWrite = GroupSyncWrite(portHandler, packetHandler, ADDR_PRO_GOAL_POSITION, LEN_PRO_GOAL_POSITION)
# Initialize GroupSyncRead instance for Present Position
groupSyncRead = GroupSyncRead(portHandler, packetHandler, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
# Open port
# NOTE(review): getch() is used throughout but is not defined in the visible
# portion of this file; the upstream SDK example defines it right after the
# platform-specific tty imports -- confirm it survives in the full source.
if portHandler.openPort():
    print("Succeeded to open the port")
else:
    print("Failed to open the port")
    print("Press any key to terminate...")
    getch()
    quit()
# Set port baudrate
if portHandler.setBaudRate(BAUDRATE):
    print("Succeeded to change the baudrate")
else:
    print("Failed to change the baudrate")
    print("Press any key to terminate...")
    getch()
    quit()
# Enable Dynamixel#1 Torque (servo will hold/track goal positions once enabled)
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
    print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
    print("Dynamixel#%d has been successfully connected" % DXL1_ID)
# Enable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_ENABLE)
if dxl_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
    print("%s" % packetHandler.getRxPacketError(dxl_error))
else:
    print("Dynamixel#%d has been successfully connected" % DXL2_ID)
# Add parameter storage for Dynamixel#1 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL1_ID)
if dxl_addparam_result != True:
    print("[ID:%03d] groupSyncRead addparam failed" % DXL1_ID)
    quit()
# Add parameter storage for Dynamixel#2 present position value
dxl_addparam_result = groupSyncRead.addParam(DXL2_ID)
if dxl_addparam_result != True:
    print("[ID:%03d] groupSyncRead addparam failed" % DXL2_ID)
    quit()
# Main demo loop: each keypress toggles both servos between the minimum and
# maximum goal positions (via `index`); ESC (0x1b) exits the loop.
while 1:
    print("Press any key to continue! (or press ESC to quit!)")
    if getch() == chr(0x1b):
        break
    # Allocate goal position value into byte array (little-endian 4-byte split)
    param_goal_position = [DXL_LOBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_LOWORD(dxl_goal_position[index])), DXL_LOBYTE(DXL_HIWORD(dxl_goal_position[index])), DXL_HIBYTE(DXL_HIWORD(dxl_goal_position[index]))]
    # Add Dynamixel#1 goal position value to the Syncwrite parameter storage
    dxl_addparam_result = groupSyncWrite.addParam(DXL1_ID, param_goal_position)
    if dxl_addparam_result != True:
        print("[ID:%03d] groupSyncWrite addparam failed" % DXL1_ID)
        quit()
    # Add Dynamixel#2 goal position value to the Syncwrite parameter storage
    dxl_addparam_result = groupSyncWrite.addParam(DXL2_ID, param_goal_position)
    if dxl_addparam_result != True:
        print("[ID:%03d] groupSyncWrite addparam failed" % DXL2_ID)
        quit()
    # Syncwrite goal position (one packet commands both servos)
    dxl_comm_result = groupSyncWrite.txPacket()
    if dxl_comm_result != COMM_SUCCESS:
        print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
    # Clear syncwrite parameter storage
    groupSyncWrite.clearParam()
    # Blocking wait: poll present positions until BOTH servos are within
    # DXL_MOVING_STATUS_THRESHOLD of the goal.
    while 1:
        # Syncread present position
        dxl_comm_result = groupSyncRead.txRxPacket()
        if dxl_comm_result != COMM_SUCCESS:
            print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
        # Check if groupsyncread data of Dynamixel#1 is available
        dxl_getdata_result = groupSyncRead.isAvailable(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
        if dxl_getdata_result != True:
            print("[ID:%03d] groupSyncRead getdata failed" % DXL1_ID)
            quit()
        # Check if groupsyncread data of Dynamixel#2 is available
        dxl_getdata_result = groupSyncRead.isAvailable(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
        if dxl_getdata_result != True:
            print("[ID:%03d] groupSyncRead getdata failed" % DXL2_ID)
            quit()
        # Get Dynamixel#1 present position value
        dxl1_present_position = groupSyncRead.getData(DXL1_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
        # Get Dynamixel#2 present position value
        dxl2_present_position = groupSyncRead.getData(DXL2_ID, ADDR_PRO_PRESENT_POSITION, LEN_PRO_PRESENT_POSITION)
        print("[ID:%03d] GoalPos:%03d  PresPos:%03d\t[ID:%03d] GoalPos:%03d  PresPos:%03d" % (DXL1_ID, dxl_goal_position[index], dxl1_present_position, DXL2_ID, dxl_goal_position[index], dxl2_present_position))
        if not ((abs(dxl_goal_position[index] - dxl1_present_position) > DXL_MOVING_STATUS_THRESHOLD) and (abs(dxl_goal_position[index] - dxl2_present_position) > DXL_MOVING_STATUS_THRESHOLD)):
            break
    # Change goal position (toggle between min and max for the next keypress)
    if index == 0:
        index = 1
    else:
        index = 0
# Clear syncread parameter storage
groupSyncRead.clearParam()
# Disable Dynamixel#1 Torque (release the servo before closing the port)
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL1_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
    print("%s" % packetHandler.getRxPacketError(dxl_error))
# Disable Dynamixel#2 Torque
dxl_comm_result, dxl_error = packetHandler.write1ByteTxRx(portHandler, DXL2_ID, ADDR_PRO_TORQUE_ENABLE, TORQUE_DISABLE)
if dxl_comm_result != COMM_SUCCESS:
    print("%s" % packetHandler.getTxRxResult(dxl_comm_result))
elif dxl_error != 0:
    print("%s" % packetHandler.getRxPacketError(dxl_error))
# Close port
portHandler.closePort()
| 40.166667 | 226 | 0.700371 |
3636470ba1388bdc81e02a4d210d625e92578097 | 2,063 | py | Python | models/globalsenti.py | movabo/newstsc | dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed | [
"MIT"
] | 3 | 2021-02-28T19:14:49.000Z | 2022-03-29T12:10:14.000Z | models/globalsenti.py | movabo/newstsc | dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed | [
"MIT"
] | null | null | null | models/globalsenti.py | movabo/newstsc | dcf0cff31c0e463c9a96cdaa24e9b662ed53f7ed | [
"MIT"
] | 1 | 2021-05-13T10:27:12.000Z | 2021-05-13T10:27:12.000Z | # -*- coding: utf-8 -*-
# file: lcf_bert.py
# author: yangheng <yangheng@m.scnu.edu.cn>
# Copyright (C) 2019. All Rights Reserved.
# The code is based on repository: https://github.com/yangheng95/LCF-ABSA
import torch
import torch.nn as nn
from models.lcf import LCF_BERT
| 38.924528 | 118 | 0.713039 |
36364741a2a1bcdc096a9a1390acb2038c00084b | 10,351 | py | Python | analysis/outflows/__init__.py | lconaboy/seren3 | 5a2ec80adf0d69664d2ee874f5ba12cc02d6c337 | [
"CNRI-Python"
] | 1 | 2017-09-21T14:58:23.000Z | 2017-09-21T14:58:23.000Z | analysis/outflows/__init__.py | lconaboy/seren3 | 5a2ec80adf0d69664d2ee874f5ba12cc02d6c337 | [
"CNRI-Python"
] | 1 | 2020-09-09T08:52:43.000Z | 2020-09-09T08:52:43.000Z | analysis/outflows/__init__.py | lconaboy/seren3 | 5a2ec80adf0d69664d2ee874f5ba12cc02d6c337 | [
"CNRI-Python"
] | 1 | 2019-01-21T10:57:41.000Z | 2019-01-21T10:57:41.000Z | def integrate_surface_flux(flux_map, r):
'''
Integrates a healpix surface flux to compute the total
net flux out of the sphere.
r is the radius of the sphere in meters
'''
import numpy as np
import healpy as hp
from scipy.integrate import trapz
from seren3.array import SimArray
if not ((isinstance(flux_map, SimArray) or isinstance(r, SimArray))):
raise Exception("Must pass SimArrays")
# Compute theta/phi
npix = len(flux_map)
nside = hp.npix2nside(npix)
# theta, phi = hp.pix2ang(nside, range(npix))
theta, phi = hp.pix2ang(nside, range(npix))
r = r.in_units("kpc") # make sure r is in meters
# Compute the integral
# integrand = np.zeros(len(theta))
ix = theta.argsort()
integrand = r**2 * np.sin(theta[ix]) * flux_map[ix]
# for i in range(len(theta)):
# th, ph = (theta[i], phi[i])
# integrand[i] = r**2 * np.sin(th) * flux_map[i] # mass_flux_radial function already deals with unit vev
# integrand = integrand[:, None] + np.zeros(len(phi)) # 2D over theta and phi
# I = trapz(trapz(integrand, phi), theta)
I = trapz(integrand, theta[ix]) * 2.*np.pi
return SimArray(I, "Msol yr**-1")
def dm_by_dt(subsnap, filt=False, **kwargs):
    '''
    Compute the mass flux of the gas field (subsnap.g) through a spherical
    surface at one quarter of the virial radius.

    Parameters
    ----------
    subsnap : seren3 halo/region sub-snapshot exposing .region, .info, .g
        and .pynbody_snapshot().
    filt : bool
        Forwarded to pynbody_snapshot().
    **kwargs : forwarded to render_spherical.render_quantity; "s" may be
        supplied to reuse an existing pynbody snapshot, "nside" defaults
        to 8 if absent; "radius" and "denoise" are overridden here.

    Returns
    -------
    ((F, F_plus, F_minus), im) : net, outflowing and inflowing fluxes plus
        the rendered healpix map (converted to Msol yr**-1 kpc**-2).
    '''
    import numpy as np
    from seren3.array import SimArray
    from seren3.analysis.render import render_spherical
    reload(render_spherical)  # Python 2 builtin reload; picks up in-development edits to the module
    rvir = SimArray(subsnap.region.radius, subsnap.info["unit_length"])
    # Surface placed at rvir/4 (the full-rvir alternative is kept commented out)
    to_distance = rvir/4.
    # to_distance = rvir
    in_units = "kg s**-1 m**-2"
    s = kwargs.pop("s", subsnap.pynbody_snapshot(filt=filt))
    if "nside" not in kwargs:
        kwargs["nside"] = 2**3
    kwargs["radius"] = to_distance
    kwargs["denoise"] = True
    im = render_spherical.render_quantity(subsnap.g, "mass_flux_radial", s=s, in_units=in_units, out_units=in_units, **kwargs)
    im.convert_units("Msol yr**-1 kpc**-2")
    # NOTE(review): _compute_flux is not defined in the visible portion of this
    # module -- presumably a sibling helper (wrapping integrate_surface_flux?)
    # with an in/out direction selector. Confirm against the full source.
    F = _compute_flux(im, to_distance)
    F_plus = _compute_flux(im, to_distance, direction="out")
    F_minus = _compute_flux(im, to_distance, direction="in")
    return (F, F_plus, F_minus), im
def mass_flux_hist(halo, back_to_aexp, return_data=True, **kwargs):
    '''
    Compute the history of in/outflows for a halo by walking its
    progenitors (consistent-trees finder) back to expansion factor
    back_to_aexp.

    Returns None if the halo is absent from the precomputed mass-flux
    database for this output. Otherwise returns F alone, or
    (F, age_arr, lbtime, hids, iouts) when return_data is True.
    '''
    import numpy as np
    from seren3.scripts.mpi import write_mass_flux_hid_dict
    # Precomputed per-halo mass-flux results; callers may pass an
    # already-loaded db via kwargs to avoid re-reading it from disk.
    db = kwargs.pop("db", write_mass_flux_hid_dict.load_db(halo.base.path, halo.base.ioutput))
    if (int(halo["id"]) in db.keys()):
        catalogue = halo.base.halos(finder="ctrees")
        # Accumulators filled by _compute (see NOTE below)
        F = []
        age_arr = []
        hids = []
        iouts = []
        # NOTE(review): _compute is not defined in the visible portion of this
        # module; it presumably appends to F/age_arr/hids/iouts for each
        # (halo, db) pair -- confirm against the full source.
        _compute(halo, db)
        for prog in catalogue.iterate_progenitors(halo, back_to_aexp=back_to_aexp):
            prog_db = write_mass_flux_hid_dict.load_db(prog.base.path, prog.base.ioutput)
            if (int(prog["id"]) in prog_db.keys()):
                _compute(prog, prog_db)
            else:
                # Stop at the first progenitor missing from the database
                break
        F = np.array(F)
        age_arr = np.array(age_arr)
        hids = np.array(hids, dtype=np.int64)
        iouts = np.array(iouts)
        # Lookback time of each entry relative to the current snapshot age
        lbtime = halo.base.age - age_arr
        if return_data:
            return F, age_arr, lbtime, hids, iouts
        return F
    else:
        return None
def fesc_tot_outflow(snapshot):
    '''
    Collect, for every halo in the snapshot, the total number of escaped
    photons and the total mass outflowed (each integrated over the halo's
    history), together with the virial masses.

    Entries where either integral is NaN are filtered out before
    returning (nphotons_escaped, tot_mass_outflowed, mvir).
    '''
    import numpy as np
    from scipy.integrate import trapz
    from seren3.array import SimArray
    from seren3.scripts.mpi import time_int_fesc_all_halos, history_mass_flux_all_halos
    fesc_db = time_int_fesc_all_halos.load(snapshot)
    mass_flux_db = history_mass_flux_all_halos.load(snapshot)
    # Halo ids of the mass-flux results, for matching against the fesc results
    mass_flux_hids = np.array( [int(res.idx) for res in mass_flux_db] )
    nphotons_escaped = np.zeros(len(fesc_db))
    tot_mass_outflowed = np.zeros(len(fesc_db))
    mvir = np.zeros(len(fesc_db))
    for i in range(len(fesc_db)):
        hid = int(fesc_db[i].idx)
        fesc_res = fesc_db[i].result
        # Pair each fesc result with the mass-flux result whose halo id is
        # numerically closest (exact match expected when the halo is present)
        mass_flux_res_ix = np.abs(mass_flux_hids - hid).argmin()
        mass_flux_res = mass_flux_db[mass_flux_res_ix].result
        # NOTE(review): _integrate_halo is not defined in the visible portion
        # of this module -- presumably integrates the per-halo histories and
        # returns (photons escaped, mass outflowed). Confirm.
        nphotons_escaped[i], tot_mass_outflowed[i] = _integrate_halo(fesc_res, mass_flux_res)
        mvir[i] = fesc_res["Mvir"]
    # Drop haloes where either integral came out NaN
    ix = np.where( np.logical_and( ~np.isnan(nphotons_escaped), ~np.isnan(tot_mass_outflowed)) )
    nphotons_escaped = nphotons_escaped[ix]
    tot_mass_outflowed = tot_mass_outflowed[ix]
    mvir = mvir[ix]
    return nphotons_escaped, tot_mass_outflowed, mvir
def fesc_mean_time_outflow(snapshot):
    '''
    Collect, for every halo in the snapshot, the total number of escaped
    photons and the time spent in a state of net outflow, together with
    the virial masses.

    (The original docstring was copy-pasted from fesc_tot_outflow and
    spoke of "total mass outflowed"; the variable names and the >0 filter
    below show this variant accumulates a time quantity, returned in Gyr.)

    Entries where either value is NaN, or the outflow time is not
    positive, are filtered out before returning.
    '''
    import numpy as np
    from scipy.integrate import trapz
    from seren3.array import SimArray
    from seren3.scripts.mpi import time_int_fesc_all_halos, history_mass_flux_all_halos
    fesc_db = time_int_fesc_all_halos.load(snapshot)
    mass_flux_db = history_mass_flux_all_halos.load(snapshot)
    # Halo ids of the mass-flux results, for matching against the fesc results
    mass_flux_hids = np.array( [int(res.idx) for res in mass_flux_db] )
    nphotons_escaped = np.zeros(len(fesc_db))
    time_spent_net_outflow = np.zeros(len(fesc_db))
    mvir = np.zeros(len(fesc_db))
    for i in range(len(fesc_db)):
        hid = int(fesc_db[i].idx)
        fesc_res = fesc_db[i].result
        # Pair each fesc result with the closest-id mass-flux result
        mass_flux_res_ix = np.abs(mass_flux_hids - hid).argmin()
        mass_flux_res = mass_flux_db[mass_flux_res_ix].result
        # NOTE(review): _integrate_halo is not defined in the visible portion
        # of this module; here it must return (photons escaped, time in net
        # outflow) -- a different contract than in fesc_tot_outflow. Confirm.
        nphotons_escaped[i], time_spent_net_outflow[i] = _integrate_halo(fesc_res, mass_flux_res)
        mvir[i] = fesc_res["Mvir"]
    # Keep haloes with finite photon counts and a strictly positive outflow time
    ix = np.where( np.logical_and( ~np.isnan(nphotons_escaped),\
            np.logical_and(~np.isnan(time_spent_net_outflow),\
            time_spent_net_outflow > 0) ) )
    nphotons_escaped = nphotons_escaped[ix]
    time_spent_net_outflow = time_spent_net_outflow[ix]
    mvir = mvir[ix]
    return nphotons_escaped, SimArray(time_spent_net_outflow, "Gyr"), mvir
3637422656965fc8f3771e5007feaef41fa1973f | 2,859 | py | Python | evalution/composes/utils/matrix_utils.py | esantus/evalution2 | 622a9faf729b7c704ad45047911b9a03cf7c8dae | [
"MIT"
] | 1 | 2017-12-06T21:46:26.000Z | 2017-12-06T21:46:26.000Z | evalution/composes/utils/matrix_utils.py | esantus/EVALution-2.0 | 622a9faf729b7c704ad45047911b9a03cf7c8dae | [
"MIT"
] | 5 | 2020-03-24T15:27:40.000Z | 2021-06-01T21:47:18.000Z | evalution/composes/utils/matrix_utils.py | esantus/EVALution-2.0 | 622a9faf729b7c704ad45047911b9a03cf7c8dae | [
"MIT"
] | 1 | 2018-02-15T17:13:02.000Z | 2018-02-15T17:13:02.000Z |
import numpy as np
from composes.matrix.sparse_matrix import SparseMatrix
from composes.matrix.dense_matrix import DenseMatrix
from composes.matrix.matrix import Matrix
from scipy.sparse import issparse
from composes.utils.py_matrix_utils import is_array
from warnings import warn
def to_matrix(matrix_):
    """
    Wrap an array-like structure in the appropriate Matrix subclass.

    Scipy sparse inputs become a SparseMatrix; everything else becomes
    a DenseMatrix.
    """
    wrapper_cls = SparseMatrix if issparse(matrix_) else DenseMatrix
    return wrapper_cls(matrix_)
| 27.490385 | 84 | 0.666317 |
3637cf787bdf4e4784cdc6527a8256c98d6b4fec | 1,646 | py | Python | cpu/pipeline/writeback_unit.py | tim-roderick/simple-cpu-simulator | 334baf1934751527b7e5ffa0ad85d5e53e7215a1 | [
"MIT"
] | 2 | 2019-12-09T12:02:50.000Z | 2019-12-09T22:40:01.000Z | cpu/pipeline/writeback_unit.py | tim-roderick/simple-cpu-simulator | 334baf1934751527b7e5ffa0ad85d5e53e7215a1 | [
"MIT"
] | null | null | null | cpu/pipeline/writeback_unit.py | tim-roderick/simple-cpu-simulator | 334baf1934751527b7e5ffa0ad85d5e53e7215a1 | [
"MIT"
] | 1 | 2020-05-04T09:13:50.000Z | 2020-05-04T09:13:50.000Z | from .component import Component
from cpu.Memory import SCOREBOARD
from isa.Instructions import ALUInstruction as alu
| 38.27907 | 94 | 0.567436 |
36397c2f3323af879bfcf0a875f647ed132668eb | 273 | py | Python | ex026.py | juniorpedroso/Exercicios-CEV-Python | 4adad3b6f3994cf61f9ead5564124b8b9c58d304 | [
"MIT"
] | null | null | null | ex026.py | juniorpedroso/Exercicios-CEV-Python | 4adad3b6f3994cf61f9ead5564124b8b9c58d304 | [
"MIT"
] | null | null | null | ex026.py | juniorpedroso/Exercicios-CEV-Python | 4adad3b6f3994cf61f9ead5564124b8b9c58d304 | [
"MIT"
] | null | null | null | frase = str(input('Digite uma frase: ').strip().upper())
print('A letra a aparece {} vezes'.format(frase.count('A')))
print('Sua primeira apario na posio {}'.format(frase.find('A') + 1))
print('Ela aparece pela ltima vez na posio {}'.format(frase.rfind('A') + 1))
| 54.6 | 79 | 0.673993 |
363ab7e49354291dcd24ad4beee0131449a7700e | 3,269 | py | Python | MyDataLoader.py | WynMew/WaifuLite | fbd9680dda4a5f501b7c66515c9fef1444f2d9e7 | [
"Apache-2.0"
] | 22 | 2019-07-16T13:59:18.000Z | 2022-01-17T02:58:01.000Z | MyDataLoader.py | WynMew/WaifuLite | fbd9680dda4a5f501b7c66515c9fef1444f2d9e7 | [
"Apache-2.0"
] | null | null | null | MyDataLoader.py | WynMew/WaifuLite | fbd9680dda4a5f501b7c66515c9fef1444f2d9e7 | [
"Apache-2.0"
] | 3 | 2020-02-19T19:37:52.000Z | 2021-05-11T05:48:09.000Z | import glob
import io
import numpy as np
import re
import os
from io import BytesIO
import random
from uuid import uuid4
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import RandomCrop
from torchvision.transforms.functional import to_tensor
| 32.69 | 126 | 0.632303 |
363b300b4584703dde103216ec3118b56fec2aec | 179 | py | Python | model/get_data.py | qq1010903229/OIer | ec1f4c60d76188efd18af157f46849b27dd8ddae | [
"Apache-2.0"
] | null | null | null | model/get_data.py | qq1010903229/OIer | ec1f4c60d76188efd18af157f46849b27dd8ddae | [
"Apache-2.0"
] | null | null | null | model/get_data.py | qq1010903229/OIer | ec1f4c60d76188efd18af157f46849b27dd8ddae | [
"Apache-2.0"
] | null | null | null | f = open("OI_school.csv")
op = open("mdt.txt","w")
for i in f.readlines():
c = i.split('","')
op.write(c[-3]+','+c[-2]+','+"".join([i+',' for i in eval(c[1])])[:-1]+'\n')
| 29.833333 | 80 | 0.463687 |
363ecc9fcc777c09f95b187bd0eb4e97cd4e05fe | 2,068 | py | Python | power_data_to_sat_passes/filtersatpowerfiles.py | abrahamneben/orbcomm_beam_mapping | 71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004 | [
"MIT"
] | 1 | 2019-04-10T02:50:19.000Z | 2019-04-10T02:50:19.000Z | power_data_to_sat_passes/filtersatpowerfiles.py | abrahamneben/orbcomm_beam_mapping | 71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004 | [
"MIT"
] | null | null | null | power_data_to_sat_passes/filtersatpowerfiles.py | abrahamneben/orbcomm_beam_mapping | 71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004 | [
"MIT"
] | null | null | null | #!/users/aneben/python/bin/python
import sys
import commands
import numpy as np
import string
np.set_printoptions(precision=3,linewidth=200)
months={'Jan':'01','Feb':'02','Mar':'03','Apr':'04','May':'05','Jun':'06','Jul':'07','Aug':'08','Sept':'09','Oct':'10','Nov':'11','Dec':'12'}
label = sys.argv[1]
satpowerdir = '/media/disk-1/MWA_Tile/newdata/'+label
satpowerfnames = commands.getoutput('ls '+satpowerdir+'/satpower*').split()
outf = open('../phase3/composite_'+label+'/'+label+'_filteredsatpows.txt','w')
satbins = np.array([102, 115, 128, 225, 236, 339, 352, 365 ,378, 410])
skip=4
for fname in satpowerfnames:
f = open(fname)
print 'reading '+fname
acq_num = 0
[header,spect] = read_next_refew_spectrum(f)
while len(spect) != 0:
satstrs = header.split('\n')[3:-2]
allsats = np.zeros(8,dtype=int)
sats = [int(satstr[2:4]) for satstr in satstrs]
allsats[0:len(sats)] = sats
if acq_num%skip == 0:
datetime = header.split('\n')[2]
outf.write('\n'+make_datetime_numeric(datetime))
for i in range(len(satbins)): outf.write(",%1.3f"%(20*np.log10(spect[satbins[i]])))
outf.write(',')
outf.write(','.join(map(str,allsats)))
acq_num += 1
if acq_num%5000==0: print acq_num/50000.
[header,spect] = read_next_refew_spectrum(f)
f.close()
outf.close()
| 26.512821 | 141 | 0.597679 |
363f007b5be683fdae2cae98f2ef185659366c8a | 6,060 | py | Python | scripts/utils/prepare.py | Glaciohound/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 52 | 2019-12-04T22:26:56.000Z | 2022-03-31T17:04:15.000Z | scripts/utils/prepare.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 6 | 2020-08-25T07:35:14.000Z | 2021-09-09T04:57:09.000Z | scripts/utils/prepare.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 5 | 2020-02-10T07:39:24.000Z | 2021-06-23T02:53:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : prepare.py
# Author : Chi Han, Jiayuan Mao
# Email : haanchi@gmail.com, maojiayuan@gmail.com
# Date : 17.07.2019
# Last Modified Date: 03.12.2019
# Last Modified By : Chi Han
#
# This file is part of the VCML codebase
# Distributed under MIT license
import os
from dataset.visual_dataset.visual_dataset import Dataset
from dataset.question_dataset.question_dataset import Dataset as QDataset
from dataset.visual_dataset.utils.sceneGraph_loader import \
load_multiple_sceneGraphs
from utility.common import load, make_dir
from utility.cache import Cache
from reason.models.parser import Seq2seqParser
from . import register
| 33.854749 | 74 | 0.64769 |
363f6b85601d80ec792d9609a878c76ff8a2a456 | 14,280 | py | Python | burst_paper/all_ds/plot_allband_ds.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | 2 | 2019-05-01T00:34:28.000Z | 2021-02-10T09:18:10.000Z | burst_paper/all_ds/plot_allband_ds.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | burst_paper/all_ds/plot_allband_ds.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | '''
plot_allband_ds.py - Load P,L,S band dynamic spectrum for a given epoch, bin to specified resolution, and plot to file
'''
import dynspec.plot
reload(dynspec.plot)
from dynspec import load_dict
from dynspec.plot import *
from pylab import *
import os, subprocess
import matplotlib.gridspec as gridspec
'''
def get_obsname(obsfile):
# take a file directory such as '/data/jrv/15A-416/YZCMi/1' and
# convert to obs name such as '15A-416_YZCMi_1' and srcname 'YZCMi'
names = obsfile.split('/')
srcname = names[4]
obsname = names[3]+'_'+names[4]+'_'+names[5]
return obsname,srcname
'''
params = {'legend.fontsize': 'small',
'axes.titlesize': 'small',
'axes.labelsize': 'small',
'xtick.labelsize': 'x-small',
'ytick.labelsize': 'x-small',
'image.interpolation': 'nearest'}
rcParams.update(params)
loadfile = '/data/jrv/burst_paper/all_burst_epoch_dynspec_LSband.npy'
ds_list = load_dict(loadfile)
loadfileP = '/data/jrv/burst_paper/all_burst_epoch_dynspec_Pband.npy'
dsP_list = load_dict(loadfileP)
ds_dir = '/data/jrv/burst_paper/ds/all_burst_dynspec/' # where to save ds plots
if not os.path.exists(ds_dir):
os.system('mkdir '+ds_dir)
close('all')
# note: throughout, "LS" can also include C band, I initially wrote this code for 2015 data (which only has LS band)
# but it works for the 2013 data with LSC band
# params that can be changed are listed in default_fig_params
default_fig_params = {
'tint_P': 300,
'tint_LS': 60,
'df_MHz_P': 16,
'df_MHz_LS': 16,
'smax_P': None,
'smax_LS': None,
'pixflag_sigfacP': 7.,
'pixflag_sigfacLS': 10.,
'chanflag_sigfacP': 3.,
'chanflag_sigfacLS': 7.,
'colorscale_P':'linear',
'colorscale_LS':'linear',
'maskpartial_P':0.5,
'maskpartial_LS':0.5,
'linthresh_P':None,
'linthresh_LS':None}
fig_params_dict = {
'13A-423_UVCet_1':{'tint_LS':60,'df_MHz_LS':32,'smax_LS':None,'colorscale_LS':'symlog','pixflag_sigfacLS':100,'maskpartial_LS':1.0},
'13A-423_UVCet_2':{'tint_LS':60,'df_MHz_LS':32,'smax_LS':0.015,'maskpartial_LS':0.55},
'13A-423_UVCet_2_b':{'tint_LS':300,'df_MHz_LS':64,'smax_LS':0.008,'linthresh_LS':0.002,'maskpartial_LS':0.55,'colorscale_LS':'symlog'},
'15A-416_ADLeo_3':{'smax_LS':0.03,'smax_P':0.02},
'15A-416_ADLeo_4':{'smax_LS':0.045,'smax_P':0.02,'pixflag_sigfacLS':50.},
'15A-416_ADLeo_5':{'tint_LS':120,'df_MHz_LS':32,'tint_P':150,'df_MHz_P':8},
'15A-416_EQPeg_2':{'tint_LS':120,'df_MHz_LS':32,'tint_P':180,'df_MHz_P':8,'chanflag_sigfacP':2.5,'maskpartial_P':0.9,'pixflag_sigfacP':5.,'smax_P':0.1,'maskpartial_LS':0.7},
'15A-416_UVCet_1':{'df_MHz_LS':32},
'15A-416_UVCet_2':{'tint_P':150,'smax_P':0.05},
'15A-416_UVCet_3':{'tint_P':180,'df_MHz_P':16,'smax_P':0.05},
'15A-416_UVCet_4':{'colorscale_LS':'symlog','smax_LS':0.1,'df_MHz_LS':16,'maskpartial_LS':0.9,'linthresh_LS':0.012,'tint_P':180,'smax_P':0.05},
'15A-416_UVCet_5':{'smax_P':0.04,'maskpartial_P':0.7,'maskpartial_LS':0.9},
'15A-416_YZCMi_1':{'smax_P':0.05,'maskpartial_P':0.7,'maskpartial_LS':0.8,'tint_LS':150,'df_MHz_LS':32,'colorscale_LS':'symlog','smax_LS':0.05,'linthresh_LS':0.0075,'chanflag_sigfacLS':4.},
'15A-416_YZCMi_2':{'smax_P':0.05,'tint_LS':120,'df_MHz_LS':32,'smax_LS':0.015}
}
### PLOT INDIVIDUAL OBSERVATIONS ###
obs_list = fig_params_dict.keys()
#obs_list = ['15A-416_EQPeg_2'] # so I can work on just this event
fig_max_width=6.5
fig_max_height=8.25
for obsname in obs_list:
for func in [real,imag]:
# load dynamic spectra for this observation
print '\n-----', obsname, '-----'
obsfile,srcname = get_obsfile(obsname)
ds = ds_list[obsfile]
dsP = dsP_list.get(obsfile,None)
# load custom parameters for plotting this epoch (binning, RFI flagging, color scale)
fig_params = deepcopy(default_fig_params)
fp_dict_temp = fig_params_dict.get(obsname,{})
for k in fp_dict_temp:
fig_params[k] = fp_dict_temp[k]
# Duration of observation relative to 3h40m (max duration of any) - scale x-axis by this
# so they are all on the same time scale
duration = ds.get_tlist()[-1]*ds.dt()
print 'Duration:',duration,'sec'
frac_duration = duration/(3*3600+40*60)
print 'Fractional duration compared to 3h40m:', frac_duration
# Bandwidth of >1 GHz data relative to 3 GHz (default for 2015) - scale y-axis of >1 GHz dynspec by this
BW_LSC = max(ds.f)-min(ds.f)
frac_BW = BW_LSC/3.e9
print 'Fractional bandwidth of >1 GHz data compared to 3 GHz:',frac_BW
# bin LS band dynamic spectrum to desired resolution
# mask RFI pix and chans before binning, pix after binning
ds.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacLS'],func=imag)
ds.mask_RFI(rmsfac=fig_params['chanflag_sigfacLS'])
nt = int(round(fig_params['tint_LS']/ds.dt())) # number of integrations to bin together
nf = int(round(fig_params['df_MHz_LS']/(ds.df()/1e6))) # number of channels to bin together
ds = ds.bin_dynspec(nt=nt,nf=nf,mask_partial=fig_params['maskpartial_LS'])
ds.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacLS'],func=imag)
if dsP:
dsP.mask_RFI_pixels(rmsfac=fig_params['pixflag_sigfacP'])
dsP.mask_RFI(rmsfac=fig_params['chanflag_sigfacP'])
# bin P band dynamic spectrum to desired resolution
nt = int(round(fig_params['tint_P']/dsP.dt())) # number of integrations to bin together
nf = int(round(fig_params['df_MHz_P']/(dsP.df()/1e6))) # number of channels to bin together
dsP = dsP.bin_dynspec(nt=nt,nf=nf,mask_partial=fig_params['maskpartial_P'])
dsP.mask_RFI(rmsfac=fig_params['chanflag_sigfacP'])
# calculate horizontal positions of subplots in units from 0 to 1
# (0 is left edge)
dsplot_w = 3.2 * frac_duration # width of dynamic spectrum in inches
gap_l = 0.55 # width of x-axis blank space (left) in inches
gap_c = 0.15 # width of x-axis blank space (center) in inches
gap_cbar = 0.45 # width of blank space between V plot & cbar in inches
gap_r = 0.57 # width of x-axis blank space (right) in inches
cbar_w = 0.13 # width of colorbar in inches
tot_w = 2*dsplot_w + cbar_w + gap_l + gap_c + gap_cbar + gap_r # total width in inches
#if obs == '13A-423_UVCet_2':
# tot_w += gap_c + dsplot_w + gap_cbar + gap_r
print 'Total width of figure in inches:', tot_w, '(goal: <=8.25)'
x1 = gap_l/tot_w # left edge of Stokes I dynspec
x2 = x1 + dsplot_w/tot_w # right edge of Stokes I dynspec
x3 = x2 + gap_c/tot_w # left edge of Stokes V dynspec
x4 = x3 + dsplot_w/tot_w # right edge of Stokes V dynspec
x5 = x4 + gap_cbar/tot_w # left edge of colorbar
x6 = x5+cbar_w/tot_w # right edge of colorbar
#if obs == '13A-423_UVCet_2':
# x7 = x6 + (gap_r+gap_c)/tot_w # left edge of second Stokes V dynspec
# x8 = x
# calculate vertical positions of subplots in units from 0 to 1
# (0 is bottom edge)
dsLS_h = 3.2 * frac_BW # height of LS band dynspec in inches
dsP_h = 0.9 # height of P band dynspec in inches
gap_t = 0.43 # height of y-axis blank space at top (includes titles) in inches
gap_rows = 0.5 # heights of each gap between rows of dynspecs in inches
gap_b = 0.36 # height of y-axis blank space at bottom in inches
if dsP:
tot_h = dsLS_h + 2*dsP_h + gap_t + 2*gap_rows + gap_b # total height in inches
else:
tot_h = gap_t + dsLS_h + gap_b # total height in inches if no P band data
print 'Total height of figure in inches:', tot_h, '(goal: <=6.8)'
y1 = 1-(gap_t/tot_h) # top edge of LS band dynspec
y2 = y1 - dsLS_h/tot_h # bottom edge of LS band dynspec
y3 = y2 - gap_rows/tot_h # top edge of P band I,V dynspecs
y4 = y3 - dsP_h/tot_h # bottom edge of P band I,V dynspecs
y5 = y4 - gap_rows/tot_h # top edge of P band U dynspec
y6 = y5 - dsP_h/tot_h # bottom edge of P band U dynspec
cbarP_h = (2*dsP_h + gap_rows)/tot_h
# create figure
close('all')
figname = ds_dir+obsname+'.pdf'
if func == imag:
figname = ds_dir+obsname+'_imag.pdf'
fig=figure(figsize=(tot_w,tot_h))
# First row of plots: Stokes I LS, Stokes V LS, colorbar LS
# Format for axes command is axes([x_left, y_bottom, width, height])
# First row: y_bottom is y2, x_left is x1, x3, x5
# set flux limits for LS band
smax = fig_params['smax_LS']
if smax is None:
smax = max(percentile(real(ds.spec['i']),99)*1.1,median(real(ds.spec['i']))*2)
smin = -smax # make colorbar symmetric about zero
# set axis ratio to 'auto' in order to fill specified subplot areas
# IMPORTANT: must not include 'cbar' and 'cbar_label' in axis_labels
ar0 = 'auto'
# plot Stokes I real, LS band
ax = axes([x1,y2,dsplot_w/tot_w,dsLS_h/tot_h])
#ax.set_autoscale_on(False)
pp = {'pol':'i','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':[],'ar0':ar0,'dy':0.5,'scale':fig_params['colorscale_LS'],'func':func}
if fig_params['linthresh_LS']:
pp['linthresh']=fig_params['linthresh_LS']
plt,cbar_ticks,cbar_ticklbls = ds.plot_dynspec(plot_params=pp)
#gca().xaxis.set_visible(False)
#gca().yaxis.set_label_coords(-0.2,0)
if dsP:
title('Stokes I, 1-4 GHz')
else:
title('Stokes I')
fig.text(0.01,0.5,'Frequency (GHz)',va='center',rotation='vertical',fontsize='small')
# plot Stokes V real, LS band
ax=axes([x3,y2,dsplot_w/tot_w,dsLS_h/tot_h])
pp = {'pol':'v','smin':smin,'smax':smax,'trim_mask':False,'axis_labels':['xlabel'],'ar0':ar0,'dy':0.5,'scale':fig_params['colorscale_LS'],'func':func}
if fig_params['linthresh_LS']:
pp['linthresh']=fig_params['linthresh_LS']
ds.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
xlabel_text = ax.xaxis.get_label_text()
ax.set_xlabel('')
#gca().xaxis.set_visible(False)
if dsP:
title('Stokes V, 1-4 GHz')
else:
title('Stokes V')
# plot LS band colorbar
ax = axes([x5,y2,cbar_w/tot_w,dsLS_h/tot_h])
cbar=colorbar(plt,cax=ax)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_ticklbls)
ax = cbar.ax
if dsP:
cbar_label = '1-4 Flux Density (mJy)'
ycbar = 0.75
else:
cbar_label = 'Flux Density (mJy)'
ycbar=0.65
if obsname=='15A-416_UVCet_1':
ycbar=0.98
ax.text(4.2,ycbar,cbar_label,rotation=90,fontsize='small')
if dsP:
# Second row of plots: Stokes I P, apparent Stokes V P
# Format for axes command is axes([x_left, y_bottom, width, height])
# Second row: y_bottom is y4, x_left is x1, x3
# set flux limits for P band
smaxP = fig_params['smax_P']
if smaxP is None:
smaxP = dsP.get_rms('v')*6.
sminP = -smaxP
# plot Stokes I real, P band
ax = axes([x1,y4,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'i','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
dsP.plot_dynspec(plot_params=pp)
title('Stokes I, 0.2-0.5 GHz')
# plot Stokes V real, P band
ax = axes([x3,y4,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'v','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
plt,cbar_ticks,cbar_ticklbls=dsP.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
title('Stokes V\', 0.2-0.5 GHz')
# Third row of plots: [empty], apparent Stokes U P, P band colorbar (extra height)
# Format for axes command is axes([x_left, y_bottom, width, height])
# Third row: y_bottom is y6
# x_left is x3 (Stokes U), x5 (colorbar)
# height is dsP_h (Stokes U), 2*dsP_h+gap_rows (colorbar)
# plot Stokes U real, P band
ax = axes([x3,y6,dsplot_w/tot_w,dsP_h/tot_h])
pp = {'pol':'u','smin':sminP,'smax':smaxP,'trim_mask':False,'axis_labels':[],'dy':0.05,'ar0':ar0,'scale':fig_params['colorscale_P'],'func':func}
if fig_params['linthresh_P']:
pp['linthresh']=fig_params['linthresh_P']
dsP.plot_dynspec(plot_params=pp)
gca().yaxis.tick_right()
title('Stokes U\', 0.2-0.5 GHz')
# plot P band colorbar
ax = axes([x5,y6,cbar_w/tot_w,cbarP_h])
cbar=colorbar(plt,cax=ax)
cbar.set_ticks(cbar_ticks)
cbar.set_ticklabels(cbar_ticklbls)
ax = cbar.ax
ax.text(4.2,0.9,'0.2-0.5 GHz Flux Density (mJy)',rotation=90,fontsize='small')
fig.text(0.5,0.01,xlabel_text,ha='center',fontsize='small')
date = ds.t0().split()[0]
fig_title = srcname[0:2]+' '+srcname[2:5]+' - '+date
if func == imag:
fig_title += ' - Imag(vis)'
suptitle(fig_title,y=0.99,fontsize='medium')
savefig(figname)
| 45.769231 | 193 | 0.614566 |
363f98c059fbed994ba92f98a94c9d889c901242 | 2,518 | py | Python | src/utils.py | jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO | de195a391f1f9bfc4428dadda9400850408e88ca | [
"MIT"
] | null | null | null | src/utils.py | jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO | de195a391f1f9bfc4428dadda9400850408e88ca | [
"MIT"
] | null | null | null | src/utils.py | jungtaekkim/On-Uncertainty-Estimation-by-Tree-based-Surrogate-Models-in-SMO | de195a391f1f9bfc4428dadda9400850408e88ca | [
"MIT"
] | null | null | null | import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
if __name__ == '__main__':
pass
| 27.67033 | 125 | 0.623511 |
363fef05c1d19fcf588faad011da861494aa03e5 | 1,191 | py | Python | cocos-example.py | halflings/terrasim | a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d | [
"Apache-2.0"
] | null | null | null | cocos-example.py | halflings/terrasim | a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d | [
"Apache-2.0"
] | null | null | null | cocos-example.py | halflings/terrasim | a51c0e7cb28d3a3ec0d9c687d58c1c753d956c2d | [
"Apache-2.0"
] | null | null | null |
import random
import cocos
from cocos.tiles import TileSet, RectCell, RectMapLayer
from cocos.director import director
from cocos.layer.scrolling import ScrollingManager
import pyglet
from game import Game
from views import WorldMap, CharacterView2
if __name__ == '__main__':
director.init(width=800, height=600, resizable=False, autoscale=False)
director.set_show_FPS(True)
main_layer = MainLayer()
main_scene = cocos.scene.Scene(main_layer)
director.run(main_scene)
| 26.466667 | 74 | 0.687657 |
3641d06d971b0ebba597cba4a1a138c64156e641 | 3,532 | py | Python | view.py | ykmoon04/2021-2-OSSP1-Smith-3 | 66d86e01444b822414a254d0944657ca4ce7dc22 | [
"Apache-2.0"
] | 1 | 2021-10-31T13:01:08.000Z | 2021-10-31T13:01:08.000Z | view.py | ykmoon04/2021-2-OSSP1-Smith-3 | 66d86e01444b822414a254d0944657ca4ce7dc22 | [
"Apache-2.0"
] | null | null | null | view.py | ykmoon04/2021-2-OSSP1-Smith-3 | 66d86e01444b822414a254d0944657ca4ce7dc22 | [
"Apache-2.0"
] | 4 | 2021-11-04T09:03:37.000Z | 2021-12-28T06:28:15.000Z | from itertools import takewhile
from eunjeon import Mecab
import queue
from jamo import h2j, j2hcj
import numpy as np
import re
import json
import sys
from pkg_resources import VersionConflict
global script_table
global voice_table
global s_idx
global v_idx
script_table = []
voice_table = []
# script_table
if __name__=="__main__":
main(sys.argv[1], sys.argv[2]) # argv[1]: , argv[2]: | 29.433333 | 88 | 0.558607 |
364307863e32ccdc999357c039cf0832ac94b380 | 103 | py | Python | rboard/board/__init__.py | joalon/rboard | cc743d8c08837c20bcc9382655e36bb79aecd524 | [
"MIT"
] | null | null | null | rboard/board/__init__.py | joalon/rboard | cc743d8c08837c20bcc9382655e36bb79aecd524 | [
"MIT"
] | null | null | null | rboard/board/__init__.py | joalon/rboard | cc743d8c08837c20bcc9382655e36bb79aecd524 | [
"MIT"
] | null | null | null | from flask import Blueprint
blueprint = Blueprint('board', __name__)
from rboard.board import routes
| 17.166667 | 40 | 0.796117 |
36446df7ecc8c55d638710c593c4957d62d9704f | 615 | py | Python | examples/second_node.py | csunny/kademlia | 5513ff7851aa00601ebc7fd9eb610de4e2147f96 | [
"MIT"
] | 1 | 2018-11-30T13:52:37.000Z | 2018-11-30T13:52:37.000Z | examples/second_node.py | csunny/kademlia | 5513ff7851aa00601ebc7fd9eb610de4e2147f96 | [
"MIT"
] | null | null | null | examples/second_node.py | csunny/kademlia | 5513ff7851aa00601ebc7fd9eb610de4e2147f96 | [
"MIT"
] | null | null | null | import logging
import asyncio
import sys
from kademlia.network import Server
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log = logging.getLogger('kademlia')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
loop = asyncio.get_event_loop()
loop.set_debug(True)
server = Server()
server.listen(1234)
bootstrap_node = ('0.0.0.0', 8468)
loop.run_until_complete(server.bootstrap([bootstrap_node]))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.stop()
loop.close() | 21.206897 | 85 | 0.749593 |
3645c7b92794db29663c1c763622e5f0554a803c | 1,771 | py | Python | src/commons/big_query/big_query_job_reference.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | 41 | 2018-05-08T11:54:37.000Z | 2022-02-09T21:19:17.000Z | src/commons/big_query/big_query_job_reference.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | 139 | 2018-06-07T13:45:21.000Z | 2021-04-30T20:44:06.000Z | src/commons/big_query/big_query_job_reference.py | Morgenz/bbq | f0fd3f626841c610aee80ad08a61123b7cccb775 | [
"Apache-2.0"
] | 5 | 2019-09-11T12:28:24.000Z | 2022-02-04T21:38:29.000Z | from src.commons.big_query.copy_job_async.result_check.result_check_request import \
ResultCheckRequest
from src.commons.big_query.copy_job_async.task_creator import TaskCreator
| 36.142857 | 84 | 0.631846 |
36472112e71a6f099b1f967e54265e83e3ef22d7 | 2,068 | py | Python | PyInstaller/hooks/hook-numpy.py | mathiascode/pyinstaller | eaad76a75a5cc7be90e445f974f4bf1731045496 | [
"Apache-2.0"
] | 9,267 | 2015-01-01T04:08:45.000Z | 2022-03-31T11:42:38.000Z | PyInstaller/hooks/hook-numpy.py | bwoodsend/pyinstaller | 2a16bc2fe0a1234d0f89836d39b7877c74b3bca1 | [
"Apache-2.0"
] | 5,150 | 2015-01-01T12:09:56.000Z | 2022-03-31T18:06:12.000Z | PyInstaller/hooks/hook-numpy.py | bwoodsend/pyinstaller | 2a16bc2fe0a1234d0f89836d39b7877c74b3bca1 | [
"Apache-2.0"
] | 2,101 | 2015-01-03T10:25:27.000Z | 2022-03-30T11:04:42.000Z | #!/usr/bin/env python3
# --- Copyright Disclaimer ---
#
# In order to support PyInstaller with numpy<1.20.0 this file will be duplicated for a short period inside
# PyInstaller's repository [1]. However this file is the intellectual property of the NumPy team and is
# under the terms and conditions outlined their repository [2].
#
# .. refs:
#
# [1] PyInstaller: https://github.com/pyinstaller/pyinstaller/
# [2] NumPy's license: https://github.com/numpy/numpy/blob/master/LICENSE.txt
#
"""
This hook should collect all binary files and any hidden modules that numpy needs.
Our (some-what inadequate) docs for writing PyInstaller hooks are kept here:
https://pyinstaller.readthedocs.io/en/stable/hooks.html
PyInstaller has a lot of NumPy users so we consider maintaining this hook a high priority.
Feel free to @mention either bwoodsend or Legorooj on Github for help keeping it working.
"""
from PyInstaller.compat import is_conda, is_pure_conda
from PyInstaller.utils.hooks import collect_dynamic_libs
# Collect all DLLs inside numpy's installation folder, dump them into built app's root.
binaries = collect_dynamic_libs("numpy", ".")

# If using Conda without any non-conda virtual environment manager:
if is_pure_conda:
    # Assume running the NumPy from Conda-forge and collect it's DLLs from the communal Conda bin directory. DLLs from
    # NumPy's dependencies must also be collected to capture MKL, OpenBlas, OpenMP, etc.
    from PyInstaller.utils.hooks import conda_support
    # Note: `datas` is only defined on this pure-conda path.
    datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)

# Submodules PyInstaller cannot detect (probably because they are only imported by extension modules, which PyInstaller
# cannot read).
hiddenimports = ['numpy.core._dtype_ctypes']
if is_conda:
    # Conda builds additionally need six bundled — kept from the upstream
    # NumPy hook; TODO confirm this is still required for current conda numpy.
    hiddenimports.append("six")

# Remove testing and building code and packages that are referenced throughout NumPy but are not really dependencies.
excludedimports = [
    "scipy",
    "pytest",
    "nose",
    "distutils",
    "f2py",
    "setuptools",
    "numpy.f2py",
    "numpy.distutils",
]
| 38.296296 | 119 | 0.758704 |
36480cab3e7b7b34c639f6dcb640a7d9ee3f2cc1 | 4,480 | py | Python | test_proj/blog/admin.py | Ivan-Feofanov/django-inline-actions | a9410a67e9932152d65a063bea0848c98f5c8d73 | [
"BSD-3-Clause"
] | 204 | 2016-05-10T05:38:27.000Z | 2022-03-25T11:22:28.000Z | test_proj/blog/admin.py | Ivan-Feofanov/django-inline-actions | a9410a67e9932152d65a063bea0848c98f5c8d73 | [
"BSD-3-Clause"
] | 45 | 2016-07-18T15:39:48.000Z | 2022-02-28T17:06:38.000Z | test_proj/blog/admin.py | Ivan-Feofanov/django-inline-actions | a9410a67e9932152d65a063bea0848c98f5c8d73 | [
"BSD-3-Clause"
] | 40 | 2016-09-23T07:27:50.000Z | 2022-03-22T09:44:10.000Z | from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _
from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin
from . import forms
from .models import Article, Author, AuthorProxy
| 29.668874 | 87 | 0.667857 |
3649038aeb95961f992580df722315d018924dd9 | 12,731 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACv42_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| 35.561453 | 127 | 0.657843 |
36490aa7054830d00893922cc4300184b33b2ea9 | 1,037 | py | Python | aries_cloudagent/commands/__init__.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | 1 | 2021-01-15T01:04:43.000Z | 2021-01-15T01:04:43.000Z | aries_cloudagent/commands/__init__.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | 1 | 2020-03-06T12:11:29.000Z | 2020-03-06T12:11:29.000Z | aries_cloudagent/commands/__init__.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | 1 | 2021-01-15T08:45:02.000Z | 2021-01-15T08:45:02.000Z | """Commands module common setup."""
from importlib import import_module
from typing import Sequence
def available_commands():
    """Index available commands.

    Returns:
        A list of dicts, each carrying a command ``name`` and a short
        ``summary`` suitable for help output.
    """
    catalog = (
        ("help", "Print available commands"),
        ("provision", "Provision an agent"),
        ("start", "Start a new agent process"),
    )
    return [{"name": name, "summary": summary} for name, summary in catalog]
def load_command(command: str):
    """Load the module corresponding with a named command.

    Args:
        command: Name of the command whose module should be imported.

    Returns:
        The imported command module, or ``None`` when no command matches.
    """
    match = next(
        (cmd for cmd in available_commands() if cmd["name"] == command), None
    )
    if match is None:
        return None
    # A command entry may carry an explicit "module" path; otherwise the
    # module is assumed to live in this package under the command's name.
    module_path = match.get("module") or f"{__package__}.{match['name']}"
    return import_module(module_path)
def run_command(command: str, argv: Sequence[str] = None):
    """Execute a named command with command line arguments.

    Falls back to the ``help`` command when *command* is not recognized.

    Args:
        command: Name of the command to run.
        argv: Command line arguments forwarded to the command module.
    """
    target = load_command(command)
    if target is None:
        target = load_command("help")
    target.execute(argv)
| 29.628571 | 66 | 0.636451 |
36493db41d822a42cd12a9cb95ab495245aeb761 | 3,646 | py | Python | AI/Lab 2/astar.py | abikoraj/CSIT | 68ba4944d2b6366a8d5b70b92bdc16b19b7e9208 | [
"MIT"
] | 9 | 2021-11-29T00:56:41.000Z | 2022-03-19T04:41:05.000Z | AI/Lab 2/astar.py | abikoraj/CSIT | 68ba4944d2b6366a8d5b70b92bdc16b19b7e9208 | [
"MIT"
] | null | null | null | AI/Lab 2/astar.py | abikoraj/CSIT | 68ba4944d2b6366a8d5b70b92bdc16b19b7e9208 | [
"MIT"
] | 3 | 2021-11-29T06:30:33.000Z | 2022-03-18T14:27:23.000Z | gScore = 0 #use this to index g(n)
fScore = 1 #use this to index f(n)
previous = 2 #use this to index previous node
inf = 10000 #use this for value of infinity
#we represent the graph usind adjacent list
#as dictionary of dictionaries
G = {
'biratnagar' : {'itahari' : 22, 'biratchowk' : 30, 'rangeli': 25},
'itahari' : {'biratnagar' : 22, 'dharan' : 20, 'biratchowk' : 11},
'dharan' : {'itahari' : 20},
'biratchowk' : {'biratnagar' : 30, 'itahari' : 11, 'kanepokhari' :10},
'rangeli' : {'biratnagar' : 25, 'kanepokhari' : 25, 'urlabari' : 40},
'kanepokhari' : {'rangeli' : 25, 'biratchowk' : 10, 'urlabari' : 12},
'urlabari' : {'rangeli' : 40, 'kanepokhari' : 12, 'damak' : 6},
'damak' : {'urlabari' : 6}
}
start = 'biratnagar'
goal = 'damak'
visitSequence = aStar(G, start, goal)
path = findPath(visitSequence, goal)
print(path)
| 33.145455 | 94 | 0.545804 |
3649d664027df60736783975e94228ac3542abe3 | 19,918 | py | Python | plio/io/io_controlnetwork.py | jlaura/plio | 980c92d88cc78d27729392c14b3113cfac4f89cd | [
"Unlicense"
] | 11 | 2018-02-01T02:56:26.000Z | 2022-02-21T12:08:12.000Z | plio/io/io_controlnetwork.py | jlaura/plio | 980c92d88cc78d27729392c14b3113cfac4f89cd | [
"Unlicense"
] | 151 | 2016-06-15T21:31:37.000Z | 2021-11-15T16:55:53.000Z | plio/io/io_controlnetwork.py | jlaura/plio | 980c92d88cc78d27729392c14b3113cfac4f89cd | [
"Unlicense"
] | 21 | 2016-06-17T17:02:39.000Z | 2021-03-08T20:47:50.000Z | from enum import IntEnum
from time import gmtime, strftime
import warnings
import pandas as pd
import numpy as np
import pvl
import struct
from plio.io import ControlNetFileV0002_pb2 as cnf
from plio.io import ControlNetFileHeaderV0005_pb2 as cnh5
from plio.io import ControlPointFileEntryV0005_pb2 as cnp5
from plio.utils.utils import xstr, find_in_dict
# File offset (in bytes) at which to_isis writes the binary buffer header.
HEADERSTARTBYTE = 65536
# Placeholder username recorded when the caller does not supply one.
DEFAULTUSERNAME = 'None'
def write_filelist(lst, path="fromlist.lis"):
    """
    Writes a filelist to a file so it can be used in ISIS3.

    Parameters
    ----------
    lst : list
        A list containing full paths to the images used, as strings.

    path : str
        The name of the file to write out. Default: fromlist.lis
    """
    # Use a context manager so the handle is always closed (and data
    # flushed) — the previous version opened the file and never closed it.
    with open(path, 'w') as handle:
        for filename in lst:
            handle.write(filename)
            handle.write('\n')
def from_isis(path, remove_empty=True):
    """Read an ISIS control network file into a dataframe.

    Parameters
    ----------
    path : str
        Path to the binary control network file.
    remove_empty : bool
        Unused in this function body; kept for interface compatibility.

    Returns
    -------
        The result of ``IsisStore.read()`` for the given file.
    """
    # Open the binary control network and delegate all parsing to IsisStore.
    with IsisStore(path, mode='rb') as store:
        return store.read()
def to_isis(obj, path, mode='wb', version=2,
            headerstartbyte=HEADERSTARTBYTE,
            networkid='None', targetname='None',
            description='None', username=DEFAULTUSERNAME,
            creation_date=None, modified_date=None,
            pointid_prefix=None, pointid_suffix=None):
    """Write ``obj`` out as a binary ISIS control network file at ``path``.

    Parameters
    ----------
    obj
        Control point data handed to ``IsisStore.create_points`` (presumably
        a dataframe of control points — confirm against IsisStore).
    path : str
        Output file path.
    mode : str
        File mode used to open the store (default binary write, 'wb').
    version : int
        Control network version recorded in the PVL header.
    headerstartbyte : int
        Header start byte recorded in the PVL header.
    networkid, targetname, description, username : str
        Metadata written into both the buffer header and the PVL header.
    creation_date, modified_date : str or None
        Timestamps for the headers; default to the current UTC time.
    pointid_prefix, pointid_suffix : str or None
        Optional decorations applied to point ids by ``create_points``.
    """
    if targetname == 'None':
        warnings.warn("Users should provide a targetname to this function such as 'Moon' or 'Mars' in order to generate a valid ISIS control network.")
    with IsisStore(path, mode) as store:
        # Default both timestamps to "now" in UTC (gmtime).
        if not creation_date:
            creation_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        if not modified_date:
            modified_date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        # Serialize the points first so their sizes are known before the
        # buffer header (which records them) is built.
        point_messages, point_sizes = store.create_points(obj, pointid_prefix, pointid_suffix)
        points_bytes = sum(point_sizes)
        buffer_header, buffer_header_size = store.create_buffer_header(networkid,
                                                                      targetname,
                                                                      description,
                                                                      username,
                                                                      point_sizes,
                                                                      creation_date,
                                                                      modified_date)
        # Write the buffer header
        store.write(buffer_header, HEADERSTARTBYTE)
        # Then write the points, so we know where to start writing, + 1 to avoid overwrite
        point_start_offset = HEADERSTARTBYTE + buffer_header_size
        for i, point in enumerate(point_messages):
            store.write(point, point_start_offset)
            point_start_offset += point_sizes[i]
        # Finally, the human-readable PVL header goes at the front of the
        # file, recording the byte layout written above.
        header = store.create_pvl_header(version, headerstartbyte, networkid,
                                         targetname, description, username,
                                         buffer_header_size, points_bytes,
                                         creation_date, modified_date)
        store.write(header.encode('utf-8'))
| 39.836 | 151 | 0.570288 |
364ab9b65eb9e9388a14433c72e77abdba6bec4c | 4,028 | py | Python | resources.py | slowiklukasz/qgis-inventories | 6bd247f41ec3340964522b3cac9dd9a924cefbf2 | [
"MIT"
] | null | null | null | resources.py | slowiklukasz/qgis-inventories | 6bd247f41ec3340964522b3cac9dd9a924cefbf2 | [
"MIT"
] | null | null | null | resources.py | slowiklukasz/qgis-inventories | 6bd247f41ec3340964522b3cac9dd9a924cefbf2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\x05\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x01\x9a\x49\x44\x41\x54\x58\x47\xc5\x94\x3b\x4e\
\x03\x41\x10\x44\x7d\x01\x22\x12\x02\x9c\x20\x0e\x40\xc2\x2d\xe0\
\x42\xdc\x84\x63\x10\x70\x25\x32\x62\x42\xa3\xb2\x54\xab\x47\x6f\
\xf5\x78\x96\x9f\x83\x27\xe1\xe9\xea\xee\xb7\xe3\xc5\xbb\xd7\xb7\
\xfd\xe1\x9c\x4c\x0b\xdc\x3f\xdd\xc5\x73\x32\x93\xa9\x4c\x09\x68\
\xb0\x49\x75\x31\x93\x49\xfc\x89\xc0\xe3\xf3\x65\xcc\x24\x4e\x0a\
\x6c\x19\xcc\xec\xcd\xcb\xc3\x42\xca\x9a\x4d\x02\xa9\x4e\x98\x95\
\xec\xc5\xc7\xd5\x91\x91\xc4\xbf\x08\x8c\x24\x86\x02\x75\x60\xca\
\x54\xd8\xf3\xab\x02\xa9\x9e\x60\xcf\xd9\x05\xfc\x35\x74\xcb\xdf\
\xaf\x6f\xd7\x02\x0a\x8b\x3a\xa8\xe6\x46\xb0\x77\xb4\x7c\x25\xa0\
\xb0\xaf\x8c\x43\x98\x99\xe1\x54\xaf\x97\xeb\xef\x45\x80\xcb\xab\
\x40\xf7\x14\x1d\xec\x4d\x75\x2f\x17\x51\x80\x03\x74\xfd\x3f\x11\
\x10\xac\xf1\xe9\xc5\x49\x01\x7d\xde\x2a\x20\x38\x43\xfd\xa2\x2e\
\x17\xab\x77\x80\x8d\x6e\x66\x66\x16\xce\xf0\x62\x51\xe7\x7d\x11\
\x10\x6c\xdc\xfa\xf6\x13\xce\x11\x5a\xee\x1b\xa6\xc4\x50\xa0\xd6\
\xcc\x4c\x46\x30\xe7\x1b\x18\x0a\xb0\x41\xb0\xd6\x65\xba\x9c\x60\
\x46\x8b\x2d\xc1\x4c\x2b\x90\xae\x9f\xf5\x4a\xcd\xa6\xbc\x9e\xbc\
\x4a\xb4\x02\x3c\xaf\xb5\x0e\xe6\xb5\x44\x0f\x91\xea\x94\x58\x04\
\x18\x64\x38\xd5\x7c\x3b\x75\x81\xe1\x02\x9e\x73\xa6\x33\x51\x80\
\xd7\xcf\x73\xe1\x73\xd3\x49\xb8\x9e\xce\x4c\x2b\x90\xce\x78\x5e\
\x19\x49\xd4\x5a\xed\x3d\x0a\x30\xe0\xa7\xe7\x99\x60\x93\xd0\x0b\
\x45\xd4\xd7\x89\x90\x3a\x67\x25\x50\x3f\xfb\x8c\x68\xa1\x7f\x54\
\xcc\xac\x44\x9d\xb5\x12\xa8\xd4\x86\xb4\xdc\xa8\xa6\xcc\x16\x89\
\x5d\x0a\x18\x06\xcd\x8c\x80\x18\xdd\x06\xe7\xb5\x02\x0c\x91\x59\
\x01\xd1\x49\x30\x13\xbf\x02\x06\x12\x49\xa2\x2e\x37\x49\x82\xf5\
\xe5\xdf\x70\x2b\x5a\x48\x52\x66\x86\x6f\x0b\xfc\x0e\xfb\xc3\x27\
\x2f\x90\x9e\xc6\xb7\x8c\xf7\x21\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x13\
\x0e\xb7\x46\xa2\
\x00\x69\
\x00\x6e\x00\x76\x00\x65\x00\x6e\x00\x74\x00\x6f\x00\x72\x00\x79\x00\x5f\x00\x76\x00\x61\x00\x6c\x00\x69\x00\x64\x00\x61\x00\x74\
\x00\x6f\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7e\xb7\x66\x8e\xd2\
"
# The generated data above ships both v1 and v2 resource-index layouts;
# select the variant matching the Qt runtime (v1 for Qt older than 5.8).
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2

# Register the embedded resources with Qt on import (generated boilerplate;
# qInitResources is defined earlier in this generated file).
qInitResources()
| 41.102041 | 130 | 0.708292 |
364b2da593ffc26a8e80419fd18f3ad6526af7ad | 2,130 | py | Python | pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py | yunhailuo/pulsarpy-to-encodedcc | 9fd0ce2b81b502dbd2e1e39910f373bd9635f787 | [
"MIT"
] | null | null | null | pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py | yunhailuo/pulsarpy-to-encodedcc | 9fd0ce2b81b502dbd2e1e39910f373bd9635f787 | [
"MIT"
] | null | null | null | pulsarpy_to_encodedcc/scripts/patch_r2_paired_with.py | yunhailuo/pulsarpy-to-encodedcc | 9fd0ce2b81b502dbd2e1e39910f373bd9635f787 | [
"MIT"
] | 1 | 2020-02-21T18:09:12.000Z | 2020-02-21T18:09:12.000Z | #!/usr/bin/env python
"""
Given one or more DCC experiment IDs, looks at all read2s that were submitted and updates each r2 file
object such that it's paired_with property points to the correct r1. This works by looking at the aliases
in the r2 file object to see if there is one with _R2_001 in it. If so, it sets paired_with to be
the same alias, but with that segment replace with _R1_001. Thus, this script is nice if submissions
went wrong with regard to the file pairings, and this is one way to fix that.
"""
import argparse
import encode_utils.connection as euc
import re
if __name__ == "__main__":
main()
| 37.368421 | 105 | 0.553521 |
364b86ae80f99f078899fde9b937f621e0386d77 | 1,022 | py | Python | ibsng/handler/bw/update_node.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/bw/update_node.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/bw/update_node.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Update node API method."""
from ibsng.handler.handler import Handler
| 28.388889 | 66 | 0.629159 |