hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e14d6f0551ebee50376c52df3cd3465b333386e1 | 9,395 | py | Python | ktsp.py | lum4chi/mylearn | 8a66fd5ebc32a70783132e185b4f5ce18ce14c5f | [
"MIT"
] | null | null | null | ktsp.py | lum4chi/mylearn | 8a66fd5ebc32a70783132e185b4f5ce18ce14c5f | [
"MIT"
] | null | null | null | ktsp.py | lum4chi/mylearn | 8a66fd5ebc32a70783132e185b4f5ce18ce14c5f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# author: Francesco Lumachi <francesco.lumachi@gmail.com>
import pandas as pd
import numpy as np
from itertools import islice
from sklearn.utils.validation import check_X_y
| 41.570796 | 92 | 0.571368 |
e14da54b265b1cbaa55d62627f5c4770644546b4 | 11,089 | py | Python | picmodels/models/care_advisors/daily_metrics_models/services/read.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | picmodels/models/care_advisors/daily_metrics_models/services/read.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | picmodels/models/care_advisors/daily_metrics_models/services/read.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
] | null | null | null | import datetime
import picmodels.models
from picmodels.models.utils import filter_db_queryset_by_id
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_f_and_l_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_first_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_last_name
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_email
from picmodels.models.care_advisors.navigator_models.services.read import filter_navigator_objs_by_mpn
| 44.179283 | 155 | 0.742447 |
e14dbf37cac6b30fd02dacd5e179dc9f00f542ab | 3,405 | py | Python | beakerx/beakerx/commands.py | acq/beakerx | 584023ce0fdb052713855d8a9455e6d7422e53da | [
"Apache-2.0"
] | null | null | null | beakerx/beakerx/commands.py | acq/beakerx | 584023ce0fdb052713855d8a9455e6d7422e53da | [
"Apache-2.0"
] | null | null | null | beakerx/beakerx/commands.py | acq/beakerx | 584023ce0fdb052713855d8a9455e6d7422e53da | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import beakerx
from notebook import notebookapp as app
from .install import install, uninstall
from .bkr2ipynb import main
from beakerx_magics import Py4JServer
| 35.842105 | 138 | 0.698678 |
e14e630e471443793b9fd29816d201fc888da13e | 2,512 | py | Python | attnganw/runner.py | cptanalatriste/AttnGAN | 6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8 | [
"MIT"
] | null | null | null | attnganw/runner.py | cptanalatriste/AttnGAN | 6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8 | [
"MIT"
] | null | null | null | attnganw/runner.py | cptanalatriste/AttnGAN | 6b8641cd5eb9c3a0bba73904b5c639784d6c3ec8 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import List
import dateutil
from datasets import TextDataset
from miscc.config import cfg_from_file, cfg
from torchvision.transforms import transforms
from attnganw.train import GanTrainerWrapper, BirdGenerationFromCaption
| 44.857143 | 108 | 0.624602 |
e150737ff1e7de27f34b49c4df0d1658c30b7b57 | 2,469 | py | Python | Gds/src/fprime_gds/common/data_types/sys_data.py | m-aleem/fprime | ae8a2a43a39d0e8a1908a82b48106467357d6cba | [
"Apache-2.0"
] | 1 | 2020-05-12T03:43:36.000Z | 2020-05-12T03:43:36.000Z | Gds/src/fprime_gds/common/data_types/sys_data.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 5 | 2020-05-26T21:38:02.000Z | 2020-05-26T21:43:33.000Z | Gds/src/fprime_gds/common/data_types/sys_data.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 3 | 2020-09-05T18:17:21.000Z | 2020-11-15T04:06:24.000Z | '''
@brief Base class for system data classes.
This class defines the interface for cata classes which are intended to hold
a specific data item (packet, channel, event). This data item includes the time
of the data as well as data such as channel value or argument value.
@date Created July 2, 2018
@author R. Joseph Paetz (rpaetz@jpl.nasa.gov)
@bug No known bugs
'''
from fprime.common.models.serialize import time_type
from fprime_gds.common.templates import data_template
import fprime_gds.common.utils.jsonable
if __name__ == '__main__':
pass
| 24.939394 | 79 | 0.617254 |
e15162de1790d30c48e2c7c4e83b30934c311fba | 3,063 | py | Python | txdav/carddav/datastore/query/test/test_filter.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | 1 | 2017-02-18T19:22:19.000Z | 2017-02-18T19:22:19.000Z | txdav/carddav/datastore/query/test/test_filter.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | null | null | null | txdav/carddav/datastore/query/test/test_filter.py | eventable/CalendarServer | 384444edb1966b530bc391789afbe3fb9cd6fd3e | [
"Apache-2.0"
] | null | null | null | ##
# Copyright (c) 2011-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.syntax import SQLFragment
from twisted.trial.unittest import TestCase
from twistedcaldav import carddavxml
from txdav.carddav.datastore.query.filter import Filter, FilterBase
from txdav.common.datastore.sql_tables import schema
from txdav.carddav.datastore.query.builder import buildExpression
from txdav.common.datastore.query.generator import SQLQueryGenerator
from txdav.carddav.datastore.index_file import sqladdressbookquery
| 31.57732 | 224 | 0.647405 |
e151f9085880a3aa0708d3748aa966d97630b3db | 8,259 | py | Python | train/erfnet_pspnet_hier33.py | elnino9ykl/Sequential-Hierarchical-ERF-PSPNet | c7fe42967894b15f6ed82608cd1836a17fec0260 | [
"MIT"
] | null | null | null | train/erfnet_pspnet_hier33.py | elnino9ykl/Sequential-Hierarchical-ERF-PSPNet | c7fe42967894b15f6ed82608cd1836a17fec0260 | [
"MIT"
] | null | null | null | train/erfnet_pspnet_hier33.py | elnino9ykl/Sequential-Hierarchical-ERF-PSPNet | c7fe42967894b15f6ed82608cd1836a17fec0260 | [
"MIT"
] | null | null | null | # ERFNet full model definition for Pytorch
# Sept 2017
# Eduardo Romera
#######################
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
#ERFNet
| 33.987654 | 124 | 0.593413 |
e15240f37f48e80c43aa04132d7d8be1877669db | 1,009 | py | Python | wsgi.py | nam4dev/evedom_demo | 3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13 | [
"MIT"
] | null | null | null | wsgi.py | nam4dev/evedom_demo | 3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13 | [
"MIT"
] | null | null | null | wsgi.py | nam4dev/evedom_demo | 3cba0cf8e37f6fa75af006c4a99a0b3fab7c2e13 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WSGI script
Setup Application, Authentication, ...
"""
import os
from eve import Eve
from evedom import loader
# from your_app.authentication.token import TokenBasedAuth
__author__ = "nam4dev"
__created__ = '08/11/2017'
ROOT_PATH = os.path.dirname(
os.path.abspath(__file__)
)
EVE_SETTINGS = os.path.join(ROOT_PATH, 'settings.py')
def runner(*_, **options):
"""
A simple runner
Args:
*_:
**options:
Returns:
Flask App run
"""
arguments = dict(
debug=1,
port=5000,
)
arguments.update(options)
if 'EVE_SETTINGS' not in os.environ:
os.environ['EVE_SETTINGS'] = EVE_SETTINGS
application = Eve(
settings=EVE_SETTINGS,
# auth=TokenBasedAuth,
)
application.root_path = ROOT_PATH
with application.app_context():
loader.init()
return application.run(**arguments)
if __name__ == "__main__":
exit(runner())
| 16.274194 | 58 | 0.620416 |
e152f4d6dde06ac4acdcd8bfa8623f41a066db20 | 1,654 | py | Python | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | null | null | null | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | Falliatcom-sa/falliatcom | 39fb926de072c296ed32d50cccfb8003ca870739 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-18 17:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 47.257143 | 151 | 0.628174 |
e15353b2bdb09ab7d8c47ead4ff13403eb177890 | 753 | py | Python | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
from util.util import pad, detect_aes_ecb, generate_key, ammend_plaintext, encrypt_random
# Chosen plaintext
plaintext = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
# Generate data and encrypt plaintext
key = generate_key()
plaintext = pad(ammend_plaintext(plaintext), 16)
ciphertext = encrypt_random(key, plaintext)
# Detect AES in ECB mode
detect = detect_aes_ecb(ciphertext)
# Print answer
print("Plaintext: " + str(plaintext, 'latin-1'))
print("Ciphertext: " + str(ciphertext, 'latin-1'))
if (detect[1] == 6):
print("Guess: ECB without CBC mode")
elif (detect[1] == 4):
print("Guess: ECB with CBC mode")
else:
raise Exception | 30.12 | 89 | 0.749004 |
e1570b21e8b27475e013ab8eb1dbd45fef7957ed | 5,457 | py | Python | lib/python3.6/site-packages/example/authorize_driver.py | venkyyPoojari/Smart-Mirror | 256b7a870f8cda2965b848a66574ee38254274f5 | [
"MIT"
] | 187 | 2015-10-02T13:47:33.000Z | 2022-03-23T08:09:22.000Z | lib/python3.6/site-packages/example/authorize_driver.py | venkyyPoojari/Smart-Mirror | 256b7a870f8cda2965b848a66574ee38254274f5 | [
"MIT"
] | 44 | 2015-12-08T04:31:14.000Z | 2022-03-14T17:33:11.000Z | lib/python3.6/site-packages/example/authorize_driver.py | venkyyPoojari/Smart-Mirror | 256b7a870f8cda2965b848a66574ee38254274f5 | [
"MIT"
] | 88 | 2015-10-11T03:09:01.000Z | 2022-03-19T04:16:37.000Z | # Copyright (c) 2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Initializes an UberRidesClient with OAuth 2.0 Credentials.
This example demonstrates how to get an access token through the
OAuth 2.0 Authorization Code Grant and use credentials to create
an UberRidesClient.
To run this example:
(1) Set your app credentials in config.driver.yaml
(2) Run `python authorize_driver.py`
(3) A success message will print, 'Hello {YOUR_NAME}'
(4) User OAuth 2.0 credentials are recorded in
'oauth_driver_session_store.yaml'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import input
from yaml import safe_dump
from example import utils # NOQA
from example.utils import fail_print
from example.utils import response_print
from example.utils import success_print
from example.utils import import_app_credentials
from uber_rides.auth import AuthorizationCodeGrant
from uber_rides.client import UberRidesClient
from uber_rides.errors import ClientError
from uber_rides.errors import ServerError
from uber_rides.errors import UberIllegalState
def authorization_code_grant_flow(credentials, storage_filename):
"""Get an access token through Authorization Code Grant.
Parameters
credentials (dict)
All your app credentials and information
imported from the configuration file.
storage_filename (str)
Filename to store OAuth 2.0 Credentials.
Returns
(UberRidesClient)
An UberRidesClient with OAuth 2.0 Credentials.
"""
auth_flow = AuthorizationCodeGrant(
credentials.get('client_id'),
credentials.get('scopes'),
credentials.get('client_secret'),
credentials.get('redirect_url'),
)
auth_url = auth_flow.get_authorization_url()
login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
login_message = login_message.format(auth_url)
response_print(login_message)
redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
result = input(redirect_url).strip()
try:
session = auth_flow.get_session(result)
except (ClientError, UberIllegalState) as error:
fail_print(error)
return
credential = session.oauth2credential
credential_data = {
'client_id': credential.client_id,
'redirect_url': credential.redirect_url,
'access_token': credential.access_token,
'expires_in_seconds': credential.expires_in_seconds,
'scopes': list(credential.scopes),
'grant_type': credential.grant_type,
'client_secret': credential.client_secret,
'refresh_token': credential.refresh_token,
}
with open(storage_filename, 'w') as yaml_file:
yaml_file.write(safe_dump(credential_data, default_flow_style=False))
return UberRidesClient(session, sandbox_mode=True)
def hello_user(api_client):
"""Use an authorized client to fetch and print profile information.
Parameters
api_client (UberRidesClient)
An UberRidesClient with OAuth 2.0 credentials.
"""
try:
response = api_client.get_driver_profile()
except (ClientError, ServerError) as error:
fail_print(error)
return
else:
profile = response.json
first_name = profile.get('first_name')
last_name = profile.get('last_name')
email = profile.get('email')
message = 'Hello, {} {}. Successfully granted access token to {}.'
message = message.format(first_name, last_name, email)
success_print(message)
success_print(profile)
success_print('---')
response = api_client.get_driver_trips()
trips = response.json
success_print(trips)
success_print('---')
response = api_client.get_driver_payments()
payments = response.json
success_print(payments)
if __name__ == '__main__':
"""Run the example.
Get an access token through the OAuth 2.0 Authorization Code Grant
and use credentials to create an UberRidesClient.
"""
credentials = import_app_credentials('config.driver.yaml')
api_client = authorization_code_grant_flow(
credentials,
'oauth_driver_session_store.yaml',
)
hello_user(api_client)
| 33.478528 | 79 | 0.721092 |
e1572ceb180b2c59c917246045f255f6d9bcd968 | 10,766 | py | Python | orangeplus/OPTICS_w.py | panatronic-git/orange-plus | b51643d5edaa7de78388bc38a7695eec64331d27 | [
"CC0-1.0"
] | null | null | null | orangeplus/OPTICS_w.py | panatronic-git/orange-plus | b51643d5edaa7de78388bc38a7695eec64331d27 | [
"CC0-1.0"
] | null | null | null | orangeplus/OPTICS_w.py | panatronic-git/orange-plus | b51643d5edaa7de78388bc38a7695eec64331d27 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
""" A data clustering widget for the Orange3.
This is a data clustering widget for Orange3, that implements the OPTICS algorithm.
OPTICS stands for "Ordering Points To Identify the Clustering Structure".
This is a very useful algorithm for clustering data when the dataset is unlabeled with
Non-flat geometry or when it has uneven cluster sizes or variable cluster density.
The package used is called "sklearn". Source: https://scikit-learn.org/stable/index.html
To run the addon, just install it using 'pip install -e .' from its package folder.
Don't forget to first activate the orange environment.
__author__ = Panagiotis Papadopoulos
__date__ = Feb 2020
__version__ = 0.1.0
__type__ = Orange Addon
__platform__ = Windows (Orange enviroment)
__email__ = 'Panagiotis Papadopoulos' <panatronic@outlook.com>
__status__ = Dev
"""
import numpy as np
from AnyQt.QtCore import Qt
from AnyQt.QtGui import QColor
from Orange.widgets import widget, gui
from Orange.widgets import settings
from Orange.widgets.widget import Msg
from Orange.widgets.utils.signals import Input, Output
from Orange.widgets.utils.widgetpreview import WidgetPreview
from Orange.widgets.utils.slidergraph import SliderGraph
from Orange.data import Table, Domain, DiscreteVariable
from pyqtgraph import mkPen
from pyqtgraph.functions import intColor
from sklearn.cluster import OPTICS
from sklearn.neighbors import VALID_METRICS
""" OPTICS Parameters
class sklearn.cluster.OPTICS(
* min_samples=5, {default=5 or int > 1}, title: Min samples
max_eps=inf, {default=np.inf}, not changed
* metric='minkowski', {default='minkowski' or [1]}, title: Metric
p=2, {default=2}, not changed
cluster_method='xi', {default='xi'}, not changed
eps=None, {default=None}, not changed
* xi=0.05, {default=0.05 or float, between 0 and 1}, title: Minimum steepness
predecessor_correction=True, {default=True}, not changed
min_cluster_size=None, {default=None}, not changed
* algorithm='auto', {default=auto or ball_tree, kd_tree, brute, auto}, title: Algorithm for nearest neighbors:
leaf_size=30, {default=30}, not changed
n_jobs=None, {default=None}, not changed
)
[1] Valid values for metric are:
from scikit-learn: [cityblock, cosine, euclidean, l1, l2, manhattan]
from scipy.spatial.distance: [braycurtis, canberra, chebyshev, correlation, dice, hamming, jaccard,
kulsinski, mahalanobis, minkowski, rogerstanimoto, russellrao, seuclidean, sokalmichener, sokalsneath, sqeuclidean, yule]
See the documentation for scipy.spatial.distance for details on these metrics.
"""
OPTICS_METRICS = [
("cityblock", "cityblock"),
("cosine", "cosine"),
("euclidean", "euclidean"),
("l1", "l1"),
("l2", "l2"),
("manhattan", "manhattan"),
("braycurtis", "braycurtis"),
("canberra", "canberra"),
("chebyshev", "chebyshev"),
("correlation", "correlation"),
("hamming", "hamming"),
("minkowski", "minkowski"),
("sqeuclidean", "sqeuclidean"),
]
OPTICS_ALGORITHM = [
("Auto","auto"),
("Ball Tree","ball_tree"),
("kd Tree","kd_tree"),
("Brute","brute"),
]
if __name__ == "__main__":
WidgetPreview(OPTICS_w).run(Table("iris-imbalanced"))
| 34.396166 | 145 | 0.612205 |
e157ca6b782a8b3accbd943a60246d4523efb4e1 | 2,590 | py | Python | message_prototypes/base_message.py | agratoth/py-message-prototypes | a23527f8264631e947ed2a6657b325036005330f | [
"MIT"
] | 1 | 2021-02-26T04:40:00.000Z | 2021-02-26T04:40:00.000Z | message_prototypes/base_message.py | agratoth/py-message-prototypes | a23527f8264631e947ed2a6657b325036005330f | [
"MIT"
] | null | null | null | message_prototypes/base_message.py | agratoth/py-message-prototypes | a23527f8264631e947ed2a6657b325036005330f | [
"MIT"
] | null | null | null | import json
from message_prototypes.exceptions import MissingModelException
| 30.116279 | 70 | 0.508108 |
e1597f1ccc9fdad83bd98d093bad683b7edf4352 | 2,015 | py | Python | Tests/Methods/Geometry/test_is_inside.py | Eomys/pyleecan | 9a8cea3e62f63bc73417fe09770bd4d480021e35 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | Tests/Methods/Geometry/test_is_inside.py | Eomys/pyleecan | 9a8cea3e62f63bc73417fe09770bd4d480021e35 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | Tests/Methods/Geometry/test_is_inside.py | Eomys/pyleecan | 9a8cea3e62f63bc73417fe09770bd4d480021e35 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*
import pytest
from pyleecan.Classes.Circle import Circle
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
# Configuring the test of is_inside
inside_test = list()
# Test 1 : checking if a point is inside a circle of radius 1 at 0 + 0j
C1 = Circle()
inside_test.append({"surf": C1, "Z": 0, "result": True}) # inside
inside_test.append({"surf": C1, "Z": 20, "result": False}) # outside
inside_test.append({"surf": C1, "Z": 1, "result": False}) # online not OK
inside_test.append({"surf": C1, "Z": 1, "if_online": True, "result": True}) # online OK
# Test 2 : checking if a point is inside a "C-shape" surface
A0 = 0
A1 = 0 + 4j
A2 = 3 + 4j
A3 = 3 + 3j
A4 = 1 + 3j
A5 = 1 + 1j
A6 = 3 + 1j
A7 = 3
line_list1 = list()
line_list1.append(Segment(A0, A1))
line_list1.append(Segment(A1, A2))
line_list1.append(Segment(A2, A3))
line_list1.append(Segment(A3, A4))
line_list1.append(Segment(A4, A5))
line_list1.append(Segment(A5, A6))
line_list1.append(Segment(A6, A7))
line_list1.append(Segment(A7, A0))
C2 = SurfLine(line_list=line_list1, point_ref=A0)
inside_test.append({"surf": C2, "Z": 0.5 + 2j, "result": True}) # inside
inside_test.append({"surf": C2, "Z": 2 + 2j, "result": False}) # outside
inside_test.append({"surf": C2, "Z": 2.03, "result": False}) # online not OK
inside_test.append(
{"surf": C2, "Z": 2.03, "if_online": True, "result": True}
) # online OK
if __name__ == "__main__":
for test_dict in inside_test:
test_is_inside(test_dict)
| 31 | 89 | 0.651613 |
e15a7dcff0d33ea587927f028856b4941738c99e | 542 | py | Python | examples/polling_tweets_example.py | Cheetah97/nitter_scraper | 2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5 | [
"MIT"
] | 21 | 2020-08-31T06:20:36.000Z | 2022-01-10T19:22:00.000Z | examples/polling_tweets_example.py | Cheetah97/nitter_scraper | 2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5 | [
"MIT"
] | 2 | 2021-02-09T18:19:51.000Z | 2021-07-25T17:27:59.000Z | examples/polling_tweets_example.py | Cheetah97/nitter_scraper | 2da2cf9dca66c7ff9b02c06fccf1cfad772f14a5 | [
"MIT"
] | 4 | 2020-12-20T01:31:30.000Z | 2022-01-24T14:22:13.000Z | import time
from nitter_scraper import NitterScraper
last_tweet_id = None
with NitterScraper(port=8008) as nitter:
while True:
for tweet in nitter.get_tweets("dgnsrekt", pages=1, break_on_tweet_id=last_tweet_id):
if tweet.is_pinned is True:
continue
if tweet.is_retweet is True:
continue
if tweet.tweet_id != last_tweet_id:
print(tweet.json(indent=4))
last_tweet_id = tweet.tweet_id
break
time.sleep(0.1)
| 21.68 | 93 | 0.605166 |
e15ca3d18b760bf74faf9a038392f2b6d8bb59b6 | 11,697 | py | Python | esse/mainapp/migrations/0003_auto_20210225_0350.py | alexeevivan/bookstore | d5698a5681a5d4f6b7616aacd3ac384d25b306e5 | [
"Unlicense"
] | null | null | null | esse/mainapp/migrations/0003_auto_20210225_0350.py | alexeevivan/bookstore | d5698a5681a5d4f6b7616aacd3ac384d25b306e5 | [
"Unlicense"
] | null | null | null | esse/mainapp/migrations/0003_auto_20210225_0350.py | alexeevivan/bookstore | d5698a5681a5d4f6b7616aacd3ac384d25b306e5 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-25 00:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 62.550802 | 162 | 0.608703 |
e15ca6e7927c7dfaebe88887cd584126de16a196 | 45 | py | Python | mypo/sampler/__init__.py | sonesuke/my-portfolio | 4fd19fdee8a0aa13194cab0df53c83218c5664e3 | [
"MIT"
] | 2 | 2021-03-14T00:14:25.000Z | 2021-09-04T16:26:02.000Z | mypo/sampler/__init__.py | sonesuke/my-portfolio | 4fd19fdee8a0aa13194cab0df53c83218c5664e3 | [
"MIT"
] | 104 | 2021-02-21T08:11:11.000Z | 2021-09-26T03:02:27.000Z | mypo/sampler/__init__.py | sonesuke/mypo | 4fd19fdee8a0aa13194cab0df53c83218c5664e3 | [
"MIT"
] | null | null | null | # flake8: noqa
from .sampler import Sampler
| 11.25 | 28 | 0.755556 |
e15cf3b8a65ba6fdc8baa289bed190ee2b034ffe | 295 | py | Python | tools/com/test/test_alpha.py | AnthonyEdvalson/Machina | fefb058591dd7b62817c75277d5ca0eb6dbd8c3a | [
"MIT"
] | null | null | null | tools/com/test/test_alpha.py | AnthonyEdvalson/Machina | fefb058591dd7b62817c75277d5ca0eb6dbd8c3a | [
"MIT"
] | null | null | null | tools/com/test/test_alpha.py | AnthonyEdvalson/Machina | fefb058591dd7b62817c75277d5ca0eb6dbd8c3a | [
"MIT"
] | null | null | null | from tools.com.alpha import Flow, Path
| 17.352941 | 74 | 0.522034 |
e15d3dcf46973ac9dc62ab07128caefad49a8319 | 907 | py | Python | compile.py | DrSuiunbek/pbc | 6dd3dc4e480483a885d80cd9c01b80c4ae9fb076 | [
"MIT"
] | null | null | null | compile.py | DrSuiunbek/pbc | 6dd3dc4e480483a885d80cd9c01b80c4ae9fb076 | [
"MIT"
] | null | null | null | compile.py | DrSuiunbek/pbc | 6dd3dc4e480483a885d80cd9c01b80c4ae9fb076 | [
"MIT"
] | null | null | null | import argparse
import os
import subprocess
from subprocess import call
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="the name of the solidity file")
args = parser.parse_args()
filename = args.filename
cwd = os.getcwd()
datadir = os.path.join(cwd, "data")
contractsdir = os.path.join(cwd, "contracts")
compile_separate()
| 29.258065 | 136 | 0.680265 |
e15d5e4f994eb2a0d1d81f21279363ad0216ad9f | 1,283 | py | Python | app/machine_learning/utils.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | null | null | null | app/machine_learning/utils.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | null | null | null | app/machine_learning/utils.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | 1 | 2021-02-18T03:08:21.000Z | 2021-02-18T03:08:21.000Z | import re
import joblib
from sklearn.ensemble import RandomForestClassifier
from typing import List, Tuple
# Cleans up a messed up HTML / tabbed raw content into space delimited content
# Flattens a list of tuples for (Sender, SenderDomain) into [Sender, SenderDomain]
# By right there SHOULD only be a single pair but kept in list just in case!
# Even indexes are Sender and odd indexs are SenderDomains
# Retrieves a list of [Sender, SenderDomain] and returns domain names only
# eg. ['Person', 'Person@Company.com']
# Returns [Company.com]
# By right there should only be one entry but kept in list just in case
# set list to remove duplicates
| 42.766667 | 82 | 0.747467 |
e15e1ca2f5eb878425d154d27fca023b5942afb5 | 1,637 | py | Python | sendgrid_email.py | ssiddhantsharma/open-source-library-data-collector | 9def970707a3995239ef75958a9b03736da4a73e | [
"MIT"
] | null | null | null | sendgrid_email.py | ssiddhantsharma/open-source-library-data-collector | 9def970707a3995239ef75958a9b03736da4a73e | [
"MIT"
] | null | null | null | sendgrid_email.py | ssiddhantsharma/open-source-library-data-collector | 9def970707a3995239ef75958a9b03736da4a73e | [
"MIT"
] | null | null | null | import os
import sendgrid
from sendgrid.helpers.mail import Content, Email, Mail
from bs4 import BeautifulSoup
| 41.974359 | 132 | 0.592547 |
e15e7f996a734bfc0abd1bd0f37b1c7a308de458 | 2,141 | py | Python | ckilpailija.py | bittikettu/JTimer | d0b4a6173e84c89286874865427741bd595cf955 | [
"MIT"
] | null | null | null | ckilpailija.py | bittikettu/JTimer | d0b4a6173e84c89286874865427741bd595cf955 | [
"MIT"
] | null | null | null | ckilpailija.py | bittikettu/JTimer | d0b4a6173e84c89286874865427741bd595cf955 | [
"MIT"
] | null | null | null | import json
| 33.984127 | 176 | 0.566558 |
e15f0f7648e65dbaf7aa7dffa649f9d29bce17dd | 1,436 | py | Python | hessen/frankfurt.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | 12 | 2022-02-23T11:06:06.000Z | 2022-03-04T17:21:44.000Z | hessen/frankfurt.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | hessen/frankfurt.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
## Tommy
from botbase import *
_frankfurt_st = re.compile(r"Stand:\s*(\d\d?\. *\w+ 20\d\d, \d\d?(?::\d\d)?) Uhr")
schedule.append(Task(8, 5, 12, 5, 360, frankfurt, 6412))
if __name__ == '__main__': frankfurt(googlesheets())
| 47.866667 | 180 | 0.66922 |
e15f87d69b9f385338407a9fb5c01c89ecaa7425 | 2,065 | py | Python | lib/utils/timeout.py | kustodian/aerospike-admin | 931ee55ccd65ba3e20e6611a0294c92b09e8cfcb | [
"Apache-2.0"
] | null | null | null | lib/utils/timeout.py | kustodian/aerospike-admin | 931ee55ccd65ba3e20e6611a0294c92b09e8cfcb | [
"Apache-2.0"
] | null | null | null | lib/utils/timeout.py | kustodian/aerospike-admin | 931ee55ccd65ba3e20e6611a0294c92b09e8cfcb | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2018 Aerospike, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
import commands
DEFAULT_TIMEOUT = 5.0
def timeout(timeout):
"""This decorator takes a timeout parameter in seconds."""
return wrap_function
def default_timeout(function):
"""This simple decorator 'timesout' after DEFAULT_TIMEOUT seconds."""
return call_with_timeout(function)
def getstatusoutput(command, timeout=DEFAULT_TIMEOUT):
"""This is a timeout wrapper aroung getstatusoutput."""
_gso = call_with_timeout(commands.getstatusoutput, timeout)
try:
return _gso(command)
except TimeoutException:
return (-1, "The command '%s' timed-out after %i seconds." % (command, timeout))
| 29.927536 | 88 | 0.701695 |
e15f893232695e92454619ed0274fe5e5ba282b5 | 101 | py | Python | src/myapp/admin.py | anmquangw/viu-upload-file | bfbff413cc92e454226fced5fe504b7cebc6c102 | [
"MIT"
] | null | null | null | src/myapp/admin.py | anmquangw/viu-upload-file | bfbff413cc92e454226fced5fe504b7cebc6c102 | [
"MIT"
] | 2 | 2020-06-21T01:47:59.000Z | 2020-06-27T12:39:24.000Z | src/myapp/admin.py | sonnhfit/DocShare | 50d9b8c333144780385f970197519ddda61bd502 | [
"MIT"
] | null | null | null | """from django.contrib import admin
from .models import DemoModel
admin.site.register(DemoModel)"""
| 20.2 | 35 | 0.782178 |
e15fdd67f6de5e7d590eb91ddbbf06d3e3d45dea | 2,015 | py | Python | straightlanespipeline.py | semeniuta/CarND-AdvancedLaneLines | 57fbdc6cc9596f299b4517514d487573f7c373b4 | [
"MIT"
] | null | null | null | straightlanespipeline.py | semeniuta/CarND-AdvancedLaneLines | 57fbdc6cc9596f299b4517514d487573f7c373b4 | [
"MIT"
] | null | null | null | straightlanespipeline.py | semeniuta/CarND-AdvancedLaneLines | 57fbdc6cc9596f299b4517514d487573f7c373b4 | [
"MIT"
] | null | null | null | import lanelines
from compgraph import CompGraph, CompGraphRunner
import numpy as np
import cv2
# Node implementations of the lane-detection computation graph.  Keys are the
# node names; values are the callables run for each node (all but the lambda
# come from the project-local `lanelines` module).
func_dict = {
    'grayscale': lanelines.grayscale,
    'get_image_shape': lambda im : im.shape,
    'canny': lanelines.canny,
    'define_lanes_region': lanelines.define_lanes_region,
    'apply_region_mask': lanelines.apply_region_mask,
    'gaussian_blur': lanelines.gaussian_blur,
    'hough_lines': lanelines.find_hough_lines,
    'compute_line_tangents': lanelines.compute_line_tangents,
    'extend_lines': lanelines.extend_lane_lines_grouped_by_slopes,
    'average_endpoints_left': lanelines.average_lines_endpoints,
    'average_endpoints_right': lanelines.average_lines_endpoints
}
# Graph wiring: for every node, (input name(s), output name(s)).  A name
# produced by one node and consumed by another connects the two; names that
# only ever appear as inputs (e.g. 'blur_kernel', 'canny_lo') appear to be
# free parameters supplied at run time via the `parameters` dict below --
# see the CompGraphRunner API for the exact semantics.
func_io = {
    'grayscale': ('image', 'image_gray'),
    'get_image_shape': ('image_gray', ('n_rows', 'n_cols')),
    'define_lanes_region': (
        ('n_rows', 'n_cols', 'x_from', 'x_to', 'y_lim', 'left_offset', 'right_offset'),
        'region_vertices'
    ),
    'gaussian_blur': (('image_gray', 'blur_kernel'), 'blurred_image'),
    'canny': (('blurred_image', 'canny_lo', 'canny_hi'), 'image_canny'),
    'apply_region_mask': (('image_canny', 'region_vertices'), 'masked_image'),
    'hough_lines': (('masked_image', 'rho', 'theta', 'hough_threshold', 'min_line_length', 'max_line_gap'), 'lines'),
    'compute_line_tangents': ('lines', 'tangents'),
    'extend_lines': (('lines', 'tangents', 'y_lim', 'n_rows', 'abs_slope_threshold'), ('extended_lines_left', 'extended_lines_right')),
    'average_endpoints_left': ('extended_lines_left', 'avg_line_left'),
    'average_endpoints_right': ('extended_lines_right', 'avg_line_right')
}
computational_graph = CompGraph(func_dict, func_io)
# Parameter values for a pipeline run.
# NOTE(review): the region-of-interest values (x_from/x_to/y_lim and the two
# offsets) look like pixel coordinates calibrated for one specific camera
# resolution -- confirm before reusing on other footage.
parameters = {
    'x_from': 560,
    'x_to': 710,
    'y_lim': 450,
    'left_offset': 50,
    'right_offset': 0,
    'blur_kernel': 11,          # Gaussian blur kernel size
    'canny_lo': 70,             # Canny hysteresis thresholds (low / high)
    'canny_hi': 200,
    'rho': 1,                   # Hough accumulator distance resolution
    'theta': np.pi/180,         # Hough angular resolution (one degree, in radians)
    'hough_threshold': 20,
    'min_line_length': 7,
    'max_line_gap': 1,
    'abs_slope_threshold': 0.2
}
e1601ec501793267dab5b7a344de5c414ede0c73 | 2,904 | py | Python | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | null | null | null | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | null | null | null | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | 1 | 2021-02-12T11:36:53.000Z | 2021-02-12T11:36:53.000Z |
import random
from random import Random
r = Random()
f = open("extra_tests.txt", "w+")
f.write("new int")
c = 2
for _ in range(64):
c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(128):
c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(512):
c = write_test_line(f, r, c, 5)
for _ in range(20):
c = write_insert_ordered_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(20):
c = write_test_line(f, r, c, 2, 2)
c = write_insert_ordered_line(f, r, c)
for _ in range(32):
c = write_test_line(f, r, c, 2, 1)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(32):
c = write_insert_ordered_line(f, r, c)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
for _ in range(32):
c = write_test_line(f, r, c, 2, 2)
for _ in range(10):
c = write_find_line(f, r, c)
for _ in range(10):
c = write_remove_value_line(f, r, c)
f.close()
| 19.755102 | 43 | 0.512741 |
e160ba46a79b2de84010dbbe846ade7d792604fe | 4,305 | py | Python | stock deep learning/7-2.LSTM(stock).py | nosy0411/Deep-learning-project | b0864579ec1fef4c6224397e3c39e4fce051c93a | [
"MIT"
] | null | null | null | stock deep learning/7-2.LSTM(stock).py | nosy0411/Deep-learning-project | b0864579ec1fef4c6224397e3c39e4fce051c93a | [
"MIT"
] | null | null | null | stock deep learning/7-2.LSTM(stock).py | nosy0411/Deep-learning-project | b0864579ec1fef4c6224397e3c39e4fce051c93a | [
"MIT"
] | null | null | null | # LSTM(GRU) : KODEX200 (2010 ~ ) .
# KODEX200 , 10, 40 10 .
# 20 (step = 20) , .
# ??
#
# 2018.11.22, ()
# --------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from MyUtil import YahooData
nInput = 3
nOutput = 3
nStep = 20
nNeuron = 50
# 2 .
# return : xBatch - RNN
# yBatch - RNN
#
# step = 2, n = 3 ,
# xData = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], ...]
# xBatch = [[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]], ...]
# yBatch = [[[4,5,6], [7,8,9]], [[10,11,12], [13,14,15]], ...]
#
#df = YahooData.getStockDataYahoo('^KS11', start='2007-01-01')
df = pd.read_csv('StockData/^KS11.csv', index_col=0, parse_dates=True)
df = pd.DataFrame(df['Close'])
df['ma_10'] = pd.DataFrame(df['Close']).rolling(window=10).mean()
df['ma_40'] = pd.DataFrame(df['Close']).rolling(window=40).mean()
df = df.dropna()
df = (df - df.mean()) / df.std()
# .
data = np.array(df)
xBatch, yBatch = createTrainData(data, nStep)
# RNN (Wx, Wh). xBatch RNN .
tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, nStep, nInput])
rnn = tf.nn.rnn_cell.LSTMCell(nNeuron)
#rnn = tf.nn.rnn_cell.GRUCell(nNeuron)
output, state = tf.nn.dynamic_rnn(rnn, x, dtype=tf.float32)
# RNN 3 y feed-forward network . (Wy)
y = tf.placeholder(tf.float32, [None, nStep, nOutput])
inFC = tf.reshape(output, [-1, nNeuron])
fc1 = tf.contrib.layers.fully_connected(inputs=inFC, num_outputs=nNeuron)
predY = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=nOutput, activation_fn=None)
predY = tf.reshape(predY, [-1, nStep, nOutput])
# Mean square error (MSE) Loss . xBatch yBatch .
loss = tf.reduce_sum(tf.square(predY - y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
minLoss = optimizer.minimize(loss)
# . . (Wx, Wh, Wy )
sess = tf.Session()
sess.run(tf.global_variables_initializer())
lossHist = []
for i in range(300):
sess.run(minLoss, feed_dict={x: xBatch, y: yBatch})
if i % 5 == 0:
ploss = sess.run(loss, feed_dict={x: xBatch, y: yBatch})
lossHist.append(ploss)
print(i, "\tLoss:", ploss)
# 10 . 1 , 2 .
# 10 .
nFuture = 10
if len(data) > 100:
lastData = np.copy(data[-100:]) # 100
else:
lastData = np.copy(data)
dx = np.copy(lastData)
estimate = [dx[-1]]
for i in range(nFuture):
# nStep
px = dx[-nStep:,]
px = np.reshape(px, (1, nStep, nInput))
# .
yHat = sess.run(predY, feed_dict={x: px})[0][-1]
#
estimate.append(yHat)
#
dx = np.vstack([dx, yHat])
# Loss history
plt.figure(figsize=(8, 3))
plt.plot(lossHist, color='red')
plt.title("Loss History")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
# .
plt.figure(figsize=(8, 3))
plt.plot(df['Close'], color='red')
plt.plot(df['ma_10'], color='blue')
plt.plot(df['ma_40'], color='green')
plt.title("KODEX-200 stock price")
plt.show()
#
CLOSE = 0 #
estimate = np.array(estimate)
ax1 = np.arange(1, len(lastData[:, CLOSE]) + 1)
ax2 = np.arange(len(lastData), len(lastData) + len(estimate))
plt.figure(figsize=(8, 3))
plt.plot(ax1, lastData[:, CLOSE], 'b-o', color='blue', markersize=4, label='Stock price', linewidth=1)
plt.plot(ax2, estimate[:, CLOSE], 'b-o', color='red', markersize=4, label='Estimate')
plt.axvline(x=ax1[-1], linestyle='dashed', linewidth=1)
plt.legend()
plt.title("KODEX-200 prediction")
plt.show()
| 30.75 | 102 | 0.625784 |
e1627f07a28ba766216726e8582242703156a953 | 1,486 | py | Python | media.py | wduncanfraser/movie_trailer_website | deaa134862772df09cf8af79a6990d140848fb80 | [
"MIT"
] | null | null | null | media.py | wduncanfraser/movie_trailer_website | deaa134862772df09cf8af79a6990d140848fb80 | [
"MIT"
] | null | null | null | media.py | wduncanfraser/movie_trailer_website | deaa134862772df09cf8af79a6990d140848fb80 | [
"MIT"
] | null | null | null | """media.py: Module for movie_trailer_website, contains Movie class"""
import webbrowser
import urllib
import json
| 37.15 | 101 | 0.672275 |
e163903fd0678839e9ef90435028e77dc1cbf097 | 103 | py | Python | src/moredataframes/mdf_core.py | GlorifiedStatistics/MoreDataframes | 147d5b8104d1cbd1cf2836220f43fb6c8ca099b7 | [
"MIT"
] | null | null | null | src/moredataframes/mdf_core.py | GlorifiedStatistics/MoreDataframes | 147d5b8104d1cbd1cf2836220f43fb6c8ca099b7 | [
"MIT"
] | null | null | null | src/moredataframes/mdf_core.py | GlorifiedStatistics/MoreDataframes | 147d5b8104d1cbd1cf2836220f43fb6c8ca099b7 | [
"MIT"
] | null | null | null | """
A collection of useful functions for manipulating/encoding pandas dataframes for data science.
"""
| 25.75 | 94 | 0.786408 |
e163f95d62fc70e17e021921220d7ea02e910aa6 | 23,493 | py | Python | superslomo/model.py | myungsub/meta-interpolation | f7afee9d1786f67e6f548c2734f91858f803c5dc | [
"MIT"
] | 74 | 2020-04-03T06:26:39.000Z | 2022-03-25T16:51:28.000Z | superslomo/model.py | baiksung/meta-interpolation | 72dd3b2e56054bb411ed20301583a0e67d9ea293 | [
"MIT"
] | 6 | 2020-07-09T20:09:23.000Z | 2021-09-20T11:12:24.000Z | superslomo/model.py | baiksung/meta-interpolation | 72dd3b2e56054bb411ed20301583a0e67d9ea293 | [
"MIT"
] | 19 | 2020-04-16T09:18:38.000Z | 2021-12-28T08:25:12.000Z | import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model_utils import *
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
t = np.linspace(0.125, 0.875, 7)
def getFlowCoeff (indices, device):
    """
    Compute the coefficients used to build intermediate optical flows
    from the flows between reference frames I0 and I1 (F_0_1 and F_1_0):

        F_t_0 = C00 x F_0_1 + C01 x F_1_0
        F_t_1 = C10 x F_0_1 + C11 x F_1_0

    with
        C00 = -(1 - t) x t
        C01 = t x t
        C10 = (1 - t) x (1 - t)
        C11 = -t x (1 - t)

    Parameters
    ----------
    indices : tensor
        indices corresponding to the intermediate frame positions of all
        samples in the batch.
    device : device
        computation device (cpu/cuda).

    Returns
    -------
    tensor
        coefficients C00, C01, C10, C11, each shaped (batch, 1, 1, 1).
    """
    # Fancy-index the module-level `t` grid with a numpy copy of the indices.
    ind = indices.detach().numpy()
    t_sel = t[ind]
    one_minus_t = 1 - t_sel

    def as_batch_tensor(coeff):
        # (k,) -> (k, 1, 1, 1) so the coefficient broadcasts over the
        # channel/height/width dimensions of the flow maps.
        return torch.Tensor(coeff)[None, None, None, :].permute(3, 0, 1, 2).to(device)

    C00 = C11 = -one_minus_t * t_sel
    C01 = t_sel * t_sel
    C10 = one_minus_t * one_minus_t
    return as_batch_tensor(C00), as_batch_tensor(C01), as_batch_tensor(C10), as_batch_tensor(C11)
def getWarpCoeff (indices, device):
    """
    Compute the coefficients used to fuse the two backwarped frames into
    the final intermediate frame `It_gen`:

        It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1)
                 / (C0 x V_t_0 + C1 x V_t_1)

    with C0 = 1 - t and C1 = t, where V_t_0/V_t_1 are visibility maps and
    g_I_0_F_t_0/g_I_1_F_t_1 the backwarped intermediate frames.

    Parameters
    ----------
    indices : tensor
        indices corresponding to the intermediate frame positions of all
        samples in the batch.
    device : device
        computation device (cpu/cuda).

    Returns
    -------
    tensor
        coefficients C0 and C1, each shaped (batch, 1, 1, 1).
    """
    # Fancy-index the module-level `t` grid with a numpy copy of the indices.
    ind = indices.detach().numpy()
    t_sel = t[ind]

    def as_batch_tensor(coeff):
        # (k,) -> (k, 1, 1, 1) so the coefficient broadcasts over the
        # channel/height/width dimensions of the frames.
        return torch.Tensor(coeff)[None, None, None, :].permute(3, 0, 1, 2).to(device)

    return as_batch_tensor(1 - t_sel), as_batch_tensor(t_sel)
| 35.011923 | 293 | 0.548717 |
e164e5d3815dee52bdeb5d8a12184fbf1db5055b | 11,970 | py | Python | adaptfx/discrete_programs/updater_discrete.py | rmnldwg/Adaptive-fractionation | 5525cbd635e9afdbf5556b2a95dd31bdb222db66 | [
"MIT"
] | 1 | 2021-07-15T12:23:25.000Z | 2021-07-15T12:23:25.000Z | adaptfx/discrete_programs/updater_discrete.py | rmnldwg/Adaptive-fractionation | 5525cbd635e9afdbf5556b2a95dd31bdb222db66 | [
"MIT"
] | 1 | 2021-11-29T18:50:05.000Z | 2021-12-10T15:32:50.000Z | adaptfx/discrete_programs/updater_discrete.py | rmnldwg/Adaptive-fractionation | 5525cbd635e9afdbf5556b2a95dd31bdb222db66 | [
"MIT"
] | 1 | 2021-11-29T10:52:42.000Z | 2021-11-29T10:52:42.000Z | # -*- coding: utf-8 -*-
"""
In this file are all the needed functions to calculate an adaptive fractionation treatment plan. The value_eval and the result_calc function are the only ones that should be used
This file requires all sparing factors to be known, therefore, it isnt suited to do active treatment planning but to analyze patient data.
value_eval and result_calc_BEDNT are the most essential codes. The results from value_eval can be used to calculate a treatment plan with result_calc_BEDNT.
The optimal policies for each fraction can be extracted manually(pol4 = first fraction, first index in pol is the last fraction and the last index is the first fraction). but one must know what index represents which sparing factor
Note: This file does not assume all sparing factors to be known at the start, but simulates the treatment planning as if we would get a new sparing factor at each fraction!
This program uses a discrete state space and does not interpolate between states. Therefore, it is less precise than the interpolation programs
"""
import numpy as np
from scipy.stats import truncnorm
import time
from scipy.stats import invgamma
def get_truncated_normal(mean=0, sd=1, low=0, upp=10):
    """Return a scipy truncated-normal distribution on [low, upp].

    scipy.stats.truncnorm expects the truncation bounds in standard-normal
    units, so they are rescaled by the mean and standard deviation first.
    """
    a = (low - mean) / sd
    b = (upp - mean) / sd
    return truncnorm(a, b, loc=mean, scale=sd)
def std_calc(measured_data, alpha, beta):
    """Return the most likely standard deviation for a set of sparing
    factors under an inverse-gamma conjugate prior on the variance.

    The (unnormalised) posterior is evaluated on a fixed grid of variance
    values in (0, 0.25); the grid point with the highest value is selected
    and its square root returned.

    Parameters
    ----------
    measured_data : list/array
        k observed sparing factors.
    alpha : float
        shape of the inverse-gamma prior.
    beta : float
        scale of the inverse-gamma prior.

    Returns
    -------
    float
        most likely std based on the measured data and inverse-gamma prior.
    """
    n = len(measured_data)
    # Hoisted: the sample variance is constant across grid points (the
    # previous version recomputed it on every one of the ~25k iterations).
    sample_var = np.var(measured_data)
    var_values = np.arange(0.00001, 0.25, 0.00001)
    # Vectorised grid evaluation; same per-element operations as the old
    # Python loop, so the argmax (and thus the result) is unchanged.
    likelihood_values = (
        var_values ** (-alpha - 1)
        / var_values ** (n / 2)
        * np.exp(-beta / var_values)
        * np.exp(-sample_var * n / (2 * var_values))
    )
    return np.sqrt(var_values[np.argmax(likelihood_values)])
def distribution_update(sparing_factors, alpha, beta):
    """Produce the updated probability distribution for each fraction.

    For every prefix of the observed sparing factors the running mean and
    the maximum-likelihood std (inverse-gamma prior, see std_calc) are
    computed.  The entry for the planning sparing factor (index 0) is
    dropped, since it is not delivered in any fraction.

    Parameters
    ----------
    sparing_factors : list/array
        k observed sparing factors, the first one being the planning value.
    alpha : float
        shape of the inverse-gamma prior.
    beta : float
        scale of the inverse-gamma prior.

    Returns
    -------
    list
        [means, stds]: two (k-1)-dimensional arrays starting from the
        second sparing factor (index 1).
    """
    prefixes = [sparing_factors[: i + 1] for i in range(len(sparing_factors))]
    means = np.array([np.mean(prefix) for prefix in prefixes])
    stds = np.array([std_calc(prefix, alpha, beta) for prefix in prefixes])
    # Drop the planning value: it is only used for planning, not treatment.
    return [np.delete(means, 0), np.delete(stds, 0)]
def updated_distribution_calc(data,sparing_factors):
    '''calculates the updated distribution based on prior data that is used to setup an inverse gamma distribution
    data shape: nxk where n is the amount of patients and k the amount of sparingfactors per patient
    sparing_factors shape: list/array with k entries with the first sparing factor being the planning sparing factor, therefore not being included in the treatment
    return: updated means and stds for k-1 fractions.'''
    # One variance per prior patient (row-wise over that patient's factors).
    variances = data.var(axis = 1)
    # Fit the inverse-gamma hyperprior to the prior-patient variances; the
    # location is pinned to 0 so only shape (alpha) and scale are estimated.
    alpha,loc,beta = invgamma.fit(variances, floc = 0) #here beta is the scale parameter
    # Bayesian update of mean/std for the current patient, one per fraction.
    [means,stds] = distribution_update(sparing_factors,alpha,beta)
    return[means,stds]
def probdistributions(means,stds):
    '''produces the truncated normal distribution for several means and standard deviations
    means: list/array of n means
    stds: list/array of n standard deviations
    return: n probability distributions for values [0.01,1.40]'''
    # One row per (mean, std) pair; 141 columns = sparing-factor bins of
    # width 0.01 covering 0.00 ... 1.40.
    distributions = np.zeros(141*len(means)).reshape(len(means),141)
    for i in range(len(means)):
        # Truncated normal restricted to the admissible range [0, 1.4].
        X = get_truncated_normal(means[i], stds[i], low=0, upp=1.4)
        for index,value in enumerate(np.arange(0,1.41,0.01)):
            # Probability mass of the bin centred on `value`: CDF difference
            # over [value - 0.005, value + ~0.005).
            # NOTE(review): the upper half-width sits just below 0.005,
            # presumably so mass exactly on a bin edge is not counted by two
            # neighbouring bins -- confirm the intent with the author.
            distributions[i][index] = X.cdf(value+0.004999999999999999999)-X.cdf(value-0.005)
    return distributions
def value_eval(sparing_factors,data,abt = 10,abn = 3,bound = 90,riskfactor = 0):
    '''calculates the best policy for a list of k sparing factors with k-1 fractions based on a dynamic programming algorithm. Estimation of the probability distribution is based on prior patient data
    sparing_factors: list/array of k sparing factors. A planning sparing factor is necessary!
    data: nxk dimensional data of n prior patients with k sparing factors.
    abt: alpha beta ratio of tumor
    abn: alpha beta ratio of Organ at risk
    bound: upper limit of BED in OAR
    riskfactor: "risk reducing" factor of zero is a full adaptive fractionation algorithm while a sparing factor of 0.1 slightly forces the algorithm to stay close to the 6Gy per fraction plan. a risk factor of 1 results in a 6Gy per fraction plan.
    return:
        Values: a sparing_factor-2 x BEDT x sf dimensional matrix with the value of each BEDT/sf state
        Values4: Values of the first fraction
        policy: a sparing_factor-2 x BEDT x sf dimensional matrix with the policy of each BEDT/sf state. fourth index = first fraction, first index = last fraction
        policy4: policy of the first fraction'''
    # Discrete grids: sparing factors in 0.01 steps, accumulated OAR BED in
    # 0.1 Gy steps, and physical dose actions in 0.1 Gy steps.
    sf= np.arange(0,1.41,0.01) #list of all possible sparing factors
    BEDT = np.arange(0,90.3,0.1) #list of all possible Biological effective doses
    Values = np.zeros(len(BEDT)*len(sf)*4).reshape(4,len(BEDT),len(sf)) #2d values list with first indice being the BED and second being the sf
    actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
    # Per-fraction posterior mean/std of the sparing factor, turned into
    # discretised probability vectors over the `sf` grid.
    [means,stds] =updated_distribution_calc(data,sparing_factors)
    distributions = probdistributions(means,stds)
    policy = np.zeros((4,len(BEDT),len(sf)))
    # NOTE(review): the inline comments below mention a cap of 95, but the
    # cap actually applied is 90.2 -- confirm which value is intended.
    upperbound = 90.2
    start = time.time()
    #here we add the calculation of the distance to the standard treatment
    useless,calculator = np.meshgrid(np.zeros(len(actionspace)),sf) #calculator is matrix that has the correct sparing factors
    actionspace_expand,useless = np.meshgrid(actionspace,sf)
    # Penalty: distance of each action from the 6 Gy-equivalent dose for the
    # given sparing factor; only effective when riskfactor > 0.
    risk_penalty = abs(6/calculator-actionspace_expand)
    delivered_doses = np.round(BED_calc(sf,abn,actionspace),1)
    BEDT_rew = BED_calc(1, abt,actionspace) #this is the reward for the dose deposited inside the normal tissue.
    BEDT_transformed, meaningless = np.meshgrid(BEDT_rew,np.zeros(len(sf)))
    # Row 0 is sparing factor 0.0 where 6/sf diverges; reuse the penalties of
    # the neighbouring row instead.
    risk_penalty[0] = risk_penalty[1]
    # Backward value iteration.  `state` counts remaining fractions: state 4
    # is the first fraction (nothing delivered yet), state 0 the last one.
    for update_loop in range (0,5):
        # Discretised sparing-factor probabilities used for this sweep.
        prob = distributions[update_loop]
        for state in range(0,5-update_loop): #We have five fractionations with 2 special cases 0 and 4
            print(str(state+1) +' loop done')
            if state == 4: #first state with no prior dose delivered so we dont loop through BEDT
                future_bed = delivered_doses
                future_bed[future_bed > upperbound] = upperbound #any dose surpassing 95 is set to 95. Furthermore, 95 will be penalized so strong that the program avoids it at all costs. (95 is basically the upper bound and can be adapted)
                future_values_prob = (Values[state-1][(future_bed*10).astype(int)]*prob).sum(axis = 2) #in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
                penalties = np.zeros(future_bed.shape)
                penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
                Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
                # First-fraction results are kept separately (policy4/Values4).
                policy4 = Vs.argmax(axis=1)
                Values4 = Vs.max(axis=1)
            else:
                future_values_prob_all = (Values[state-1]*prob).sum(axis = 1)
                for bed in range(len(BEDT)): #this and the next for loop allow us to loop through all states
                    future_bed = delivered_doses + bed/10
                    future_bed[future_bed > upperbound] = upperbound #any dose surpassing 95 is set to 95.
                    if state == 0: #last state no more further values to add
                        penalties = np.zeros(future_bed.shape)
                        penalties[future_bed > bound] = -(future_bed[future_bed > bound]-bound)*5
                        penalties[future_bed == upperbound] = -10000 #here we produced the penalties for all the values surpassing the limit
                        Vs = BEDT_transformed + penalties# Value of each sparing factor for each action
                    else:
                        penalties = np.zeros(future_bed.shape)
                        penalties[future_bed == upperbound] = -100
                        future_values_prob = (future_values_prob_all[(future_bed*10).astype(int)])#in this array are all future values multiplied with the probability of getting there. shape = sparing factors x actionspace
                        Vs = future_values_prob + BEDT_transformed + penalties - risk_penalty*riskfactor
                    # Greedy maximisation over the action axis for every
                    # (BED, sparing factor) state.
                    best_action = Vs.argmax(axis=1)
                    valer = Vs.max(axis=1)
                    policy[state][bed] = best_action
                    Values[state][bed] = valer
    end = time.time()
    print('time elapsed = ' +str(end - start))
    return [Values,policy,Values4,policy4]
def result_calc_BEDNT(pol4,pol,sparing_factors,abt = 10,abn = 3): #this function calculates the fractionation plan according to the reinforcement learning
    '''in this function gives the treatment plan for a set of sparing factors based on the sparing factors that have been used to calculate the optimal policy
    the pol4 and pol matrices are the ones that are returnedin the value_eval function
    pol4: first fraction policy
    pol: second - fifth fraction policy
    sparing_factors: sparing factors that should be used to make a plan. list starting from first fraction'''
    actionspace = np.arange(0,22.4,0.1) #list of all possible dose actions
    # First fraction: the optimal action is looked up in pol4, indexed by the
    # sparing factor rounded onto the 0.01 grid.
    total_bedt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abt)
    total_bednt = BED_calc0(actionspace[pol4[round(sparing_factors[0]*100)]],abn,sparing_factors[0])
    print('fraction 1 dose delivered: ',actionspace[pol4[round(sparing_factors[0]*100)]])
    print('total accumulated biological effective dose in tumor; fraction 1 = ',round(total_bedt,1))
    print('total accumulated biological effective dose in normal tissue; fraction 1 = ',round(total_bednt,1))
    # Remaining fractions, walking `pol` from index 3 (second fraction) down
    # to index 0 (final fraction).
    for index,fraction in enumerate(range(3,-1,-1)):
        if fraction == 0:
            # Final fraction: solved in closed form instead of by table
            # lookup -- the quadratic in (90 - total_bednt) presumably picks
            # the dose that brings the OAR BED exactly to 90; confirm against
            # the (not shown here) BED_calc0 formula.
            dose_action = (-sparing_factors[index+1]+np.sqrt(sparing_factors[index+1]**2+4*sparing_factors[index+1]**2*(90-total_bednt)/abn))/(2*sparing_factors[index+1]**2/abn)
        else:
            # Table lookup keyed by accumulated OAR BED (0.1 Gy grid) and the
            # fraction's sparing factor (0.01 grid).
            # NOTE(review): `.astype(int)` on round(total_bednt,1)*10 only
            # works if total_bednt is a numpy scalar (i.e. BED_calc0 returns
            # one); a plain Python float would raise AttributeError here.
            dose_action = actionspace[pol[fraction][(round(total_bednt,1)*10).astype(int)][round(sparing_factors[index+1]*100)].astype(int)]
        dose_delivered = BED_calc0(dose_action,abt)
        total_bedt += dose_delivered
        total_bednt += BED_calc0(dose_action,abn,sparing_factors[index+1])
        print('fraction ', index+2, 'dose delivered: ', round(dose_action,1))
        print('total accumulated dose in tumor; fraction ', index+2, '=', round(total_bedt,1))
        print('total accumulated dose in normal tissue; fraction ', index+2, '=', round(total_bednt,1))
| 68.4 | 250 | 0.693317 |
e16731d27b2926a6e8972922d05bae1f6e5d75bb | 240 | py | Python | ltc/base/admin.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | [
"MIT"
] | 4 | 2016-12-30T13:26:59.000Z | 2017-04-26T12:07:36.000Z | ltc/base/admin.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | [
"MIT"
] | null | null | null | ltc/base/admin.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from ltc.base.models import Project, Test, Configuration
# Register your models here.
admin.site.register(Project)
admin.site.register(Test)
admin.site.register(Configuration)
| 24 | 56 | 0.808333 |
e168edf2fd4a9f7e34f7a83253f12b25417c2d21 | 3,113 | py | Python | examples/utils/interaction.py | epuzanov/pivy | 2f049c19200ab4a3a1e4740268450496c12359f9 | [
"ISC"
] | 29 | 2019-12-28T10:37:16.000Z | 2022-02-09T10:48:04.000Z | examples/utils/interaction.py | epuzanov/pivy | 2f049c19200ab4a3a1e4740268450496c12359f9 | [
"ISC"
] | 29 | 2019-12-26T13:46:11.000Z | 2022-03-29T18:14:33.000Z | examples/utils/interaction.py | epuzanov/pivy | 2f049c19200ab4a3a1e4740268450496c12359f9 | [
"ISC"
] | 17 | 2019-12-29T11:49:32.000Z | 2022-02-23T00:28:18.000Z | import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtGui import QColor
from pivy import quarter, coin, graphics, utils
def main():
    """Build an interactive unit-cube demo scene and run the Qt event loop."""
    app = QApplication(sys.argv)
    # Register a custom SVG-based marker before the scene graph is built.
    utils.addMarkerFromSvg("test.svg", "CUSTOM_MARKER", 40)
    viewer = quarter.QuarterWidget()
    root = graphics.InteractionSeparator(viewer.sorendermanager)
    root.pick_radius = 40
    # The eight corners of a cube of side length 2 centred on the origin.
    m1 = ConnectionMarker([[-1, -1, -1]])
    m2 = ConnectionMarker([[-1, 1, -1]])
    m3 = ConnectionMarker([[ 1, 1, -1]])
    m4 = ConnectionMarker([[ 1, -1, -1]])
    m5 = ConnectionMarker([[-1, -1, 1]])
    m6 = ConnectionMarker([[-1, 1, 1]])
    m7 = ConnectionMarker([[ 1, 1, 1]])
    m8 = ConnectionMarker([[ 1, -1, 1]])
    points = [m1, m2, m3, m4, m5, m6, m7, m8]
    # The twelve cube edges: bottom face, top face, then the vertical edges.
    l01 = ConnectionLine([m1, m2])
    l02 = ConnectionLine([m2, m3])
    l03 = ConnectionLine([m3, m4])
    l04 = ConnectionLine([m4, m1])
    l05 = ConnectionLine([m5, m6])
    l06 = ConnectionLine([m6, m7])
    l07 = ConnectionLine([m7, m8])
    l08 = ConnectionLine([m8, m5])
    l09 = ConnectionLine([m1, m5])
    l10 = ConnectionLine([m2, m6])
    l11 = ConnectionLine([m3, m7])
    l12 = ConnectionLine([m4, m8])
    lines = [l01, l02, l03, l04, l05, l06, l07, l08, l09, l10, l11, l12]
    # The six cube faces.
    p1 = ConnectionPolygon([m1, m2, m3, m4])
    p2 = ConnectionPolygon([m8, m7, m6, m5])
    p3 = ConnectionPolygon([m5, m6, m2, m1])
    p4 = ConnectionPolygon([m6, m7, m3, m2])
    p5 = ConnectionPolygon([m7, m8, m4, m3])
    p6 = ConnectionPolygon([m8, m5, m1, m4])
    polygons = [p1, p2, p3, p4, p5, p6]
    root += points + lines + polygons
    root.register()
    viewer.setSceneGraph(root)
    viewer.setBackgroundColor(QColor(255, 255, 255))
    viewer.setWindowTitle("minimal")
    viewer.show()
    # Enter the Qt main loop; exec_ blocks until the window is closed.
    sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 28.824074 | 72 | 0.615805 |
e169422102864465f508bcbb4404b6271b866de1 | 4,673 | py | Python | utils/data_process.py | cltl/a-proof-zonmw | f6d1a83fc77223bf8b58c9d465aae301269bb679 | [
"Apache-2.0"
] | 2 | 2021-02-08T08:24:06.000Z | 2021-11-12T10:23:23.000Z | utils/data_process.py | cltl/a-proof-zonmw | f6d1a83fc77223bf8b58c9d465aae301269bb679 | [
"Apache-2.0"
] | null | null | null | utils/data_process.py | cltl/a-proof-zonmw | f6d1a83fc77223bf8b58c9d465aae301269bb679 | [
"Apache-2.0"
] | 2 | 2021-12-07T22:14:56.000Z | 2021-12-14T09:06:16.000Z | """
Functions used in pre-processing of data for the machine learning pipelines.
"""
import pandas as pd
from pandas.api.types import is_scalar
from pathlib import Path
from sklearn.model_selection import GroupShuffleSplit
def concat_annotated(datadir):
    """
    Concatenate all parsed annotation pickles found in `datadir`.

    Core-team pickles must end with "dedup.pkl", i.e. they should have been
    deduplicated by the `parse_annotations.py` script.  The ze pickles need
    not be deduplicated: notes already present in the core-team annotations
    are filtered out before the final concatenation.

    Parameters
    ----------
    datadir: Path
        path to directory with data

    Returns
    -------
    DataFrame
        df of concatenated parsed annotations
    """
    def load_all(pattern):
        # Read every pickle matching `pattern` into a single frame.
        frames = [pd.read_pickle(path) for path in datadir.glob(pattern)]
        return pd.concat(frames, ignore_index=True)

    # Core-team annotations (already deduplicated during parsing).
    annot = load_all('*_dedup.pkl')
    # ze annotations, keeping only notes the core team did not annotate.
    ze = load_all('annotated_df_ze_*.pkl').query(
        "~NotitieID.isin(@annot.NotitieID)", engine='python'
    )
    return pd.concat([annot, ze], ignore_index=True)
def drop_disregard(df):
    """
    Drop every note that has at least one token marked 'disregard'.

    If any token of a note is marked 'disregard', all rows of that note
    (NotitieID) are removed.  The 'disregard' column itself is dropped from
    the result.  Unlike the previous implementation, the input frame is not
    modified (no helper column is added to the caller's df).

    Parameters
    ----------
    df: DataFrame
        parsed token-level annotations df (created by `parse_annotations.py`)

    Returns
    -------
    DataFrame
        df without 'disregard' notes and without the 'disregard' column
    """
    # True for every row of a note that contains at least one disregard token.
    note_disregarded = df.groupby('NotitieID')['disregard'].transform('any')
    return df.loc[~note_disregarded].drop(columns=['disregard'])
def fix_week_14(df):
    """
    Remove MBW annotations from the 'week_14' batch.

    The guidelines for the MBW domain changed after week 14, so for that
    batch the MBW labels are reset to False and the MBW-lvl values to NaN.
    The frame is modified in place and also returned.

    Parameters
    ----------
    df: DataFrame
        parsed token-level annotations df (created by `parse_annotations.py`)

    Returns
    -------
    DataFrame
        df without MBW and MBW_lvl labels for week 14
    """
    is_week_14 = df.batch == 'week_14'
    # Reset the domain label and blank out its level for the affected batch.
    df['MBW'] = df.MBW.mask(is_week_14, other=False)
    df['MBW_lvl'] = df.MBW_lvl.mask(is_week_14)
    return df
def pad_sen_id(id):
    """
    Zero-pad the sentence number of a sentence id to four characters.

    The id has the form '<NotitieID>_<sen_no>'; the part after the
    underscore is left-padded with zeroes to a minimum width of four.
    """
    note_id, sen_no = id.split('_')
    return f"{note_id}_{sen_no.rjust(4, '0')}"
def anonymize(txt, nlp):
    """
    Replace PERSON and GPE entities in `txt` with their entity labels.

    Parameters
    ----------
    txt: str
        text to anonymize
    nlp:
        callable NLP model (e.g. spacy pipeline) used to detect entities

    Returns
    -------
    (str, int)
        the anonymized text and the token count of the parsed document
    """
    doc = nlp(txt)
    anonymized = str(doc)
    # Map each entity string to its label; using a dict keeps exactly one
    # replacement per distinct entity string (the last label wins).
    replacements = {
        str(ent): ent.label_
        for ent in doc.ents
        if ent.label_ in ['PERSON', 'GPE']
    }
    for original, label in replacements.items():
        anonymized = anonymized.replace(original, label)
    return anonymized, len(doc)
def data_split_groups(
    df,
    X_col,
    y_col,
    group_col,
    train_size,
):
    """
    Split data to train / dev / test, while taking into account groups that should stay together.
    Parameters
    ----------
    df: DataFrame
        df with the data to split
    X_col: str
        name of the column with the data (text)
    y_col: str
        name of the column with the gold labels
    group_col: str
        name of the column with the groups to take into account when splitting
    train_size: float
        proportion of data that should go to the training set
    Returns
    -------
    train, dev, test: DataFrame's
        df with train data, df with dev data, df with test data
    """
    # create training set of `train_size`
    # n_splits=1, so the for-loop body runs exactly once and merely unpacks
    # the single (train, rest) split; random_state is fixed for reproducibility.
    gss = GroupShuffleSplit(n_splits=1, test_size=1-train_size, random_state=19)
    for train_idx, other_idx in gss.split(df[X_col], df[y_col], groups=df[group_col]):
        train = df.iloc[train_idx]
        other = df.iloc[other_idx]
    # the non-train data is split 50/50 into development and test
    gss = GroupShuffleSplit(n_splits=1, test_size=0.5, random_state=19)
    for dev_idx, test_idx in gss.split(other[X_col], other[y_col], groups=other[group_col]):
        dev = other.iloc[dev_idx]
        test = other.iloc[test_idx]
    return train, dev, test
def flatten_preds_if_necessary(df):
"""
Flatten predictions if they are a list in a list.
This is necessary because of an issue with the predict.py script prior to the update performed on 15-09-2021.
"""
cols = [col for col in df.columns if 'pred' in col]
for col in cols:
test = df[col].iloc[0]
if is_scalar(test[0]):
continue
df[col] = df[col].str[0]
return df | 29.024845 | 130 | 0.650332 |
e16c926aa6450fc30f72e50b4463f6a0fcd7d9ad | 276 | py | Python | venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 11 | 2020-06-28T04:30:26.000Z | 2022-03-26T08:40:47.000Z | venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2019-09-30T11:22:36.000Z | 2021-08-02T06:19:29.000Z | venv/Lib/site-packages/numpy/typing/tests/data/fail/lib_utils.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | import numpy as np
np.deprecate(1) # E: No overload variant
np.deprecate_with_doc(1) # E: incompatible type
np.byte_bounds(1) # E: incompatible type
np.who(1) # E: incompatible type
np.lookfor(None) # E: incompatible type
np.safe_eval(None) # E: incompatible type
| 19.714286 | 48 | 0.721014 |
e16db721abb59e634b09680c4bdf3796a1a5328b | 6,115 | py | Python | VoiceAssistant/Project_Basic_struct/speakListen.py | TheRealMilesLee/Python | d145c848a7ba76e8e523e4fe06e2a0add7e2fae1 | [
"MIT"
] | 1 | 2018-12-05T11:04:47.000Z | 2018-12-05T11:04:47.000Z | VoiceAssistant/Project_Basic_struct/speakListen.py | MarkHooland/Python | d145c848a7ba76e8e523e4fe06e2a0add7e2fae1 | [
"MIT"
] | null | null | null | VoiceAssistant/Project_Basic_struct/speakListen.py | MarkHooland/Python | d145c848a7ba76e8e523e4fe06e2a0add7e2fae1 | [
"MIT"
] | null | null | null | import time
from colorama import Fore, Back, Style
import speech_recognition as sr
import os
import pyttsx3
import datetime
from rich.progress import Progress
python = pyttsx3.init("sapi5") # name of the engine is set as Python
voices = python.getProperty("voices")
#print(voices)
python.setProperty("voice", voices[1].id)
python.setProperty("rate", 140)
def speak(text):
"""[This function would speak aloud some text provided as parameter]
Args:
text ([str]): [It is the speech to be spoken]
"""
python.say(text)
python.runAndWait()
def greet(g):
"""Uses the datetime library to generate current time and then greets accordingly.
Args:
g (str): To decide whether to say hello or good bye
"""
if g == "start" or g == "s":
h = datetime.datetime.now().hour
text = ''
if h > 12 and h < 17:
text = "Hello ! Good Afternoon "
elif h < 12 and h > 0:
text = "Hello! Good Morning "
elif h >= 17 :
text = "Hello! Good Evening "
text += " I am Python, How may i help you ?"
speak(text)
elif g == "quit" or g == "end" or g == "over" or g == "e":
text = 'Thank you!. Good Bye ! '
speak(text)
def hear():
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = 9) # option
#speech = r.listen(source)
# convert speech to text
try:
#print("Recognizing...")
recognizing()
speech = r.recognize_google(speech)
print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def recognizing():
"""Uses the Rich library to print a simulates version of "recognizing" by printing a loading bar.
"""
with Progress() as pr:
rec = pr.add_task("[red]Recognizing...", total = 100)
while not pr.finished:
pr.update(rec, advance = 1.0)
time.sleep(0.01)
def long_hear(duration_time = 60):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
the difference between the hear() and long_hear() is that - the
hear() - records users voice for 9 seconds
long_hear() - will record user's voice for the time specified by user. By default, it records for 60 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
def short_hear(duration_time = 5):
"""[It will process the speech of user using Google_Speech_Recognizer(recognize_google)]
the difference between the hear() and long_hear() is that - the
hear() - records users voice for 9 seconds
long_hear - will record user's voice for the time specified by user. By default, it records for 60 seconds.
Returns:
[str]: [Speech of user as a string in English(en - IN)]
"""
r = sr.Recognizer()
"""Reconizer is a class which has lot of functions related to Speech i/p and o/p.
"""
r.pause_threshold = 1 # a pause of more than 1 second will stop the microphone temporarily
r.energy_threshold = 300 # python by default sets it to 300. It is the minimum input energy to be considered.
r.dynamic_energy_threshold = True # pyhton now can dynamically change the threshold energy
with sr.Microphone() as source:
# read the audio data from the default microphone
print(Fore.RED + "\nListening...")
#time.sleep(0.5)
speech = r.record(source, duration = duration_time) # option
#speech = r.listen(source)
# convert speech to text
try:
print(Fore.RED +"Recognizing...")
#recognizing()
speech = r.recognize_google(speech)
#print(speech + "\n")
except Exception as exception:
print(exception)
return "None"
return speech
if __name__ == '__main__':
# print("Enter your name")
# name = hear()
# speak("Hello " + name)
# greet("s")
# greet("e")
pass
#hear()
#recognizing()
| 35.970588 | 118 | 0.594113 |
e16e5534d48d16f412f05cd80a5b2d4be81a0792 | 702 | py | Python | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | null | null | null | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | 26 | 2021-03-10T22:07:57.000Z | 2021-03-11T12:13:35.000Z | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-06 23:57
from django.db import migrations, models
import django.db.models.deletion
| 28.08 | 175 | 0.633903 |
e16f4dbb7cb5166fe73ac09acb1a07de0138e0d3 | 2,330 | py | Python | Data_pre/encoding_feature.py | KaifangXu/API | 47cb17e35a381e50b25bbda9aa7e5216482af022 | [
"MIT"
] | null | null | null | Data_pre/encoding_feature.py | KaifangXu/API | 47cb17e35a381e50b25bbda9aa7e5216482af022 | [
"MIT"
] | null | null | null | Data_pre/encoding_feature.py | KaifangXu/API | 47cb17e35a381e50b25bbda9aa7e5216482af022 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from scipy import signal,stats
from flask import Flask,request,jsonify
import json
import re
import os
import data_utils as utils
import sklearn.preprocessing as pre
configpath=os.path.join(os.path.dirname(__file__),'config.txt')
try:
config = utils.py_configs(configpath)
Signal_SERVER = config["Signal_SERVER"]
Signal_PORT = config["Signal_PORT"]
except:
raise Exception("Configuration error")
app = Flask(__name__)
if __name__=="__main__":
app.run(host=Signal_SERVER, port=int(Signal_PORT)) | 31.066667 | 125 | 0.564807 |
e16fa8e2b0d20fbbd86dbb386c3a783cd3b7617b | 13,708 | py | Python | src/utils.py | mmin0/SigDFP | e2a93faa658741d693b8070bcc7038d2fb7c3e74 | [
"MIT"
] | null | null | null | src/utils.py | mmin0/SigDFP | e2a93faa658741d693b8070bcc7038d2fb7c3e74 | [
"MIT"
] | null | null | null | src/utils.py | mmin0/SigDFP | e2a93faa658741d693b8070bcc7038d2fb7c3e74 | [
"MIT"
] | 1 | 2022-02-28T23:26:23.000Z | 2022-02-28T23:26:23.000Z |
import torch
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple
from matplotlib.ticker import FormatStrFormatter
#from tqdm import tqdm
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.rc('xtick', labelsize=22) # fontsize of the tick labels
plt.rc('ytick', labelsize=22)
plt.rc('legend', fontsize=25)
plt.rc('axes', labelsize=25)
plt.rcParams["figure.figsize"] = (7.5, 6)
colors = ['lightcoral', 'mediumseagreen', 'darkorange']
def train(model, dataloader, optimizer, criterion, initial, prev_m, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn = batch
X = model(bm.to(device),
cn.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def train1(model, dataloader, optimizer, criterion, initial, prev_m, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size], strategy)
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def train2(model, dataloader, optimizer, criterion, initial, prev_m, prev_c, device, depth=4):
"""
train model for alpha for one loop over dataloader
"""
epoch_loss = 0
model.train() # set model to train mode
i = 0
for batch in dataloader:
optimizer.zero_grad()
bm, cn, typeVec = batch
X = model(bm.to(device),
cn.to(device),
typeVec.to(device),
prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
initial[i*dataloader.batch_size:(i+1)*dataloader.batch_size].to(device))
strategy = model.strategy
loss = criterion(X, prev_m[i*dataloader.batch_size:(i+1)*dataloader.batch_size],
strategy, prev_c[i*dataloader.batch_size:(i+1)*dataloader.batch_size])
loss.backward(retain_graph=True)
optimizer.step()
epoch_loss += loss.item()
i+=1
return epoch_loss/len(dataloader)
def plotSDE(benchmark, predicted, target_addr, title, filename, ylim=None, label1=None, label2=None, legendloc=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
fig = plt.figure()
if title:
plt.title(title)
if ylim:
plt.ylim(ylim)
t = [i/100 for i in range(101)]
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
if legendloc:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc=legendloc, ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
else:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
plt.ylabel(r"$X_t$ and $\widehat{X}_t$")
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotC(benchmark, predicted, target_addr, title, filename, label1=None, label2=None, ylabel=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
t = [i/100 for i in range(100)]
fig = plt.figure()
if title:
plt.title(title)
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left",ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotpi(benchmark, predicted, target_addr, title, filename, ylim = None, label1=None, label2=None, ylabel=None, legendloc = None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
fig = plt.figure()
if title:
plt.title(title)
if ylim:
plt.ylim(ylim)
t = [i/100 for i in range(100)]
c = len(benchmark)
lines = []
lines_pred = []
for i in range(c):
l, = plt.plot(t, benchmark[i], color=colors[i], ls='-')
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
if legendloc:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc=legendloc, ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
else:
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
def plotmC(benchmark, predicted, target_addr, title, filename, label1=None, label2=None, ylabel=None):
"""
input:
benchmark -- list[paths]
predicted -- list[paths]
"""
N = predicted.shape[1]
t = [1/N*i for i in range(N)]
fig = plt.figure()
if title:
plt.title(title)
c = len(predicted)
lines = []
lines_pred = []
l, = plt.plot(t, benchmark, color='darkgrey', ls='-', linewidth=5)
lines.append(l)
for i in range(c):
l, = plt.plot(t, predicted[i], color=colors[i], ls='--', marker='.')
lines_pred.append(l)
plt.legend([tuple(lines), tuple(lines_pred)], [label1, label2],
loc="upper left", ncol=2, handler_map={tuple: HandlerTuple(ndivide=None)})
plt.xlabel(r"time $t$")
if ylabel:
plt.ylabel(ylabel)
plt.tight_layout()
fig.savefig(target_addr+'/'+filename+'.pdf')
| 33.031325 | 153 | 0.587978 |
e170d7139c31119e1eb476ae084b331e0ed0a722 | 100 | py | Python | lambdata_doinalangille/__init__.py | doinalangille/lambdata_doinalangille | f57e1f9f87615bc9d1d1cfada530a542ea4551a1 | [
"MIT"
] | null | null | null | lambdata_doinalangille/__init__.py | doinalangille/lambdata_doinalangille | f57e1f9f87615bc9d1d1cfada530a542ea4551a1 | [
"MIT"
] | 3 | 2020-03-24T18:29:36.000Z | 2021-02-02T22:42:20.000Z | lambdata_doinalangille/__init__.py | doinalangille/lambdata_doinalangille | f57e1f9f87615bc9d1d1cfada530a542ea4551a1 | [
"MIT"
] | 1 | 2020-02-11T23:05:07.000Z | 2020-02-11T23:05:07.000Z | """
lambdata - a collection of Data Science helper functions
"""
import pandas as pd
import sklearn | 16.666667 | 56 | 0.76 |
e171d508606a36edd465712b9674cad13c99de99 | 1,554 | py | Python | jsConsole/__init__.py | Animenosekai/jsConsole | e2604988f20a0d0d93578f786ee7beaf72b9afbc | [
"MIT"
] | null | null | null | jsConsole/__init__.py | Animenosekai/jsConsole | e2604988f20a0d0d93578f786ee7beaf72b9afbc | [
"MIT"
] | null | null | null | jsConsole/__init__.py | Animenosekai/jsConsole | e2604988f20a0d0d93578f786ee7beaf72b9afbc | [
"MIT"
] | null | null | null | """
pyJsConsole wrapper.
Anime no Sekai - 2020
"""
from .internal.javascript import classes as JSClass
console = JSClass._Console()
document = JSClass._Document()
history = JSClass._History()
Math = JSClass._Math()
navigator = JSClass._Navigator()
screen = JSClass._Screen()
window = JSClass._Window()
browser = JSClass.BrowserObject
'''
import threading
from lifeeasy import sleep
def reloadElements():
global document
global window
lastURL = 'data:,'
while True:
sleep(0.1)
try:
if JSClass.evaluate('window.location.href') != lastURL:
document = JSClass._Document()
window = JSClass._Window()
lastURL = JSClass.evaluate('window.location.href')
except:
break
thread = threading.Thread(target=reloadElements)
thread.daemon = True
thread.start()
'''
| 23.19403 | 71 | 0.70592 |
e1741137c9f22621cbb2d5cd7d5c872d48ea9402 | 45,528 | py | Python | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests the client file finder action."""
import collections
import glob
import hashlib
import os
import platform
import shutil
import subprocess
import unittest
import mock
import psutil
import unittest
from grr.client import comms
from grr.client.client_actions import file_finder as client_file_finder
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import standard as rdf_standard
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
def testLinkStat(self):
"""Tests resolving symlinks when getting stat entries."""
test_dir = os.path.join(self.temp_dir, "lnk_stat_test")
lnk = os.path.join(test_dir, "lnk")
lnk_target = os.path.join(test_dir, "lnk_target")
os.mkdir(test_dir)
with open(lnk_target, "wb") as fd:
fd.write("sometext")
os.symlink(lnk_target, lnk)
paths = [lnk]
link_size = os.lstat(lnk).st_size
target_size = os.stat(lnk).st_size
for expected_size, resolve_links in [(link_size, False), (target_size,
True)]:
stat_action = rdf_file_finder.FileFinderAction.Stat(
resolve_links=resolve_links)
results = self._RunFileFinder(paths, stat_action)
self.assertEqual(len(results), 1)
res = results[0]
self.assertEqual(res.stat_entry.st_size, expected_size)
class RegexMatcherTest(unittest.TestCase):
class LiteralMatcherTest(unittest.TestCase):
class ConditionTestMixin(object):
# TODO(hanuszczak): Write tests for the metadata change condition.
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 35.708235 | 80 | 0.698405 |
e174818f6b393a98ed554aec714f2c139a01e0c8 | 1,624 | py | Python | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
from django.db import models
from users.models import UserProfile
| 40.6 | 113 | 0.67734 |
e177afe5c4e52b6ea7d71deed0bddae35b953491 | 83 | py | Python | config.py | zombodotcom/twitchUserData | 50c702b832515a946d55c2f5ca79b51436352ef2 | [
"MIT",
"Unlicense"
] | 1 | 2019-10-22T06:23:56.000Z | 2019-10-22T06:23:56.000Z | config.py | zombodotcom/twitchUserData | 50c702b832515a946d55c2f5ca79b51436352ef2 | [
"MIT",
"Unlicense"
] | null | null | null | config.py | zombodotcom/twitchUserData | 50c702b832515a946d55c2f5ca79b51436352ef2 | [
"MIT",
"Unlicense"
] | null | null | null | Client_ID = "<Your Client ID>"
Authorization = "Bearer <Insert Bearer token Here>" | 41.5 | 51 | 0.73494 |
e17842bac608c397e2ccd355daad8b350e2c2102 | 1,900 | py | Python | echo_client.py | gauravssnl/Python3-Network-Programming | 32bb5a4872bce60219c6387b6f3c2e7f31b0654a | [
"MIT"
] | 4 | 2017-12-04T15:05:35.000Z | 2021-03-24T11:53:39.000Z | echo_client.py | gauravssnl/Python3-Network-Programming | 32bb5a4872bce60219c6387b6f3c2e7f31b0654a | [
"MIT"
] | null | null | null | echo_client.py | gauravssnl/Python3-Network-Programming | 32bb5a4872bce60219c6387b6f3c2e7f31b0654a | [
"MIT"
] | null | null | null | import socket
host = 'localhost'
# we need to define encode function for converting string to bytes string
# this will be use for sending/receiving data via socket
encode = lambda text: text.encode()
# we need to define deocde function for converting bytes string to string
# this will convert bytes string sent/recieved via socket to string
decode = lambda byte_text: byte_text.decode()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Simple TCP echo client')
parser.add_argument("--port", action="store",
dest="port", type=int, required=True)
parser.add_argument("--message", action="store",
dest="message", required=False)
get_args = parser.parse_args()
port = get_args.port
message = get_args.message
if message:
echo_client(port, message)
else:
echo_client(port)
| 31.666667 | 79 | 0.644211 |
e179f0630567fb54dca2d83ae47722317d2637db | 2,247 | py | Python | my_collection/paxos/proposer.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | my_collection/paxos/proposer.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | my_collection/paxos/proposer.py | khanh-nguyen-code/my-collection | 31581ef0b1dae67aafb1f4e64b9973a38cc01edf | [
"MIT"
] | null | null | null | from typing import Optional
from my_collection.paxos.common import NodeId, Router, ProposalId, Value, PrepareRequest, is_majority, PrepareResponse, \
Proposal, ProposeRequest, ProposeResponse, CODE_OK
| 38.084746 | 121 | 0.659546 |
e17a77153a0967bee562363294a90df123d695b6 | 6,551 | py | Python | .leetcode/749.contain-virus.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/749.contain-virus.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/749.contain-virus.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=749 lang=python3
#
# [749] Contain Virus
#
# https://leetcode.com/problems/contain-virus/description/
#
# algorithms
# Hard (49.14%)
# Likes: 190
# Dislikes: 349
# Total Accepted: 7.5K
# Total Submissions: 15.2K
# Testcase Example: '[[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]'
#
# A virus is spreading rapidly, and your task is to quarantine the infected
# area by installing walls.
#
# The world is modeled as an m x n binary grid isInfected, where
# isInfected[i][j] == 0 represents uninfected cells, and isInfected[i][j] == 1
# represents cells contaminated with the virus. A wall (and only one wall) can
# be installed between any two 4-directionally adjacent cells, on the shared
# boundary.
#
# Every night, the virus spreads to all neighboring cells in all four
# directions unless blocked by a wall. Resources are limited. Each day, you can
# install walls around only one region (i.e., the affected area (continuous
# block of infected cells) that threatens the most uninfected cells the
# following night). There will never be a tie.
#
# Return the number of walls used to quarantine all the infected regions. If
# the world will become fully infected, return the number of walls used.
#
#
# Example 1:
#
#
# Input: isInfected =
# [[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]
# Output: 10
# Explanation: There are 2 contaminated regions.
# On the first day, add 5 walls to quarantine the viral region on the left. The
# board after the virus spreads is:
#
# On the second day, add 5 walls to quarantine the viral region on the right.
# The virus is fully contained.
#
#
#
# Example 2:
#
#
# Input: isInfected = [[1,1,1],[1,0,1],[1,1,1]]
# Output: 4
# Explanation: Even though there is only one cell saved, there are 4 walls
# built.
# Notice that walls are only built on the shared boundary of two different
# cells.
#
#
# Example 3:
#
#
# Input: isInfected =
# [[1,1,1,0,0,0,0,0,0],[1,0,1,0,1,1,1,1,1],[1,1,1,0,0,0,0,0,0]]
# Output: 13
# Explanation: The region on the left only builds two new walls.
#
#
#
# Constraints:
#
#
# m ==isInfected.length
# n ==isInfected[i].length
# 1 <= m, n <= 50
# isInfected[i][j] is either 0 or 1.
# There is always a contiguous viral region throughout the described process
# that will infect strictly more uncontaminated squares in the next round.
#
#
#
# @lc tags=hash-table
# @lc imports=start
from typing_extensions import get_args
from imports import *
# @lc imports=end
# @lc idea=start
#
#
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print(
'isInfected =[[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]'
)
print('Exception :')
print('10')
print('Output :')
print(
str(Solution().containVirus([[0, 1, 0, 0, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0]])))
print()
print('Example 2:')
print('Input : ')
print('isInfected = [[1,1,1],[1,0,1],[1,1,1]]')
print('Exception :')
print('4')
print('Output :')
print(str(Solution().containVirus([[1, 1, 1], [1, 0, 1], [1, 1, 1]])))
print()
print('Example 3:')
print('Input : ')
print(
'isInfected =[[1,1,1,0,0,0,0,0,0],[1,0,1,0,1,1,1,1,1],[1,1,1,0,0,0,0,0,0]]'
)
print('Exception :')
print('13')
print('Output :')
print(
str(Solution().containVirus([[1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 0, 0]])))
print()
pass
# @lc main=end | 29.376682 | 96 | 0.523432 |
e17b963468c4245e4f159926617ef232d646797a | 4,309 | py | Python | src/robotcontrol.py | social-robotics-lab/dog_sample | e70706bdbdbb7be222ee71cd9529dc433bf705ce | [
"MIT"
] | null | null | null | src/robotcontrol.py | social-robotics-lab/dog_sample | e70706bdbdbb7be222ee71cd9529dc433bf705ce | [
"MIT"
] | null | null | null | src/robotcontrol.py | social-robotics-lab/dog_sample | e70706bdbdbb7be222ee71cd9529dc433bf705ce | [
"MIT"
] | null | null | null | import json
import os.path
import socket
import subprocess
from pydub import AudioSegment
from typing import Dict, List
#---------------------
# Low level functions
#---------------------
def recv(ip:str, port:int) -> str:
conn = connect(ip, port)
size = read_size(conn)
data = read_data(conn, size)
close(conn)
return data.decode('utf-8')
def send(ip:str, port:int, data:str):
conn = connect(ip, port)
size = len(data)
conn.send(size.to_bytes(4, byteorder='big'))
conn.send(data)
close(conn)
def connect(ip:str, port:int):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((ip, port))
return conn
def close(conn:socket):
conn.shutdown(1)
conn.close()
def read_size(conn:socket):
b_size = conn.recv(4)
return int.from_bytes(b_size, byteorder='big')
def read_data(conn:socket, size:int):
chunks = []
bytes_recved = 0
while bytes_recved < size:
chunk = conn.recv(size - bytes_recved)
if chunk == b'':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recved += len(chunk)
return b''.join(chunks)
# Path to which OpenJTalk was installed
OPENJTALK_BINPATH = '/usr/bin'
OPENJTALK_DICPATH = '/var/lib/mecab/dic/open-jtalk/naist-jdic'
OPENJTALK_VOICEPATH = '/usr/share/hts-voice/mei/mei_{emotion}.htsvoice'
def make_wav(text, speed=1.0, emotion='normal', output_file='__temp.wav', output_dir=os.getcwd()):
"""
Function to make a wav file using OpenJTalk.
args:
speed: The speed of speech. (Default: 1.0)
emotion: Voice emotion. You can specify 'normal', 'happy', 'bashful', 'angry', or 'sad'.
output_file: The file name made by this function. (Default: '__temp.wav')
output_dir: The directory of output_file. (Default: Current directory)
"""
open_jtalk = [OPENJTALK_BINPATH + '/open_jtalk']
mech = ['-x', OPENJTALK_DICPATH]
htsvoice = ['-m', OPENJTALK_VOICEPATH.format(emotion=emotion)]
speed = ['-r', str(speed)]
outwav = ['-ow', os.path.join(output_dir, output_file)]
cmd = open_jtalk + mech + htsvoice + speed + outwav
c = subprocess.Popen(cmd,stdin=subprocess.PIPE)
c.stdin.write(text.encode('utf-8'))
c.stdin.close()
c.wait()
return os.path.join(output_dir, output_file)
| 31.683824 | 116 | 0.620562 |
e17ccdc4212c86466c9d7221473dc6138120cb0b | 1,375 | py | Python | exercises/shortest-path/ShortestPath.py | maxwellmattryan/cs-313e | 462a871475ba956e364a0faf98284633462984b8 | [
"MIT"
] | 1 | 2020-02-05T23:56:16.000Z | 2020-02-05T23:56:16.000Z | exercises/shortest-path/ShortestPath.py | maxwellmattryan/cs-313e | 462a871475ba956e364a0faf98284633462984b8 | [
"MIT"
] | null | null | null | exercises/shortest-path/ShortestPath.py | maxwellmattryan/cs-313e | 462a871475ba956e364a0faf98284633462984b8 | [
"MIT"
] | 2 | 2020-03-09T16:26:00.000Z | 2021-07-23T03:17:11.000Z | import math
main()
| 25.462963 | 76 | 0.589091 |
e17d136893ad674b6eda0ec3efee1f8fda058d2d | 341 | py | Python | Data Analysis/csv remove other label.py | byew/python-do-differernt-csv | 094b154834ee48210c2ee4a6a529d8fe76055fb7 | [
"MIT"
] | null | null | null | Data Analysis/csv remove other label.py | byew/python-do-differernt-csv | 094b154834ee48210c2ee4a6a529d8fe76055fb7 | [
"MIT"
] | null | null | null | Data Analysis/csv remove other label.py | byew/python-do-differernt-csv | 094b154834ee48210c2ee4a6a529d8fe76055fb7 | [
"MIT"
] | null | null | null | import pandas as pd
exa = pd.read_csv('en_dup.csv')
exa.loc[exa['label'] =='F', 'label']= 0
exa.loc[exa['label'] =='T', 'label']= 1
exa.loc[exa['label'] =='U', 'label']= 2
#label2, 01
exa0 = exa.loc[exa["label"] == 0]
exa1 = exa.loc[exa["label"] == 1]
exa = [exa0, exa1]
exa = pd.concat(exa)
exa.to_csv('train.csv', index=0)
| 17.947368 | 39 | 0.595308 |
e17d6ab7a795e35c2eccfd187299cdaa6e5f367c | 60,009 | py | Python | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
""" Reader objects and main class for continuous data (time series)
Depending on the storage format, the fitting reader is loaded and takes care
of reading the files.
.. todo:: unify with analyzer collection!
eeg source and analyzer sink node should work together
this connection should be documented when tested
"""
import os
import glob
import re
import numpy
import scipy
from scipy.io import loadmat
import warnings
import csv
from pySPACE.missions.support.windower import MarkerWindower
import logging
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.missions.support.WindowerInterface import AbstractStreamReader
def parse_float(param):
    """ Work around to parse numbers that use a comma as decimal separator.

    Tries a plain ``float()`` conversion first.  If that fails, the value is
    assumed to be a European-style number (e.g. "1.234,56"): the thousands
    dots are stripped and the decimal comma is turned into a dot before
    retrying.  If that also fails, a warning is issued and 0.0 is returned
    as a placeholder.

    :param param: string representation of the number
    :returns: parsed float, or 0.0 if the string could not be parsed at all
    """
    try:
        return float(param)
    except ValueError:
        warnings.warn("Failed float conversion from csv file.")
        try:
            # "1.234,56" -> "1234.56": drop thousands separators, then
            # turn the decimal comma into a decimal point
            return float(param.replace(".", "").replace(",", "."))
        except ValueError:
            # Narrowed from a bare ``except``: only a failed conversion
            # should be silenced here, not unrelated errors.
            warnings.warn("Secondary attempt at conversion also failed. " +
                          "Treating the value as string and return a 0 as " +
                          "placeholder.")
            return float(0)
def get_csv_handler(file_handler):
    """Return a ``csv.DictReader`` for *file_handler*, guessing the dialect.

    The dialect is sniffed from the first 2048 bytes of the stream.  If
    sniffing fails, a warning is issued and the module-level ``excel_space``
    dialect is registered and used instead.  In both cases the stream is
    rewound before the reader is created.
    """
    sample = file_handler.read(2048)
    try:
        sniffed = csv.Sniffer().sniff(sample)
    except csv.Error as e:
        # Sniffing failed: fall back to the space-separated Excel dialect.
        warnings.warn(str(e))
        csv.register_dialect("excel_space", excel_space)
        file_handler.seek(0)
        return csv.DictReader(file_handler, dialect=excel_space)
    file_handler.seek(0)
    return csv.DictReader(file_handler, dialect=sniffed)
    def read(self, nblocks=1):
        """ Read *nblocks* of the stream and pass it to registered functions

        For every block, one row of the CSV stream is converted into a
        ``(samples, markers)`` pair -- a (channels x 1) numpy column vector
        of channel values and a length-1 marker-id array (-1 meaning "no
        marker") -- and handed to every callback in ``self.callbacks``.

        :param nblocks: number of rows to read; -1 reads until the stream
                        is exhausted
        :returns: the number of rows actually read
        """
        n = 0
        while nblocks == -1 or n < nblocks:
            # A row may have been pre-fetched (e.g. while inspecting the
            # stream) -- consume it before touching the reader again.
            if not self.first_entry is None:
                samples, marker = self.first_entry, self.first_marker
                self.first_entry = None
            else:
                try:
                    samples = self.DictReader.next()
                except IOError:
                    # NOTE(review): csv readers normally signal end of data
                    # with StopIteration, not IOError -- confirm the reader
                    # used here really raises IOError at end of stream.
                    break
            # Determine this row's marker: either from a separate marker
            # stream (synchronised via time_index) or from a marker column
            # embedded in the data row itself.
            if not self.MarkerReader is None:
                if self.next_marker[0] == self.time_index:
                    marker = self.next_marker[1]
                    self.update_marker()
                else:
                    marker = ""
            elif self.marker in samples.keys():
                marker = samples.pop(self.marker)
            else:
                marker = ""
            # First occurrence of a marker name: assign a fresh id and
            # register it in both lookup directions.
            if not marker == "" and not marker in self._markerids:
                self._markerids[marker] = self.new_marker_id
                self._markerNames[self.new_marker_id] = marker
                self.new_marker_id += 1
            # Marker array for this block: a single entry, -1 = no marker.
            markers = numpy.ones(1)*(-1)
            if not marker == "":
                markers[0] = self._markerids[marker]
            # Convert the row dict into a (channels x 1) sample array.
            # If the marker column is listed among the channel names it is
            # skipped, so the array has one row less in that case.
            if self.marker in self.channelNames:
                array_samples = numpy.zeros((len(self.channelNames)-1, 1))
            else:
                array_samples = numpy.zeros((len(self.channelNames), 1))
            offset = 0
            for index, channel in enumerate(self.channelNames):
                if self.marker == channel:
                    # shift all subsequent channels up to fill the gap
                    offset -= 1
                else:
                    array_samples[index + offset] = parse_float(samples[channel])
            n += 1
            for c in self.callbacks:
                c(array_samples, markers)
            self.time_index += 1
        return n
def update_marker(self):
"""Update `next_marker` from `MarkerReader` information"""
try:
next = self.MarkerReader.next()
self.next_marker = (next["time"], next[self.marker])
except IOError:
pass
class EDFReader(AbstractStreamReader):
    """ Read EDF-Data

    On instantiation the blocksize coded in the edf-file is automatically
    assigned to the reader's own attribute 'stdblocksize'.

    Signals with different sampling rates are unified by repeating every
    value of a lower sampled signal until it fits the highest sampling
    rate present in the dataset.  This is needed so that every signal in
    the returned array has the same length.
    """
    def __init__(self, abs_edffile_path):
        """Open the EDF file at *abs_edffile_path* and parse its header.

        Re-raises the original IOError (after a warning) if the file
        cannot be opened.
        """
        try:
            self.edffile = open(abs_edffile_path, "r")
        except IOError as io:
            warnings.warn(str("failed to open file at [%s]" % abs_edffile_path))
            raise io
        # variables to later overwrite
        # the properties from AbstractStreamReader
        self.callbacks = list()
        self._dSamplingInterval = 0
        self._stdblocksize = 0
        self._markerids = dict()
        self._channelNames = dict()
        self._markerNames = dict()
        # gains, frequency for each channel
        self.gains = []
        self.phy_min = []
        self.dig_min = []
        self.frequency = []
        self.num_channels = 0
        self.num_samples = []
        self.edf_plus = False
        self.edf_header_length = 0
        self.annotations = None
        self.num_samples_anno = None
        self.timepoint = 0.0
        # parse the header and fill in all of the meta data above
        self.generate_meta_data()
    def read_edf_header(self):
        """Read edf-header information into a dict.

        The field widths follow the EDF specification (fixed-width ASCII
        fields); each per-channel field is repeated num_channels times.
        """
        m = dict()
        m["version"] = self.edffile.read(8)
        m["subject_id"] = self.edffile.read(80).strip()
        m["recording_id"] = self.edffile.read(80).strip()
        m["start_date"] = self.edffile.read(8)
        m["start_time"] = self.edffile.read(8)
        m["num_bytes_header"] = int(self.edffile.read(8).strip())
        # "EDF+C"/"EDF+D" here marks an EDF+ (continuous/discontinuous) file
        m["edf_c_d"] = self.edffile.read(44).strip()
        m["num_data_records"] = self.edffile.read(8)
        m["single_record_duration"] = float(self.edffile.read(8))
        m["num_channels"] = int(self.edffile.read(4))
        m["channel_names"] = list()
        for i in range(m["num_channels"]):
            m["channel_names"].append(self.edffile.read(16).strip())
        m["electrode_type"] = list()
        for i in range(m["num_channels"]):
            m["electrode_type"].append(self.edffile.read(80).strip())
        m["phy_dims"] = list()
        for i in range(m["num_channels"]):
            m["phy_dims"].append(self.edffile.read(8).strip())
        m["phy_min"] = list()
        for i in range(m["num_channels"]):
            m["phy_min"].append(float(self.edffile.read(8).strip()))
        m["phy_max"] = list()
        for i in range(m["num_channels"]):
            m["phy_max"].append(float(self.edffile.read(8).strip()))
        m["dig_min"] = list()
        for i in range(m["num_channels"]):
            m["dig_min"].append(float(self.edffile.read(8).strip()))
        m["dig_max"] = list()
        for i in range(m["num_channels"]):
            m["dig_max"].append(float(self.edffile.read(8).strip()))
        m["prefilter"] = list()
        for i in range(m["num_channels"]):
            m["prefilter"].append(self.edffile.read(80).strip())
        m["single_record_num_samples"] = list()
        for i in range(m["num_channels"]):
            m["single_record_num_samples"].append(int(self.edffile.read(8).strip()))
        m["reserved"] = self.edffile.read(32*m["num_channels"])
        # check position in file!
        assert self.edffile.tell() == m["num_bytes_header"], "EDF Header corrupt!"
        self.edf_header_length = self.edffile.tell()
        return m
    def read_edf_data(self):
        """read one record inside the data section of the edf-file"""
        edfsignal = []
        # marker track: one slot per sample of the fastest channel, -1 = none
        edfmarkers = numpy.ones(max(self.num_samples))*(-1)
        # get markers from self.annotations that fall into this record's
        # time window [timepoint, timepoint + delta)
        if self.annotations is not None:
            current_annotations = numpy.where(
                numpy.array(self.annotations.keys()) <
                self.timepoint+self.delta)[0]
            for c in current_annotations:
                # relative annotation time -> sample position in this record
                tmarker = self.annotations.keys()[c]-self.timepoint
                pmarker = int((tmarker/self.delta)*max(self.num_samples))
                edfmarkers[pmarker] = self.markerids[self.annotations[self.annotations.keys()[c]]]
                # NOTE(review): popping while indexing into repeated .keys()
                # calls shifts the remaining indices in current_annotations;
                # with more than one due annotation this may address the
                # wrong key -- confirm.
                self.annotations.pop(self.annotations.keys()[c])
        self.timepoint += self.delta
        # in EDF+ the last channel has the annotations,
        # otherwise it is treated as regular signal channel
        # NOTE(review): both branches below are currently identical.
        if self.edf_plus:
            for i,n in enumerate(self.num_samples):
                # 2 bytes per sample (16-bit little-endian integers)
                data = self.edffile.read(n*2)
                if len(data) != n*2:
                    raise IOError
                channel = numpy.fromstring(data, dtype=numpy.int16).astype(numpy.float32)
                # convert digital values to physical units
                signal = (channel - self.dig_min[i]) * self.gains[i] + self.phy_min[i]
                # simple upsampling for integer factors
                # TODO: may use scipy.resample ..
                if signal.shape[0] != max(self.num_samples):
                    factor = max(self.num_samples)/signal.shape[0]
                    assert type(factor) == int, str("Signal cannot be upsampled by non-int factor %f!" % factor)
                    signal = signal.repeat(factor, axis=0)
                edfsignal.append(signal)
        else:
            for i,n in enumerate(self.num_samples):
                # 2 bytes per sample (16-bit little-endian integers)
                data = self.edffile.read(n*2)
                if len(data) != n*2:
                    raise IOError
                channel = numpy.fromstring(data, dtype=numpy.int16).astype(numpy.float32)
                # convert digital values to physical units
                signal = (channel - self.dig_min[i]) * self.gains[i] + self.phy_min[i]
                # simple upsampling for integer factors
                # TODO: may use scipy.resample ..
                if signal.shape[0] != max(self.num_samples):
                    factor = max(self.num_samples)/signal.shape[0]
                    assert type(factor) == int, str("Signal cannot be upsampled by non-int factor %f!" % factor)
                    signal = signal.repeat(factor, axis=0)
                edfsignal.append(signal)
        return edfsignal, edfmarkers
    def parse_annotations(self):
        """ Parses times and names of the annotations

        This is done beforehand - annotations are later
        added to the streamed data.  Fills ``self.annotations`` with
        {absolute time in seconds: marker name}.
        """
        self.edffile.seek(self.edf_header_length, os.SEEK_SET)
        self.annotations = dict()
        # skip the ordinary signal channels of each record (2 bytes/sample)
        data_bytes_to_skip = sum(self.num_samples)*2
        while True:
            self.edffile.read(data_bytes_to_skip)
            anno = self.edffile.read(self.num_samples_anno*2)
            if len(anno) != self.num_samples_anno*2:
                break
            anno = anno.strip()
            # EDF+ TALs use chr(20) as field separator and chr(0) as filler
            marker = anno.split(chr(20))
            if marker[2][1:].startswith(chr(0)):
                continue
            # absolute time = record onset (base) + annotation offset
            base = float(marker[0])
            offset = float(marker[2][1:])
            name = str(marker[3])
            self.annotations[base+offset] = name.strip()
    def generate_meta_data(self):
        """ Generate the necessary meta data for the windower """
        m = self.read_edf_header()
        # calculate gain for each channel
        self.gains = [(px-pn)/(dx-dn) for px,pn,dx,dn in zip(m["phy_max"], m["phy_min"], m["dig_max"], m["dig_min"])]
        self.dig_min = m["dig_min"]
        self.phy_min = m["phy_min"]
        self._channelNames = m["channel_names"]
        self.num_channels = m["num_channels"]
        self.num_samples = m["single_record_num_samples"]
        # separate data from annotation channel
        if m["edf_c_d"] in ["EDF+D", "EDF+C"]:
            self.edf_plus = True
            # the annotation channel is called "EDF Annotations" and is the last channel
            assert "EDF Annotations" == m["channel_names"][-1], "Cannot determine Annotations Channel!"
            if m["edf_c_d"] in ["EDF+D"]:
                warnings.warn(str("The file %s contains non-continuous data-segments.\n"
                                  "This feature is not supported and may lead to unwanted results!") % self.edffile.name)
            self.num_samples_anno = self.num_samples.pop() # ignore sampling rate of the annotations channel
        else :
            self.edf_plus = False
        # calculate sampling interval for each channel
        self.frequency = [ns/m["single_record_duration"] for ns in self.num_samples]
        self._dSamplingInterval = max(self.frequency)
        self._stdblocksize = max(self.num_samples)
        # duration (in seconds) of one data record / streamed block
        self.delta = self.stdblocksize / max(self.frequency)
        # generate all marker names and ids
        self._markerids['null'] = 0
        # in edf+ case we can parse them from annotations
        if self.edf_plus :
            self.parse_annotations()
            for i,(t,name) in enumerate(self.annotations.iteritems()):
                self._markerids[name] = i+1
        else:
            # plain EDF has no marker channel: pre-register the generic
            # stimulus (S) and response (R) marker names instead
            warnings.warn("no marker channel is set - no markers will be streamed!")
            for s in range(1,256,1):
                self._markerids[str('S%3d' % s)] = s
            for r in range(1,256,1):
                self._markerids[str('R%3d' % r)] = r+256
        # generate reverse mapping
        for k,v in zip(self._markerids.iterkeys(), self._markerids.itervalues()):
            self._markerNames[v] = k
        # reset file position to begin of data section
        self.edffile.seek(self.edf_header_length, os.SEEK_SET)
    # Forwards one record of data per block to all registered callbacks
    # until all data is sent (or nblocks records have been read).
    def read(self, nblocks=1, verbose=False):
        """read data and call registered callbacks

        :param nblocks: number of records to read; -1 reads until EOF
        :returns: the number of records actually read
        """
        n = 0
        while nblocks == -1 or n < nblocks:
            try:
                samples, markers = self.read_edf_data()
            except IOError:
                break
            n += 1
            for c in self.callbacks:
                c(samples, markers)
        return n
class SETReader(AbstractStreamReader):
    """ Load eeglab .set format

    Read eeglab format when the data has not been segmented yet. It is further
    assumed that the data is stored binary in another file with extension .fdt.
    Further possibilities are .dat format or to store everything in the .set
    file. Both is currently not supported.
    """
    # NOTE(review): no __init__ is visible in this chunk -- attributes such
    # as abs_setfile_path, fdt_handle, callbacks, latency and
    # current_marker_index are presumably initialised elsewhere; confirm.
    def read_set_file(self):
        """Parse the .set (MATLAB) file and collect stream meta data.

        Fills sampling rate, channel names, marker names/ids and marker
        latencies, and derives the path of the binary .fdt data file.
        """
        setdata = loadmat(self.abs_setfile_path + '.set', appendmat=False)
        # check if stream data
        ntrials = setdata['EEG']['trials'][0][0][0][0]
        assert(ntrials == 1), "Data consists of more than one trial. This is not supported!"
        # check if data is stored in fdt format
        datafilename = setdata['EEG']['data'][0][0][0]
        assert(datafilename.split('.')[-1] == 'fdt'), "Data is not in fdt format!"
        # collect meta information
        self._dSamplingInterval = setdata['EEG']['srate'][0][0][0][0]
        self._channelNames = numpy.hstack(setdata['EEG']['chanlocs'][0][0][ \
            'labels'][0]).astype(numpy.str_).tolist()
        self.nChannels = setdata['EEG']['nbchan'][0][0][0][0]
        self.marker_data = numpy.hstack(setdata['EEG']['event'][0][0][ \
            'type'][0]).astype(numpy.str_)
        # assign consecutive ids to the distinct marker names
        for marker in numpy.unique(self.marker_data):
            marker_number = len(self._markerNames)
            self._markerNames[marker_number] = marker
            self._markerids[marker] = marker_number
        # marker latencies in samples, aligned with marker_data
        self.marker_times = numpy.hstack(setdata['EEG']['event'][0][0][ \
            'latency'][0]).flatten()
        self.abs_data_path = os.path.join(os.path.dirname(self.abs_setfile_path),
                                          datafilename)
    def regcallback(self, func):
        """Register *func* to be called with every (samples, markers) block."""
        self.callbacks.append(func)
    def read(self, nblocks=1, verbose=False):
        """Read up to *nblocks* blocks (-1 = all) and feed the callbacks.

        :returns: the number of blocks actually read
        """
        readblocks = 0
        while (readblocks < nblocks or nblocks == -1):
            ret, samples, markers = self.read_fdt_data()
            if ret:
                for f in self.callbacks:
                    f(samples, markers)
            else:
                break
            readblocks += 1
        return readblocks
    def read_fdt_data(self):
        """Read one block of binary data from the .fdt file.

        :returns: (success flag, (channels x stdblocksize) sample array or
                  None, marker array or None); the last partial block is
                  zero-padded to a full block.
        """
        if self.fdt_handle == None:
            return False, None, None
        num_samples = self.nChannels * self._stdblocksize
        markers = numpy.zeros(self._stdblocksize)
        markers.fill(-1)
        ###### READ DATA FROM FILE ######
        try:
            samples = numpy.fromfile(self.fdt_handle, dtype=numpy.float32,
                                     count=num_samples)
        except MemoryError:
            # assuming, that a MemoryError only occurs when file is finished
            self.fdt_handle.close()
            self.fdt_handle = None
            return False, None, None
        # True when EOF reached in last or current block
        if samples.size < num_samples:
            self.fdt_handle.close()
            self.fdt_handle = None
            if samples.size == 0:
                return False, None, None
            # zero-pad the final partial block to full size
            temp = samples
            samples = numpy.zeros(num_samples)
            numpy.put(samples, range(temp.size), temp)
        # need channel x time matrix
        samples = samples.reshape((self.stdblocksize, self.nChannels)).T
        ###### READ MARKERS FROM FILE ######
        # walk the (sorted) marker latencies belonging to this block
        for l in range(self.current_marker_index,len(self.marker_times)):
            if self.marker_times[l] > self.latency + self._stdblocksize:
                # first marker beyond this block: remember it for next time
                self.current_marker_index = l
                self.latency += self._stdblocksize
                break
            else:
                rel_marker_pos = (self.marker_times[l] - 1) % self._stdblocksize
                markers[rel_marker_pos] = self._markerids[self.marker_data[l]]
        return True, samples, markers
class EEGReader(AbstractStreamReader):
""" Load raw EEG data in the .eeg brain products format
This module does the Task of parsing
.vhdr, .vmrk end .eeg/.dat files and then hand them
over to the corresponding windower which
iterates over the aggregated data.
"""
    # This function gathers meta information from the .vhdr and .vmrk files.
    # Only the relevant information is then stored in variables, the windower
    # accesses during the initialisation phase.
    def bp_meta(self):
        """Parse BrainProducts .vhdr/.vmrk files and return the meta data.

        :returns: tuple of (nChannels, dSamplingInterval, resolutions,
                  channelNames, channelids, markerids, markerNames,
                  markertypes)
        """
        nChannels = 0
        dSamplingInterval = 0
        resolutions = list()
        channelNames = list()
        channelids = dict()
        markerids = dict()
        markerNames = dict()
        # NOTE(review): nmarkertypes is assigned but never used below;
        # the returned count is the local ``markertypes``.
        nmarkertypes = 0
        prefix = ''
        # helper function to convert resolutions
        # 0 = 100 nV, 1 = 500 nV, 2 = 10 {mu}V, 3 = 152.6 {mu}V
        # NOTE(review): ``res_conv`` used below is not defined in this
        # chunk -- presumably defined elsewhere in the module; confirm.
        # Start with vhdr file
        file_path = self.abs_eegfile_path + '.vhdr'
        hdr = open(file_path)
        for line in hdr:
            # lines starting with ';' are comments in the vhdr format
            if line.startswith(";"): continue
            # Read the words between brackets like "[Common Infos]"
            if line.startswith('['):
                prefix = line.partition("[")[2].partition("]")[0].lower()
                continue
            if line.find("=") == -1: continue
            # Common Infos and Binary Infos
            if(prefix == 'common infos' or prefix == 'binary infos'):
                key, value = line.split('=')
                key = key.lower()
                value = value.lower()
                if(key == 'datafile'):
                    pass # something like filename.eeg
                elif(key == 'markerfile'):
                    mrk_file = value
                elif(key == 'dataformat'):
                    pass # usually BINARY
                elif(key == 'dataorientation'):
                    eeg_data_or = value
                elif(key == 'datatype'):
                    pass # something like TIMEDOMAIN
                elif(key == 'numberofchannels'):
                    nChannels = int(value)
                elif(key == 'datapoints'):
                    pass # the number of datapoints in the whole set
                elif(key == 'samplinginterval'):
                    # header stores microseconds/sample; convert to Hz
                    dSamplingInterval = int(1000000/float(value))
                elif(key == 'binaryformat'):
                    if re.match("int_16", value, flags=re.IGNORECASE) == None:
                        self.eeg_dtype = numpy.float32
                    else:
                        self.eeg_dtype = numpy.int16
                elif(key == 'usebigendianorder'):
                    bin_byteorder = value
            # Channel Infos
            # ; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,
            # ; <Resolution in "Unit">,<Unit>,
            elif(prefix == 'channel infos'):
                key, value = line.split('=')
                if re.match("^[a-z]{2}[0-9]{1,3}", key, flags=re.IGNORECASE) == None:
                    continue
                ch_id = int(re.findall(r'\d+', key)[0])
                ch_name = value.split(',')[0]
                ch_ref = value.split(',')[1]
                if len(re.findall(r'\d+', value.split(',')[2])) == 0:
                    ch_res_f = 0
                else:
                    ch_res_f = float(re.findall(r'\d+', value.split(',')[2])[0])
                ch_res_unit = value.split(',')[3]
                channelNames.append(ch_name)
                channelids[ch_name] = ch_id
                resolutions.append(res_conv(ch_res_f, ch_res_unit))
            # Everything thats left..
            else:
                #print "parsing finished!"
                break
        hdr.close()
        # Continue with marker file
        # Priority:
        # 1: Path from .vhdr
        # 2: Path constructed from eegfile path
        # NOTE(review): ``mrk_file`` is only bound if the vhdr contained a
        # MarkerFile entry; otherwise the first open() raises NameError,
        # not IOError -- confirm whether that case can occur.
        prefix = ''
        markerNames[0] = 'null'
        try:
            self.mrk_handle = open(os.path.basename(self.abs_eegfile_path) + mrk_file)
        except IOError:
            try:
                self.mrk_handle = open(self.abs_eegfile_path + '.vmrk')
            except IOError:
                raise IOError, str("Could not open [%s.vmrk]!" % os.path.realpath(self.abs_eegfile_path))
        # Parse file
        for line in self.mrk_handle:
            if line.startswith(";"): continue
            # Read the words between brackets like "[Common Infos]"
            if line.startswith('['):
                prefix = line.partition("[")[2].partition("]")[0].lower()
                continue
            if line.find("=") == -1: continue
            if prefix == "marker infos":
                mrk_name = line.split(',')[1]
                if mrk_name != "" and mrk_name not in markerNames.values():
                    markerNames[len(markerNames)] = mrk_name
        # rewinds the marker file
        self.mrk_handle.seek(0, os.SEEK_SET)
        # helper struct for finding markers
        self.mrk_info = dict()
        self.mrk_info['line'] = ""
        self.mrk_info['position'] = 0
        # advance to first marker line
        while(re.match("^Mk1=", self.mrk_info['line'], re.IGNORECASE) == None):
            try:
                self.mrk_info['line'] = self.mrk_handle.next()
            except StopIteration:
                self.mrk_handle.close()
                raise StopIteration, str("Reached EOF while searching for first Marker in [%s]" % os.path.realpath(self.mrk_handle.name))
        # TODO: Sort markerNames?
        # build the reverse mapping name -> id
        for key in markerNames:
            markerids[markerNames[key]] = key
        markertypes = len(markerids)
        return nChannels, \
               dSamplingInterval, \
               resolutions, \
               channelNames, \
               channelids, \
               markerids, \
               markerNames, \
               markertypes
# This function reads the eeg-file and the marker-file for every
# block of data which is processed.
# string representation with interesting information
# Register callback function
# Reads data from .eeg/.dat file until EOF
| 42.111579 | 137 | 0.580613 |
e17db1fd4e96affffe66942426ac284e73e8b345 | 10,463 | py | Python | tests/base/test_endpoints_authentication.py | rapydo/http-api | ef0a299173195145303069534d45d446ea4da93a | [
"MIT"
] | 8 | 2018-07-04T09:54:46.000Z | 2022-03-17T08:21:06.000Z | tests/base/test_endpoints_authentication.py | rapydo/http-api | ef0a299173195145303069534d45d446ea4da93a | [
"MIT"
] | 19 | 2018-04-18T07:24:55.000Z | 2022-03-04T01:03:15.000Z | tests/base/test_endpoints_authentication.py | rapydo/http-api | ef0a299173195145303069534d45d446ea4da93a | [
"MIT"
] | 7 | 2018-07-03T12:17:50.000Z | 2021-05-05T04:33:32.000Z | from restapi.connectors import Connector
from restapi.env import Env
from restapi.services.authentication import BaseAuthentication, Role
from restapi.tests import API_URI, BaseTests, FlaskClient
from restapi.utilities.logs import log
| 35.228956 | 88 | 0.60346 |
e17f627e7014eaf9f501344de8ac94066bc5da4f | 70,860 | py | Python | katsdpscripts/rts_session.py | ska-sa/katsdpscripts | f9eaa867aad8b94c715f7286953124df00b5781c | [
"BSD-3-Clause"
] | null | null | null | katsdpscripts/rts_session.py | ska-sa/katsdpscripts | f9eaa867aad8b94c715f7286953124df00b5781c | [
"BSD-3-Clause"
] | 21 | 2019-09-16T15:26:53.000Z | 2022-01-11T09:14:39.000Z | katsdpscripts/rts_session.py | ska-sa/katsdpscripts | f9eaa867aad8b94c715f7286953124df00b5781c | [
"BSD-3-Clause"
] | 1 | 2019-11-11T11:47:54.000Z | 2019-11-11T11:47:54.000Z | ###############################################################################
# SKA South Africa (http://ska.ac.za/) #
# Author: cam@ska.ac.za #
# Copyright @ 2013 SKA SA. All rights reserved. #
# #
# THIS SOFTWARE MAY NOT BE COPIED OR DISTRIBUTED IN ANY FORM WITHOUT THE #
# WRITTEN PERMISSION OF SKA SA. #
###############################################################################
"""CaptureSession encompassing data capturing and standard observations with RTS.
This defines the :class:`CaptureSession` class, which encompasses the capturing
of data and the performance of standard scans with the RTS system. It also
provides a fake :class:`TimeSession` class, which goes through the motions in
order to time them, but without performing any real actions.
"""
import time
import logging
import sys
import os.path
import numpy as np
import katpoint
# This is used to document available spherical projections (and set them in case of TimeSession)
from katcorelib.targets import Offset
from .array import Array
from .katcp_client import KATClient
from .defaults import user_logger, activity_logger
from katmisc.utils.utils import dynamic_doc
# Obtain list of spherical projections and the default projection from antenna proxy
projections, default_proj = Offset.PROJECTIONS.keys(), Offset.DEFAULT_PROJECTION
# Move default projection to front of list
projections.remove(default_proj)
projections.insert(0, default_proj)
def ant_array(kat, ants, name='ants'):
"""Create sub-array of antennas from flexible specification.
Parameters
----------
kat : :class:`utility.KATCoreConn` object
KAT connection object
ants : :class:`Array` or :class:`KATClient` object, or list, or string
Antennas specified by an Array object containing antenna devices, or
a single antenna device or a list of antenna devices, or a string of
comma-separated antenna names, or the string 'all' for all antennas
controlled via the KAT connection associated with this session
Returns
-------
array : :class:`Array` object
Array object containing selected antenna devices
Raises
------
ValueError
If antenna with a specified name is not found on KAT connection object
"""
if isinstance(ants, Array):
return ants
elif isinstance(ants, KATClient):
return Array(name, [ants])
elif isinstance(ants, basestring):
if ants.strip() == 'all':
return kat.ants
else:
try:
return Array(name, [getattr(kat, ant.strip()) for ant in ants.split(',')])
except AttributeError:
raise ValueError("Antenna '%s' not found (i.e. no kat.%s exists)" % (ant, ant))
else:
# The default assumes that *ants* is a list of antenna devices
return Array(name, ants)
def report_compact_traceback(tb):
"""Produce a compact traceback report."""
print '--------------------------------------------------------'
print 'Session interrupted while doing (most recent call last):'
print '--------------------------------------------------------'
while tb:
f = tb.tb_frame
print '%s %s(), line %d' % (f.f_code.co_filename, f.f_code.co_name, f.f_lineno)
tb = tb.tb_next
print '--------------------------------------------------------'
class TimeSession(CaptureSessionBase):
"""Fake CaptureSession object used to estimate the duration of an experiment."""
    def __enter__(self):
        """Start time estimate, overriding the time module."""
        # NOTE(review): despite the docstring, no time-module override
        # happens here in this chunk -- presumably set up elsewhere; confirm.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Finish time estimate, restoring the time module."""
        # Wrap up the fake session (reporting / stowing as configured)
        self.end()
        # Do not suppress any exceptions that occurred in the body of with-statement
        return False
def _azel(self, target, timestamp, antenna):
"""Target (az, el) position in degrees (including offsets in degrees)."""
projection_type, x, y = self.projection
az, el = target.plane_to_sphere(katpoint.deg2rad(x), katpoint.deg2rad(y), timestamp, antenna, projection_type)
return katpoint.rad2deg(az), katpoint.rad2deg(el)
def _teleport_to(self, target, mode='POINT'):
"""Move antennas instantaneously onto target (or nearest point on horizon)."""
for m in range(len(self._fake_ants)):
antenna = self._fake_ants[m][0]
az, el = self._azel(target, self.time, antenna)
self._fake_ants[m] = (antenna, mode, az, max(el, self.el_limit))
    def _slew_to(self, target, mode='POINT', timeout=300.):
        """Slew antennas to target (or nearest point on horizon), with timeout.

        Advances the simulated time by the slowest antenna's estimated slew
        time and then teleports all antennas onto the target.
        """
        slew_times = []
        for ant, ant_mode, ant_az, ant_el in self._fake_ants:
            def estimate_slew(timestamp):
                """Obtain instantaneous target position and estimate time to slew there."""
                # Target position right now
                az, el = self._azel(target, timestamp, ant)
                # If target is below horizon, aim at closest point on horizon
                az_dist, el_dist = np.abs(az - ant_az), np.abs(max(el, self.el_limit) - ant_el)
                # Ignore azimuth wraps and drive strategies
                az_dist = az_dist if az_dist < 180. else 360. - az_dist
                # Assume az speed of 2 deg/s, el speed of 1 deg/s and overhead of 1 second
                slew_time = max(0.5 * az_dist, 1.0 * el_dist) + 1.0
                return az, el, slew_time
            # Initial estimate of slew time, based on a stationary target
            az1, el1, slew_time = estimate_slew(self.time)
            # Crude adjustment for target motion: chase target position for 2 iterations
            az2, el2, slew_time = estimate_slew(self.time + slew_time)
            az2, el2, slew_time = estimate_slew(self.time + slew_time)
            # Ensure slew does not take longer than timeout
            slew_time = min(slew_time, timeout)
            # If source is below horizon, handle timeout and potential rise in that interval
            if el2 < self.el_limit:
                # Position after timeout
                az_after_timeout, el_after_timeout = self._azel(target, self.time + timeout, ant)
                # If source is still down, slew time == timeout, else estimate rise time through linear interpolation
                slew_time = (self.el_limit - el1) / (el_after_timeout - el1) * timeout \
                            if el_after_timeout > self.el_limit else timeout
                az2, el2 = self._azel(target, self.time + slew_time, ant)
                el2 = max(el2, self.el_limit)
            slew_times.append(slew_time)
            # print "%s slewing from (%.1f, %.1f) to (%.1f, %.1f) in %.1f seconds" % \
            #       (ant.name, ant_az, ant_el, az2, el2, slew_time)
        # The overall slew time is the max for all antennas - adjust current time to reflect the slew
        self.time += (np.max(slew_times) if len(slew_times) > 0 else 0.)
        # Blindly assume all antennas are on target (or on horizon) after this interval
        self._teleport_to(target, mode)
    def get_centre_freq(self):
        """Get RF (sky) frequency associated with middle CBF channel.

        Returns
        -------
        centre_freq : float
            Actual centre frequency in MHz
        """
        # Fixed value: the fake session has no real CBF to query.
        return 1284.0
    def set_centre_freq(self, centre_freq):
        """Set RF (sky) frequency associated with middle CBF channel.

        This is a no-op in the fake session (there is no hardware to tune).

        Parameters
        ----------
        centre_freq : float
            Desired centre frequency in MHz
        """
        pass
    def standard_setup(self, observer, description, experiment_id=None,
                       centre_freq=None, nd_params=None,
                       stow_when_done=None, horizon=None, no_mask=False, **kwargs):
        """Perform basic experimental setup including antennas, LO and dump rate.

        In the fake session this snapshots the current antenna state, stores
        the provided session parameters (falling back to existing values when
        None is given) and logs the setup summary; no hardware is touched.
        """
        self.ants = ant_array(self.kat, self.get_ant_names())
        # Snapshot each antenna's description, mode and current position;
        # antennas whose sensors are unavailable are silently skipped.
        for ant in self.ants:
            try:
                self._fake_ants.append((katpoint.Antenna(ant.sensor.observer.get_value()),
                                        ant.sensor.mode.get_value(),
                                        ant.sensor.pos_actual_scan_azim.get_value(),
                                        ant.sensor.pos_actual_scan_elev.get_value()))
            except AttributeError:
                pass
        # Override provided session parameters (or initialize them from existing parameters if not provided)
        self.experiment_id = experiment_id = self.experiment_id if experiment_id is None else experiment_id
        self.nd_params = nd_params = self.nd_params if nd_params is None else nd_params
        self.stow_when_done = stow_when_done = self.stow_when_done if stow_when_done is None else stow_when_done
        self.horizon = self.horizon if horizon is None else horizon
        user_logger.info('Antennas used = %s' % (' '.join([ant[0].name for ant in self._fake_ants]),))
        user_logger.info('Observer = %s' % (observer,))
        user_logger.info("Description ='%s'" % (description,))
        user_logger.info('Experiment ID = %s' % (experiment_id,))
        # There is no way to find out the centre frequency in this fake session... maybe
        centre_freq = self.get_centre_freq()
        if centre_freq is None:
            user_logger.info('RF centre frequency = unknown to simulator, dump rate = %g Hz' % (1.0 / self.dump_period,))
        else:
            user_logger.info('RF centre frequency = %g MHz, dump rate = %g Hz' % (centre_freq, 1.0 / self.dump_period))
        # Summarise the noise-diode firing strategy for the log
        if nd_params['period'] > 0:
            nd_info = "Will switch '%s' noise diode on for %g s and off for %g s, every %g s if possible" % \
                      (nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period'])
        elif nd_params['period'] == 0:
            nd_info = "Will switch '%s' noise diode on for %g s and off for %g s at every opportunity" % \
                      (nd_params['diode'], nd_params['on'], nd_params['off'])
        else:
            nd_info = "Noise diode will not fire automatically"
        user_logger.info(nd_info + " while performing canned commands")
        user_logger.info('--------------------------')
    def capture_start(self):
        """Starting capture has no timing effect (no-op in the fake session)."""
        pass
    def label(self, label):
        """Adding label has no timing effect (only logged when non-empty)."""
        if label:
            user_logger.info("New compound scan: '%s'" % (label,))
def on_target(self, target):
"""Determine whether antennas are tracking a given target."""
if not self._fake_ants:
return False
for antenna, mode, ant_az, ant_el in self._fake_ants:
az, el = self._azel(target, self.time, antenna)
# Checking for lock and checking for target identity considered the same thing
if (az != ant_az) or (el != ant_el) or (mode != 'POINT'):
return False
return True
    def target_visible(self, target, duration=0., timeout=300., operation='scan'):
        """Check whether target is visible for given duration.

        Looks at the elevation of the target at the (approximate) start and
        end of the requested period for every antenna, warning appropriately
        when the target never rises or will rise/set during the period.
        """
        if not self._fake_ants:
            return False
        # Convert description string to target object, or keep object as is
        target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
        horizon = katpoint.deg2rad(self.horizon)
        # Include an average time to slew to the target (worst case about 90 seconds, so half that)
        now = self.time + 45.
        average_el, visible_before, visible_after = [], [], []
        for antenna, mode, ant_az, ant_el in self._fake_ants:
            az, el = target.azel(now, antenna)
            average_el.append(katpoint.rad2deg(el))
            # If not up yet, see if the target will pop out before the timeout
            if el < horizon:
                now += timeout
                az, el = target.azel(now, antenna)
            visible_before.append(el >= horizon)
            # Check what happens at end of observation
            az, el = target.azel(now + duration, antenna)
            visible_after.append(el >= horizon)
        if all(visible_before) and all(visible_after):
            return True
        # Invisible both at start and end (for any antenna) -> never up
        always_invisible = any(~np.array(visible_before) & ~np.array(visible_after))
        if always_invisible:
            user_logger.warning("Target '%s' is never up during requested period (average elevation is %g degrees)" %
                                (target.name, np.mean(average_el)))
        else:
            user_logger.warning("Target '%s' will rise or set during requested period" % (target.name,))
        return False
    def fire_noise_diode(self, diode='coupler', on=10.0, off=10.0, period=0.0, align=True, announce=True):
        """Estimate time taken to fire noise diode.

        Currently a stub: always reports that the diode was not fired (the
        commented-out implementation below is kept for reference).
        """
        return False
    # XXX needs a rethink
    # if not self._fake_ants:
    #     raise ValueError('No antennas specified for session - please run session.standard_setup first')
    # if self.dump_period == 0.0:
    #     # Wait for the first correlator dump to appear
    #     user_logger.info('waiting for correlator dump to arrive')
    #     self.dump_period = self._requested_dump_period
    #     time.sleep(self.dump_period)
    #     user_logger.info('correlator dump arrived')
    # if period < 0.0 or (self.time - self.last_nd_firing) < period:
    #     return False
    # if announce:
    #     user_logger.info("Firing '%s' noise diode (%g seconds on, %g seconds off)" % (diode, on, off))
    # else:
    #     user_logger.info('firing noise diode')
    # self.time += on
    # self.last_nd_firing = self.time + 0.
    # self.time += off
    # user_logger.info('fired noise diode')
    # return True
    def set_target(self, target):
        """Setting target has no timing effect.

        Only validates that antennas have been set up for the session.
        """
        if not self._fake_ants:
            raise ValueError('No antennas specified for session - please run session.standard_setup first')
    def track(self, target, duration=20.0, announce=True):
        """Estimate time taken to perform track.

        Advances the simulated time by the slew (if needed) plus the track
        duration.  Returns False (without advancing) when the target will be
        below the horizon.
        """
        if not self._fake_ants:
            raise ValueError('No antennas specified for session - please run session.standard_setup first')
        target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
        if announce:
            user_logger.info("Initiating %g-second track on target '%s'" % (duration, target.name))
        if not self.target_visible(target, duration):
            user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
            return False
        self.fire_noise_diode(announce=False, **self.nd_params)
        # Only slew when not already pointing at the target
        if not self.on_target(target):
            user_logger.info('slewing to target')
            self._slew_to(target)
            user_logger.info('target reached')
        self.fire_noise_diode(announce=False, **self.nd_params)
        user_logger.info('tracking target')
        # One extra second of overhead on top of the requested duration
        self.time += duration + 1.0
        user_logger.info('target tracked for %g seconds' % (duration,))
        self.fire_noise_diode(announce=False, **self.nd_params)
        self._teleport_to(target)
        return True
def scan(self, target, duration=30.0, start=(-3.0, 0.0), end=(3.0, 0.0),
         index=-1, projection=default_proj, announce=True):
    """Estimate time taken to perform single linear scan.

    Advances the simulated clock through the slew to the scan start,
    the scan itself and the surrounding noise-diode firings; no real
    antennas are driven.

    Parameters
    ----------
    target : katpoint.Target or str
        Target to scan across (description strings parsed by katpoint).
    duration : float, optional
        Scan duration in seconds.
    start, end : pair of floats, optional
        Projected (x, y) offsets of scan start and end, in degrees.
    index : int, optional
        Scan index used in log messages (-1 for an unnumbered scan).
    projection : optional
        Name of a projection in Offset.PROJECTIONS.
    announce : bool, optional
        If True, log an announcement before starting.

    Returns
    -------
    bool
        True if the scan was (virtually) performed, False if skipped
        because the target would be below the horizon.

    Raises
    ------
    ValueError
        If no antennas were configured via session.standard_setup.
    """
    if not self._fake_ants:
        raise ValueError('No antennas specified for session - please run session.standard_setup first')
    scan_name = 'scan' if index < 0 else 'scan %d' % (index,)
    target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
    if announce:
        user_logger.info("Initiating %g-second scan across target '%s'" % (duration, target.name))
    if not self.target_visible(target, duration):
        # BUG FIX: message previously said "Skipping track" -- copy-pasted
        # from track(); this method skips a scan.
        user_logger.warning("Skipping scan, as target '%s' will be below horizon" % (target.name,))
        return False
    self.fire_noise_diode(announce=False, **self.nd_params)
    projection = Offset.PROJECTIONS[projection]
    self.projection = (projection, start[0], start[1])
    user_logger.info('slewing to start of %s' % (scan_name,))
    self._slew_to(target, mode='SCAN')
    user_logger.info('start of %s reached' % (scan_name,))
    self.fire_noise_diode(announce=False, **self.nd_params)
    # Assume antennas can keep up with target (and doesn't scan too fast either)
    user_logger.info('performing %s' % (scan_name,))
    self.time += duration + 1.0
    user_logger.info('%s complete' % (scan_name,))
    self.fire_noise_diode(announce=False, **self.nd_params)
    self.projection = (projection, end[0], end[1])
    self._teleport_to(target)
    return True
def raster_scan(self, target, num_scans=3, scan_duration=30.0, scan_extent=6.0, scan_spacing=0.5,
                scan_in_azimuth=True, projection=default_proj, announce=True):
    """Estimate time taken to perform raster scan.

    Advances the simulated clock through the slew, scan and noise-diode
    segments of a num_scans-row raster across *target*; no antennas are
    moved and no data is captured.

    Returns True if the raster was (virtually) performed, False if it
    was skipped because the target would be below the horizon.
    """
    if not self._fake_ants:
        raise ValueError('No antennas specified for session - please run session.standard_setup first')
    target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
    projection = Offset.PROJECTIONS[projection]
    if announce:
        user_logger.info("Initiating raster scan (%d %g-second scans extending %g degrees) on target '%s'" %
                         (num_scans, scan_duration, scan_extent, target.name))
    # Estimated noise-diode overhead per scan: one on+off cycle, scaled by
    # how often the diode fires within a scan; disabled for negative periods.
    nd_time = self.nd_params['on'] + self.nd_params['off']
    nd_time *= scan_duration / max(self.nd_params['period'], scan_duration)
    nd_time = nd_time if self.nd_params['period'] >= 0 else 0.
    if not self.target_visible(target, (scan_duration + nd_time) * num_scans):
        # NOTE(review): message says "track" but this skips a raster scan
        # (looks copy-pasted from track()).
        user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
        return False
    # Create start and end positions of each scan, based on scan parameters
    scan_levels = np.arange(-(num_scans // 2), num_scans // 2 + 1)
    scanning_coord = (scan_extent / 2.0) * (-1) ** scan_levels
    stepping_coord = scan_spacing * scan_levels
    # Flip sign of elevation offsets to ensure that the first scan always starts at the top left of target
    scan_starts = zip(scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, -scanning_coord)
    scan_ends = zip(-scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, scanning_coord)
    self.fire_noise_diode(announce=False, **self.nd_params)
    # Perform multiple scans across the target
    for scan_index, (start, end) in enumerate(zip(scan_starts, scan_ends)):
        self.projection = (projection, start[0], start[1])
        user_logger.info('slewing to start of scan %d' % (scan_index,))
        self._slew_to(target, mode='SCAN')
        user_logger.info('start of scan %d reached' % (scan_index,))
        self.fire_noise_diode(announce=False, **self.nd_params)
        # Assume antennas can keep up with target (and doesn't scan too fast either)
        user_logger.info('performing scan %d' % (scan_index,))
        self.time += scan_duration + 1.0
        user_logger.info('scan %d complete' % (scan_index,))
        self.fire_noise_diode(announce=False, **self.nd_params)
        self.projection = (projection, end[0], end[1])
        self._teleport_to(target)
    return True
def end(self):
    """Stop data capturing to shut down the session and close the data file.

    In this timing simulation nothing was captured; this logs the
    estimated duration, reports it to KATCoreConn, and undoes the
    monkey-patching of the time module and the logging handlers.
    """
    user_logger.info('Scans complete, no data captured as this is a timing simulation...')
    user_logger.info('Ended data capturing session with experiment ID %s' % (self.experiment_id,))
    activity_logger.info('Timing simulation. Ended data capturing session with experiment ID %s' % (self.experiment_id,))
    if self.stow_when_done and self._fake_ants:
        user_logger.info("Stowing dishes.")
        activity_logger.info('Timing simulation. Stowing dishes.')
        # "Stow" instantly by teleporting the fake antennas to the zenith.
        self._teleport_to(katpoint.Target("azel, 0.0, 90.0"), mode="STOW")
    user_logger.info('==========================')
    duration = self.time - self.start_time
    # Let KATCoreConn know how long the estimated observation time was.
    self.kat.set_estimated_duration(duration)
    # Note: 'duration' is rebound from seconds (float) to a display string.
    if duration <= 100:
        duration = '%d seconds' % (np.ceil(duration),)
    elif duration <= 100 * 60:
        duration = '%d minutes' % (np.ceil(duration / 60.),)
    else:
        duration = '%.1f hours' % (duration / 3600.,)
    msg = "Experiment estimated to last %s until this time" % (duration,)
    user_logger.info(msg + "\n")
    activity_logger.info("Timing simulation. %s" % (msg,))
    # Restore time module functions
    # (the simulation replaced time.time/time.sleep with fakes; the
    # originals were stashed on self -- presumably at session start.)
    time.time, time.sleep = self._realtime, self._realsleep
    # Restore logging
    # Stream handlers had their date format swapped, other handlers had
    # their level overridden; undo both and drop the stashed values.
    for handler in user_logger.handlers:
        if isinstance(handler, logging.StreamHandler):
            handler.formatter.datefmt = handler.formatter.old_datefmt
            del handler.formatter.old_datefmt
        else:
            handler.setLevel(handler.old_level)
            del handler.old_level
    activity_logger.info("Timing simulation. ----- Script ended %s (%s). Output file None" % (sys.argv[0], ' '.join(sys.argv[1:])))
| 50.14862 | 185 | 0.62869 |
e17fa16cc2830c70bdb6cc63e17b12437230ec42 | 499 | py | Python | projects/webptspy/apps/account/admin.py | codelieche/testing | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | 2 | 2017-08-10T03:40:22.000Z | 2017-08-17T13:20:16.000Z | projects/webptspy/apps/account/admin.py | codelieche/webpts | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | null | null | null | projects/webptspy/apps/account/admin.py | codelieche/webpts | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from django.contrib import admin
from .models import UserProfile
# Register your models here.
admin.site.register(UserProfile, UserProfileModelAdmin)
| 26.263158 | 64 | 0.653307 |
e18273cb48126cd36d2e98bfcd448716a51f67d4 | 396 | py | Python | axicli.py | notpeter/AxiDraw_API | d9c35eb93fd85f96cf197908415822af9a725b41 | [
"MIT"
] | null | null | null | axicli.py | notpeter/AxiDraw_API | d9c35eb93fd85f96cf197908415822af9a725b41 | [
"MIT"
] | 3 | 2021-01-17T04:31:57.000Z | 2021-01-17T04:36:41.000Z | axicli.py | notpeter/AxiDraw_API | d9c35eb93fd85f96cf197908415822af9a725b41 | [
"MIT"
] | null | null | null | '''
axicli.py - Command line interface (CLI) for AxiDraw.
For quick help:
python axicli.py --help
Full user guide:
https://axidraw.com/doc/cli_api/
This script is a stand-alone version of AxiDraw Control, accepting
various options and providing a facility for setting default values.
'''
from axicli.axidraw_cli import axidraw_CLI
if __name__ == '__main__':
    # Delegate straight to the CLI entry point in the axicli package.
    axidraw_CLI()
| 19.8 | 68 | 0.729798 |
e183076e1912547a48c02bb69c7456b82ec312ba | 852 | py | Python | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | # code jam: Qualification Round 2017: Problem C. Bathroom Stalls
# Driver loop: one line of output per test case, in the Code Jam
# "Case #<i>: ..." format.
# NOTE(review): Python 2 syntax (print statement). The helpers
# read_int/read_int_n/get_y_z are defined elsewhere in the original
# file and are not visible in this fragment.
T = read_int()  # number of test cases
x = 1
while x <= T:
    N, K = read_int_n()
    y, z = get_y_z(N, K)
    print 'Case #{}: {} {}'.format(x, y, z)
    x += 1
| 23.027027 | 64 | 0.419014 |
e1842583bfd3115c7825344cdde05a9fbfaf3644 | 1,143 | py | Python | tests/integration/modules/test_vmc_vm_stats.py | kdsalvy/salt-ext-modules-vmware-1 | 9fdc941692e4c526f575f33b2ce23c1470582934 | [
"Apache-2.0"
] | 10 | 2021-11-02T20:24:44.000Z | 2022-03-11T05:54:27.000Z | tests/integration/modules/test_vmc_vm_stats.py | waynew/salt-ext-modules-vmware | 9f693382772061676c846c850df6ff508b7f3a91 | [
"Apache-2.0"
] | 83 | 2021-10-01T15:13:02.000Z | 2022-03-31T16:22:40.000Z | tests/integration/modules/test_vmc_vm_stats.py | waynew/salt-ext-modules-vmware | 9f693382772061676c846c850df6ff508b7f3a91 | [
"Apache-2.0"
] | 15 | 2021-09-30T23:17:27.000Z | 2022-03-23T06:54:22.000Z | """
Integration Tests for vmc_vm_stats execution module
"""
import pytest
| 30.078947 | 87 | 0.724409 |
e18531179510c781afbb8fd1aa7db0ab08f90a40 | 2,134 | py | Python | uncompy3/decomp_dec.py | Alpha-Demon404/RE-14 | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 39 | 2020-02-26T09:44:36.000Z | 2022-03-23T00:18:25.000Z | uncompy3/decomp_dec.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 15 | 2020-05-14T10:07:26.000Z | 2022-01-06T02:55:32.000Z | uncompy3/decomp_dec.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 41 | 2020-03-16T22:36:38.000Z | 2022-03-17T14:47:19.000Z | # MENTOL
# At:Sun Nov 24 15:04:31 2019
if len(bytecode) == 0:
print('\x1b[1;93mbyte code kosong\nharap masukkan bytecodenya\x1b[0m')
exit()
import marshal, sys, os, random, string, time
try:
from uncompyle6.main import decompile
except:
os.system('pip install uncompyle6')
from uncompyle6.main import decompile
n = ''.join((random.choice(string.ascii_lowercase) for _ in range(4)))
fl = n + '-dec.py'
logo = '\n+m 888 +h,8,"88b,\n+m e88 888 ,e e, e88\'888 e88 88e 888 888 8e +p888 88e Y8b Y888P +h " ,88P\'\n+md888 888 d88 88b d888 \'8 d888 888b 888 888 88b +p888 888b Y8b Y8P +h C8K\n+mY888 888 888 , Y888 , Y888 888P 888 888 888 +p888 888P Y8b Y +h e `88b,\n+m "88 888 "YeeP" "88,e8\' "88 88" 888 888 888 +p888 88" 888 +h"8",88P\'\n +p888 888\n +p888 888+p\n\t\t+ccoded by: +pZhu Bai Lee AKA AnonyMass\n\t\t+cteam : +pBlack Coder Crush'
decom()
| 32.830769 | 643 | 0.49672 |
e189d16da8174a3b154f79a433e1c07828f194cc | 650 | py | Python | tests/unit/proxy/roundtrip/test_janus_graph_proxy.py | joshthoward/amundsenmetadatalibrary | 87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad | [
"Apache-2.0"
] | 3 | 2021-02-09T13:52:03.000Z | 2022-02-26T02:36:02.000Z | tests/unit/proxy/roundtrip/test_janus_graph_proxy.py | joshthoward/amundsenmetadatalibrary | 87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad | [
"Apache-2.0"
] | 1 | 2021-02-08T23:21:04.000Z | 2021-02-08T23:21:04.000Z | tests/unit/proxy/roundtrip/test_janus_graph_proxy.py | joshthoward/amundsenmetadatalibrary | 87e2b44f0e44ca643f087bff6bd6b39d4ae9e9ad | [
"Apache-2.0"
] | 2 | 2021-02-23T18:23:35.000Z | 2022-03-18T15:12:25.000Z | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping
import unittest
from .abstract_gremlin_proxy_tests import abstract_gremlin_proxy_test_class
from .roundtrip_janusgraph_proxy import RoundtripJanusGraphProxy
| 40.625 | 91 | 0.803077 |
e18aabf2262561727c96e58389bdf2da5dd573c7 | 3,829 | py | Python | scripts/performance/config.py | atsgen/tf-test | 2748fcd81491450c75dadc71849d2a1c11061029 | [
"Apache-2.0"
] | 5 | 2020-09-29T00:36:57.000Z | 2022-02-16T06:51:32.000Z | scripts/performance/config.py | atsgen/tf-test | 2748fcd81491450c75dadc71849d2a1c11061029 | [
"Apache-2.0"
] | 27 | 2019-11-02T02:18:34.000Z | 2022-02-24T18:49:08.000Z | scripts/performance/config.py | atsgen/tf-test | 2748fcd81491450c75dadc71849d2a1c11061029 | [
"Apache-2.0"
] | 20 | 2019-11-28T16:02:25.000Z | 2022-01-06T05:56:58.000Z | from builtins import str
from builtins import range
from builtins import object
import os
import fixtures
import testtools
from vn_test import VNFixture
from vm_test import VMFixture
from common.connections import ContrailConnections
from policy_test import PolicyFixture
from policy.config import AttachPolicyFixture
from time import sleep
from tcutils.commands import ssh, execute_cmd, execute_cmd_out
| 47.271605 | 121 | 0.659441 |
e18bc1c9af4bc77f3e745030b60675eb8770630a | 4,048 | py | Python | deepspeech_pytorch/testing.py | Chudbrochil/deepspeech.pytorch-2.1 | d5d01e33ef383edb79c6a5b1584c134587108deb | [
"MIT"
] | 13 | 2022-01-25T01:26:56.000Z | 2022-03-18T00:46:38.000Z | deepspeech_pytorch/testing.py | Chudbrochil/deepspeech.pytorch-2.1 | d5d01e33ef383edb79c6a5b1584c134587108deb | [
"MIT"
] | null | null | null | deepspeech_pytorch/testing.py | Chudbrochil/deepspeech.pytorch-2.1 | d5d01e33ef383edb79c6a5b1584c134587108deb | [
"MIT"
] | 1 | 2021-03-03T06:14:21.000Z | 2021-03-03T06:14:21.000Z | import hydra
import torch
from tqdm import tqdm
from deepspeech_pytorch.configs.inference_config import EvalConfig
from deepspeech_pytorch.decoder import GreedyDecoder
from deepspeech_pytorch.loader.data_loader import SpectrogramDataset, AudioDataLoader
from deepspeech_pytorch.utils import load_model, load_decoder
| 42.610526 | 104 | 0.561759 |
e18d4a143a071cc6ef045ea47a03a5bc0de604f9 | 483 | py | Python | api/aps3/tasklist/tasklist/main.py | Gustavobb/megadados | 6a653314e0c93c866ec86be2119d64bf297d2f5a | [
"MIT"
] | null | null | null | api/aps3/tasklist/tasklist/main.py | Gustavobb/megadados | 6a653314e0c93c866ec86be2119d64bf297d2f5a | [
"MIT"
] | null | null | null | api/aps3/tasklist/tasklist/main.py | Gustavobb/megadados | 6a653314e0c93c866ec86be2119d64bf297d2f5a | [
"MIT"
] | null | null | null | # pylint: disable=missing-module-docstring
from fastapi import FastAPI
from .routers import task, user
# OpenAPI tag metadata surfaced in the generated API docs.
tags_metadata = [
    {
        'name': 'task',
        'description': 'Operations related to tasks.',
    },
]
app = FastAPI(
    title='Task list',
    description='Task-list project for the **Megadados** course',
    openapi_tags=tags_metadata,
)
# Mount the routers under their URL prefixes.
app.include_router(task.router, prefix='/task', tags=['task'])
# NOTE(review): there is no 'user' entry in tags_metadata, so the user
# router's tag appears in the docs without a description.
app.include_router(user.router, prefix='/user', tags=['user'])
| 23 | 65 | 0.672878 |
e18d760e51cdf1f8ab9695881861681dcd4595c4 | 214 | py | Python | silver_bullet/contain_value.py | Hojung-Jeong/Silver-Bullet-Encryption-Tool | 5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7 | [
"Apache-2.0"
] | null | null | null | silver_bullet/contain_value.py | Hojung-Jeong/Silver-Bullet-Encryption-Tool | 5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7 | [
"Apache-2.0"
] | null | null | null | silver_bullet/contain_value.py | Hojung-Jeong/Silver-Bullet-Encryption-Tool | 5ea29b3cd78cf7488e0cbdcf4ea60d7c9151c2a7 | [
"Apache-2.0"
] | null | null | null | '''
>List of functions
1. contain(value,limit) - contains a value between 0 to limit
'''
| 16.461538 | 62 | 0.705607 |
e18de74748857a558fcaeea0ccf6405de06e2047 | 6,388 | py | Python | code/train.py | TONGJINGLV/dagan | 4b6701f0a31026d9a45ab988a645f0a2249cc45c | [
"MIT"
] | null | null | null | code/train.py | TONGJINGLV/dagan | 4b6701f0a31026d9a45ab988a645f0a2249cc45c | [
"MIT"
] | null | null | null | code/train.py | TONGJINGLV/dagan | 4b6701f0a31026d9a45ab988a645f0a2249cc45c | [
"MIT"
] | null | null | null | from data import NumericalField, CategoricalField, Iterator
from data import Dataset
from synthesizer import MaskGenerator_MLP, ObservedGenerator_MLP, Discriminator, Handler, ObservedGenerator_LSTM
from random import choice
import multiprocessing
import pandas as pd
import numpy as np
import torch
import argparse
import json
import os
parameters_space = {
"batch_size":[64, 128, 256],
"z_dim":[100, 200, 300],
"gen_num_layers":[1,2,3],
"gen_hidden_dim":[100, 200, 300, 400],
"gen_feature_dim":[100, 200, 300, 400, 500],
"gen_lstm_dim":[100,200,300,400,500],
"dis_hidden_dim":[100, 200, 300],
"dis_num_layers":[1,2,3],
"lr":[0.0001,0.0002,0.0005],
"cp":[0.01],
"dis_train_num" :[1, 2, 5]
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('configs', help='a json config file')
parser.add_argument('gpu', default=0)
args = parser.parse_args()
gpu = int(args.gpu)
if gpu >= 0:
GPU = True
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
else:
GPU = False
with open(args.configs) as f:
configs = json.load(f)
try:
os.mkdir("expdir")
except:
pass
for config in configs:
path = "expdir/"+config["name"]+"/"
try:
os.mkdir("expdir/"+config["name"])
except:
pass
source = pd.read_csv(config["source"])
target = pd.read_csv(config["target"])
fields = []
col_type = []
if "label" in config.keys():
cond = config["label"]
for i, col in enumerate(list(source)):
if "label" in config.keys() and col in cond:
fields.append((col, CategoricalField("one-hot", noise=0)))
col_type.append("condition")
elif i in config["normalize_cols"]:
fields.append((col,NumericalField("normalize")))
col_type.append("normalize")
elif i in config["gmm_cols"]:
fields.append((col, NumericalField("gmm", n=5)))
col_type.append("gmm")
elif i in config["one-hot_cols"]:
fields.append((col, CategoricalField("one-hot", noise=0)))
col_type.append("one-hot")
elif i in config["ordinal_cols"]:
fields.append((col, CategoricalField("dict")))
col_type.append("ordinal")
source_dst, target_dst = Dataset.split(
fields = fields,
path = ".",
col_type = col_type,
train = config["source"],
validation = config["target"],
format = "csv",
)
source_dst.learn_convert()
target_dst.learn_convert()
print("source row : {}".format(len(source_dst)))
print("target row: {}".format(len(target_dst)))
n_search = config["n_search"]
jobs = [multiprocessing.Process(target=thread_run, args=(path, search, config, source_dst, target_dst, GPU)) for search in range(n_search)]
for j in jobs:
j.start()
for j in jobs:
j.join()
| 32.426396 | 153 | 0.706951 |
e18f60246990c305ae61d8fc90992d5ea5e04b27 | 4,054 | py | Python | front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2020-11-26T18:53:46.000Z | 2020-11-26T18:53:46.000Z | front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | null | null | null | front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pep277.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | # Test the Unicode versions of normal file functions
# open, os.open, os.stat. os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from test import support
if not os.path.supports_unicode_filenames:
raise unittest.SkipTest("test works only on NT+")
filenames = [
'abc',
'ascii',
'Gr\xfc\xdf-Gott',
'\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
'\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
'\u306b\u307d\u3093',
'\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
'\u66e8\u66e9\u66eb',
'\u66e8\u05e9\u3093\u0434\u0393\xdf',
]
# Destroy directory dirname and all files under it, to one level.
if __name__ == "__main__":
test_main()
| 35.252174 | 88 | 0.583128 |
e18feeef1d44b1fb5822da8d7afcebfd40c0bfe3 | 1,336 | py | Python | test/unit/test_utils.py | managedbyq/q-dbt | 01f1918fe5cbf3036b7197b8e3211960403718f3 | [
"Apache-2.0"
] | 1 | 2018-06-20T17:51:20.000Z | 2018-06-20T17:51:20.000Z | test/unit/test_utils.py | managedbyq/q-dbt | 01f1918fe5cbf3036b7197b8e3211960403718f3 | [
"Apache-2.0"
] | null | null | null | test/unit/test_utils.py | managedbyq/q-dbt | 01f1918fe5cbf3036b7197b8e3211960403718f3 | [
"Apache-2.0"
] | 1 | 2018-10-18T18:45:38.000Z | 2018-10-18T18:45:38.000Z | import unittest
import dbt.utils
| 30.363636 | 67 | 0.450599 |
e191203fe2262ef390df012127682a8cc60f4320 | 1,006 | py | Python | results.py | ejnnr/steerable_pdo_experiments | 17902e56641cefe305b935c8733b45aa066bf068 | [
"BSD-3-Clause"
] | null | null | null | results.py | ejnnr/steerable_pdo_experiments | 17902e56641cefe305b935c8733b45aa066bf068 | [
"BSD-3-Clause"
] | null | null | null | results.py | ejnnr/steerable_pdo_experiments | 17902e56641cefe305b935c8733b45aa066bf068 | [
"BSD-3-Clause"
] | null | null | null | import argparse
from pathlib import Path
import numpy as np
import yaml
# this script takes in a folder path and then recursively collects all
# results.yaml files in that directory. It averages them and prints
# summary statistics
parser = argparse.ArgumentParser(description="Analyze the results")
parser.add_argument("path", type=str, help="path to the folder containing the results")
args = parser.parse_args()
# Collect every results.yaml below the given folder, plus the union of
# all metric names seen across the runs.
results = []
keys = set()
for path in Path(args.path).rglob("results.yaml"):
    with open(path, "r") as file:
        results.append(yaml.safe_load(file))
        keys = keys.union(results[-1].keys())
print(f"Found {len(results)} files with {len(keys)} different metrics\n")
# Per-metric summary: number of runs reporting it, mean and std.
# A metric missing from some runs is averaged only over the runs
# that actually have it.
output = {}
for key in keys:
    vals = [result[key] for result in results if key in result]
    n = len(vals)
    mean = float(np.mean(vals))
    std = float(np.std(vals))
    output[key] = {
        "N runs": n,
        "mean": round(mean, 3),
        "std": round(std, 3)
    }
print(yaml.dump(output)) | 25.794872 | 87 | 0.672962 |
e19165bb436973e6fab40bd344665853870c9891 | 4,541 | py | Python | Pgnet.py | rs-lsl/Pgnet | b31de7c93619a40bfb194bda0ad2889e732c1db6 | [
"MIT"
] | 2 | 2021-12-27T06:27:56.000Z | 2022-03-12T05:19:59.000Z | Pgnet.py | rs-lsl/Pgnet | b31de7c93619a40bfb194bda0ad2889e732c1db6 | [
"MIT"
] | null | null | null | Pgnet.py | rs-lsl/Pgnet | b31de7c93619a40bfb194bda0ad2889e732c1db6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: lsl
E-mail: cug_lsl@cug.edu.cn
"""
import sys
import argparse
sys.path.append("/home/aistudio/code")
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import time
from Pgnet_structure import Pg_net
from Pgnet_dataset import Mydata
from loss_function import SAMLoss
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# hyper parameters
test_batch_size = 1
parser = argparse.ArgumentParser(description='Paddle Pgnet')
# model
parser.add_argument('--model', type=str, default='Pgnet')
# dataset
parser.add_argument('--dataset', type=str, default='WV2')
# train
parser.add_argument('--in_nc', type=int, default=126, help='number of input image channels')
parser.add_argument('--endmember', type=int, default=20, help='number of endmember')
parser.add_argument('--batch_size', type=int, default=15, help='training batch size')
parser.add_argument('--num_epochs', type=int, default=500, help='number of training epochs')
parser.add_argument('--lr', type=float, default=2e-3, help='learning rate')
parser.add_argument('--resume', type=str, default='', help='path to model checkpoint')
parser.add_argument('--start_epoch', type=int, default=1, help='restart epoch number for training')
parser.add_argument('--momentum', type=float, default=0.05, help='momentum')
parser.add_argument('--step', type=int, default=100,
help='Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=100')
# test
parser.add_argument('--test', type=bool, default=False, help='test')
parser.add_argument('--load_para', type=bool, default=False, help='if load model parameters')
parser.add_argument('--test_batch_size', type=int, default=1, help='testing batch size')
opt = parser.parse_args()
print(opt)
| 36.620968 | 120 | 0.657564 |
e1927fcb892725b69d50542f139eaa6330088fdc | 14,716 | py | Python | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 47 | 2016-05-20T02:33:26.000Z | 2022-03-02T01:48:57.000Z | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 7 | 2020-04-09T13:40:56.000Z | 2022-01-24T19:18:50.000Z | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 41 | 2016-04-19T06:31:14.000Z | 2022-03-30T06:25:09.000Z | #
# Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# trace-cmd pktlog plugin for ath10k, QCA Linux wireless driver
#
# TODO:
#
# o create class for struct ieee80211_hdr each packet headers with
# pack() and unpack() methods
import struct
import binascii
DEBUG = 1
CUR_PKTLOG_VER = 10010
PKTLOG_MAGIC_NUM = 7735225
IEEE80211_FCTL_TODS = 0x0100
IEEE80211_FCTL_FROMDS = 0x0200
TARGET_NUM_MSDU_DESC = (1024 + 400)
MAX_PKT_INFO_MSDU_ID = 192
MAX_10_4_PKT_INFO_MSDU_ID = 1
PKTLOG_MAX_TXCTL_WORDS = 57
# must match with enum ath10k_hw_rev from ath10k and existing values
# should not change
ATH10K_PKTLOG_HW_QCA988X = 0
ATH10K_PKTLOG_HW_QCA6174 = 1
ATH10K_PKTLOG_HW_QCA99X0 = 2
ATH10K_PKTLOG_HW_QCA9888 = 3
ATH10K_PKTLOG_HW_QCA9984 = 4
ATH10K_PKTLOG_HW_QCA9377 = 5
ATH10K_PKTLOG_HW_QCA40XX = 6
ATH10K_PKTLOG_HW_QCA9887 = 7
ATH10K_PKTLOG_TYPE_TX_CTRL = 1
ATH10K_PKTLOG_TYPE_TX_STAT = 2
ATH10K_PKTLOG_TYPE_TX_MSDU_ID = 3
ATH10K_PKTLOG_TYPE_TX_FRM_HDR = 4
ATH10K_PKTLOG_TYPE_RX_STAT = 5
ATH10K_PKTLOG_TYPE_RC_FIND = 6
ATH10K_PKTLOG_TYPE_RC_UPDATE = 7
ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR = 8
ATH10K_PKTLOG_TYPE_DBG_PRINT = 9
ATH10K_PKTLOG_FLG_TYPE_LOCAL_S = 0
ATH10K_PKTLOG_FLG_TYPE_REMOTE_S = 1
ATH10K_PKTLOG_FLG_TYPE_CLONE_S = 2
ATH10K_PKTLOG_FLG_TYPE_UNKNOWN_S = 3
# sizeof(ath10k_pktlog_txctl) = 12 + 4 * 57
ATH10K_PKTLOG_TXCTL_LEN = 240
ATH10K_PKTLOG_MAX_TXCTL_WORDS = 57
# sizeof(ath10k_pktlog_10_4_txctl)2 = 16 + 4 * 153
ATH10K_PKTLOG_10_4_TXCTL_LEN = 624
ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS = 153
msdu_len_tbl = {}
output_file = None
frm_hdr = None
# struct ath10k_pktlog_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned char payload[0];
# } __attribute__((__packed__));
# struct ath10k_pktlog_10_4_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned int type_specific_data;
# unsigned char payload[0];
# } __attribute__((__packed__));
def output_open():
    """Open pktlog.dat for writing and emit the pktlog file header.

    Stores the file object in the module-level ``output_file`` global;
    all subsequent records are appended through output_write().
    """
    global output_file
    # apparently no way to close the file as the python plugin doesn't
    # have unregister() callback
    output_file = open('pktlog.dat', 'wb')
    # File header: magic number followed by the pktlog format version.
    buf = struct.pack('II', PKTLOG_MAGIC_NUM, CUR_PKTLOG_VER)
    output_write(buf)
def output_write(buf):
    """Append raw bytes to the pktlog file opened by output_open()."""
    # 'global' is not required for a read-only use, but kept as written.
    global output_file
    output_file.write(buf)
def pktlog_tx_frm_hdr(frame):
    """Parse an IEEE 802.11 header and cache it as the pktlog frame header.

    Extracts frame control, sequence control and the last two octets of
    the BSSID/SA/DA addresses (which of addr1..addr3 plays which role
    depends on the To-DS/From-DS bits), packs them in the on-disk
    frame-header layout and stores the result in the module-level
    ``frm_hdr`` for use by later tx-ctrl records. Returns without
    updating ``frm_hdr`` if the buffer is too short to parse.
    """
    global frm_hdr
    try:
        # struct ieee80211_hdr
        (frame_control, duration_id, addr1a, addr1b, addr1c, addr2a, addr2b, addr2c,
         addr3a, addr3b, addr3c, seq_ctrl) = struct.unpack_from('<HHI2BI2BI2BH', frame, 0)
    except struct.error as e:
        dbg('failed to parse struct ieee80211_hdr: %s' % (e))
        return
    # Select DA/SA/BSSID from addr1..addr3 according to the DS bits,
    # keeping only the last two octets of each address.
    if frame_control & IEEE80211_FCTL_TODS:
        bssid_tail = (addr1b << 8) | addr1c
        sa_tail = (addr2b << 8) | addr2c
        da_tail = (addr3b << 8) | addr3c
    elif frame_control & IEEE80211_FCTL_FROMDS:
        bssid_tail = (addr2b << 8) | addr2c
        sa_tail = (addr3b << 8) | addr3c
        da_tail = (addr1b << 8) | addr1c
    else:
        bssid_tail = (addr3b << 8) | addr3c
        sa_tail = (addr2b << 8) | addr2c
        da_tail = (addr1b << 8) | addr1c
    resvd = 0
    frm_hdr = struct.pack('HHHHHH', frame_control, seq_ctrl, bssid_tail,
                          sa_tail, da_tail, resvd)
    dbg('frm_hdr %d B' % len(frm_hdr))
def pktlog_tx_ctrl(buf, hw_type):
    """Convert a tx-ctrl trace event into an on-disk pktlog record.

    Re-packs the event header with the fixed record size for the given
    hardware generation, prepends the most recently captured frame
    header (zeros if none has been seen yet) and emits exactly the
    per-generation number of tx descriptor control words, zero-padding
    once the payload runs out.
    """
    global frm_hdr
    if hw_type == ATH10K_PKTLOG_HW_QCA988X:
        hdr = Ath10kPktlogHdr()
        hdr.unpack(buf)
        hdr.size = ATH10K_PKTLOG_TXCTL_LEN
        num_txctls = ATH10K_PKTLOG_MAX_TXCTL_WORDS
    elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
                     ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
        hdr = Ath10kPktlog_10_4_Hdr()
        hdr.unpack(buf)
        hdr.size = ATH10K_PKTLOG_10_4_TXCTL_LEN
        num_txctls = ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS
    # NOTE(review): any other hw_type (e.g. QCA6174/QCA9377) leaves
    # hdr/num_txctls unbound and raises NameError below -- confirm that
    # callers only pass the handled generations.
    output_write(hdr.pack())
    # write struct ath10k_pktlog_frame
    if frm_hdr:
        output_write(frm_hdr)
    else:
        tmp = struct.pack('HHHHHH', 0, 0, 0, 0, 0, 0)
        output_write(tmp)
    txdesc_ctl = hdr.payload[0:]
    for i in range(num_txctls):
        if len(txdesc_ctl) >= 4:
            txctl, = struct.unpack_from('<I', txdesc_ctl)
            txdesc_ctl = txdesc_ctl[4:]
        else:
            txctl = 0
        output_write(struct.pack('I', txctl))
e1936080a3e49021025bc658618c985bef1143b7 | 731 | py | Python | tgtypes/models/venue.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | tgtypes/models/venue.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | tgtypes/models/venue.py | autogram/tgtypes | 90f8d0d35d3c372767508e56c20777635e128e38 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from ._base import TelegramObject
if TYPE_CHECKING: # pragma: no cover
from .location import Location
| 25.206897 | 80 | 0.689466 |
e19376010ad54eee24bb9ce56fbcb72f63a795cb | 2,457 | py | Python | nsa.py | hypervis0r/nsaproductgenerator | e384717e0eb2746bd58d041963b49e4772192f0a | [
"MIT"
] | null | null | null | nsa.py | hypervis0r/nsaproductgenerator | e384717e0eb2746bd58d041963b49e4772192f0a | [
"MIT"
] | null | null | null | nsa.py | hypervis0r/nsaproductgenerator | e384717e0eb2746bd58d041963b49e4772192f0a | [
"MIT"
] | 1 | 2022-02-25T13:06:14.000Z | 2022-02-25T13:06:14.000Z | import random
import argparse
# TODO: Parse word lists from files
words = {
"codenames_adjective": [
"quantum",
"loud",
"red",
"blue",
"green",
"yellow",
"irate",
"angry",
"peeved",
"happy",
"slimy",
"sleepy",
"junior",
"slicker",
"united",
"somber",
"bizarre",
"odd",
"weird",
"wrong",
"latent",
"chilly",
"strange",
"loud",
"silent",
"hopping",
"orange",
"violet",
"violent",
"desolate",
"lone",
"cold",
"solemn",
"raging",
"intelligent",
"american",
],
"codenames_noun": [
"matrix",
"wolf",
"solace",
"whisper",
"felony",
"moon",
"sucker",
"penguin",
"waffle",
"maestro",
"night",
"trinity",
"deity",
"monkey",
"ark",
"squirrel",
"iron",
"bounce",
"farm",
"chef",
"trough",
"net",
"trawl",
"glee",
"water",
"spork",
"plow",
"feed",
"souffle",
"route",
"bagel",
"montana",
"analyst",
"auto",
"watch",
"photo",
"yard",
"source",
"monkey",
"seagull",
"toll",
"spawn",
"gopher",
"chipmunk",
"set",
"calendar",
"artist",
"chaser",
"scan",
"tote",
"beam",
"entourage",
"genesis",
"walk",
"spatula",
"rage",
"fire",
"master"
],
"codenames_suffix": [
" {}000",
"-II",
" {}.0",
" rev{}",
"-HX",
" v{}",
]
}
if __name__ == "__main__":
main(parse_args()) | 16.944828 | 119 | 0.483923 |
e195525885c756f6c1eaa22f28ac15deda8bb369 | 2,886 | py | Python | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | null | null | null | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | 6 | 2015-01-01T09:15:28.000Z | 2016-10-20T08:09:26.000Z | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | 3 | 2015-08-05T12:16:45.000Z | 2018-02-01T19:55:40.000Z | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Mon 18 Nov 21:38:19 2013
"""Extension building for using this package
"""
import numpy
from pkg_resources import resource_filename
from bob.extension import Extension as BobExtension
# forward the build_ext command from bob.extension
from bob.extension import build_ext, Library as BobLibrary
from distutils.version import LooseVersion
| 31.032258 | 79 | 0.698545 |
e195971c01d6f8dcda846bd7ff1f32bb1f7099e8 | 4,248 | py | Python | src/RosGazeboLibrary/Gazebo.py | hielsnoppe/robotframework-rosgazebolibrary | a91d48413d4af95856964644b149898b538c6724 | [
"Apache-2.0"
] | null | null | null | src/RosGazeboLibrary/Gazebo.py | hielsnoppe/robotframework-rosgazebolibrary | a91d48413d4af95856964644b149898b538c6724 | [
"Apache-2.0"
] | null | null | null | src/RosGazeboLibrary/Gazebo.py | hielsnoppe/robotframework-rosgazebolibrary | a91d48413d4af95856964644b149898b538c6724 | [
"Apache-2.0"
] | null | null | null | from robot.api.deco import keyword
from robot.libraries.BuiltIn import BuiltIn | 29.296552 | 94 | 0.638889 |
e1960d36e95888a08f212b033eea9c1cb048cffe | 1,143 | py | Python | tests/unit/test_hass.py | boonhapus/hautomate | f111a8ad86d5f07183903ec99c1981569e0ee046 | [
"MIT"
] | null | null | null | tests/unit/test_hass.py | boonhapus/hautomate | f111a8ad86d5f07183903ec99c1981569e0ee046 | [
"MIT"
] | null | null | null | tests/unit/test_hass.py | boonhapus/hautomate | f111a8ad86d5f07183903ec99c1981569e0ee046 | [
"MIT"
] | null | null | null | from ward import test, each, raises
from homeassistant.core import HomeAssistant
from hautomate.settings import HautoConfig
from hautomate import Hautomate
import pydantic
from tests.fixtures import cfg_data_hauto
| 26.581395 | 67 | 0.632546 |
e19634e1c0e6ad67f639ff7b727b4525f8c022d4 | 1,186 | py | Python | test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 86 | 2018-09-06T09:30:53.000Z | 2022-03-27T01:12:36.000Z | test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 362 | 2018-09-04T20:21:24.000Z | 2022-03-30T15:14:38.000Z | test/arguments/with_range_check_code/python/Bit4RangeCheckTest.py | dkBrazz/zserio | 29dd8145b7d851fac682d3afe991185ea2eac318 | [
"BSD-3-Clause"
] | 20 | 2018-09-10T15:59:02.000Z | 2021-12-01T15:38:22.000Z | import unittest
import zserio
from testutils import getZserioApi
BIT4_LOWER_BOUND = 0
BIT4_UPPER_BOUND = 15
| 34.882353 | 99 | 0.73946 |
e196eb274e00b4e5d8027a1161feb36eab5a1ff6 | 1,931 | py | Python | src/MainAPP/migrations/0030_auto_20181211_1246.py | mizamae/HomeAutomation | 8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca | [
"MIT"
] | null | null | null | src/MainAPP/migrations/0030_auto_20181211_1246.py | mizamae/HomeAutomation | 8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca | [
"MIT"
] | 9 | 2017-11-21T15:45:18.000Z | 2022-02-11T03:37:54.000Z | src/MainAPP/migrations/0030_auto_20181211_1246.py | mizamae/HomeAutomation | 8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca | [
"MIT"
] | 1 | 2020-07-22T02:24:17.000Z | 2020-07-22T02:24:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-11 11:46
from __future__ import unicode_literals
from django.db import migrations, models
| 47.097561 | 252 | 0.651476 |
e1970edc7bf4ebc76f1931f011d41021ea8563bf | 18,954 | py | Python | qdms/PulsedProgramming.py | 3it-nano/QDMS | 9ec2d4e198c00f394d8882517c4b3b336c7fe8c2 | [
"MIT"
] | 1 | 2021-11-21T15:18:27.000Z | 2021-11-21T15:18:27.000Z | qdms/PulsedProgramming.py | 3it-nano/QDMS | 9ec2d4e198c00f394d8882517c4b3b336c7fe8c2 | [
"MIT"
] | null | null | null | qdms/PulsedProgramming.py | 3it-nano/QDMS | 9ec2d4e198c00f394d8882517c4b3b336c7fe8c2 | [
"MIT"
] | null | null | null | import numpy as np
import math
import time
| 43.672811 | 219 | 0.614013 |
e197f5d2fbd28b451da6017706229aff6b5fef77 | 462 | py | Python | tests/test_push.py | associatedpress/datakit-dworld | 21ccd0e468c7064d62022a2f136c0f8f47bbabb9 | [
"ISC"
] | 2 | 2019-09-07T02:03:46.000Z | 2021-03-06T14:43:01.000Z | tests/test_push.py | associatedpress/datakit-dworld | 21ccd0e468c7064d62022a2f136c0f8f47bbabb9 | [
"ISC"
] | 5 | 2019-09-06T22:24:26.000Z | 2021-04-27T21:42:18.000Z | tests/test_push.py | associatedpress/datakit-dworld | 21ccd0e468c7064d62022a2f136c0f8f47bbabb9 | [
"ISC"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from unittest import mock
# from datakit_dworld.push import Push
def test_push(capsys):
    """Placeholder test for the ``dworld push`` command.

    Currently a no-op: the intended interaction (constructing the command,
    mocking the parsed arguments, running it, and checking the captured
    stdout via the built-in ``capsys`` pytest fixture) is only sketched in
    the commented-out lines below.
    """
    # cmd = Greeting(None, None, cmd_name='dworld push')
    # parsed_args = mock.Mock()
    # parsed_args.greeting = 'Hello world!'
    # cmd.run(parsed_args)
    # out, err = capsys.readouterr()
    # assert 'Hello world!' in out
e19851b68758ad2c430ee4ac03534a5235c71909 | 1,742 | py | Python | cool_filter.py | Andrea055/Imager | 8463fb18253a97f0c93b11f36104881b7e003f41 | [
"MIT"
] | 7 | 2019-03-31T00:02:42.000Z | 2022-01-30T00:30:46.000Z | cool_filter.py | Andrea055/Imager | 8463fb18253a97f0c93b11f36104881b7e003f41 | [
"MIT"
] | 21 | 2018-11-29T14:35:08.000Z | 2019-01-11T08:00:26.000Z | cool_filter.py | Andrea055/Imager | 8463fb18253a97f0c93b11f36104881b7e003f41 | [
"MIT"
] | 17 | 2018-11-27T01:15:29.000Z | 2019-12-29T19:41:30.000Z | import cv2
import numpy as np
from scipy.interpolate import UnivariateSpline
| 32.259259 | 74 | 0.703215 |
e198e752d01863b4604a77f7225e25fec572d794 | 623 | py | Python | src/evaluators/sample_evaluators/swd_sample_evaluator.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | 4 | 2020-08-20T20:51:24.000Z | 2022-01-26T23:56:35.000Z | src/evaluators/sample_evaluators/swd_sample_evaluator.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | null | null | null | src/evaluators/sample_evaluators/swd_sample_evaluator.py | gmum/cwae-pytorch | 7fb31a5d12a0a637be7dde76f0e11e80ec4a345d | [
"MIT"
] | 1 | 2021-12-24T14:13:40.000Z | 2021-12-24T14:13:40.000Z | import torch
from metrics.swd import sliced_wasserstein_distance
from evaluators.sample_evaluators.base_sample_evaluator import BaseSampleEvaluator
from noise_creator import NoiseCreator
| 38.9375 | 89 | 0.781701 |
e199a6e60b58553046744ab0013002524ed3e824 | 473 | py | Python | topic-db/topicdb/core/models/language.py | anthcp-infocom/Contextualise | 0136660fcb965fd70fb4c7a33de7973a69ee9fec | [
"MIT"
] | 184 | 2019-01-10T03:50:50.000Z | 2022-03-31T19:45:16.000Z | topic-db/topicdb/core/models/language.py | anthcp-infocom/Contextualise | 0136660fcb965fd70fb4c7a33de7973a69ee9fec | [
"MIT"
] | 11 | 2019-04-07T07:39:11.000Z | 2022-02-17T13:29:32.000Z | topic-db/topicdb/core/models/language.py | anthcp-infocom/Contextualise | 0136660fcb965fd70fb4c7a33de7973a69ee9fec | [
"MIT"
] | 9 | 2019-10-26T02:43:59.000Z | 2021-11-03T00:49:10.000Z | """
Language enumeration. Part of the StoryTechnologies project.
June 12, 2016
Brett Alistair Kromkamp (brett.kromkamp@gmail.com)
"""
from enum import Enum
| 18.92 | 60 | 0.64482 |
e199bdb1802d5fdf8365414f161e96d1a070a7b9 | 899 | py | Python | utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | 1 | 2022-03-16T13:29:30.000Z | 2022-03-16T13:29:30.000Z | utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | null | null | null | utils/migrations/0002_alter_electricitybilling_unit_price_and_more.py | shumwe/rental-house-management-system | f97f22afa8bc2740ed08baa387c74b93e02fac0c | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-04-02 17:24
from django.db import migrations, models
| 31 | 130 | 0.604004 |
e19e2e064598322d4426a31b5e25a817d667c8db | 13,017 | py | Python | lemur/roles/views.py | pandragoq/lemur | 4f289c790b6638be49dc6614045bcad01bebf7ba | [
"Apache-2.0"
] | null | null | null | lemur/roles/views.py | pandragoq/lemur | 4f289c790b6638be49dc6614045bcad01bebf7ba | [
"Apache-2.0"
] | null | null | null | lemur/roles/views.py | pandragoq/lemur | 4f289c790b6638be49dc6614045bcad01bebf7ba | [
"Apache-2.0"
] | null | null | null | """
.. module: lemur.roles.views
:platform: Unix
:copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask import Blueprint
from flask import make_response, jsonify, abort, g
from flask.ext.restful import reqparse, fields, Api
from lemur.roles import service
from lemur.auth.service import AuthenticatedResource
from lemur.auth.permissions import ViewRoleCredentialsPermission, admin_permission
from lemur.common.utils import marshal_items, paginated_parser
mod = Blueprint('roles', __name__)
api = Api(mod)
FIELDS = {
'name': fields.String,
'description': fields.String,
'id': fields.Integer,
}
api.add_resource(RolesList, '/roles', endpoint='roles')
api.add_resource(Roles, '/roles/<int:role_id>', endpoint='role')
api.add_resource(RoleViewCredentials, '/roles/<int:role_id>/credentials', endpoint='roleCredentials`')
api.add_resource(AuthorityRolesList, '/authorities/<int:authority_id>/roles', endpoint='authorityRoles')
api.add_resource(UserRolesList, '/users/<int:user_id>/roles', endpoint='userRoles')
| 29.186099 | 112 | 0.520089 |
e19e4bc332d90affe3ea70c17d43f480bce982e0 | 16,454 | py | Python | backend/core/workspaces/dataView.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | null | null | null | backend/core/workspaces/dataView.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | 50 | 2021-03-28T03:06:19.000Z | 2021-10-18T12:36:16.000Z | backend/core/workspaces/dataView.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | 1 | 2021-07-30T07:12:46.000Z | 2021-07-30T07:12:46.000Z | """
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
import copy
from core.common.objDict import ObjDict
| 38.533958 | 109 | 0.521454 |
e19eb98e8bf34e916a97b5b0db5e158713913cb4 | 3,892 | py | Python | demo/video_gpuaccel_demo.py | chenxinfeng4/mmdetection | a99a1aaa5e4a7614f2f89f2350e1b917b2a8ca7e | [
"Apache-2.0"
] | 1 | 2021-12-10T15:08:22.000Z | 2021-12-10T15:08:22.000Z | demo/video_gpuaccel_demo.py | q3394101/mmdetection | ca11860f4f3c3ca2ce8340e2686eeaec05b29111 | [
"Apache-2.0"
] | null | null | null | demo/video_gpuaccel_demo.py | q3394101/mmdetection | ca11860f4f3c3ca2ce8340e2686eeaec05b29111 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
from torchvision.transforms import functional as F
from mmdet.apis import init_detector
from mmdet.datasets.pipelines import Compose
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
if __name__ == '__main__':
main()
| 34.140351 | 77 | 0.659044 |
e1a102da5af50dd136ac3eab04d096bc659e8951 | 1,234 | py | Python | scripts/benchmark_1_rdomset.py | bluegenes/spacegraphcats | 35f8057068e4fe79ab83ac4efe91d1b0f389e1ea | [
"BSD-3-Clause"
] | null | null | null | scripts/benchmark_1_rdomset.py | bluegenes/spacegraphcats | 35f8057068e4fe79ab83ac4efe91d1b0f389e1ea | [
"BSD-3-Clause"
] | null | null | null | scripts/benchmark_1_rdomset.py | bluegenes/spacegraphcats | 35f8057068e4fe79ab83ac4efe91d1b0f389e1ea | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
"""
Benchmark the rdomset (catlas level 1) algorithm, without I/O considerations.
"""
import sys, os
# add spacegraphcats package to import path:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import spacegraphcats
from spacegraphcats.catlas import catlas
import argparse
import sys
import time
if __name__ == '__main__':
sys.exit(main())
| 28.697674 | 77 | 0.658833 |
e1a1374935fa7cc8ec68a7212a8ba5b8c016fac8 | 2,107 | py | Python | pyob/mixins/pyob_set_label.py | khunspoonzi/pyob | b1b134b708585add15d04fa75001f3364f31dd74 | [
"MIT"
] | null | null | null | pyob/mixins/pyob_set_label.py | khunspoonzi/pyob | b1b134b708585add15d04fa75001f3364f31dd74 | [
"MIT"
] | null | null | null | pyob/mixins/pyob_set_label.py | khunspoonzi/pyob | b1b134b708585add15d04fa75001f3364f31dd74 | [
"MIT"
] | null | null | null | #
# PYOB SET LABEL MIXIN
#
| 36.327586 | 88 | 0.366398 |
e1a2f60719c963dbaf1979085fd5c1225ad3b6d9 | 1,547 | py | Python | proto/twowfuck.py | hanss314/twowfuck | 43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e | [
"Unlicense"
] | null | null | null | proto/twowfuck.py | hanss314/twowfuck | 43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e | [
"Unlicense"
] | null | null | null | proto/twowfuck.py | hanss314/twowfuck | 43d4f7b8c9f6b1e547f57d0c56b1db2972393c1e | [
"Unlicense"
] | null | null | null | import time
from hashlib import sha1
| 28.127273 | 93 | 0.513251 |
e1a32855943669310a5b6aeb210eb2e39273f98a | 5,915 | py | Python | simba/process_data_log.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 1 | 2021-12-15T07:30:33.000Z | 2021-12-15T07:30:33.000Z | simba/process_data_log.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | null | null | null | simba/process_data_log.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 1 | 2021-11-14T09:15:30.000Z | 2021-11-14T09:15:30.000Z | import pandas as pd
import os
from configparser import ConfigParser, NoSectionError, NoOptionError
from datetime import datetime
import numpy as np
import glob
from simba.rw_dfs import *
| 49.291667 | 200 | 0.598478 |
e1a33e79602cf7d5b3c579918699d4ae4a866f09 | 3,812 | py | Python | SphinxReportPlugins/MatplotlibPlugin/__init__.py | Tim-HU/sphinx-report | 3a0dc225e594c4b2083dff7a93b6d77054256416 | [
"BSD-2-Clause"
] | 1 | 2020-04-10T12:48:40.000Z | 2020-04-10T12:48:40.000Z | SphinxReportPlugins/MatplotlibPlugin/__init__.py | Tim-HU/sphinx-report | 3a0dc225e594c4b2083dff7a93b6d77054256416 | [
"BSD-2-Clause"
] | null | null | null | SphinxReportPlugins/MatplotlibPlugin/__init__.py | Tim-HU/sphinx-report | 3a0dc225e594c4b2083dff7a93b6d77054256416 | [
"BSD-2-Clause"
] | null | null | null | import os
import re
import warnings
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
from matplotlib.cbook import exception_to_str
import seaborn
from SphinxReport.Component import *
from SphinxReport import Config, Utils
| 35.626168 | 97 | 0.471144 |
e1a3af379a95b5a4e4e2ab062711e4da48ff07ad | 4,358 | py | Python | nips2018/utils/plotting.py | kovacspe/Sinz2018_NIPS | c0aad625e516bed57f3ee52b39195c8817527d66 | [
"MIT"
] | 5 | 2018-10-30T05:39:11.000Z | 2021-03-20T12:00:25.000Z | nips2018/utils/plotting.py | kovacspe/Sinz2018_NIPS | c0aad625e516bed57f3ee52b39195c8817527d66 | [
"MIT"
] | null | null | null | nips2018/utils/plotting.py | kovacspe/Sinz2018_NIPS | c0aad625e516bed57f3ee52b39195c8817527d66 | [
"MIT"
] | 5 | 2019-03-08T13:48:39.000Z | 2020-10-04T13:27:56.000Z | import cv2
import imageio
import numpy as np
import matplotlib.pyplot as plt
from itertools import product, zip_longest
import seaborn as sns
| 36.932203 | 106 | 0.630564 |
e1a3e676ffda7283905e72fd2a219c469d9b17cf | 2,115 | py | Python | src/tests/samples.py | BeholdersEye/PyBitmessage | 362a975fbf1ec831d3107c7442527225bc140162 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5 | 2018-03-24T17:33:03.000Z | 2019-07-01T07:16:19.000Z | src/tests/samples.py | BeholdersEye/PyBitmessage | 362a975fbf1ec831d3107c7442527225bc140162 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 15 | 2018-03-19T00:04:57.000Z | 2021-12-10T17:21:54.000Z | src/tests/samples.py | BeholdersEye/PyBitmessage | 362a975fbf1ec831d3107c7442527225bc140162 | [
"MIT",
"BSD-2-Clause-FreeBSD"
] | 5 | 2019-05-15T08:42:57.000Z | 2019-09-17T12:21:37.000Z | """Various sample data"""
from binascii import unhexlify
magic = 0xE9BEB4D9
# These keys are from addresses test script
sample_pubsigningkey = unhexlify(
'044a367f049ec16cb6b6118eb734a9962d10b8db59c890cd08f210c43ff08bdf09d'
'16f502ca26cd0713f38988a1237f1fc8fa07b15653c996dc4013af6d15505ce')
sample_pubencryptionkey = unhexlify(
'044597d59177fc1d89555d38915f581b5ff2286b39d022ca0283d2bdd5c36be5d3c'
'e7b9b97792327851a562752e4b79475d1f51f5a71352482b241227f45ed36a9')
sample_privsigningkey = \
b'93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665'
sample_privencryptionkey = \
b'4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a'
sample_ripe = b'003cd097eb7f35c87b5dc8b4538c22cb55312a9f'
# stream: 1, version: 2
sample_address = 'BM-onkVu1KKL2UaUss5Upg9vXmqd3esTmV79'
sample_factor = 66858749573256452658262553961707680376751171096153613379801854825275240965733
# G * sample_factor
sample_point = (
33567437183004486938355437500683826356288335339807546987348409590129959362313,
94730058721143827257669456336351159718085716196507891067256111928318063085006
)
sample_seed = 'TIGER, tiger, burning bright. In the forests of the night'
# Deterministic addresses with stream 1 and versions 3, 4
sample_deterministic_ripe = b'00cfb69416ae76f68a81c459de4e13460c7d17eb'
sample_deterministic_addr3 = 'BM-2DBPTgeSawWYZceFD69AbDT5q4iUWtj1ZN'
sample_deterministic_addr4 = 'BM-2cWzSnwjJ7yRP3nLEWUV5LisTZyREWSzUK'
sample_daddr3_512 = 18875720106589866286514488037355423395410802084648916523381
sample_daddr4_512 = 25152821841976547050350277460563089811513157529113201589004
sample_statusbar_msg = "new status bar message"
sample_inbox_msg_ids = ['27e644765a3e4b2e973ee7ccf958ea20', '51fc5531-3989-4d69-bbb5-68d64b756f5b',
'2c975c515f8b414db5eea60ba57ba455', 'bc1f2d8a-681c-4cc0-9a12-6067c7e1ac24']
# second address in sample_test_subscription_address is for the announcement broadcast
sample_test_subscription_address = ['BM-2cWQLCBGorT9pUGkYSuGGVr9LzE4mRnQaq', 'BM-GtovgYdgs7qXPkoYaRgrLFuFKz1SFpsw']
sample_subscription_name = 'test sub'
| 47 | 115 | 0.858629 |
e1a48794127b8e659f4702ccf90e46361a4d8c86 | 9,123 | py | Python | court_scraper/platforms/wicourts/pages/search.py | mscarey/court-scraper | 0e13976d901352a09cfd7e48450bbe427494f48e | [
"0BSD"
] | 1 | 2021-08-20T08:24:55.000Z | 2021-08-20T08:24:55.000Z | court_scraper/platforms/wicourts/pages/search.py | palewire/court-scraper | da4b614fb16806d8b5117373d273f802ca93a8cb | [
"0BSD"
] | null | null | null | court_scraper/platforms/wicourts/pages/search.py | palewire/court-scraper | da4b614fb16806d8b5117373d273f802ca93a8cb | [
"0BSD"
] | null | null | null | from urllib.parse import parse_qs
from anticaptchaofficial.hcaptchaproxyless import hCaptchaProxyless
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from court_scraper.base.selenium_helpers import SeleniumHelpers
from court_scraper.utils import dates_for_range
from .search_results import SearchResultsPage
from ..search_api import SearchApi
| 45.38806 | 110 | 0.645183 |
e1a571d93e123889de55adde281c383678e87c9f | 392 | py | Python | bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/encodings/muxings/mp3/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_api import Mp3Api
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.information.information_api import InformationApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp3.mp3_muxing_list_query_params import Mp3MuxingListQueryParams
| 78.4 | 113 | 0.903061 |
e1a57aedd9dcfceea7d88aacf27bf3e654f2a6f2 | 6,107 | py | Python | platform/bl_interface.py | sumasree98/2022-ectf-insecure-example | ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9 | [
"Apache-2.0"
] | null | null | null | platform/bl_interface.py | sumasree98/2022-ectf-insecure-example | ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9 | [
"Apache-2.0"
] | null | null | null | platform/bl_interface.py | sumasree98/2022-ectf-insecure-example | ed0a1e3618bca226b0cacd0157ff32a7f4fec2d9 | [
"Apache-2.0"
] | 1 | 2022-01-28T02:30:35.000Z | 2022-01-28T02:30:35.000Z | # 2022 eCTF
# Bootloader Interface Emulator
# Ben Janis
#
# (c) 2022 The MITRE Corporation
#
# This source file is part of an example system for MITRE's 2022 Embedded System
# CTF (eCTF). This code is being provided only for educational purposes for the
# 2022 MITRE eCTF competition, and may not meet MITRE standards for quality.
# Use this code at your own risk!
#
# DO NOT CHANGE THIS FILE
import argparse
import os
import logging
import socket
import select
from pathlib import Path
from typing import List, Optional, TypeVar
Message = TypeVar("Message")
LOG_FORMAT = "%(asctime)s:%(name)-s%(levelname)-8s %(message)s"
if __name__ == "__main__":
main()
| 27.885845 | 80 | 0.595055 |
e1a5eed695e57412a97412dd5eb33192adae977e | 968 | py | Python | Excel_python_demo.py | SJG88/excel_vba_python | ba7413be23796c67f921fe5428cd52592fdb54a9 | [
"MIT"
] | null | null | null | Excel_python_demo.py | SJG88/excel_vba_python | ba7413be23796c67f921fe5428cd52592fdb54a9 | [
"MIT"
] | null | null | null | Excel_python_demo.py | SJG88/excel_vba_python | ba7413be23796c67f921fe5428cd52592fdb54a9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This is a script to demo how to open up a macro enabled excel file, write a pandas dataframe to it
and save it as a new file name.
Created on Mon Mar 1 17:47:41 2021
@author: Shane Gore
"""
import os
import xlwings as xw
import pandas as pd
os.chdir(r"C:\Users\Shane Gore\Desktop\Roisin")
wb = xw.Book("CAO_template.xlsm")
worksheet = wb.sheets['EPOS_Closing_Stock_Detailed']
'Create dataframe'
cars = {'Brand': ['Honda Civic','Toyota Corolla','Ford Focus','Audi A4'],
'Price': [22000,25000,27000,35000]
}
cars_df = pd.DataFrame(cars, columns = ['Brand', 'Price'])
'Write a dataframe to excel'
worksheet.range('A1').value = cars_df
'Create a datafame from and excel sheet'
excel_df = worksheet.range('A1').options(pd.DataFrame, expand='table').value
'Save the excel as a new workbook'
newfilename = ('Test4.xlsm')
wb.save(newfilename)
'close the workbook'
wb.close()
| 23.047619 | 99 | 0.669421 |
e1a7e0683b31fbc7612eb7456c53935a17b8dbcf | 31,709 | py | Python | code/Elipsoide_Clark_FAT.py | birocoles/paper-magnetic-elipsoid | 81d9b2e39cbb942f619f590ad389eb30d58b46d4 | [
"BSD-3-Clause"
] | 1 | 2017-02-26T15:19:25.000Z | 2017-02-26T15:19:25.000Z | code/Elipsoide_Clark_FAT.py | birocoles/paper-magnetic-elipsoid | 81d9b2e39cbb942f619f590ad389eb30d58b46d4 | [
"BSD-3-Clause"
] | null | null | null | code/Elipsoide_Clark_FAT.py | birocoles/paper-magnetic-elipsoid | 81d9b2e39cbb942f619f590ad389eb30d58b46d4 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division
import numpy as np
from scipy import linalg
from matplotlib import pyplot as plt
from fatiando.gravmag import sphere
from fatiando import mesher, gridder, utils
from fatiando.vis import mpl
import scipy.special
import scipy.interpolate
def elipsoide (xp,yp,zp,inten,inc,dec,ellipsoids):
    '''
    Compute the three magnetic-field components produced by one triaxial
    ellipsoid at the observation points.

    input:
    xp, yp, zp - arrays with the x, y and z coordinates of the observation
                 points (all the same shape).
    inten - intensity of the inducing (main) geomagnetic field.
    inc, dec - inclination and declination of the main field; they are fed
               directly to np.cos/np.sin through ln_v/mn_v/nn_v, so they are
               presumably already in radians - TODO confirm against callers.
    ellipsoids - ellipsoid object carrying the semi-axes (a, b, c), the
                 center (xc, yc, zc), precomputed direction cosines
                 (l1..l3, m1..m3, n1..n3, ln, mn, nn), the susceptibility
                 matrix km, the rotation matrix mconT and remanence data in
                 ellipsoids.props['remanence'].

    output:
    Bx, By, Bz - field components in the geographic coordinate system.
    JRD_ang - resultant magnetization converted to intensity/angle form
              by fatiando's utils.vec2ang.

    NOTE(review): the helpers called here (x1_e, p0_e, lamb_e,
    parametros_integrais, F_e, JN_e, N_desmag, JR_e, JRD_e, dlambx*_e,
    integrais_elipticas, cte_m, v_e, B*_e, Bx_c/By_c/Bz_c, ...) are defined
    elsewhere in this module and are not visible from this block.
    '''
    # Direction-cosine parameters: formerly computed here (see the
    # commented-out lines), now taken precomputed from the ellipsoid object.
    #l1 = l1_v (ellipsoids.alfa, ellipsoids.delta)
    #l2 = l2_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
    #l3 = l3_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
    #m1 = m1_v (ellipsoids.alfa, ellipsoids.delta)
    #m2 = m2_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
    #m3 = m3_v (ellipsoids.alfa, ellipsoids.delta, ellipsoids.gamma)
    #n1 = n1_v (ellipsoids.delta)
    #n2 = n2_v (ellipsoids.delta, ellipsoids.gamma)
    #n3 = n3_v (ellipsoids.delta, ellipsoids.gamma)
    #ln = ln_v (ellipsoids.props['remanence'][2], ellipsoids.props['remanence'][1])
    #mn = mn_v (ellipsoids.props['remanence'][2], ellipsoids.props['remanence'][1])
    #nn = nn_v (ellipsoids.props['remanence'][1])
    #mcon = np.array([[l1, m1, n1],[l2, m2, n2],[l3, m3, n3]])
    #mconT = mcon.T
    #print mconT
    # Direction cosines of the main field (dec/inc -> unit vector).
    lt = ln_v (dec, inc)
    mt = mn_v (dec, inc)
    nt = nn_v (inc)
    #print l1,m1,n1
    #print l2,m2,n2
    #print l3,m3,n3
    # Cartesian coordinates of the observation points in the ellipsoid
    # (body-fixed) system.
    x1 = x1_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l1,ellipsoids.m1,ellipsoids.n1)
    x2 = x2_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l2,ellipsoids.m2,ellipsoids.n2)
    x3 = x3_e (xp,yp,zp,ellipsoids.xc,ellipsoids.yc,ellipsoids.zc,ellipsoids.l3,ellipsoids.m3,ellipsoids.n3)
    # Auxiliary coefficients of the cubic equation in lambda.
    p0 = p0_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
    p1 = p1_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
    p2 = p2_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3)
    p = p_e (p1,p2)
    q = q_e (p0,p1,p2)
    teta = teta_e (p,q)
    # Root of the cubic equation (largest root, lambda).
    lamb = lamb_e (p,teta,p2)
    # Parameters needed by the elliptic integrals.
    F,E,F2,E2,k,teta_linha = parametros_integrais(ellipsoids.a,ellipsoids.b,ellipsoids.c,lamb)
    # Magnetizations in the ellipsoid coordinate system. The susceptibility
    # matrix is now taken precomputed from the object (see commented code).
    #k_dec = np.array([[ellipsoids.props['k1'][2]],[ellipsoids.props['k2'][2]],[ellipsoids.props['k3'][2]]])
    #k_int = np.array([[ellipsoids.props['k1'][0]],[ellipsoids.props['k2'][0]],[ellipsoids.props['k3'][0]]])
    #k_inc = np.array([[ellipsoids.props['k1'][1]],[ellipsoids.props['k2'][1]],[ellipsoids.props['k3'][1]]])
    #if ellipsoids.props['k1'][0] == ellipsoids.props['k2'][0] and ellipsoids.props['k1'][0] == ellipsoids.props['k3'][0]:
    #    km = k_matrix2 (k_int,l1,l2,l3,m1,m2,m3,n1,n2,n3)
    #else:
    #    Lr = Lr_v (k_dec, k_inc)
    #    Mr = Mr_v (k_dec, k_inc)
    #    Nr = Nr_v (k_inc)
    #    km = k_matrix (k_int,Lr,Mr,Nr,l1,l2,l3,m1,m2,m3,n1,n2,n3)
    Ft = F_e (inten,lt,mt,nt,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
    JN = JN_e (ellipsoids.props['remanence'][0],ellipsoids.ln,ellipsoids.mn,ellipsoids.nn,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
    N1,N2,N3 = N_desmag (ellipsoids.a,ellipsoids.b,ellipsoids.c,F2,E2)
    JR = JR_e (ellipsoids.km,JN,Ft)
    JRD = JRD_e (ellipsoids.km,N1,N2,N3,JR)
    JRD_carte = (ellipsoids.mconT).dot(JRD)
    JRD_ang = utils.vec2ang(JRD_carte)
    #print Ft
    #print JN
    #print JRD
    #print N1,N2,N3
    #print JRD_ang
    # Derivatives of lambda with respect to the body coordinates.
    dlambx1 = dlambx1_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
    dlambx2 = dlambx2_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
    dlambx3 = dlambx3_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
    # Elliptic integrals.
    A, B, C = integrais_elipticas(ellipsoids.a,ellipsoids.b,ellipsoids.c,k,teta_linha,F,E)
    # Geometry terms for the field computation (along the ellipsoid axes).
    cte = cte_m (ellipsoids.a,ellipsoids.b,ellipsoids.c,lamb)
    V1, V2, V3 = v_e (ellipsoids.a,ellipsoids.b,ellipsoids.c,x1,x2,x3,lamb)
    # Geometry matrix row for B1.
    m11 = (cte*dlambx1*V1) - A
    m12 = cte*dlambx1*V2
    m13 = cte*dlambx1*V3
    # Geometry matrix row for B2.
    m21 = cte*dlambx2*V1
    m22 = (cte*dlambx2*V2) - B
    m23 = cte*dlambx2*V3
    # Geometry matrix row for B3.
    m31 = cte*dlambx3*V1
    m32 = cte*dlambx3*V2
    m33 = (cte*dlambx3*V3) - C
    # Forward problem: external field in the ellipsoid coordinate system.
    B1 = B1_e (m11,m12,m13,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
    B2 = B2_e (m21,m22,m23,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
    B3 = B3_e (m31,m32,m33,JRD,ellipsoids.a,ellipsoids.b,ellipsoids.c)
    #constante = constante_nova (a,b,c,lamb,JRD,x1,x2,x3)
    #B1 = B1_novo (constante,dlambx1,a,b,c,JRD,A)
    #B2 = B2_novo (constante,dlambx2,a,b,c,JRD,B)
    #B3 = B3_novo (constante,dlambx3,a,b,c,JRD,C)
    # Forward problem: rotate the field back to geographic coordinates.
    Bx = Bx_c (B1,B2,B3,ellipsoids.l1,ellipsoids.l2,ellipsoids.l3)
    By = By_c (B1,B2,B3,ellipsoids.m1,ellipsoids.m2,ellipsoids.m3)
    Bz = Bz_c (B1,B2,B3,ellipsoids.n1,ellipsoids.n2,ellipsoids.n3)
    return Bx,By,Bz,JRD_ang
# Problema Direto (Calcular o campo externo e anomalia nas coordenadas geograficas no SI)
def bx_c(xp, yp, zp, inten, inc, dec, ellipsoids):
    """Sum the x component of the magnetic field of a set of ellipsoids.

    input:
    xp, yp, zp - arrays with the coordinates of the observation points
                 (all the same shape).
    inten, inc, dec - intensity, inclination and declination of the main
                      field (angles in radians, as consumed by elipsoide).
    ellipsoids - sequence of ellipsoid objects accepted by elipsoide.

    output:
    Array with the summed Bx contribution of all ellipsoids.

    raises:
    ValueError - if the coordinate arrays do not all share the same shape.
    """
    # BUG FIX: the original chained comparison ``xp.shape != yp.shape != zp.shape``
    # only raised when xp differed from yp AND yp differed from zp, silently
    # accepting e.g. xp mismatched while yp == zp.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is equivalent.
    res = np.zeros(len(xp), dtype=float)
    ctemag = 1  # unit-conversion placeholder, kept for compatibility
    for ellipsoid in ellipsoids:
        bx, by, bz, jrd_ang = elipsoide(xp, yp, zp, inten, inc, dec, ellipsoid)
        res += bx
    return res * ctemag
def by_c(xp, yp, zp, inten, inc, dec, ellipsoids):
    """Sum the y component of the magnetic field of a set of ellipsoids.

    input:
    xp, yp, zp - arrays with the coordinates of the observation points
                 (all the same shape).
    inten, inc, dec - intensity, inclination and declination of the main
                      field (angles in radians, as consumed by elipsoide).
    ellipsoids - sequence of ellipsoid objects accepted by elipsoide.

    output:
    Array with the summed By contribution of all ellipsoids.

    raises:
    ValueError - if the coordinate arrays do not all share the same shape.
    """
    # BUG FIX: the original chained comparison ``xp.shape != yp.shape != zp.shape``
    # only raised when xp differed from yp AND yp differed from zp.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is equivalent.
    res = np.zeros(len(xp), dtype=float)
    ctemag = 1  # unit-conversion placeholder, kept for compatibility
    for ellipsoid in ellipsoids:
        bx, by, bz, jrd_ang = elipsoide(xp, yp, zp, inten, inc, dec, ellipsoid)
        res += by
    return res * ctemag
def bz_c(xp, yp, zp, inten, inc, dec, ellipsoids):
    """Sum the z component of the magnetic field of a set of ellipsoids.

    input:
    xp, yp, zp - arrays with the coordinates of the observation points
                 (all the same shape).
    inten, inc, dec - intensity, inclination and declination of the main
                      field (angles in radians, as consumed by elipsoide).
    ellipsoids - sequence of ellipsoid objects accepted by elipsoide.

    output:
    Array with the summed Bz contribution of all ellipsoids.

    raises:
    ValueError - if the coordinate arrays do not all share the same shape.
    """
    # BUG FIX: the original chained comparison ``xp.shape != yp.shape != zp.shape``
    # only raised when xp differed from yp AND yp differed from zp.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is equivalent.
    res = np.zeros(len(xp), dtype=float)
    ctemag = 1  # unit-conversion placeholder, kept for compatibility
    for ellipsoid in ellipsoids:
        bx, by, bz, jrd_ang = elipsoide(xp, yp, zp, inten, inc, dec, ellipsoid)
        res += bz
    return res * ctemag
def tf_c(xp, yp, zp, inten, inc, dec, ellipsoids):
    """Total-field anomaly of a set of ellipsoids.

    Projects the summed field vector onto the main-field direction given
    by ``inc``/``dec``.

    input:
    xp, yp, zp - arrays with the coordinates of the observation points
                 (all the same shape).
    inten, inc, dec - intensity, inclination and declination of the main
                      field (angles in radians, as consumed by elipsoide).
    ellipsoids - sequence of ellipsoid objects accepted by elipsoide.

    output:
    (res, jrd_ang) - the total-field anomaly array, and the resultant
    magnetization (intensity/angle form) of the LAST ellipsoid processed;
    jrd_ang is None when ``ellipsoids`` is empty.

    raises:
    ValueError - if the coordinate arrays do not all share the same shape.
    """
    # BUG FIX: the original chained comparison ``xp.shape != yp.shape != zp.shape``
    # only raised when xp differed from yp AND yp differed from zp.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is equivalent.
    res = np.zeros(len(xp), dtype=float)
    ctemag = 1  # unit-conversion placeholder, kept for compatibility
    # BUG FIX: jrd_ang was undefined (NameError on return) for an empty list.
    jrd_ang = None
    # Unit vector of the main field, hoisted out of the loop (invariant).
    cos_inc = np.cos(inc)
    px, py, pz = cos_inc * np.cos(dec), cos_inc * np.sin(dec), np.sin(inc)
    for ellipsoid in ellipsoids:
        bx, by, bz, jrd_ang = elipsoide(xp, yp, zp, inten, inc, dec, ellipsoid)
        res += bx * px + by * py + bz * pz
    return res * ctemag, jrd_ang
def l1_v(alfa, delta):
    '''
    Direction cosine between the ellipsoid major axis and the x axis.

    input:
    alfa - azimuth of the major axis, in radians.
    delta - inclination of the major axis, in radians.

    output:
    Dimensionless direction cosine.
    '''
    return -np.cos(alfa) * np.cos(delta)
def l2_v(alfa, delta, gamma):
    '''
    Direction cosine between the ellipsoid intermediate axis and the x axis.

    input:
    alfa - azimuth of the major axis, in radians.
    delta - inclination of the major axis, in radians.
    gamma - angle between the intermediate axis and the vertical
            projection of the ellipsoid center onto the plane, in radians.

    output:
    Dimensionless direction cosine.
    '''
    return np.cos(alfa) * np.cos(gamma) * np.sin(delta) + np.sin(alfa) * np.sin(gamma)
def l3_v(alfa, delta, gamma):
    '''
    Direction cosine between the ellipsoid minor axis and the x axis.

    input:
    alfa - azimuth of the major axis, in radians.
    delta - inclination of the major axis, in radians.
    gamma - angle between the intermediate axis and the vertical
            projection of the ellipsoid center onto the plane, in radians.

    output:
    Dimensionless direction cosine.
    '''
    return np.sin(alfa) * np.cos(gamma) - np.cos(alfa) * np.sin(gamma) * np.sin(delta)
def m1_v(alfa, delta):
    '''
    Direction cosine between the ellipsoid major axis and the y axis.

    input:
    alfa - azimuth of the major axis, in radians.
    delta - inclination of the major axis, in radians.

    output:
    Dimensionless direction cosine.
    '''
    return -np.sin(alfa) * np.cos(delta)
def m2_v(alfa, delta, gamma):
    '''
    Direction cosine between the ellipsoid intermediate axis and the y axis.

    input:
    alfa - azimuth of the major axis, in radians.
    delta - inclination of the major axis, in radians.
    gamma - angle between the intermediate axis and the vertical
            projection of the ellipsoid center onto the plane, in radians.

    output:
    Dimensionless direction cosine.
    '''
    return np.sin(alfa) * np.cos(gamma) * np.sin(delta) - np.cos(alfa) * np.sin(gamma)
def m3_v(alfa, delta, gamma):
    """Direction cosine of the ellipsoid minor axis along the geographic y axis.

    alfa  - azimuth of the major axis, radians.
    delta - plunge of the major axis, radians.
    gamma - rotation about the major axis, radians.
    """
    return -(np.cos(alfa)*np.cos(gamma) + np.sin(alfa)*np.sin(delta)*np.sin(gamma))
def n1_v(delta):
    """Direction cosine of the ellipsoid major axis along the geographic z axis.

    delta - plunge of the major axis, radians (0 <= delta <= pi/2).
    """
    return -np.sin(delta)
def n2_v(delta, gamma):
    """Direction cosine of the ellipsoid intermediate axis along the geographic z axis.

    delta - plunge of the major axis, radians.
    gamma - rotation about the major axis, radians.
    """
    return -(np.cos(gamma)*np.cos(delta))
def n3_v(delta, gamma):
    """Direction cosine of the ellipsoid minor axis along the geographic z axis.

    delta - plunge of the major axis, radians.
    gamma - rotation about the major axis, radians.
    """
    return np.sin(gamma)*np.cos(delta)
def ln_v(declinacao, inclinacao):
    """x direction cosine of a magnetization vector given declination/inclination.

    declinacao - declination, radians.
    inclinacao - inclination, radians.
    """
    return np.cos(inclinacao)*np.cos(declinacao)
def mn_v(declinacao, inclinacao):
    """y direction cosine of a magnetization vector given declination/inclination.

    declinacao - declination, radians.
    inclinacao - inclination, radians.
    """
    return np.cos(inclinacao)*np.sin(declinacao)
def nn_v(inclinacao):
    """z (vertical) direction cosine of a magnetization vector.

    inclinacao - inclination, radians.
    """
    return np.sin(inclinacao)
def Lr_v(k_dec, k_inc):
    """x direction cosines of the three principal susceptibility axes.

    k_dec - declinations of the three susceptibility axes, radians.
    k_inc - inclinations of the three susceptibility axes, radians.
    Returns a length-3 float array.
    """
    return np.array([np.cos(k_dec[i])*np.cos(k_inc[i]) for i in range(3)])
def Mr_v(k_dec, k_inc):
    """y direction cosines of the three principal susceptibility axes.

    k_dec - declinations of the three susceptibility axes, radians.
    k_inc - inclinations of the three susceptibility axes, radians.
    Returns a length-3 float array.
    """
    return np.array([np.sin(k_dec[i])*np.cos(k_inc[i]) for i in range(3)])
def Nr_v(k_inc):
    """z direction cosines of the three principal susceptibility axes.

    k_inc - inclinations of the three susceptibility axes, radians.
    Returns a length-3 float array.
    """
    return np.array([np.sin(k_inc[i]) for i in range(3)])
def F_e(intensidadeT, lt, mt, nt, l1, l2, l3, m1, m2, m3, n1, n2, n3):
    """Earth's field vector rotated from geographic axes into the ellipsoid body axes.

    intensidadeT - field intensity.
    lt, mt, nt   - direction cosines of the field in geographic axes.
    l*, m*, n*   - direction cosines of the three ellipsoid body axes.
    Returns a length-3 array (field components along the body axes).
    """
    f1 = lt*l1 + mt*m1 + nt*n1
    f2 = lt*l2 + mt*m2 + nt*n2
    f3 = lt*l3 + mt*m3 + nt*n3
    return intensidadeT*np.array([f1, f2, f3])
def JN_e(intensidade, ln, mn, nn, l1, l2, l3, m1, m2, m3, n1, n2, n3):
    """Remanent magnetization vector rotated into the ellipsoid body axes.

    intensidade - remanent magnetization intensity.
    ln, mn, nn  - direction cosines of the remanence in geographic axes.
    l*, m*, n*  - direction cosines of the three ellipsoid body axes.
    Returns a length-3 array.
    """
    j1 = ln*l1 + mn*m1 + nn*n1
    j2 = ln*l2 + mn*m2 + nn*n2
    j3 = ln*l3 + mn*m3 + nn*n3
    return intensidade*np.array([j1, j2, j3])
def N_desmag(a, b, c, F2, E2):
    """Demagnetizing factors along the three semi-axes of a triaxial ellipsoid.

    a, b, c - semi-axes (a > b > c).
    F2, E2  - complete incomplete-elliptic-integral values for the body geometry
              (as returned by parametros_integrais).
    Returns (N1, N2, N3).
    """
    vol = 4.*np.pi*a*b*c
    root_ac = (a**2 - c**2)**0.5
    N1 = (vol/((a**2 - b**2)*root_ac))*(F2 - E2)
    N2 = ((vol*root_ac)/((a**2 - b**2)*(b**2 - c**2))) * (
        E2 - ((b**2 - c**2)/(a**2 - c**2))*F2 - (c*(a**2 - b**2))/(a*b*root_ac))
    N3 = (vol/((b**2 - c**2)*root_ac))*((b*root_ac)/(a*c) - E2)
    return N1, N2, N3
def k_matrix(k_int, Lr, Mr, Nr, l1, l2, l3, m1, m2, m3, n1, n2, n3):
    """Susceptibility tensor expressed in the ellipsoid body system.

    k_int      - the three principal susceptibility magnitudes.
    Lr, Mr, Nr - geographic x, y, z direction cosines of the principal axes.
    l*, m*, n* - direction cosines of the three ellipsoid body axes.
    Returns a 3x3 array.
    """
    ls = (l1, l2, l3)
    ms = (m1, m2, m3)
    ns = (n1, n2, n3)
    k = np.zeros((3, 3))
    for r in range(3):
        # projection of the r-th principal susceptibility axis onto each body axis
        proj = [Lr[r]*ls[i] + Mr[r]*ms[i] + Nr[r]*ns[i] for i in range(3)]
        for i in range(3):
            for j in range(3):
                k[i, j] += k_int[r]*proj[i]*proj[j]
    return k
def k_matrix2(k_int, l1, l2, l3, m1, m2, m3, n1, n2, n3):
    """Susceptibility tensor built from the body-axis direction cosines only
    (principal susceptibility axes assumed coincident with the body axes).

    k_int      - the three principal susceptibility magnitudes.
    l*, m*, n* - direction cosines of the three ellipsoid body axes.
    Returns a 3x3 array.
    """
    ls = (l1, l2, l3)
    ms = (m1, m2, m3)
    ns = (n1, n2, n3)
    k = np.zeros((3, 3))
    for r in range(3):
        proj = [ls[r]*ls[i] + ms[r]*ms[i] + ns[r]*ns[i] for i in range(3)]
        for i in range(3):
            for j in range(3):
                k[i, j] += k_int[r]*proj[i]*proj[j]
    return k
def JR_e(km, JN, Ft):
    """Resultant magnetization (induced + remanent), without self-demagnetization.

    km - 3x3 susceptibility matrix in body coordinates.
    JN - remanent magnetization vector in body coordinates.
    Ft - inducing field vector in body coordinates.
    """
    induced = np.dot(km, Ft)
    return induced + JN
def JRD_e(km, N1, N2, N3, JR):
    """Resultant magnetization corrected for self-demagnetization.

    Solves (I + km * diag(N1, N2, N3)) JRD = JR, i.e. column j of the
    susceptibility matrix is scaled by the j-th demagnetizing factor.
    """
    scaled = km*np.array([N1, N2, N3])   # row-broadcast: scales the columns of km
    system = np.identity(3) + scaled
    return linalg.inv(system).dot(JR)
def x1_e(xp, yp, zp, xc, yc, h, l1, m1, n1):
    """Observation-point coordinate along the ellipsoid body x axis.

    xp, yp, zp - observation coordinates (grids or scalars).
    xc, yc, h  - horizontal center and depth of the ellipsoid.
    l1, m1, n1 - direction cosines of the body x axis.
    """
    dx = xp - xc
    dy = yp - yc
    dz = -zp - h
    return dx*l1 + dy*m1 + dz*n1
def x2_e(xp, yp, zp, xc, yc, h, l2, m2, n2):
    """Observation-point coordinate along the ellipsoid body y axis.

    xp, yp, zp - observation coordinates (grids or scalars).
    xc, yc, h  - horizontal center and depth of the ellipsoid.
    l2, m2, n2 - direction cosines of the body y axis.
    """
    dx = xp - xc
    dy = yp - yc
    dz = -zp - h
    return dx*l2 + dy*m2 + dz*n2
def x3_e(xp, yp, zp, xc, yc, h, l3, m3, n3):
    """Observation-point coordinate along the ellipsoid body z axis.

    xp, yp, zp - observation coordinates (grids or scalars).
    xc, yc, h  - horizontal center and depth of the ellipsoid.
    l3, m3, n3 - direction cosines of the body z axis.
    """
    dx = xp - xc
    dy = yp - yc
    dz = -zp - h
    return dx*l3 + dy*m3 + dz*n3
def p0_e(a, b, c, x1, x2, x3):
    """Constant coefficient of the confocal cubic in s whose largest root is lambda.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates of the observation point.
    """
    return (a*b*c)**2 - (b*c*x1)**2 - (a*c*x2)**2 - (a*b*x3)**2
def p1_e(a, b, c, x1, x2, x3):
    """Linear coefficient of the confocal cubic in s whose largest root is lambda.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates of the observation point.
    """
    return ((a*b)**2 + (b*c)**2 + (c*a)**2
            - (b**2 + c**2)*x1**2
            - (a**2 + c**2)*x2**2
            - (a**2 + b**2)*x3**2)
def p2_e(a, b, c, x1, x2, x3):
    """Quadratic coefficient of the confocal cubic in s whose largest root is lambda.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates of the observation point.
    """
    return (a**2 + b**2 + c**2) - (x1**2 + x2**2 + x3**2)
def p_e(p1, p2):
    """Depressed-cubic coefficient p obtained from the raw cubic coefficients.

    p1, p2 - coefficients of the confocal cubic.
    """
    return p1 - p2*p2/3.
def q_e(p0, p1, p2):
    """Depressed-cubic coefficient q obtained from the raw cubic coefficients.

    p0, p1, p2 - coefficients of the confocal cubic.
    """
    return p0 - p1*p2/3. + 2.*(p2/3.)**3
def teta_e(p, q):
    """Angular constant (radians) of the trigonometric cubic-root formula.

    p, q - depressed-cubic coefficients (requires p < 0 and a real-root regime).
    """
    scale = np.sqrt((-p/3.)**3)
    return np.arccos(-q/(2.*scale))
def lamb_e(p, teta, p2):
    """Largest real root (lambda) of the confocal cubic, trigonometric form.

    p, p2 - cubic coefficients.
    teta  - angular constant from teta_e, radians.
    """
    radius = np.sqrt(-p/3.)
    return 2.*radius*np.cos(teta/3.) - p2/3.
def mi_e(p, teta, p2):
    """Intermediate real root of the confocal cubic, trigonometric form.

    p, p2 - cubic coefficients.
    teta  - angular constant from teta_e, radians.
    """
    radius = np.sqrt(-p/3.)
    return -2.*radius*np.cos(teta/3. + np.pi/3.) - p2/3.
def ni_e(p, teta, p2):
    """Smallest real root of the confocal cubic, trigonometric form.

    p, p2 - cubic coefficients.
    teta  - angular constant from teta_e, radians.
    """
    radius = np.sqrt(-p/3.)
    return -2.*radius*np.cos(teta/3. - np.pi/3.) - p2/3.
def dlambx1_e(a, b, c, x1, x2, x3, lamb):
    """Partial derivative of lambda with respect to the body x1 coordinate.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates.
    lamb       - largest real root of the confocal cubic.
    """
    da = a**2 + lamb
    db = b**2 + lamb
    dc = c**2 + lamb
    denom = (x1/da)**2 + (x2/db)**2 + (x3/dc)**2
    return (2*x1/da)/denom
def dlambx2_e(a, b, c, x1, x2, x3, lamb):
    """Partial derivative of lambda with respect to the body x2 coordinate.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates.
    lamb       - largest real root of the confocal cubic.
    """
    da = a**2 + lamb
    db = b**2 + lamb
    dc = c**2 + lamb
    denom = (x1/da)**2 + (x2/db)**2 + (x3/dc)**2
    return (2*x2/db)/denom
def dlambx3_e(a, b, c, x1, x2, x3, lamb):
    """Partial derivative of lambda with respect to the body x3 coordinate.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates.
    lamb       - largest real root of the confocal cubic.
    """
    da = a**2 + lamb
    db = b**2 + lamb
    dc = c**2 + lamb
    denom = (x1/da)**2 + (x2/db)**2 + (x3/dc)**2
    return (2*x3/dc)/denom
def cte_m(a, b, c, lamb):
    """Geometric factor 1/R(lambda), R(lamb) = sqrt((a^2+lamb)(b^2+lamb)(c^2+lamb)).

    a, b, c - ellipsoid semi-axes.
    lamb    - largest real root of the confocal cubic.
    """
    prod = (a**2 + lamb)*(b**2 + lamb)*(c**2 + lamb)
    return 1./np.sqrt(prod)
def v_e(a, b, c, x1, x2, x3, lamb):
    """Auxiliary ratios x_i/(axis_i^2 + lambda) used by the field expressions.

    a, b, c    - ellipsoid semi-axes.
    x1, x2, x3 - body-frame coordinates.
    lamb       - largest real root of the confocal cubic.
    Returns (V1, V2, V3).
    """
    V1 = x1/(a**2 + lamb)
    V2 = x2/(b**2 + lamb)
    V3 = x3/(c**2 + lamb)
    return V1, V2, V3
def B1_e(m11, m12, m13, J, a, b, c):
    """Magnetic field component along ellipsoid body axis 1.

    m11, m12, m13 - first row of the geometric (depolarization) matrix.
    J             - magnetization vector in body coordinates.
    a, b, c       - ellipsoid semi-axes.
    """
    proj = m11*J[0] + m12*J[1] + m13*J[2]
    return 2.*np.pi*a*b*c*proj
def B2_e(m21, m22, m23, J, a, b, c):
    """Magnetic field component along ellipsoid body axis 2.

    m21, m22, m23 - second row of the geometric (depolarization) matrix.
    J             - magnetization vector in body coordinates.
    a, b, c       - ellipsoid semi-axes.
    """
    proj = m21*J[0] + m22*J[1] + m23*J[2]
    return 2.*np.pi*a*b*c*proj
def B3_e(m31, m32, m33, J, a, b, c):
    """Magnetic field component along ellipsoid body axis 3.

    m31, m32, m33 - third row of the geometric (depolarization) matrix.
    J             - magnetization vector in body coordinates.
    a, b, c       - ellipsoid semi-axes.
    """
    proj = m31*J[0] + m32*J[1] + m33*J[2]
    return 2.*np.pi*a*b*c*proj
def Bx_c(B1, B2, B3, l1, l2, l3):
    """Geographic x component of the field, rotated back from body axes.

    B1, B2, B3 - field components along the body axes.
    l1, l2, l3 - x direction cosines of the body axes.
    """
    return l1*B1 + l2*B2 + l3*B3
def By_c(B1, B2, B3, m1, m2, m3):
    """Geographic y component of the field, rotated back from body axes.

    B1, B2, B3 - field components along the body axes.
    m1, m2, m3 - y direction cosines of the body axes.
    """
    return m1*B1 + m2*B2 + m3*B3
def Bz_c(B1, B2, B3, n1, n2, n3):
    """Geographic z component of the field, rotated back from body axes.

    B1, B2, B3 - field components along the body axes.
    n1, n2, n3 - z direction cosines of the body axes.
    """
    return n1*B1 + n2*B2 + n3*B3
def Alambda_simp_ext3(a,b,c,lamb):
    '''
    Potential integral A(lambda) = int_lambda^umax du / ((a^2+u)*R(u)),
    with R(u) = sqrt((a^2+u)*(b^2+u)*(c^2+u)), evaluated element-wise over
    lamb with the extended Simpson rule (end weights 3/8, 7/6, 23/24).

    input:
    a,b,c - ellipsoid semi-axes.
    lamb - 2D array: largest real root of the confocal cubic at each point.
    output:
    A - 2D array with the same shape as lamb.
    '''
    A = []
    umax = 1000000.0   # truncation of the improper upper limit (integrand ~ u**-5/2)
    N = 300000
    aux1 = 3./8.
    aux2 = 7./6.
    aux3 = 23./24.
    for lam in np.ravel(lamb):
        h = (umax - lam)/(N-1)
        u = np.linspace(lam, umax, N)
        # BUGFIX: R(u) is the square root of the PRODUCT of the three factors
        # (previously a sum), matching the closed-form elliptic-integral
        # expressions used in integrais_elipticas.
        R = np.sqrt((a**2 + u)*(b**2 + u)*(c**2 + u))
        f = 1./((a**2 + u)*R)
        aij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
        A.append(aij)
    return np.array(A).reshape(lamb.shape)
def Blambda_simp_ext3(a,b,c,lamb):
    '''
    Potential integral B(lambda) = int_lambda^umax du / ((b^2+u)*R(u)),
    with R(u) = sqrt((a^2+u)*(b^2+u)*(c^2+u)), evaluated element-wise over
    lamb with the extended Simpson rule (end weights 3/8, 7/6, 23/24).

    input:
    a,b,c - ellipsoid semi-axes.
    lamb - 2D array: largest real root of the confocal cubic at each point.
    output:
    B - 2D array with the same shape as lamb.
    '''
    B = []
    umax = 1000000.0   # truncation of the improper upper limit (integrand ~ u**-5/2)
    N = 300000
    aux1 = 3./8.
    aux2 = 7./6.
    aux3 = 23./24.
    for lam in np.ravel(lamb):
        h = (umax - lam)/(N-1)
        u = np.linspace(lam, umax, N)
        # BUGFIX: R(u) is the square root of the PRODUCT of the three factors
        # (previously a sum), matching the closed-form elliptic-integral
        # expressions used in integrais_elipticas.
        R = np.sqrt((a**2 + u)*(b**2 + u)*(c**2 + u))
        f = 1./((b**2 + u)*R)
        bij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
        B.append(bij)
    return np.array(B).reshape(lamb.shape)
def Clambda_simp_ext3(a,b,c,lamb):
    '''
    Potential integral C(lambda) = int_lambda^umax du / ((c^2+u)*R(u)),
    with R(u) = sqrt((a^2+u)*(b^2+u)*(c^2+u)), evaluated element-wise over
    lamb with the extended Simpson rule (end weights 3/8, 7/6, 23/24).

    input:
    a,b,c - ellipsoid semi-axes.
    lamb - 2D array: largest real root of the confocal cubic at each point.
    output:
    C - 2D array with the same shape as lamb.
    '''
    C = []
    umax = 1000000.0   # truncation of the improper upper limit (integrand ~ u**-5/2)
    N = 300000
    aux1 = 3./8.
    aux2 = 7./6.
    aux3 = 23./24.
    for lam in np.ravel(lamb):
        h = (umax - lam)/(N-1)
        u = np.linspace(lam, umax, N)
        # BUGFIX: R(u) is the square root of the PRODUCT of the three factors
        # (previously a sum), matching the closed-form elliptic-integral
        # expressions used in integrais_elipticas.
        R = np.sqrt((a**2 + u)*(b**2 + u)*(c**2 + u))
        f = 1./((c**2 + u)*R)
        cij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
        C.append(cij)
    return np.array(C).reshape(lamb.shape)
def Dlambda_simp_ext3(a, b, c, lamb):
'''
Integral do potencial utilizando o metodo de simpson.
input:
a,b,c - semi-eixos do elipsoide
lamb -
output:
D - constante escalar
'''
D = []
umax = 1000000.0
N = 300000
aux1 = 3./8.
aux2 = 7./6.
aux3 = 23./24.
for l in np.ravel(lamb):
h = (umax - l)/(N-1)
u = np.linspace(l, umax, N)
R = np.sqrt((a**2 + u) + (b**2 + u) + (c**2 + u))
f = 1./R
dij = h*(aux1*f[0] + aux2*f[1] + aux3*f[2] + np.sum(f[3:N-3]) + aux3*f[-3] + aux2*f[-2] + aux1*f[-1])
D.append(dij)
D = np.array(D).reshape((lamb.shape[0], lamb.shape[1]))
return D
def parametros_integrais(a, b, c, lamb):
    '''
    Elliptic-integral arguments and values for the ellipsoid geometry.

    a, b, c - semi-axes (a > b > c).
    lamb    - array: largest real root of the confocal cubic.
    Returns (F, E, F2, E2, k, teta_linha): incomplete elliptic integrals of
    the first/second kind at amplitude teta_linha (lambda-dependent) and at
    teta_linha2 = arccos(c/a) (body-shape only).

    NOTE(review): the modulus k = sqrt((a^2-b^2)/(a^2-c^2)) is passed directly
    as the second argument of scipy.special.ellipkinc/ellipeinc, which expect
    the parameter m = k^2 - confirm the intended convention with the authors.
    '''
    k1 = np.sqrt((a**2 - b**2)/(a**2 - c**2))
    k = np.full_like(lamb, k1)
    k2 = k1   # same modulus, scalar form
    teta_linha = np.arcsin(np.sqrt((a**2 - c**2)/(a**2 + lamb)))
    teta_linha2 = np.arccos(c/a)
    F = scipy.special.ellipkinc(teta_linha, k)
    E = scipy.special.ellipeinc(teta_linha, k)
    F2 = scipy.special.ellipkinc(teta_linha2, k2)
    E2 = scipy.special.ellipeinc(teta_linha2, k2)
    return F, E, F2, E2, k, teta_linha
def integrais_elipticas(a, b, c, k, teta_linha, F, E):
    '''
    Closed-form potential integrals A2, B2, C2 of a triaxial ellipsoid,
    expressed through incomplete elliptic integrals.

    a, b, c    - semi-axes (a > b > c).
    k          - geometric modulus array.
    teta_linha - amplitude array, radians.
    F, E       - incomplete elliptic integrals of the first and second kind.
    Returns (A2, B2, C2).
    '''
    sin_t = np.sin(teta_linha)
    cos_t = np.cos(teta_linha)
    delta = (1 - k**2*sin_t*sin_t)**0.5
    root_ac = (a**2 - c**2)**0.5
    A2 = (2/((a**2 - b**2)*root_ac))*(F - E)
    B2 = ((2*root_ac)/((a**2 - b**2)*(b**2 - c**2))) * (
        E - ((b**2 - c**2)/(a**2 - c**2))*F - (k**2*sin_t*cos_t)/delta)
    C2 = (2/((b**2 - c**2)*root_ac))*((sin_t*delta)/cos_t - E)
    return A2, B2, C2
| 30.372605 | 216 | 0.583431 |
e1a8d307cf28e74c7cd2efb91e428fd65a4beecc | 852 | py | Python | app.py | aserhatdemir/digitalocean | 4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7 | [
"MIT"
] | null | null | null | app.py | aserhatdemir/digitalocean | 4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7 | [
"MIT"
] | null | null | null | app.py | aserhatdemir/digitalocean | 4ae2bfc2831b4fae15d9076b3b228c9a4bda44e7 | [
"MIT"
] | null | null | null | from do import DigitalOcean
import argparse
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-token')
args = parser.parse_args()
# parse_input(args.file)
do_play(args.token)
| 23.027027 | 61 | 0.627934 |
e1a8f6b9d508e5ab80894a039b117dd4b1afc6ed | 343 | py | Python | convert_twitter_data.py | charlieyou/data-science-final-project | 7968261c3e44fe3544360a08fea271b611d105c1 | [
"Apache-2.0"
] | null | null | null | convert_twitter_data.py | charlieyou/data-science-final-project | 7968261c3e44fe3544360a08fea271b611d105c1 | [
"Apache-2.0"
] | null | null | null | convert_twitter_data.py | charlieyou/data-science-final-project | 7968261c3e44fe3544360a08fea271b611d105c1 | [
"Apache-2.0"
] | null | null | null | import cPickle as pickle
import pandas as pd
if __name__ == '__main__':
fnames = set(['clinton_tweets.json', 'trump_tweets.json'])
for fname in fnames:
df = pd.read_json('data/' + fname)
df = df.transpose()
df = df['text']
pickle.dump([(i, v) for i, v in zip(df.index, df.values)], open(fname, 'wb'))
| 28.583333 | 85 | 0.597668 |