| max_stars_repo_path (string, lengths 3 to 269) | max_stars_repo_name (string, lengths 4 to 119) | max_stars_count (int64, 0 to 191k) | id (string, lengths 1 to 7) | content (string, lengths 6 to 1.05M) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5) |
|---|---|---|---|---|---|---|
wayback/__init__.py
|
edsu/wayback
| 0
|
12781751
|
<gh_stars>0
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from ._client import ( # noqa
CdxRecord,
memento_url_data,
WaybackClient,
WaybackSession)
| 1.242188
| 1
|
Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWithBarcode/AdvanceFeatures/PatchCode.py
|
aspose-barcode/Aspose.BarCode-for-Java
| 10
|
12781752
|
<filename>Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWithBarcode/AdvanceFeatures/PatchCode.py
from asposebarcode import Settings
from com.aspose.barcode import BarCodeBuilder
from com.aspose.barcode import Symbology
class PatchCode:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithBarcode/AdvanceBarcodeFeatures/PatchCode/'
# Instantiate barcode object
builder = BarCodeBuilder()
# Set Symbology type
symbology = Symbology
builder.setSymbologyType(symbology.PatchCode)
# Set code text
builder.setCodeText("Patch T")
# Save the image to your system and set its image format to Jpeg
builder.save(dataDir + "PatchCode.jpg")
# Display Status
print "Generated PatchCode Successfully."
if __name__ == '__main__':
PatchCode()
| 2.625
| 3
|
scripts/user.py
|
GabrielTavernini/TelegramMap
| 3
|
12781753
|
class User:
def __init__(self, name, time, point, dist):
self.name = name
self.time = time
self.points = {point: dist}
| 2.796875
| 3
|
examples/deadline/All-In-AWS-Infrastructure-Basic/python/package/lib/security_tier.py
|
aws-painec/aws-rfdk
| 76
|
12781754
|
<reponame>aws-painec/aws-rfdk
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from aws_cdk.core import (
Construct,
Stack,
)
from aws_rfdk import (
DistinguishedName,
X509CertificatePem
)
class SecurityTier(Stack):
"""
The security tier of the render farm.
This stack contains resources used to ensure the render farm is secure.
"""
def __init__(self, scope: Construct, stack_id: str, **kwargs):
"""
Initialize a new instance of SecurityTier.
:param scope: The scope of this construct.
:param stack_id: The ID of this construct.
:param kwargs: Any kwargs that need to be passed on to the parent class.
"""
super().__init__(scope, stack_id, **kwargs)
# Our self-signed root CA certificate for the internal endpoints in the farm.
self.root_ca = X509CertificatePem(
self,
'RootCA',
subject=DistinguishedName(
cn='SampleRootCA'
)
)
| 2.046875
| 2
|
sdk/python/pulumi_google_native/file/v1beta1/get_instance.py
|
AaronFriel/pulumi-google-native
| 44
|
12781755
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetInstanceResult',
'AwaitableGetInstanceResult',
'get_instance',
'get_instance_output',
]
@pulumi.output_type
class GetInstanceResult:
def __init__(__self__, create_time=None, description=None, etag=None, file_shares=None, kms_key_name=None, labels=None, name=None, networks=None, satisfies_pzs=None, state=None, status_message=None, suspension_reasons=None, tier=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if file_shares and not isinstance(file_shares, list):
raise TypeError("Expected argument 'file_shares' to be a list")
pulumi.set(__self__, "file_shares", file_shares)
if kms_key_name and not isinstance(kms_key_name, str):
raise TypeError("Expected argument 'kms_key_name' to be a str")
pulumi.set(__self__, "kms_key_name", kms_key_name)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if networks and not isinstance(networks, list):
raise TypeError("Expected argument 'networks' to be a list")
pulumi.set(__self__, "networks", networks)
if satisfies_pzs and not isinstance(satisfies_pzs, bool):
raise TypeError("Expected argument 'satisfies_pzs' to be a bool")
pulumi.set(__self__, "satisfies_pzs", satisfies_pzs)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if status_message and not isinstance(status_message, str):
raise TypeError("Expected argument 'status_message' to be a str")
pulumi.set(__self__, "status_message", status_message)
if suspension_reasons and not isinstance(suspension_reasons, list):
raise TypeError("Expected argument 'suspension_reasons' to be a list")
pulumi.set(__self__, "suspension_reasons", suspension_reasons)
if tier and not isinstance(tier, str):
raise TypeError("Expected argument 'tier' to be a str")
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time when the instance was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the instance (2048 characters or less).
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> str:
"""
Server-specified ETag for the instance resource to prevent simultaneous updates from overwriting each other.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="fileShares")
def file_shares(self) -> Sequence['outputs.FileShareConfigResponse']:
"""
File system shares on the instance. For this version, only a single file share is supported.
"""
return pulumi.get(self, "file_shares")
@property
@pulumi.getter(name="kmsKeyName")
def kms_key_name(self) -> str:
"""
KMS key name used for data encryption.
"""
return pulumi.get(self, "kms_key_name")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Resource labels to represent user provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the instance, in the format `projects/{project_id}/locations/{location_id}/instances/{instance_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def networks(self) -> Sequence['outputs.NetworkConfigResponse']:
"""
VPC networks to which the instance is connected. For this version, only a single network is supported.
"""
return pulumi.get(self, "networks")
@property
@pulumi.getter(name="satisfiesPzs")
def satisfies_pzs(self) -> bool:
"""
Reserved for future use.
"""
return pulumi.get(self, "satisfies_pzs")
@property
@pulumi.getter
def state(self) -> str:
"""
The instance state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> str:
"""
Additional information about the instance state, if available.
"""
return pulumi.get(self, "status_message")
@property
@pulumi.getter(name="suspensionReasons")
def suspension_reasons(self) -> Sequence[str]:
"""
This field indicates all the reasons the instance is in "SUSPENDED" state.
"""
return pulumi.get(self, "suspension_reasons")
@property
@pulumi.getter
def tier(self) -> str:
"""
The service tier of the instance.
"""
return pulumi.get(self, "tier")
class AwaitableGetInstanceResult(GetInstanceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInstanceResult(
create_time=self.create_time,
description=self.description,
etag=self.etag,
file_shares=self.file_shares,
kms_key_name=self.kms_key_name,
labels=self.labels,
name=self.name,
networks=self.networks,
satisfies_pzs=self.satisfies_pzs,
state=self.state,
status_message=self.status_message,
suspension_reasons=self.suspension_reasons,
tier=self.tier)
def get_instance(instance_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:
"""
Gets the details of a specific instance.
"""
__args__ = dict()
__args__['instanceId'] = instance_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:file/v1beta1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value
return AwaitableGetInstanceResult(
create_time=__ret__.create_time,
description=__ret__.description,
etag=__ret__.etag,
file_shares=__ret__.file_shares,
kms_key_name=__ret__.kms_key_name,
labels=__ret__.labels,
name=__ret__.name,
networks=__ret__.networks,
satisfies_pzs=__ret__.satisfies_pzs,
state=__ret__.state,
status_message=__ret__.status_message,
suspension_reasons=__ret__.suspension_reasons,
tier=__ret__.tier)
@_utilities.lift_output_func(get_instance)
def get_instance_output(instance_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:
"""
Gets the details of a specific instance.
"""
...
| 1.742188
| 2
|
examples/retirement_accumulation.py
|
nheske/stocker
| 7
|
12781756
|
#!/usr/bin/env python3
# Include the directory up in the path:
import sys
sys.path.insert(0,'..')
# Import stocker:
import stocker
# Main:
if __name__== "__main__":
# Let's save in a combination of stocks and bonds for retirement. Weight the
# initial portfolio towards 100% stocks for 15 years, weighted 70% US and
# 30% international. We will contribute 16000 annually with a 2% increase
# in that contribution annually. Assume the default inflation rate of 3.5%.
all_stocks_portfolio = stocker.Portfolio(
name="Stocks", \
value=0.0, \
positions=[stocker.US_Stocks(), stocker.International_Stocks()], \
weights=[7, 3]
)
all_stocks_phase = stocker.Scenario(
name="Initial Accumulation", \
portfolio=all_stocks_portfolio, \
num_years=15, \
annual_contribution=16000, \
annual_contribution_increase_perc=2.0
)
# The next phase of our retirement accumulation will start at 100% stocks
# but gradually transition to a 50/50 stocks/bonds portfolio by retirement.
# This phase consists of 15 years more accumulation, with an annual contribution
# of 20k, increasing 2% each year.
end_weights = [7, 3, 7, 3]
stocks_and_bonds_portfolio = stocker.Portfolio(
name="Stocks and Bonds", \
positions=[stocker.US_Stocks(), stocker.International_Stocks(), stocker.US_Bonds(), stocker.International_Bonds()], \
weights=[7, 3, 0, 0]
)
stocks_and_bonds_phase = stocker.Scenario(
name="Secondary Accumulation", \
portfolio=stocks_and_bonds_portfolio, \
num_years=15, \
annual_contribution=20000, \
annual_contribution_increase_perc=2.0, \
end_weights=end_weights
)
# Combine these two accumulation phases together using a piecewise scenario:
retirement_accumulation = stocker.Piecewise_Scenario("Retirement Accumulation", [all_stocks_phase, stocks_and_bonds_phase])
# Run the savings scenario once and print and plot the results:
retirement_accumulation.run()
print(retirement_accumulation.results())
retirement_accumulation.plot(smooth=False)
# Run a monte carlo simulation of this scenario with 400 iterations:
mc = stocker.Monte_Carlo(retirement_accumulation)
mc.run(n=400)
# Print the results of the monte carlo simulation, showing the probability
# of hitting a 1M dollar accumulation goal:
print(mc.results(goal=1000000))
# Create the monte carlo plots:
mc.histogram()
mc.plot(smooth=True)
# Show all the stocker plots:
stocker.show_plots()
| 3.03125
| 3
|
module2-sql-for-analysis/insert_titanic.py
|
ameralhomdy/DS-Unit-3-Sprint-2-SQL-and-Databases
| 0
|
12781757
|
<filename>module2-sql-for-analysis/insert_titanic.py
import psycopg2
import pandas as pd
import sqlite3
import helper
from settings import secrets
def load_dataset(fpath, verbose=False):
df = pd.read_csv(fpath)
df['Name'] = df['Name'].str.replace("'", "")
if verbose:
print('Shape is: ', df.shape)
print('-'*80)
print('Pandas Data Frame')
print(df.head())
print('-'*80, '\n')
return df
def main():
# Load titanic data set to dataframe
print('Loading dataset...')
titanic = load_dataset('titanic.csv', verbose=True)
print('-'*80, '\n')
# Convert CSV to SQLite database
print('------------------( ETL ) CSV to SQLite ------------------')
print('Converting...')
db_file = 'titanic.sqlite3'
sql_conn = helper.create_connection(db_file)
titanic.to_sql('titanic', sql_conn, if_exists='replace',
index_label='id')
print('Done')
# Print rows from the SQLite database titanic table
print('------------------ TITANIC DATAFRAME 10 ROWS ------------------')
query = "SELECT * FROM titanic"
passengers = helper.select_all_query(db_file, query)
df = pd.read_sql(query, sql_conn)
print(df.head())
print('-'*80, '\n')
# Print data type of SQLite titanic table
print('------------------ DATA TYPE OF TITANIC TABLE ------------------')
query = 'PRAGMA table_info(titanic);'
df = pd.read_sql(query, sql_conn)
print(df)
print('-'*80, '\n')
# Connect to PostgreSQL database on ElephantSQL server
dbname = secrets.get('dbname')
user = secrets.get('user')
password = secrets.get('password')
host = secrets.get('host')
print('------------------( ETL ) SQLite to PostgreSQL ------------------')
print(f'Connecting to {dbname}')
print(f'Host is: {host}')
try:
pg_conn = psycopg2.connect(dbname=dbname, user=user,
password=password, host=host)
except psycopg2.OperationalError:
print("I am unable to connect to the database")
raise
# Create Cursor
pg_curs = pg_conn.cursor()
# Drop table if exists
pg_curs.execute("DROP TABLE IF EXISTS titanic")
pg_conn.commit()
# Create titanic table in PostgreSQL
create_titanic_table = """
CREATE TABLE titanic (
passenger_id SERIAL PRIMARY KEY,
survived INT,
pclass INT,
name VARCHAR(200),
sex VARCHAR(20),
age REAL,
siblings_spouses INT,
parents_children INT,
fare REAL
)
"""
pg_curs.execute(create_titanic_table)
pg_conn.commit()
for passenger in passengers:
insert_passenger = """INSERT INTO titanic
(passenger_id, survived, pclass, name, sex, age, siblings_spouses, parents_children, fare)
VALUES """ + str(passenger) + ';'
pg_curs.execute(insert_passenger)
# print(insert_passenger)
pg_conn.commit()
# Print rows from the Postgres titanic table
print('------------------ TITANIC DATAFRAME 10 ROWS ------------------')
query = "SELECT * FROM titanic LIMIT 10;"
passengers = helper.select_all_query(db_file, query)
df = pd.read_sql(query, pg_conn)
print(df.head())
print('-'*80, '\n')
if __name__ == "__main__":
main()
| 3.375
| 3
|
zktx/storage.py
|
wenbobuaa/pykit
| 0
|
12781758
|
#!/usr/bin/env python
# coding: utf-8
import logging
from pykit import rangeset
from pykit import txutil
from .accessor import KeyValue
from .accessor import Value
from .status import COMMITTED
from .status import PURGED
from .status import STATUS
logger = logging.getLogger(__name__)
class StorageHelper(object):
max_value_history = 16 # keeps the last n modifications in a record
max_journal_history = 1024 # keeps the last n committed journal
conflicterror = None
def apply_record(self, txid, key, value):
# the data in underlying storage is multi-version record:
# [
# [<txid>, <value>]
# [<txid>, <value>]
# ...
# ]
for curr in txutil.cas_loop(self.record.get,
self.record.set_or_create,
args=(key, ),
conflicterror=self.conflicterror):
max_txid = curr.v[-1][0]
if max_txid >= txid:
return False
curr.v.append((txid, value))
while len(curr.v) > self.max_value_history:
curr.v.pop(0)
return True
def add_to_txidset(self, status, txid):
if status not in STATUS:
raise KeyError('invalid status: ' + repr(status))
logger.info('add {status}:{txid}'
' to txidset'.format(
status=status, txid=txid))
for curr in txutil.cas_loop(self.txidset.get,
self.txidset.set,
conflicterror=self.conflicterror):
for st in STATUS:
if st not in curr.v:
curr.v[st] = rangeset.RangeSet([])
curr.v[status].add([txid, txid + 1])
self.purge(curr.v)
def purge(self, sets):
topurge = rangeset.RangeSet()
committed = sets[COMMITTED]
l = committed.length()
while l > self.max_journal_history:
first = committed[0]
# a range contains a single txid
r = rangeset.RangeSet([[first[0], first[0] + 1]])
topurge.add(r[0])
committed = rangeset.substract(committed, r)
l -= 1
for rng in topurge:
for txid in range(rng[0], rng[1]):
self.journal.safe_delete(txid)
sets[PURGED] = rangeset.union(sets[PURGED], topurge)
sets[COMMITTED] = rangeset.substract(sets[COMMITTED], topurge)
class Storage(StorageHelper):
record = KeyValue()
journal = KeyValue()
txidset = Value()
def acquire_key_loop(self, txid, key): raise TypeError('unimplemented')
def try_release_key(self, txid, key): raise TypeError('unimplemented')
| 2.328125
| 2
|
tests/avalon_test_framework/tests/work_order_tests/test_submit_getresult.py
|
jinengandhi-intel/avalon
| 0
|
12781759
|
<gh_stars>0
import pytest
import logging
from src.libs.avalon_test_base import AvalonBase
from src.libs.verification_libs \
import verify_test, check_negative_test_responses,\
validate_response_code
from src.libs.pre_processing_libs \
import ResultStatus
from conftest import env
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures("setup_teardown")
class TestClass():
test_obj = AvalonBase()
pytestmark = pytest.mark.setup_teardown_data(
test_obj, "WorkOrderGetResult")
@pytest.mark.listener
@pytest.mark.sdk
@pytest.mark.proxy
def test_workordergetresult_success(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
verify_test(
result_response, 0,
self.test_obj.setup_output['pre_test_output'],
self.test_obj.setup_output['pre_test_workorder_output'])
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_different(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_specialchar(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_null(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_nonhexstring(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Work order Id not found in the database. "
"Hence invalid parameter")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_alphabetsonly(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_workorderid_withoutquotes(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_emptyparameter(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (validate_response_code(result_response, 2)
is ResultStatus.SUCCESS.value)
@pytest.mark.sdk
@pytest.mark.proxy
@pytest.mark.listener
def test_workordergetresult_unknownparameter(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'])
assert (validate_response_code(result_response, 2)
is ResultStatus.SUCCESS.value)
@pytest.mark.listener
def test_workordergetresult_workorderId_empty(self):
result_response = self.test_obj.run_test(
env['work_order_getresult_input_file'],
direct_avalon_listener=True)
assert (
check_negative_test_responses(
result_response,
"Invalid work order Id")
is ResultStatus.SUCCESS.value)
| 2.1875
| 2
|
ioutracker/dataloaders/MOTDataLoader.py
|
jiankaiwang/ioutracker
| 3
|
12781760
|
# -*- coding: utf-8 -*-
"""
@author: jiankaiwang
@version: 0.0.1
@date: 2020/03
@desc: The script implements the data loader of the MOT challenge.
@note:
Style: pylint_2015
@reference:
"""
import os
import logging
import pandas as pd
import requests
import tqdm
import zipfile
import argparse
# In[]
MOT_ID_LABEL = {1: "Pedestrian", 7: "Static_Person"}
MOT_LABEL_ID = {"Pedestrian": 1, "Static_Person": 7}
# In[]:
def formatBBoxAndVis(dataframe, is_dict=False):
"""formatBBoxAndVis keeps the bbox information and its visibility per frames.
Args:
dataframe: the pandas data frame
is_dict: using the frame id as the key in the dictionary
Returns:
frameBBoxes: a list of per-frame person detection results, where each entry
is a list containing [x1, y1, width, height, visible];
visible also represents the probability or confidence score of
the object
"""
frameBBoxes = []
fids = list(dataframe["fid"].unique())
for fid in fids:
tmp = dataframe[dataframe["fid"] == fid]
frameBBoxes.append(tmp[["bX", "bY", "bW", "bH", "visible"]].values.tolist())
if is_dict:
return dict(zip(fids, frameBBoxes))
return frameBBoxes
# In[]
def formatForMetrics(dataframe, is_dict=False):
"""formatForMetrics keeps the bbox information, its visibility and uid per frames.
Args:
dataframe: the pandas data frame
is_dict: using the frame id as the key in the dictionary
Returns:
frameBBoxes: a list of per-frame person detection results, where each entry
is a list containing [x1, y1, width, height, visible, uid];
visible also represents the probability or confidence score of
the object
"""
frameBBoxes = []
fids = list(dataframe["fid"].unique())
for fid in fids:
tmp = dataframe[dataframe["fid"] == fid]
frameBBoxes.append(tmp[["bX", "bY", "bW", "bH", "visible", "uid"]].values.tolist())
if is_dict:
return dict(zip(fids, frameBBoxes))
return frameBBoxes
# In[]
def maybeDownload(name="mot17det", src=None, target=os.path.join("/","tmp"),
uncompressed=True):
"""maybeDownload: Maybe download the MOT17Det dataset from the official datasets
to the local.
Args:
name (primary): the dataset name
src: the source URL, select one of the name and src
target: the local directory
uncompressed: whether to uncompress the downloaded file
Return:
status: 0 (success) or Exception (failed)
"""
assert os.path.exists(target), "No such folder exists."
if name or (not src):
availableURL = {"mot17det":
["https://motchallenge.net/data/MOT17DetLabels.zip",
"https://motchallenge.net/data/MOT17Det.zip"]}
if name not in list(availableURL.keys()):
raise ValueError("Available datasets: {}".format(list(availableURL.keys())))
src = availableURL["mot17det"]
logging.info("Download source: {}".format(src))
if type(src) == str: src = [src]
for urlIdx in tqdm.trange(len(src)):
url = src[urlIdx]
fname = os.path.basename(url)
folderName, fileType = fname.split('.')
# the compressed file path
filePath = os.path.join(target, fname)
# download the compressed first
if os.path.exists(filePath):
logging.warning("{} existed.".format(filePath))
else:
logging.warning("Downloading {} ...".format(url))
# change to wget tool on the shell
res = requests.get(url, allow_redirects=True)
if res.status_code != 200:
logging.error("Download {} failed.".format(url))
continue
with open(filePath, "wb") as fout:
fout.write(res.content)
# uncompress the file
if uncompressed:
uncompPath = os.path.join(target, folderName)
assert not os.path.exists(uncompPath), \
"The folder {} exists. Please delete it first.".format(uncompPath)
try:
os.mkdir(uncompPath)
logging.warning("Created a folder {}.".format(uncompPath))
except Exception as e:
raise Exception("Can't create the folder {}. ({})".format(uncompPath, e))
allowedCompressedType = ["zip"]
if fileType not in allowedCompressedType:
raise ValueError("Available compressed type: {}".format(allowedCompressedType))
if fileType == "zip":
with zipfile.ZipFile(filePath, 'r') as fin:
fin.extractall(uncompPath)
logging.warning("Compressed to folder {}.".format(uncompPath))
return 0
# In[]:
def loadLabel(src, is_path=True, load_Pedestrian=True, load_Static_Person=True,
visible_thresholde=0, format_style="onlybbox"):
"""LoadLabel: Load a label file in the csv format.
Args:
src: the MOT label file path (available when is_path is True)
is_path: True or False for whether the src is the file path or not
load_Pedestrian: whether to load the pedestrian data or not
load_Static_Person: whether to load the static person data or not
visible_thresholde: the threshold for filtering the invisible person data
format_style: provides different styles in the lists,
"onlybbox" (func: formatBBoxAndVis), "onlybbox_dict" (func: formatBBoxAndVis),
"metrics" (func: formatForMetrics), "metrics_dict" (func: formatForMetrics)
Returns:
objects_in_frames: a list containing the person detection information per frame
"""
df = src
if is_path:
df = pd.read_csv(src, header=None)
df.columns = ["fid", "uid", "bX", "bY", "bW", "bH", "conf", "class", "visible"]
df_persons = df[((df["class"] == MOT_LABEL_ID["Pedestrian"]) & load_Pedestrian) | \
((df["class"] == MOT_LABEL_ID["Static_Person"]) & load_Static_Person)]
if visible_thresholde:
df_persons = df_persons[df_persons["visible"] >= visible_thresholde]
if format_style[:8] == "onlybbox":
if format_style[-4:] == "dict":
return formatBBoxAndVis(df_persons, is_dict=True), df_persons
else:
# format_style == "onlybbox"
return formatBBoxAndVis(df_persons), df_persons
elif format_style[:7] == "metrics":
if format_style[-4:] == "dict":
return formatForMetrics(df_persons, is_dict=True), df_persons
else:
# format_style == "metrics"
return formatForMetrics(df_persons), df_persons
# In[]
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# parsing args for maybeDownload
parser = argparse.ArgumentParser()
parser.add_argument("--name", type=str, default="mot17det")
parser.add_argument("--src", type=str, default=None)
parser.add_argument("--target", type=str, default="/tmp")
parser.add_argument("--uncompressed", type=int, default=1)
args = parser.parse_args()
maybeDownload(name=args.name)
| 2.6875
| 3
|
main.py
|
Genzo4/knd-1190803
| 0
|
12781761
|
<reponame>Genzo4/knd-1190803
import wx
from yoyo import read_migrations
from yoyo import get_backend
import options
from logzero import logger
class MainFrame(wx.Frame):
def __init__(self, *args, **kwds):
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((749, 476))
self.SetTitle("frame")
self.panel_1 = wx.Panel(self, wx.ID_ANY)
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)
label_org = wx.StaticText(self.panel_1, wx.ID_ANY, u"Организация: ", style=wx.ALIGN_RIGHT)  # label text: "Organization: "
label_org.SetFont(wx.Font(14, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, 0, ""))
sizer_2.Add(label_org, 0, 0, 0)
self.sp_org = wx.ComboBox(self.panel_1, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
sizer_2.Add(self.sp_org, 0, 0, 0)
self.b_add_org = wx.Button(self.panel_1, wx.ID_ANY, "button_1")
sizer_2.Add(self.b_add_org, 0, 0, 0)
self.b_del_org = wx.Button(self.panel_1, wx.ID_ANY, "button_2")
sizer_2.Add(self.b_del_org, 0, 0, 0)
sizer_1.Add((0, 0), 0, 0, 0)
self.panel_1.SetSizer(sizer_1)
self.Layout()
self._apply_migrations()
@staticmethod
def _apply_migrations():
logger.info("Start apply migrations")
backend = get_backend(options.DB_PATH)
migrations = read_migrations(options.MIGRATIONS_PATH)
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations))
logger.info("Migrations apply")
class MyApp(wx.App):
def OnInit(self):
self.frame = MainFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
| 1.992188
| 2
|
face_pose_dataset/third_party/fsa_estimator/SSRNET_model.py
|
samuelbaltanas/face-pose-dataset
| 1
|
12781762
|
import logging
import sys
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import (
AveragePooling2D,
BatchNormalization,
Conv2D,
MaxPooling2D,
SeparableConv2D,
)
from tensorflow.keras.models import Model
sys.setrecursionlimit(2 ** 20)
np.random.seed(2 ** 10)
class SSR_net:
def __init__(self, image_size, stage_num, lambda_local, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.stage_num = stage_num
self.lambda_local = lambda_local
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
inputs = layers.Input(shape=self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3))(inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3))(inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(10, (1, 1), activation="relu")(s)
s_layer4 = layers.Flatten()(s_layer4)
s_layer4_mix = layers.Dropout(0.2)(s_layer4)
s_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
s_layer4_mix
)
x_layer4 = Conv2D(10, (1, 1), activation="relu")(x)
x_layer4 = layers.Flatten()(x_layer4)
x_layer4_mix = layers.Dropout(0.2)(x_layer4)
x_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
x_layer4_mix
)
feat_a_s1_pre = layers.Multiply()([s_layer4, x_layer4])
delta_s1 = layers.Dense(1, activation="tanh", name="delta_s1")(feat_a_s1_pre)
feat_a_s1 = layers.Multiply()([s_layer4_mix, x_layer4_mix])
feat_a_s1 = layers.Dense(2 * self.stage_num[0], activation="relu")(feat_a_s1)
pred_a_s1 = layers.Dense(
units=self.stage_num[0], activation="relu", name="pred_age_stage1"
)(feat_a_s1)
# feat_local_s1 = layers.Lambda(lambda x: x/10)(feat_a_s1)
# feat_a_s1_local = Dropout(0.2)(pred_a_s1)
local_s1 = layers.Dense(
units=self.stage_num[0], activation="tanh", name="local_delta_stage1",
)(feat_a_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(10, (1, 1), activation="relu")(s_layer2)
s_layer2 = MaxPooling2D(4, 4)(s_layer2)
s_layer2 = layers.Flatten()(s_layer2)
s_layer2_mix = layers.Dropout(0.2)(s_layer2)
s_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(s_layer2_mix)
x_layer2 = Conv2D(10, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D(4, 4)(x_layer2)
x_layer2 = layers.Flatten()(x_layer2)
x_layer2_mix = layers.Dropout(0.2)(x_layer2)
x_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(x_layer2_mix)
feat_a_s2_pre = layers.Multiply()([s_layer2, x_layer2])
delta_s2 = layers.Dense(1, activation="tanh", name="delta_s2")(feat_a_s2_pre)
feat_a_s2 = layers.Multiply()([s_layer2_mix, x_layer2_mix])
feat_a_s2 = layers.Dense(2 * self.stage_num[1], activation="relu")(feat_a_s2)
pred_a_s2 = layers.Dense(
units=self.stage_num[1], activation="relu", name="pred_age_stage2"
)(feat_a_s2)
# feat_local_s2 = layers.Lambda(lambda x: x/10)(feat_a_s2)
# feat_a_s2_local = Dropout(0.2)(pred_a_s2)
local_s2 = layers.Dense(
units=self.stage_num[1], activation="tanh", name="local_delta_stage2",
)(feat_a_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer1 = Conv2D(10, (1, 1), activation="relu")(s_layer1)
s_layer1 = MaxPooling2D(8, 8)(s_layer1)
s_layer1 = layers.Flatten()(s_layer1)
s_layer1_mix = layers.Dropout(0.2)(s_layer1)
s_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(s_layer1_mix)
x_layer1 = Conv2D(10, (1, 1), activation="relu")(x_layer1)
x_layer1 = AveragePooling2D(8, 8)(x_layer1)
x_layer1 = layers.Flatten()(x_layer1)
x_layer1_mix = layers.Dropout(0.2)(x_layer1)
x_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(x_layer1_mix)
feat_a_s3_pre = layers.Multiply()([s_layer1, x_layer1])
delta_s3 = layers.Dense(1, activation="tanh", name="delta_s3")(feat_a_s3_pre)
feat_a_s3 = layers.Multiply()([s_layer1_mix, x_layer1_mix])
feat_a_s3 = layers.Dense(2 * self.stage_num[2], activation="relu")(feat_a_s3)
pred_a_s3 = layers.Dense(
units=self.stage_num[2], activation="relu", name="pred_age_stage3"
)(feat_a_s3)
# feat_local_s3 = layers.Lambda(lambda x: x/10)(feat_a_s3)
# feat_a_s3_local = Dropout(0.2)(pred_a_s3)
local_s3 = layers.Dense(
units=self.stage_num[2], activation="tanh", name="local_delta_stage3",
)(feat_a_s3)
# -------------------------------------------------------------------------------------------------------------------------
def merge_age(x, s1, s2, s3, lambda_local, lambda_d):
a = x[0][:, 0] * 0
b = x[0][:, 0] * 0
c = x[0][:, 0] * 0
# A = s1 * s2 * s3
V = 101
for i in range(0, s1):
a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
a = K.expand_dims(a, -1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
b = K.expand_dims(b, -1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
c = K.expand_dims(c, -1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
age = (a + b + c) * V
return age
pred_a = layers.Lambda(
merge_age,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_local": self.lambda_local,
"lambda_d": self.lambda_d,
},
name="pred_a",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=inputs, outputs=pred_a)
return model
class SSR_net_general:
def __init__(self, image_size, stage_num, lambda_local, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.stage_num = stage_num
self.lambda_local = lambda_local
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
inputs = layers.Input(shape=self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3))(inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3))(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3))(inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3))(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(10, (1, 1), activation="relu")(s)
s_layer4 = layers.Flatten()(s_layer4)
s_layer4_mix = layers.Dropout(0.2)(s_layer4)
s_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
s_layer4_mix
)
x_layer4 = Conv2D(10, (1, 1), activation="relu")(x)
x_layer4 = layers.Flatten()(x_layer4)
x_layer4_mix = layers.Dropout(0.2)(x_layer4)
x_layer4_mix = layers.Dense(units=self.stage_num[0], activation="relu")(
x_layer4_mix
)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
delta_s1 = layers.Dense(1, activation="tanh", name="delta_s1")(feat_s1_pre)
feat_s1 = layers.Multiply()([s_layer4_mix, x_layer4_mix])
feat_s1 = layers.Dense(2 * self.stage_num[0], activation="relu")(feat_s1)
pred_s1 = layers.Dense(
units=self.stage_num[0], activation="relu", name="pred_stage1"
)(feat_s1)
local_s1 = layers.Dense(
units=self.stage_num[0], activation="tanh", name="local_delta_stage1",
)(feat_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(10, (1, 1), activation="relu")(s_layer2)
s_layer2 = MaxPooling2D(4, 4)(s_layer2)
s_layer2 = layers.Flatten()(s_layer2)
s_layer2_mix = layers.Dropout(0.2)(s_layer2)
s_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(s_layer2_mix)
x_layer2 = Conv2D(10, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D(4, 4)(x_layer2)
x_layer2 = layers.Flatten()(x_layer2)
x_layer2_mix = layers.Dropout(0.2)(x_layer2)
x_layer2_mix = layers.Dense(self.stage_num[1], activation="relu")(x_layer2_mix)
feat_s2_pre = layers.Multiply()([s_layer2, x_layer2])
delta_s2 = layers.Dense(1, activation="tanh", name="delta_s2")(feat_s2_pre)
feat_s2 = layers.Multiply()([s_layer2_mix, x_layer2_mix])
feat_s2 = layers.Dense(2 * self.stage_num[1], activation="relu")(feat_s2)
pred_s2 = layers.Dense(
units=self.stage_num[1], activation="relu", name="pred_stage2"
)(feat_s2)
local_s2 = layers.Dense(
units=self.stage_num[1], activation="tanh", name="local_delta_stage2",
)(feat_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer1 = Conv2D(10, (1, 1), activation="relu")(s_layer1)
s_layer1 = MaxPooling2D(8, 8)(s_layer1)
s_layer1 = layers.Flatten()(s_layer1)
s_layer1_mix = layers.Dropout(0.2)(s_layer1)
s_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(s_layer1_mix)
x_layer1 = Conv2D(10, (1, 1), activation="relu")(x_layer1)
x_layer1 = AveragePooling2D(8, 8)(x_layer1)
x_layer1 = layers.Flatten()(x_layer1)
x_layer1_mix = layers.Dropout(0.2)(x_layer1)
x_layer1_mix = layers.Dense(self.stage_num[2], activation="relu")(x_layer1_mix)
feat_s3_pre = layers.Multiply()([s_layer1, x_layer1])
delta_s3 = layers.Dense(1, activation="tanh", name="delta_s3")(feat_s3_pre)
feat_s3 = layers.Multiply()([s_layer1_mix, x_layer1_mix])
feat_s3 = layers.Dense(2 * self.stage_num[2], activation="relu")(feat_s3)
pred_s3 = layers.Dense(
units=self.stage_num[2], activation="relu", name="pred_stage3"
)(feat_s3)
local_s3 = layers.Dense(
units=self.stage_num[2], activation="tanh", name="local_delta_stage3",
)(feat_s3)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_local, lambda_d):
a = x[0][:, 0] * 0
b = x[0][:, 0] * 0
c = x[0][:, 0] * 0
V = 1
for i in range(0, s1):
a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
a = K.expand_dims(a, -1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
b = K.expand_dims(b, -1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
c = K.expand_dims(c, -1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
out = (a + b + c) * V
return out
pred = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_local": self.lambda_local,
"lambda_d": self.lambda_d,
},
name="pred",
)(
[
pred_s1,
pred_s2,
pred_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=inputs, outputs=pred)
return model
class SSR_net_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(32, (3, 3), padding="same")(x_layer1)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(32, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(64, (3, 3), padding="same")(x_layer2)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(64, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D((2, 2))(x)
x = SeparableConv2D(128, (3, 3), padding="same")(x_layer3)
x = BatchNormalization(axis=-1)(x)
x = layers.Activation("relu")(x)
x = SeparableConv2D(128, (3, 3), padding="same")(x)
x = BatchNormalization(axis=-1)(x)
x_layer4 = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(32, (3, 3), padding="same")(s_layer1)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(32, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(64, (3, 3), padding="same")(s_layer2)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(64, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D((2, 2))(s)
s = SeparableConv2D(128, (3, 3), padding="same")(s_layer3)
s = BatchNormalization(axis=-1)(s)
s = layers.Activation("tanh")(s)
s = SeparableConv2D(128, (3, 3), padding="same")(s)
s = BatchNormalization(axis=-1)(s)
s_layer4 = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = MaxPooling2D((2, 2))(s_layer4)
x_layer4 = Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = layers.Flatten()(feat_s1_pre)
feat_delta_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = layers.Dense(self.num_classes, activation="tanh", name="delta_s1")(
feat_delta_s1
)
feat_local_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = layers.Reshape((self.num_classes, self.stage_num[0]))(feat_pred_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = MaxPooling2D((2, 2))(s_layer3)
x_layer3 = Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = layers.Flatten()(feat_s2_pre)
feat_delta_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = layers.Dense(self.num_classes, activation="tanh", name="delta_s2")(
feat_delta_s2
)
feat_local_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = layers.Reshape((self.num_classes, self.stage_num[1]))(feat_pred_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = MaxPooling2D((2, 2))(s_layer2)
x_layer2 = Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = layers.Flatten()(feat_s3_pre)
feat_delta_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = layers.Dense(self.num_classes, activation="tanh", name="delta_s3")(
feat_delta_s3
)
feat_local_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = layers.Reshape((self.num_classes, self.stage_num[2]))(feat_pred_s3)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
pred = (a + b + c) * V
return pred
pred_pose = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=img_inputs, outputs=pred_pose)
return model
class SSR_net_ori_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
self._channel_axis = 1
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = Conv2D(32, (3, 3), padding="same")(img_inputs)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer1 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer1)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer2 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer2)
x = BatchNormalization(axis=self._channel_axis)(x)
x = layers.Activation("relu")(x)
x_layer3 = AveragePooling2D(2, 2)(x)
x = Conv2D(32, (3, 3), padding="same")(x_layer3)
x = BatchNormalization(axis=self._channel_axis)(x)
x_layer4 = layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = Conv2D(16, (3, 3), padding="same")(img_inputs)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer1 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer1)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer2 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer2)
s = BatchNormalization(axis=self._channel_axis)(s)
s = layers.Activation("tanh")(s)
s_layer3 = MaxPooling2D(2, 2)(s)
s = Conv2D(16, (3, 3), padding="same")(s_layer3)
s = BatchNormalization(axis=self._channel_axis)(s)
s_layer4 = layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = MaxPooling2D((2, 2))(s_layer4)
x_layer4 = Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = layers.Flatten()(feat_s1_pre)
feat_delta_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = layers.Dense(self.num_classes, activation="tanh", name="delta_s1")(
feat_delta_s1
)
feat_local_s1 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = layers.Reshape((self.num_classes, self.stage_num[0]))(feat_pred_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = MaxPooling2D((2, 2))(s_layer3)
x_layer3 = Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = layers.Flatten()(feat_s2_pre)
feat_delta_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = layers.Dense(self.num_classes, activation="tanh", name="delta_s2")(
feat_delta_s2
)
feat_local_s2 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = layers.Reshape((self.num_classes, self.stage_num[1]))(feat_pred_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = MaxPooling2D((2, 2))(s_layer2)
x_layer2 = Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = layers.Flatten()(feat_s3_pre)
feat_delta_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = layers.Dense(self.num_classes, activation="tanh", name="delta_s3")(
feat_delta_s3
)
feat_local_s3 = layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = layers.Reshape((self.num_classes, self.stage_num[2]))(feat_pred_s3)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = (
c
/ (s1 * (1 + lambda_d * x[3]))
/ (s2 * (1 + lambda_d * x[4]))
/ (s3 * (1 + lambda_d * x[5]))
)
pred = (a + b + c) * V
return pred
pred_pose = layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = Model(inputs=img_inputs, outputs=pred_pose)
return model
| 2.609375
| 3
|
python/pyo.py
|
dnagle-usgs/lidar-processing
| 1
|
12781763
|
import code
import os.path
import sys
sys.path.append(os.path.dirname(__file__))
import alps.site
yo = alps.site.yo
code.interact(local=locals())
| 1.625
| 2
|
queue_/queue_stack.py
|
makar-fedorov/programming-2021-19fpl
| 0
|
12781764
|
"""
Programming for linguists
Implementation of the data structure "QueueStack"
"""
from typing import Iterable
class QueueStack:
"""
Queue Data Structure from Stack
"""
def __init__(self, data: Iterable = None, queue_size: int = 'no_info'):
if data and isinstance(data, list):
self.stack = data
elif not data:
self.stack = []
elif data:
self.stack = list(data)
else:
raise TypeError
if isinstance(queue_size, int):
self.queue_size = queue_size
elif queue_size == 'no_info':
self.queue_size = len(self.stack) if len(self.stack) > 0 else len(self.stack) + 1
self.stack = self.stack[::-1][:self.queue_size]
def push(self, element):
"""
Add the element ‘element’ at the top of stack
:param element: element to add to stack
"""
self.stack.append(element)
def empty(self) -> bool:
"""
Return whether stack is empty or not
:return: True if stack does not contain any elements
False if stack contains elements
"""
return not self.stack
def get(self):
"""
Remove and return an item from queue_stack
"""
return self.stack.pop()
def put(self, element):
"""
Add the element ‘element’ at the end of queue_stack
:param element: element to add to queue_stack
"""
if len(self.stack) == self.queue_size:
raise ValueError
self.stack.insert(0, element)
def size(self) -> int:
"""
Return the number of elements in stack
:return: Number of elements in stack
"""
return len(self.stack)
def top(self):
"""
Return the element on the top of stack
:return: the element that is on the top of stack
"""
if self.stack:
return self.stack[-1]
raise ValueError
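

# A minimal usage sketch (not part of the original assignment code); the values
# below are arbitrary examples chosen for illustration.
if __name__ == '__main__':
    queue = QueueStack([1, 2, 3], queue_size=5)
    queue.put(4)           # enqueue at the back
    print(queue.get())     # -> 1: the first element provided leaves first
    print(queue.size())    # -> 3
    print(queue.top())     # -> 2: the element get() would return next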
| 4.03125
| 4
|
tk_basic_as_possible.py
|
alvaro-root/pa2_2021
| 1
|
12781765
|
"""
Tkinter generic tasks
1. LOOK: Define the look of the screen
2. DO: Define the event handler routines
3. LOOK associated with DO: Associate interesting keyboard events with their handlers.
4. LISTEN: Loop forever, observing events. Exit when an exit event occurs.
"""
from tkinter import *
# Contain top level window usually called root
root = Tk()
# Basic workflow:
# 1. Create a GUI object and associate it with its parent
# 2. Pack it or place it on grid - set up a 'geometry manager'
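# A minimal sketch of steps 1-2 (the Label widget here is only an illustration and
# is not part of the original bare-bones script):
hello = Label(root, text="Hello, Tkinter")  # 1. create a GUI object with root as its parent
hello.pack()                                # 2. let the pack geometry manager place it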
# Keep listening for events until destroy event occurs.
root.mainloop()
| 3.484375
| 3
|
cactus/listener/__init__.py
|
danielchasehooper/Cactus
| 1,048
|
12781766
|
import logging
from cactus.listener.polling import PollingListener
logger = logging.getLogger(__name__)
try:
from cactus.listener.mac import FSEventsListener as Listener
except (ImportError, OSError):
logger.debug("Failed to load FSEventsListener, falling back to PollingListener", exc_info=True)
Listener = PollingListener
| 1.960938
| 2
|
eyws/docker.py
|
canelmas/eyws
| 2
|
12781767
|
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eyws import ssh
def install_docker(opts, instances):
for res in instances:
for instance in res["Instances"]:
instance_id = instance["InstanceId"]
public_dns = instance["PublicDnsName"]
print("installing docker on {id} ({pdns})...".format(id=instance_id, pdns=public_dns))
execute(instance, opts, "sudo apt-get update")
execute(instance, opts, "sudo apt-get install apt-transport-https ca-certificates curl "
"software-properties-common")
execute(instance, opts, "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -")
execute(instance, opts, "sudo apt-key fingerprint 0EBFCD88")
execute(instance, opts, "sudo add-apt-repository \"deb [arch=amd64] "
"https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"")
execute(instance, opts, "sudo apt-get update")
execute(instance, opts, "apt-cache policy docker-ce")
execute(instance, opts, "sudo apt-get install docker-ce")
execute(instance, opts, "sudo usermod -aG docker $USER")
execute(instance, opts, "sudo systemctl enable docker")
print("docker installed on {id} ({pdns})".format(id=instance_id, pdns=public_dns))
def execute(instance, opts, cmnd):
ssh(host=instance["PublicDnsName"],
opts=opts,
command=cmnd)
| 2.15625
| 2
|
tests/__init__.py
|
posita/modwalk
| 0
|
12781768
|
# -*- encoding: utf-8 -*-
# ======================================================================
"""
Copyright and other protections apply. Please see the accompanying
:doc:`LICENSE <LICENSE>` and :doc:`CREDITS <CREDITS>` file(s) for rights
and restrictions governing use of this software. All rights not
expressly waived or licensed are reserved. If those files are missing or
appear to be modified from their originals, then please contact the
author before viewing or using this software in any capacity.
"""
# ======================================================================
from __future__ import absolute_import, division, print_function
TYPE_CHECKING = False # from typing import TYPE_CHECKING
if TYPE_CHECKING:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
from builtins import * # noqa: F401,F403 # pylint: disable=redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
from future.builtins.disabled import * # noqa: F401,F403 # pylint: disable=no-name-in-module,redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
# ---- Imports ---------------------------------------------------------
import six
import unittest
from modwalk.main import configlogging
# ---- Data ------------------------------------------------------------
__all__ = ()
# ---- Initialization --------------------------------------------------
# See <https://github.com/python/typeshed/issues/1874>
unittest.TestCase.longMessage = True # type: ignore # py2
# Python 3 complains that the assert*Regexp* methods are deprecated in
# favor of the analogous assert*Regex methods, which Python 2's unittest
# doesn't have; this monkey patch fixes all that nonsense
if not hasattr(unittest.TestCase, 'assertCountEqual'):
setattr(unittest.TestCase, 'assertCountEqual', six.assertCountEqual)
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
setattr(unittest.TestCase, 'assertRaisesRegex', six.assertRaisesRegex)
if not hasattr(unittest.TestCase, 'assertRegex'):
setattr(unittest.TestCase, 'assertRegex', six.assertRegex)
configlogging()
| 1.679688
| 2
|
simulation/modem/util/qam_mapper.py
|
jdemel/GFDM-PHY-Reference
| 0
|
12781769
|
import numpy as np
from commpy.utilities import bitarray2dec, dec2bitarray
from itertools import product
class QamMapper():
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object.
Modified code from commpy library
"""
def _constellation_symbol(self, i):
return (2*i[0]-1) + (2*i[1]-1)*(1j)
def __init__(self, m):
""" Creates a Quadrature Amplitude Modulation (QAM) Modem object.
Parameters
----------
m : int
Size of the QAM constellation.
"""
self.m = m
sqrt_m = int(np.sqrt(self.m))
self.num_bits_symbol = int(np.log2(self.m))
gray_mapping = self.bin2gray(np.arange(self.m))
mapping_array = np.arange(1, np.sqrt(self.m)+1) - (np.sqrt(self.m)/2)
self.constellation = list(map(self._constellation_symbol,
list(product(mapping_array, repeat=2))))
self.constellation = np.reshape(self.constellation,(sqrt_m,sqrt_m))
self.constellation = np.transpose(self.constellation)
self.constellation = list(zip(*self.constellation[::-1]))
self.constellation = np.transpose(self.constellation)
self.constellation[1::2,::] = np.flip(self.constellation[1::2,::],1)
self.constellation = np.reshape(self.constellation,m)
sort_idx = np.argsort(gray_mapping)
self.constellation = self.constellation[sort_idx]
def modulate(self, input_bits):
""" Modulate (map) an array of bits to constellation symbols.
Parameters
----------
input_bits : 1D ndarray of ints
Inputs bits to be modulated (mapped).
Returns
-------
baseband_symbols : 1D ndarray of complex floats
Modulated complex symbols.
"""
mapfunc = np.vectorize(lambda i:
self.constellation[bitarray2dec(input_bits[i:i+self.num_bits_symbol])])
baseband_symbols = mapfunc(np.arange(0, len(input_bits), self.num_bits_symbol))
return baseband_symbols
def bin2gray(self,n):
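        """Convert natural binary value(s) n to the corresponding Gray code (n XOR n>>1)."""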
return n ^ (n >> 1)
def demodulate(self, data):
""" Demodulate (map) a set of constellation symbols to corresponding bits.
Supports hard-decision demodulation only.
Parameters
----------
data : 1D ndarray of complex floats
Input symbols to be demodulated.
Returns
-------
demod_bits : 1D ndarray of ints
Corresponding demodulated bits.
"""
index_list = map(lambda i: np.argmin(abs(data[i] - self.constellation)),
range(0, len(data)))
demod_bits = np.hstack(list(map(lambda i: dec2bitarray(i, self.num_bits_symbol),
index_list)))
return demod_bits
if __name__ == "__main__":
pass
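
    # Hedged usage sketch (illustrative values only, not part of the original module):
    # a 16-QAM round trip with random bits should recover the input exactly.
    mapper = QamMapper(16)
    bits = np.random.randint(0, 2, 4 * mapper.num_bits_symbol)
    symbols = mapper.modulate(bits)
    recovered = mapper.demodulate(symbols)
    assert np.array_equal(bits, recovered)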
| 2.875
| 3
|
pidf/__init__.py
|
jasmine125/pidf
| 0
|
12781770
|
# This page intentionally left blank
| 0.765625
| 1
|
temboo/core/Library/DataGov/GetCensusIDByTypeAndName.py
|
jordanemedlock/psychtruths
| 7
|
12781771
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetCensusIDByTypeAndName
# Retrieve the U.S. census ID for a specified geography type and name.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetCensusIDByTypeAndName(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetCensusIDByTypeAndName Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetCensusIDByTypeAndName, self).__init__(temboo_session, '/Library/DataGov/GetCensusIDByTypeAndName')
def new_input_set(self):
return GetCensusIDByTypeAndNameInputSet()
def _make_result_set(self, result, path):
return GetCensusIDByTypeAndNameResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetCensusIDByTypeAndNameChoreographyExecution(session, exec_id, path)
class GetCensusIDByTypeAndNameInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetCensusIDByTypeAndName
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_GeographyName(self, value):
"""
        Set the value of the GeographyName input for this Choreo. ((required, string) Specify the geography name for the corresponding type, with at least three leading characters. For example, for the geography type "state" you could enter "ore" for Oregon.)
"""
super(GetCensusIDByTypeAndNameInputSet, self)._set_input('GeographyName', value)
def set_GeographyType(self, value):
"""
Set the value of the GeographyType input for this Choreo. ((required, string) Specify one of the following geography type values: "state", "county", "tract", "block", "congdistrict", "statehouse", "statesenate", "censusplace", or "msa" (metropolitan statistical area).)
"""
super(GetCensusIDByTypeAndNameInputSet, self)._set_input('GeographyType', value)
def set_MaxResults(self, value):
"""
Set the value of the MaxResults input for this Choreo. ((required, integer) Specify the maximum number of results to return. Defaults to 50.)
"""
super(GetCensusIDByTypeAndNameInputSet, self)._set_input('MaxResults', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: xml (the default) and json.)
"""
super(GetCensusIDByTypeAndNameInputSet, self)._set_input('ResponseFormat', value)
class GetCensusIDByTypeAndNameResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetCensusIDByTypeAndName Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response returned from the API.)
"""
return self._output.get('Response', None)
def get_CensusID(self):
"""
Retrieve the value for the "CensusID" output from this Choreo execution. ((integer) The ID retrieved from the API call.)
"""
return self._output.get('CensusID', None)
class GetCensusIDByTypeAndNameChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetCensusIDByTypeAndNameResultSet(response, path)
| 2.265625
| 2
|
app.py
|
itsron143/markov-gen
| 1
|
12781772
|
from flask import Flask, current_app
from flask import render_template, request
from markov_gen.markov_gen import word_markovIt, ngram_markovIt
app = Flask(__name__)
def read_file(corpus):
file_path = "markov_gen/corpus/" + corpus + ".txt"
with current_app.open_resource(file_path, mode="r") as f:
txt = f.read().replace('\n', '')
txt_list = txt.split()
return txt_list, txt
@app.route('/', methods=['POST', 'GET'])
def index():
return render_template("index.html")
@app.route('/markov', methods=['POST', 'GET'])
def markov():
sizes = {"small": 250, "medium": 1000, "large": 5000}
n_gram_order = 10
if request.method == 'POST':
if 'corpus' in request.form:
corpus = request.form['corpus']
size = request.form['size']
chain_type = request.form['type']
text = request.form['text']
if len(text) == 0:
txt_list, txt = read_file(corpus)
else:
txt_list, txt = text.split(), text
generated_txt = word_markovIt(txt_list, sizes[size]) if chain_type == "word" else ngram_markovIt(
txt, n_gram_order, sizes[size])
# print(generated_txt)
return render_template("markov.html", data=generated_txt)
if __name__ == '__main__':
app.run()
| 2.59375
| 3
|
morse/main.py
|
augustin64/scripts
| 0
|
12781773
|
#!/usr/bin/python3
# max_score:392
import sys
import random
import platform
from optparse import OptionParser
if platform.system() == "Windows":
import msvcrt
import time
else:
from select import select
try:
import enquiries
choose = enquiries.choose
except: # Offer a fallback when the enquiries module is not installed,
    # since that module is not available on every platform
def choose(query, options):
print(query)
print(
"\n".join(["{}. {}".format(i + 1, options[i]) for i in range(len(options))])
)
response = int(input("> "))
return options[response - 1]
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"g": "--.",
"h": "....",
"i": "..",
"j": ".---",
"k": "-.-",
"l": ".-..",
"m": "--",
"n": "-.",
"o": "---",
"p": ".--.",
"q": "--.-",
"r": ".-.",
"s": "...",
"t": "-",
"u": "..-",
"v": "...-",
"w": ".--",
"x": "-..-",
"y": "-.--",
"z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----",
}
mnemotechnique = {
"a": "Allô ?",
"b": "Bonaparte",
"c": "Coca-Cola",
"d": "Dorémi",
"e": "Euh..",
"f": "Farandole",
"g": "Golgotha",
"h": "Himalaya",
"i": "Ici",
"j": "Jablonovo",
"k": "Koalo",
"l": "Limonade",
"m": "Moto",
"n": "Noé",
"o": "Oporto",
"p": "Philosophe",
"q": "Quocorico",
"r": "Ricola",
"s": "Sapristi",
"t": "Thon",
"u": "Union",
"v": "Valparéso",
"w": "Wagon Long",
"x": "Xtrocadéro",
"y": "Yomamoto",
"z": "Zoro est là",
}
mnemoschematik = {
".-": "Allô ?",
"-...": "Bonaparte",
"-.-.": "Coca-Cola",
"-..": "Do-ré-mi",
".": "Euh..",
"..-.": "Farandole",
"--.": "Golgotha",
"....": "Himalaya",
"..": "Ici",
".---": "Jablonovo",
"-.-": "Koalo",
".-..": "Limonade",
"--": "Moto",
"-.": "Noël",
"---": "Oporto",
".--.": "Philosophe",
"--.-": "Quocorico",
".-.": "Ricola",
"...": "Sapristi",
"-": "Thon",
"..-": "Union",
"...-": "Valparéso",
".--": "Wagon Long",
"-..-": "Xtrocadéro",
"-.--": "Yomamoto",
"--..": "Zoro est là",
}
crs = [j for j in morse.keys()]
def diff(a, b):
"""
    Return the difference between the strings a and b, formatted with colours
"""
if platform.system() != "Windows":
s = ""
if len(a) > len(b):
b = b + " " * (len(a) - len(b))
if len(b) > len(a):
a = a + " " * (len(b) - len(a))
for i in range(len(a)):
if a[i] != b[i]:
s += "\x1b[7;30;41m" + b[i]
else:
s += "\x1b[0m\x1b[7;30;42m" + b[i]
s += "\x1b[0m"
return s
else:
return b
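
# Illustrative note: on POSIX terminals, diff("sos", "sas") returns the second string
# with matching characters on a green background and mismatches on red, using ANSI
# escape sequences; on Windows it returns b unchanged.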
def multi_quiz(length=10, timed=True, timeout=5):
"""
Pouvant comporter un timer ou non, ce quiz renvoie "length" charactères encodés en morse à décoder
"""
score = 0
while True:
clear_text = "".join([random.choice(crs) for i in range(length)])
encoded = "/".join([morse[i] for i in clear_text])
if timed:
s = timed_input(encoded, timeout=length * timeout)
else:
s = input(encoded + "\n").lower()
if s == TimeoutError:
print("\nTemps écoulé, sois plus rapide la prochaine fois !")
elif s != clear_text:
print(f"Faux ! La bonne réponse : {clear_text}")
print(f"Votre réponse était : {diff(clear_text,s)}")
print("Votre score est de {} points".format(score))
break
else:
score += length
print("Bonne réponse ! Votre score est de {} points".format(score))
def int_quiz(timed=True, timeout=10):
"""
    With or without a timer, this quiz shows a letter or digit to encode in Morse
"""
score = 0
while True:
clear_text = random.choice(crs)
if timed:
s = timed_input(clear_text.upper(), timeout=timeout)
else:
s = input(clear_text.upper() + "\n> ")
if s == TimeoutError:
print("Temps écoulé, sois plus rapide la prochaine fois !")
elif s != morse[clear_text]:
if clear_text in mnemotechnique.keys():
print(
"Faux ! La bonne réponse est {} [{}]".format(
morse[clear_text], mnemotechnique[clear_text]
)
)
else:
print("Faux ! La bonne réponse est {}".format(morse[clear_text]))
print("Votre score est de {} points".format(score))
break
else:
score += 1
print("Bonne réponse ! Votre score est de {} points".format(score))
def quiz(timed=True, timeout=10):
"""
    With or without a timer, this quiz shows a Morse-encoded character to decode
"""
score = 0
while True:
clear_text = random.choice(crs)
if timed:
s = timed_input(str(morse[clear_text]), timeout=timeout)
else:
s = input(str(morse[clear_text]) + "\n> ")
if s == TimeoutError:
print("Temps écoulé, sois plus rapide la prochaine fois !")
elif s != clear_text:
if clear_text in mnemotechnique.keys():
print(
"Faux ! La bonne réponse est {}[{}]".format(
clear_text, mnemotechnique[clear_text]
)
)
else:
print("Faux ! La bonne réponse est {}".format(clear_text))
print("Votre score est de {} points".format(score))
break
else:
score += 1
print("Bonne réponse ! Votre score est de {} points".format(score))
def quiz_junior(timed=True, timeout=10):
"""
    With or without a timer, this quiz shows a mnemonic from which the Morse code must be recovered
"""
score = 0
crs = [j for j in mnemoschematik.keys()]
while True:
memo = random.choice(crs)
if timed:
s = timed_input(mnemoschematik[memo], timeout=timeout)
else:
s = input(mnemoschematik[memo] + "\n> ")
if s == TimeoutError:
print("tmps écoulé, sois plus rapide la prochaine fois !")
elif s != memo:
print("\x1b[0;37;41mFaux ! La bonne réponse est {}\x1b[0m".format(memo))
print("Votre score est de {} points".format(score))
break
else:
score += 1
print("Bonne réponse ! Votre score est de {} points".format(score))
def timed_input(prompt, timeout=10):
if platform.system() != "Windows":
print(prompt)
sys.stdin.flush()
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline()
return s[:-1].lower()
else:
return TimeoutError
else:
sys.stdout.write(prompt + "\n")
sys.stdout.flush()
endtime = time.monotonic() + timeout
result = []
while time.monotonic() < endtime:
if msvcrt.kbhit():
result.append(msvcrt.getwche())
if result[-1] == "\r":
return "".join(result[:-1])
time.sleep(0.04)
return TimeoutError
parser = OptionParser()
parser.add_option(
"-g",
"--gamemode",
dest="gamemode",
help="choose GAMEMODE",
type="string",
metavar="(JUNIOR|INTERMEDIAIRE|NORMAL|EXPERT)",
)
parser.add_option(
"-T",
"--timeout",
action="store",
dest="timeout",
type="int",
help="set TIMEOUT",
metavar="TIMEOUT",
default=5,
)
parser.add_option(
"-d",
"--disable-timer",
action="store_false",
dest="timed",
help="Disable timer",
default=True,
)
parser.add_option(
"-l",
"--length",
dest="length",
help="Nombre d'éléments, disponible uniquement pour le mode de jeu EXPERT",
action="store",
type="int",
metavar="NOMBRE D'ELEMENTS",
default=10,
)
(options, args) = parser.parse_args()
gamemodes = {
"Junior": quiz_junior,
"Intermédiaire": int_quiz,
"Normal": quiz,
"Expert": multi_quiz,
}
if options.gamemode is not None:
gamemodes = {
"JUNIOR": quiz_junior,
"INTERMEDIAIRE": int_quiz,
"NORMAL": quiz,
"EXPERT": multi_quiz,
}
if options.gamemode not in gamemodes:
print(f"Option not available gamemode {options.gamemode}")
raise ValueError
else:
gm = gamemodes[options.gamemode]
else:
gm = gamemodes[
choose("Choisissez votre mode de jeu", [i for i in gamemodes.keys()])
]
while True:
if gm == multi_quiz:
gm(timed=options.timed, timeout=options.timeout, length=options.length)
else:
gm(timed=options.timed, timeout=options.timeout)
| 2.8125
| 3
|
src/ggrc_risks/migrations/versions/20170502140636_377d935e1b21_migrate_urls_to_documents.py
|
Killswitchz/ggrc-core
| 0
|
12781774
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
migrate urls to documents
Create Date: 2017-05-02 14:06:36.936410
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from ggrc.migrations.utils import url_util
# revision identifiers, used by Alembic.
revision = '377d935e1b21'
down_revision = '55f583313670'
HYPERLINKED_OBJECTS = {
'Risk': 'risks',
'Threat': 'threats'
}
HYPERLINKED_OBJ_TYPES = set(HYPERLINKED_OBJECTS)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
url_util.migrate_urls_to_documents(HYPERLINKED_OBJECTS)
def downgrade():
"""Downgrade database schema and/or vdata back to the previous revision."""
url_util.delete_reference_urls(HYPERLINKED_OBJ_TYPES)
| 1.945313
| 2
|
51cto.py
|
hmilyfe/web-get
| 0
|
12781775
|
#!/usr/bin/python2.6
#coding=utf-8
# Automatically claim 51CTO download credits ("beans"), 2016-07-27
import requests
import time
from bs4 import BeautifulSoup
import random
def freedown(username,passwd):
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1','Referer':'http://home.51cto.com/index'}
req = requests.session()
res = req.get('http://home.51cto.com/index',headers=header)
soup = BeautifulSoup(res.text)
csrf = soup.find('input').attrs.get('value')
postdata = {'LoginForm[username]': username,
'LoginForm[password]': <PASSWORD>,
'_csrf':csrf,
'LoginForm[rememberMe]':'0',
}
res_post = req.post('http://home.51cto.com/index',data=postdata)
so = BeautifulSoup(res_post.text)
print so
login_list_url = []
for script_url in so.find_all('script'):
login_list_url.append(script_url.attrs.get('src'))
print login_list_url
for url in login_list_url[:-2]:
r = req.get(url)
print r.text
# down_url = 'http://down.51cto.com/download.php'
# down_data = {'do':'getfreecredits','t':random.random()}
# down_res = req.post(down_url,params=down_data,data=down_data)
# print down_res.text
#down_url = 'http://home.51cto.com/home/ajax-to-sign'
#down_data = {'DNT':'1'}
#down_res = req.post(down_url,params=down_data,data=down_data)
#print down_res.text
if __name__ == "__main__":
# t=random.randint(60,600 )
# time.sleep( t )
freedown('hmilyfe','6014256')
| 2.640625
| 3
|
misc/deep_learning_notes/Ch3 Advanced Tensorflow/GPU and device management tests/1_simple_GPU_test.py
|
tmjnow/MoocX
| 7
|
12781776
|
import tensorflow as tf
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
print(sess.run(c))
""" prints:
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
I tensorflow/core/common_runtime/direct_session.cc:175] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
MatMul: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] MatMul: /job:localhost/replica:0/task:0/gpu:0
b: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] b: /job:localhost/replica:0/task:0/gpu:0
a: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] a: /job:localhost/replica:0/task:0/gpu:0
[[ 22. 28.]
[ 49. 64.]]
"""
| 2.5625
| 3
|
Advent2015/2.py
|
SSteve/AdventOfCode
| 0
|
12781777
|
import re
boxRegex = re.compile(r"(\d+)x(\d+)x(\d+)")
def day2(fileName):
totalPaper = 0
totalRibbon = 0
with open(fileName) as infile:
for line in infile:
match = boxRegex.match(line)
if match:
sides = sorted(int(side) for side in match.group(1, 2, 3))
totalPaper += 3 * sides[0] * sides[1] + 2 * sides[1] * sides[2] + 2 * sides[2] * sides[0]
totalRibbon += 2 * sides[0] + 2 * sides[1] + sides[0] * sides[1] * sides[2]
print(totalPaper)
print(totalRibbon)
if __name__ == "__main__":
day2("2.txt")
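
# Worked check from the puzzle statement: a 2x3x4 present needs
# 3*2*3 + 2*3*4 + 2*4*2 = 58 square feet of paper and
# 2*2 + 2*3 + 2*3*4 = 34 feet of ribbon, matching the formulas above.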
| 3.34375
| 3
|
Demo/common/example_math_file.py
|
quecpython/EC100Y-SDK
| 4
|
12781778
|
# Examples of math module functions
import math
import log
# Set the log output level
log.basicConfig(level=log.INFO)
math_log = log.getLogger("Math")
# Value of x**y
result = math.pow(2,3)
math_log.info(result)
# 8.0
# Smallest integer greater than or equal to x; if x is an integer, return x itself
result = math.ceil(4.12)
math_log.info(result)
# 5
# Copy the sign of y onto x; 0 may be used as well
result = math.copysign(2,-3)
math_log.info(result)
# -2.0
# Cosine of x; x must be in radians
result = math.cos(math.pi/4)
math_log.info(result)
# 0.7071067811865476
# Convert x from radians to degrees
result = math.degrees(math.pi/4)
math_log.info(result)
# 45.0
# e is a mathematical constant
result = math.e
math_log.info(result)
# 2.718281828459045
# exp() returns math.e (about 2.71828) raised to the power x
result = math.exp(2)
math_log.info(result)
# 7.38905609893065
# fabs() returns the absolute value of x
result = math.fabs(-0.03)
math_log.info(result)
# 0.03
# floor() returns the largest integer less than or equal to x; if x is an integer, return x itself
result = math.floor(4.999)
math_log.info(result)
# 4
# fmod() returns the remainder of x/y as a float
result = math.fmod(20,3)
math_log.info(result)
# 2.0
# frexp() returns a tuple (m, e) such that x == m * 2**e; if x is 0, both m and e are 0; otherwise abs(m) lies in [0.5, 1)
result = math.frexp(75)
math_log.info(result)
# (0.5859375, 7)
# isfinite() returns True if x is neither infinite nor NaN, otherwise False
result = math.isfinite(0.1)
math_log.info(result)
# True
# isinf() returns True if x is positive or negative infinity, otherwise False
result = math.isinf(234)
math_log.info(result)
# False
# isnan() returns True if x is NaN (not a number), otherwise False
result = math.isnan(23)
math_log.info(result)
# False
# ldexp() returns x * (2**i)
result = math.ldexp(5,5)
math_log.info(result)
# 160.0
# modf() returns a tuple of the fractional and integer parts of x
result = math.modf(math.pi)
math_log.info(result)
# (0.14159265358979312, 3.0)
# pi: numeric constant, the ratio of a circle's circumference to its diameter
result = math.pi
math_log.info(result)
# 3.141592653589793
# sin() returns the sine of x (x in radians)
result = math.sin(math.pi/4)
math_log.info(result)
# 0.7071067811865476
# sqrt() returns the square root of x
result = math.sqrt(100)
math_log.info(result)
# 10.0
# tan() returns the tangent of x (x in radians)
result = math.tan(math.pi/4)
math_log.info(result)
# 0.9999999999999999
# trunc() returns the integer part of x
result = math.trunc(6.789)
math_log.info(result)
# 6
| 3.34375
| 3
|
pandas/tests/strings/test_extract.py
|
oricou/pandas
| 2
|
12781779
|
from datetime import datetime
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_extract_expand_None():
values = Series(["fooBAD__barBAD", np.nan, "foo"])
with pytest.raises(ValueError, match="expand must be True or False"):
values.str.extract(".*(BAD[_]+).*(BAD)", expand=None)
def test_extract_expand_unspecified():
values = Series(["fooBAD__barBAD", np.nan, "foo"])
result_unspecified = values.str.extract(".*(BAD[_]+).*")
assert isinstance(result_unspecified, DataFrame)
result_true = values.str.extract(".*(BAD[_]+).*", expand=True)
tm.assert_frame_equal(result_unspecified, result_true)
def test_extract_expand_False():
# Contains tests like those in test_match and some others.
values = Series(["fooBAD__barBAD", np.nan, "foo"])
er = [np.nan, np.nan] # empty row
result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
exp = DataFrame([["BAD__", "BAD"], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
rs = Series(mixed).str.extract(".*(BAD[_]+).*(BAD)", expand=False)
exp = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series(["fooBAD__barBAD", np.nan, "foo"])
result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
exp = DataFrame([["BAD__", "BAD"], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(["A1", "A2", "A3", "A4", "B5"])
with pytest.raises(ValueError, match="supported"):
idx.str.extract("([AB])([123])", expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(["A1", "B2", "C3"])
msg = "pattern contains no capture groups"
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=False)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=False)
# single group renames series/index properly
s_or_idx = klass(["A1", "A2"])
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False)
assert result.name == "uno"
exp = klass(["A", "A"], name="uno")
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(["A1", "B2", "C3"])
# one group, no matches
result = s.str.extract("(_)", expand=False)
exp = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=False)
exp = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=object
)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=False)
exp = Series(["A", "B", np.nan])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=False)
exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=False)
exp = Series(["A", "B", np.nan], name="letter")
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False)
exp = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], columns=["letter", "number"]
)
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=False)
exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]], columns=[0, "number"])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=False)
exp = Series(["A", "B", np.nan])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(["A11", "B22", "C33"]).str.extract(
"([AB])([123])(?:[123])", expand=False
)
exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(["A1", "B2", "3"]).str.extract(
"(?P<letter>[AB])?(?P<number>[123])", expand=False
)
exp = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]], columns=["letter", "number"]
)
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(["A1", "B2", "C"]).str.extract(
"(?P<letter>[ABC])(?P<number>[123])?", expand=False
)
exp = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]], columns=["letter", "number"]
)
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ["A1", "B2", "C"]
index = index[: len(data)]
s = Series(data, index=index)
result = s.str.extract(r"(\d)", expand=False)
exp = Series(["1", "2", np.nan], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r"(?P<letter>\D)(?P<number>\d)?", expand=False
)
e_list = [["A", "1"], ["B", "2"], ["C", np.nan]]
exp = DataFrame(e_list, columns=["letter", "number"], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
tm.makeRangeIndex,
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(["a3", "b3", "c2"], name="bob")
r = s.str.extract(r"(?P<sue>[a-z])", expand=False)
e = Series(["a", "b", "c"], name="sue")
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True():
# Contains tests like those in test_match and some others.
values = Series(["fooBAD__barBAD", np.nan, "foo"])
er = [np.nan, np.nan] # empty row
result = values.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
exp = DataFrame([["BAD__", "BAD"], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
rs = Series(mixed).str.extract(".*(BAD[_]+).*(BAD)", expand=True)
exp = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(rs, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(["A1", "B2", "C3"])
msg = "pattern contains no capture groups"
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=True)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=True)
# single group renames series/index properly
s_or_idx = klass(["A1", "A2"])
result_df = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df["uno"]
tm.assert_series_equal(result_series, Series(["A", "A"], name="uno"))
def test_extract_series():
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(["A1", "B2", "C3"], name=series_name)
# one group, no matches
result = s.str.extract("(_)", expand=True)
exp = DataFrame([np.nan, np.nan, np.nan], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=True)
exp = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=object
)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=True)
exp = DataFrame(["A", "B", np.nan])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=True)
exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=True)
exp = DataFrame({"letter": ["A", "B", np.nan]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True)
e_list = [["A", "1"], ["B", "2"], [np.nan, np.nan]]
exp = DataFrame(e_list, columns=["letter", "number"])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=True)
exp = DataFrame(e_list, columns=[0, "number"])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=True)
exp = DataFrame(["A", "B", np.nan])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups():
# two normal groups, one non-capturing group
result = Series(["A11", "B22", "C33"]).str.extract(
"([AB])([123])(?:[123])", expand=True
)
exp = DataFrame([["A", "1"], ["B", "2"], [np.nan, np.nan]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(["A1", "B2", "3"]).str.extract(
"(?P<letter>[AB])?(?P<number>[123])", expand=True
)
e_list = [["A", "1"], ["B", "2"], [np.nan, "3"]]
exp = DataFrame(e_list, columns=["letter", "number"])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(["A1", "B2", "C"]).str.extract(
"(?P<letter>[ABC])(?P<number>[123])?", expand=True
)
e_list = [["A", "1"], ["B", "2"], ["C", np.nan]]
exp = DataFrame(e_list, columns=["letter", "number"])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ["A1", "B2", "C"]
index = index[: len(data)]
result = Series(data, index=index).str.extract(r"(\d)", expand=True)
exp = DataFrame(["1", "2", np.nan], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r"(?P<letter>\D)(?P<number>\d)?", expand=True
)
e_list = [["A", "1"], ["B", "2"], ["C", np.nan]]
exp = DataFrame(e_list, columns=["letter", "number"], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
tm.makeRangeIndex,
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame():
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(["a3", "b3", "c2"], name="series_name")
r = s.str.extract(r"(?P<letter>[a-z])", expand=True)
e = DataFrame({"letter": ["a", "b", "c"]})
tm.assert_frame_equal(r, e)
def test_extractall():
subject_list = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL> some text <EMAIL>",
"<EMAIL> some text <EMAIL> and <EMAIL>",
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"),
("steve", "gmail", "com"),
("a", "b", "com"),
("c", "d", "com"),
("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples(
[(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)],
names=(None, "match"),
)
expected_df = DataFrame(expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples(
[
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
]
)
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples(
[
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
],
names=(None, None, "match"),
)
expected_df = DataFrame(expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ["", "A1", "32"]
named_pattern = "(?P<letter>[AB])?(?P<number>[123])"
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples(
[(1, 0), (2, 0), (2, 1)], names=(None, "match")
)
expected_df = DataFrame(
[("A", "1"), (np.nan, "3"), (np.nan, "2")],
expected_index,
columns=["letter", "number"],
)
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = "([AB])?(?P<number>[123])"
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame(
[("A", "1"), (np.nan, "3"), (np.nan, "2")],
expected_index,
columns=[0, "number"],
)
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group():
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(["a3", "b3", "d4c2"], name="series_name")
r = s.str.extractall(r"(?P<letter>[a-z])")
i = MultiIndex.from_tuples([(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match"))
e = DataFrame({"letter": ["a", "b", "d", "c"]}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r"([a-z])")
e = DataFrame(["a", "b", "d", "c"], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier():
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(["ab3", "abc3", "d4cd2"], name="series_name")
r = s.str.extractall(r"([a-z]+)")
i = MultiIndex.from_tuples([(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match"))
e = DataFrame(["ab", "abc", "d", "cd"], i)
tm.assert_frame_equal(r, e)
@pytest.mark.parametrize(
"data, names",
[
([], (None,)),
([], ("i1",)),
([], (None, "i2")),
([], ("i1", "i2")),
(["a3", "b3", "d4c2"], (None,)),
(["a3", "b3", "d4c2"], ("i1", "i2")),
(["a3", "b3", "d4c2"], (None, "i2")),
(["a3", "b3", "d4c2"], ("i1", "i2")),
],
)
def test_extractall_no_matches(data, names):
# GH19075 extractall with no matches should return a valid MultiIndex
n = len(data)
if len(names) == 1:
i = Index(range(n), name=names[0])
else:
a = (tuple([i] * (n - 1)) for i in range(n))
i = MultiIndex.from_tuples(a, names=names)
s = Series(data, name="series_name", index=i, dtype="object")
ei = MultiIndex.from_tuples([], names=(names + ("match",)))
# one un-named group.
r = s.str.extractall("(z)")
e = DataFrame(columns=[0], index=ei)
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall("(z)(z)")
e = DataFrame(columns=[0, 1], index=ei)
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall("(?P<first>z)")
e = DataFrame(columns=["first"], index=ei)
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall("(?P<first>z)(?P<second>z)")
e = DataFrame(columns=["first", "second"], index=ei)
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall("(z)(?P<second>z)")
e = DataFrame(columns=[0, "second"], index=ei)
tm.assert_frame_equal(r, e)
def test_extractall_stringindex():
s = Series(["a1a2", "b1", "c1"], name="xxx")
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, "match"])
exp = DataFrame({"digit": ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [
Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name="xxx"),
]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(
["a1a2", "b1", "c1"],
name="s_name",
index=Index(["XX", "yy", "zz"], name="idx_name"),
)
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples(
[("XX", 0), ("XX", 1), ("yy", 0)], names=["idx_name", "match"]
)
exp = DataFrame({"digit": ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors():
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(["a3", "b3", "d4c2"], name="series_name")
with pytest.raises(ValueError, match="no capture groups"):
s.str.extractall(r"[a-z]")
def test_extract_index_one_two_groups():
s = Series(["a3", "b3", "d4c2"], index=["A3", "B3", "D4"], name="series_name")
r = s.index.str.extract(r"([A-Z])", expand=True)
e = DataFrame(["A", "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(r"(?P<letter>[A-Z])(?P<digit>[0-9])", expand=True)
e_list = [("A", "3"), ("B", "3"), ("D", "4")]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract():
s = Series(["a3", "b3", "c2"], name="series_name")
pattern_two_noname = r"([a-z])([0-9])"
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r"(?P<letter>[a-z])(?P<digit>[0-9])"
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r"(?P<group_name>[a-z])"
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r"([a-z])"
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index():
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples(
[("A", "first"), ("B", "second"), ("C", "third")],
names=("capital", "ordinal"),
)
s = Series(["a3", "b3", "c2"], i, name="series_name")
pattern_two_noname = r"([a-z])([0-9])"
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r"(?P<letter>[a-z])(?P<digit>[0-9])"
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r"(?P<group_name>[a-z])"
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r"([a-z])"
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
| 2.5
| 2
|
double_pendulum_animated.py
|
ehoppmann/double-pendulum-matplotlib-subplots
| 0
|
12781780
|
#!/usr/bin/env python3
"""
===========================
The double pendulum problem
===========================
This animation illustrates the double pendulum problem.
"""
# Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
# Modified to show subplots comparing double pendulums with the same initial
# conditions, and then after that is closed, a second figure with the initial
# theta 1 modified by 1 degree
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
class double_pendulum(object):
def __init__(self, fig, ax, th1=120.0):
self.fig = fig
self.ax = ax
self.G = 9.8 # acceleration due to gravity, in m/s^2
self.L1 = 1.0 # length of pendulum 1 in m
self.L2 = 1.0 # length of pendulum 2 in m
self.M1 = 1.0 # mass of pendulum 1 in kg
self.M2 = 1.0 # mass of pendulum 2 in kg
self.dt = 0.05
t = np.arange(0.0, 20, self.dt)
# th1 and th2 are the initial angles (degrees)
# w10 and w20 are the initial angular velocities (degrees per second)
th1 = th1
w1 = 0.0
th2 = -20.0
w2 = 0.0
# initial state
state = np.radians([th1, w1, th2, w2])
# integrate your ODE using scipy.integrate.
self.y = integrate.odeint(self.derivs, state, t)
self.x1 = self.L1*sin(self.y[:, 0])
self.y1 = -self.L1*cos(self.y[:, 0])
self.x2 = self.L2*sin(self.y[:, 2]) + self.x1
self.y2 = -self.L2*cos(self.y[:, 2]) + self.y1
self.line, = ax.plot([], [], 'o-', lw=2)
self.time_template = 'time = %.1fs'
self.time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
self.line.set_data([], [])
self.time_text.set_text('')
self.ax.set_title('Initial angle 1 = {}'.format(th1))
def derivs(self, state, t):
dydx = np.zeros_like(state)
dydx[0] = state[1]
del_ = state[2] - state[0]
den1 = (self.M1 + self.M2)*self.L1 - self.M2*self.L1*cos(del_)*cos(del_)
dydx[1] = (self.M2*self.L1*state[1]*state[1]*sin(del_)*cos(del_) +
self.M2*self.G*sin(state[2])*cos(del_) +
self.M2*self.L2*state[3]*state[3]*sin(del_) -
(self.M1 + self.M2)*self.G*sin(state[0]))/den1
dydx[2] = state[3]
den2 = (self.L2/self.L1)*den1
dydx[3] = (-self.M2*self.L2*state[3]*state[3]*sin(del_)*cos(del_) +
(self.M1 + self.M2)*self.G*sin(state[0])*cos(del_) -
(self.M1 + self.M2)*self.L1*state[1]*state[1]*sin(del_) -
(self.M1 + self.M2)*self.G*sin(state[2]))/den2
return dydx
def animate(self, i):
thisx = [0, self.x1[i], self.x2[i]]
thisy = [0, self.y1[i], self.y2[i]]
self.line.set_data(thisx, thisy)
self.time_text.set_text(self.time_template % (i*self.dt))
return self.line, self.time_text
class ani_wrapper(object):
def __init__(self, iterable):
self.fns = iterable
self.called = 0
self.n = len(iterable)
def animate(self, *args, **kwargs):
fn = self.fns[self.called % self.n]
self.called += 1
return fn(*args, **kwargs)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,6))
ax1.grid()
ax1.set_xlim(-2,2)
ax1.set_ylim(-2,2)
ax2.grid()
ax2.set_xlim(-2,2)
ax2.set_ylim(-2,2)
dp1 = double_pendulum(f, ax1)
dp2 = double_pendulum(f, ax2)
aw = ani_wrapper((dp1.animate, dp2.animate))
ani = animation.FuncAnimation(f, aw.animate, np.arange(1, len(dp1.y)), interval=1000/30, blit=False)
# ani.save('double_pendulum.mp4', fps=15)
plt.show()
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,6))
ax1.grid()
ax1.set_xlim(-2,2)
ax1.set_ylim(-2,2)
ax2.grid()
ax2.set_xlim(-2,2)
ax2.set_ylim(-2,2)
dp1 = double_pendulum(f, ax1)
dp2 = double_pendulum(f, ax2, th1=119)
aw = ani_wrapper((dp1.animate, dp2.animate))
ani = animation.FuncAnimation(f, aw.animate, np.arange(1, len(dp1.y)), interval=1000/30, blit=False)
# ani.save('double_pendulum.mp4', fps=15)
plt.show()
| 3.78125
| 4
|
funhouse.py
|
thertzelle/funhouse-clock
| 0
|
12781781
|
import board
import neopixel
pixels = neopixel.NeoPixel(board.D6, 30, brightness=0.5, auto_write=False)
pixels.fill((255, 0, 0))
pixels.show()
| 2.53125
| 3
|
object_detection/evaluation/COCODataset.py
|
HeartFu/NeuralBabyTalk
| 1
|
12781782
|
import os
from PIL import Image
from pycocotools.coco import COCO
from torch.utils import data
class COCODataset(data.Dataset):
def __init__(self, images_path, ann_path, split='train', transform=None):
self.coco = COCO(ann_path)
self.image_path = images_path
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.split = split
def __getitem__(self, index):
img_id = self.ids[index]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
# print(ann_ids)
target = self.coco.loadAnns(ann_ids)
# print(target)
file_name = self.coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.image_path, file_name)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, target, img_id
def __len__(self):
return len(self.ids)
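
# A hedged usage sketch (paths and transform are placeholders, not part of this module):
#
#   from torchvision import transforms
#   dataset = COCODataset("data/coco/train2017",
#                         "data/coco/annotations/instances_train2017.json",
#                         split="train", transform=transforms.ToTensor())
#   img, target, img_id = dataset[0]   # image tensor, list of annotation dicts, COCO image id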
| 2.765625
| 3
|
ClusterTools/cluster_szspec.py
|
deimqs/ClusterModel
| 2
|
12781783
|
"""
This script gather functions related to the SZ spectrum
"""
import numpy as np
import astropy.units as u
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
#===================================================
#========== CMB intensity
#===================================================
def get_I0_CMB():
"""
Compute the CMB intensity
Parameters
----------
Outputs
--------
- I0 (quantity): the CMB intensity (homogeneous to MJy/sr)
"""
I0 = 2*(const.k_B*cosmo.Tcmb0)**3/(const.h*const.c)**2*u.sr**-1
return I0.to('MJy sr-1')
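
# For the Planck15 CMB temperature this evaluates to roughly 270 MJy/sr, a useful
# sanity check when normalising the spectra below.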
#===================================================
#========== Non relativistic tSZ spectrum
#===================================================
def tsz_spec(frequency):
"""
Compute the non relativistic SZ spectrum, f(nu)
as in delta I_nu = I0 f(nu) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
Outputs
--------
- SZ spectrum: f(nu)
"""
x = const.h * frequency / (const.k_B * cosmo.Tcmb0)
f_nu = x**4 * np.exp(x) / (np.exp(x)-1)**2 * (x*(np.exp(x)+1)/(np.exp(x)-1) - 4)
return f_nu
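
# A minimal usage sketch (assumed frequencies, not part of the library):
#
#   freq = np.array([90.0, 150.0, 220.0, 353.0]) * u.GHz
#   delta_I = get_I0_CMB() * tsz_spec(freq) * 1e-5   # intensity change for a Compton parameter y = 1e-5
#
# The spectrum is negative below and positive above the tSZ null near 217 GHz.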
#===================================================
#========== Relativistic tSZ spectrum
#===================================================
def tsz_spec_relativistic(frequency, kBT):
"""
Compute the relativistic SZ spectrum, f(nu, T)
as in delta I_nu = I0 f(nu, T) y
Parameters
----------
- frequency (quantity): frequency array homogeneous to GHz
    - kBT (quantity): electron temperature array homogeneous to keV
Outputs
--------
- SZ spectrum: f(nu, T)
"""
#========== Make sure that frequency and temperature are arrays
if type(frequency.to_value()) == float:
frequency = np.array([frequency.to_value()]) * frequency.unit
if type(kBT.to_value()) == float:
kBT = np.array([kBT.to_value()]) * kBT.unit
#========== Replicate to work with grids
f_grid = (np.tile(frequency, [len(kBT),1])).T
t_grid = (np.tile(kBT, [len(frequency),1]))
#========== Function variable
theta = t_grid.to_value('keV')/(const.m_e*const.c**2).to_value('keV')
x = (const.h*f_grid/(const.k_B*cosmo.Tcmb0)).to_value('')
#========== Region where x < 1.2
f1 = x**4 * np.exp(x)/(np.exp(x)-1)**2
xtil = x*(np.exp(x)+1)/(np.exp(x)-1)
s = 2*x/(np.exp(x/2)-np.exp(-x/2))
y0 = xtil-4.0
y1a = -10.+47./2.*xtil-42./5.*xtil**(2.)
y1b = 0.7*xtil**(3.)+s**(2.)*(-21./5.+7./5.*xtil)
y1 = y1a+y1b
y2a = -15/2.+1023./8.*xtil-868./5.*xtil**(2.)
y2b = 329./5.*xtil**(3.)-44./5.*xtil**(4.)
y2c = 11./30.*xtil**(5.)
y2d = -434./5.+658/5.*xtil-242./5.*xtil**(2.)+143./30.*xtil**(3.)
y2e = -44./5.+187./60.*xtil
y2 = y2a+y2b+y2c+s**(2.)*y2d+s**(4.)*y2e
y3a = 15./2.+2505./8.*xtil-7098./5.*xtil**(2.)
y3b = 1425.3*xtil**(3.)-18594./35.*xtil**(4.)
y3c = 12059./140.*xtil**(5.)-128./21.*xtil**(6.)+16./105.*xtil**(7.)
y3d1 = -709.8+14253/5.*xtil-102267./35.*xtil**(2.)
y3d2 = 156767./140.*xtil**(3.)-1216./7.*xtil**(4.)+64./7.*xtil**(5.)
y3d = s**(2.)*(y3d1+y3d2)
y3e1 = -18594./35.+205003./280.*xtil
y3e2 = -1920./7.*xtil**(2.)+1024./35.*xtil**(3.)
y3e = s**(4.)*(y3e1+y3e2)
y3f = s**(6.)*(-544./21.+922./105.*xtil)
y3 = y3a+y3b+y3c+y3d+y3e+y3f
y4a = -135./32.+30375./128.*xtil-6239.1*xtil**(2.)
y4b = 61472.7/4.*xtil**(3.)-12438.9*xtil**(4.)
y4c = 35570.3/8.*xtil**(5.)-16568./21.*xtil**(6.)
y4d = 7516./105.*xtil**(7.)-22./7.*xtil**(8.)+11./210.*xtil**(9.)
y4e1 = -62391./20.+614727./20.*xtil
y4e2 = -1368279./20.*xtil**(2.)+4624139./80.*xtil**(3.)
y4e3 = -157396./7.*xtil**(4.)+30064./7.*xtil**(5.)
y4e4 = -2717./7.*xtil**(6.)+2761./210.*xtil**(7.)
y4e = s**(2.)*(y4e1+y4e2+y4e3+y4e4)
y4f1 = -12438.9+6046951./160.*xtil
y4f2 = -248520./7.*xtil**(2.)+481024./35.*xtil**(3.)
y4f3 = -15972./7.*xtil**(4.)+18689./140.*xtil**(5.)
y4f = s**(4.)*(y4f1+y4f2+y4f3)
y4g1 = -70414./21.+465992./105.*xtil
y4g2 = -11792./7.*xtil**(2.)+19778./105.*xtil**(3.)
y4g = s**(6.)*(y4g1+y4g2)
y4h = s**(8.)*(-682./7.+7601./210.*xtil)
y4 = y4a+y4b+y4c+y4d+y4e+y4f+y4g+y4h
DI_over_tau_over_theta_lt12 = f1*(y0+theta*y1+theta**(2.)*y2+theta**(3.)*y3+theta**(4.)*y4)
#========== Region where x > 1.2 if T > 20.0 keV
Tlim = 20.0
x_0 = 3.830 * (1.0 + 1.1674*theta - 0.8533*theta**2.)
a_ij = np.array([
[[-1.81317E+1+x*0],[ 9.97038E+1+x*0],[-6.07438E+1+x*0],[ 1.05143E+3+x*0],[-2.86734E+3+x*0],[ 7.73353E+3+x*0],[-8.16644E+3+x*0],[-5.37712E+3+x*0],[ 1.52226E+4+x*0],[ 7.18726E+3+x*0],[-1.39548E+4+x*0],[-2.08464E+4+x*0],[ 1.79040E+4+x*0]],
[[ 1.68733E+2+x*0],[-6.07829E+2+x*0],[ 1.14933E+3+x*0],[-2.42382E+2+x*0],[-7.73030E+2+x*0],[ 5.33993E+3+x*0],[-4.03443E+3+x*0],[ 3.00692E+3+x*0],[ 9.58809E+3+x*0],[ 8.16574E+3+x*0],[-6.13322E+3+x*0],[-1.48117E+4+x*0],[ 3.43816E+4+x*0]],
[[-6.69883E+2+x*0],[ 1.59654E+3+x*0],[-3.33375E+3+x*0],[-2.13234E+3+x*0],[-1.80812E+2+x*0],[ 3.75605E+3+x*0],[-4.75180E+3+x*0],[-4.50495E+3+x*0],[ 5.38753E+3+x*0],[ 5.03355E+3+x*0],[-1.18396E+4+x*0],[-8.58473E+3+x*0],[ 3.96316E+4+x*0]],
[[ 1.56222E+3+x*0],[-1.78598E+3+x*0],[ 5.13747E+3+x*0],[ 4.10404E+3+x*0],[ 5.54775E+2+x*0],[-3.89994E+3+x*0],[-1.22455E+3+x*0],[ 1.03747E+3+x*0],[ 4.32237E+3+x*0],[ 1.03805E+3+x*0],[-1.47172E+4+x*0],[-1.23591E+4+x*0],[ 1.77290E+4+x*0]],
[[-2.34712E+3+x*0],[ 2.78197E+2+x*0],[-5.49648E+3+x*0],[-5.94988E+2+x*0],[-1.47060E+3+x*0],[-2.84032E+2+x*0],[-1.15352E+3+x*0],[-1.17893E+3+x*0],[ 7.01209E+3+x*0],[ 4.75631E+3+x*0],[-5.13807E+3+x*0],[-8.73615E+3+x*0],[ 9.41580E+3+x*0]],
[[ 1.92894E+3+x*0],[ 1.17970E+3+x*0],[ 3.13650E+3+x*0],[-2.91121E+2+x*0],[-1.15006E+3+x*0],[ 4.17375E+3+x*0],[-3.31788E+2+x*0],[ 1.37973E+3+x*0],[-2.48966E+3+x*0],[ 4.82005E+3+x*0],[-1.06121E+4+x*0],[-1.19394E+4+x*0],[ 1.34908E+4+x*0]],
[[ 6.40881E+2+x*0],[-6.81789E+2+x*0],[ 1.20037E+3+x*0],[-3.27298E+3+x*0],[ 1.02988E+2+x*0],[ 2.03514E+3+x*0],[-2.80502E+3+x*0],[ 8.83880E+2+x*0],[ 1.68409E+3+x*0],[ 4.26227E+3+x*0],[-6.37868E+3+x*0],[-1.11597E+4+x*0],[ 1.46861E+4+x*0]],
[[-4.02494E+3+x*0],[-1.37983E+3+x*0],[-1.65623E+3+x*0],[ 7.36120E+1+x*0],[ 2.66656E+3+x*0],[-2.30516E+3+x*0],[ 5.22182E+3+x*0],[-8.53317E+3+x*0],[ 3.75800E+2+x*0],[ 8.49249E+2+x*0],[-6.88736E+3+x*0],[-1.01475E+4+x*0],[ 4.75820E+3+x*0]],
[[ 4.59247E+3+x*0],[ 3.04203E+3+x*0],[-2.11039E+3+x*0],[ 1.32383E+3+x*0],[ 1.10646E+3+x*0],[-3.53827E+3+x*0],[-1.12073E+3+x*0],[-5.47633E+3+x*0],[ 9.85745E+3+x*0],[ 5.72138E+3+x*0],[ 6.86444E+3+x*0],[-5.72696E+3+x*0],[ 1.29053E+3+x*0]],
[[-1.61848E+3+x*0],[-1.83704E+3+x*0],[ 2.06738E+3+x*0],[ 4.00292E+3+x*0],[-3.72824E+1+x*0],[ 9.10086E+2+x*0],[ 3.72526E+3+x*0],[ 3.41895E+3+x*0],[ 1.31241E+3+x*0],[ 6.68089E+3+x*0],[-4.34269E+3+x*0],[-5.42296E+3+x*0],[ 2.83445E+3+x*0]],
[[-1.00239E+3+x*0],[-1.24281E+3+x*0],[ 2.46998E+3+x*0],[-4.25837E+3+x*0],[-1.83515E+2+x*0],[-6.47138E+2+x*0],[-7.35806E+3+x*0],[-1.50866E+3+x*0],[-2.47275E+3+x*0],[ 9.09399E+3+x*0],[-2.75851E+3+x*0],[-6.75104E+3+x*0],[ 7.00899E+2+x*0]],
[[ 1.04911E+3+x*0],[ 2.07475E+3+x*0],[-3.83953E+3+x*0],[ 7.79924E+2+x*0],[-4.08658E+3+x*0],[ 4.43432E+3+x*0],[ 3.23015E+2+x*0],[ 6.16180E+3+x*0],[-1.00851E+4+x*0],[ 7.65063E+3+x*0],[ 1.52880E+3+x*0],[-6.08330E+3+x*0],[ 1.23369E+3+x*0]],
[[-2.61041E+2+x*0],[-7.22803E+2+x*0],[ 1.34581E+3+x*0],[ 5.90851E+2+x*0],[ 3.32198E+2+x*0],[ 2.58340E+3+x*0],[-5.97604E+2+x*0],[-4.34018E+3+x*0],[-3.58925E+3+x*0],[ 2.59165E+3+x*0],[ 6.76140E+3+x*0],[-6.22138E+3+x*0],[ 4.40668E+3+x*0]]
])[:,:,0,:]
theta_ei = np.array([
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]],
[[x*0+(10*theta)**0.],[x*0+(10*theta)**1.],[x*0+(10*theta)**2.],[x*0+(10*theta)**3.],[x*0+(10*theta)**4.],[x*0+(10*theta)**5.],[x*0+(10*theta)**6.],[x*0+(10*theta)**7.],[x*0+(10*theta)**8.],[x*0+(10*theta)**9.],[x*0+(10*theta)**10.],[x*0+(10*theta)**11.],[x*0+(10*theta)**12.]]
])[:,:,0,:,:]
theta_ei = np.transpose(theta_ei, (1,0,2,3))
Zj = np.array([
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]],
[[(0.05*x)**0.],[(0.05*x)**1.],[(0.05*x)**2.],[(0.05*x)**3.],[(0.05*x)**4.],[(0.05*x)**5.],[(0.05*x)**6.],[(0.05*x)**7.],[(0.05*x)**8.],[(0.05*x)**9.],[(0.05*x)**10.],[(0.05*x)**11.],[(0.05*x)**12.]]
])[:,:,0,:,:]
G_theta_x = np.sum(np.sum(a_ij*theta_ei*Zj, 1), 0)
DI_over_tau_over_theta_gt12 = x**2.0 * np.exp(-x) * (x-x_0) * G_theta_x
#========== Pick the region
f_nu = DI_over_tau_over_theta_lt12
    w_gt12 = (x > 1.2) * (t_grid > Tlim*u.keV)
f_nu[w_gt12] = DI_over_tau_over_theta_gt12[w_gt12]
return f_nu
| 2.8125
| 3
|
mowl/embeddings/translational/model.py
|
bio-ontology-research-group/OntoML
| 0
|
12781784
|
<gh_stars>0
from mowl.model import EmbeddingModel
from mowl.projection.factory import projector_factory
from mowl.projection.edge import Edge
#PyKEEN imports
from pykeen.triples import CoreTriplesFactory
from pykeen.models import TransE, TransH, TransR, TransD
from pykeen.training import SLCWATrainingLoop
from pykeen.evaluation import RankBasedEvaluator
import torch as th
from torch.optim import Adam
import logging
logging.basicConfig(level=logging.DEBUG)
class TranslationalOnt():
'''
:param edges: List of edges
:type edges: mowl.projection.edge.Edge
:param trans_method: Translational model. Choices are: "transE", "transH", "transR", "transD".
:type trans_method: str
:param embedding_dim: Dimension of embedding for each node
:type embedding_dim: int
:param epochs: Number of epochs
:type epochs: int
'''
def __init__(self,
edges,
trans_method="transE",
embedding_dim = 50,
epochs = 5,
batch_size = 32
):
self.edges = edges
self.trans_method = trans_method
self.embedding_dim = embedding_dim
self.epochs = epochs
self.batch_size = batch_size
self.model = None
self._trained = False
def train(self):
entities, relations = Edge.getEntitiesAndRelations(self.edges)
logging.debug("Number of ontology classes: %d, relations %d.", len(entities), len(relations))
self.entities_idx = {ent: idx for idx, ent in enumerate(entities)}
self.relations_idx = {rel: idx for idx, rel in enumerate(relations)}
mapped_triples = [(self.entities_idx[e.src()], self.relations_idx[e.rel()], self.entities_idx[e.dst()]) for e in self.edges]
mapped_triples = th.tensor(mapped_triples).long()
triples_factory = CoreTriplesFactory(mapped_triples, len(entities), len(relations), self.entities_idx, self.relations_idx)
self.model = self.trans_factory(self.trans_method, triples_factory, self.embedding_dim)
optimizer = Adam(params=self.model.get_grad_params())
training_loop = SLCWATrainingLoop(model=self.model, triples_factory=triples_factory, optimizer=optimizer)
_ = training_loop.train(triples_factory=triples_factory, num_epochs=self.epochs, batch_size=self.batch_size)
self._trained = True
def get_embeddings(self):
if not self._trained:
raise ValueError("Model has not been trained yet")
embeddings = self.model.entity_representations[0](indices = None).cpu().detach().numpy()
embeddings = {item[0]: embeddings[item[1]] for item in self.entities_idx.items()}
return embeddings
def trans_factory(self, method_name, triples_factory, embedding_dim):
methods = {
"transE": TransE,
"transH": TransH,
"transR": TransR,
"transD": TransD
}
if method_name in methods:
return methods[method_name](triples_factory=triples_factory, embedding_dim=embedding_dim)
else:
raise Exception(f"Method name unrecognized. Recognized methods are: {methods}")
| 2.03125
| 2
|
flask_saestorage.py
|
csuzhangxc/Flask-SaeStorage
| 11
|
12781785
|
# -*- coding: utf-8 -*-
from sae import storage
class SaeStorage(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
access_key = app.config.get('SAE_ACCESS_KEY', storage.ACCESS_KEY)
secret_key = app.config.get('SAE_SECRET_KEY', storage.SECRET_KEY)
app_name = app.config.get('SAE_APP_NAME', storage.APP_NAME)
bucket_name = app.config.get('SAE_BUCKET_NAME', '')
connection = storage.Connection(access_key, secret_key, app_name)
self._bucket = connection.get_bucket(bucket_name)
def save(self, data, filename):
return self._bucket.put_object(filename, data)
def delete(self, filename):
return self._bucket.delete_object(filename)
def url(self, filename):
return self._bucket.generate_url(filename)
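# Usage sketch (illustrative only; config keys match those read in init_app):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.update(SAE_ACCESS_KEY='...', SAE_SECRET_KEY='...',
#                     SAE_APP_NAME='myapp', SAE_BUCKET_NAME='mybucket')
#   sae_storage = SaeStorage(app)
#   url = sae_storage.url('hello.txt')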
| 2.53125
| 3
|
cassle/methods/vicreg.py
|
DonkeyShot21/cassle
| 13
|
12781786
|
<filename>cassle/methods/vicreg.py
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
from cassle.losses.vicreg import vicreg_loss_func
from cassle.methods.base import BaseModel
class VICReg(BaseModel):
def __init__(
self,
output_dim: int,
proj_hidden_dim: int,
sim_loss_weight: float,
var_loss_weight: float,
cov_loss_weight: float,
**kwargs
):
"""Implements VICReg (https://arxiv.org/abs/2105.04906)
Args:
output_dim (int): number of dimensions of the projected features.
proj_hidden_dim (int): number of neurons in the hidden layers of the projector.
sim_loss_weight (float): weight of the invariance term.
var_loss_weight (float): weight of the variance term.
cov_loss_weight (float): weight of the covariance term.
"""
super().__init__(**kwargs)
self.sim_loss_weight = sim_loss_weight
self.var_loss_weight = var_loss_weight
self.cov_loss_weight = cov_loss_weight
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, output_dim),
)
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(VICReg, VICReg).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("vicreg")
# projector
parser.add_argument("--output_dim", type=int, default=2048)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# parameters
parser.add_argument("--sim_loss_weight", default=25, type=float)
parser.add_argument("--var_loss_weight", default=25, type=float)
parser.add_argument("--cov_loss_weight", default=1.0, type=float)
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [{"params": self.projector.parameters()}]
return super().learnable_params + extra_learnable_params
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the encoder and the projector.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
"""
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
return {**out, "z": z}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""Training step for VICReg reusing BaseModel training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size self.num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of VICReg loss and classification loss.
"""
out = super().training_step(batch, batch_idx)
feats1, feats2 = out["feats"]
z1 = self.projector(feats1)
z2 = self.projector(feats2)
        # ------- vicreg loss -------
vicreg_loss = vicreg_loss_func(
z1,
z2,
sim_loss_weight=self.sim_loss_weight,
var_loss_weight=self.var_loss_weight,
cov_loss_weight=self.cov_loss_weight,
)
self.log("train_vicreg_loss", vicreg_loss, on_epoch=True, sync_dist=True)
out.update({"loss": out["loss"] + vicreg_loss, "z": [z1, z2]})
return out
| 2.328125
| 2
|
bin/physiboss.py
|
sysbio-curie/pb4covid19
| 1
|
12781787
|
# PhysiBoSS Tab
import os
from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, \
FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output
from collections import deque, Counter
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
import numpy as np
import glob
import platform
import csv
import itertools
import copy
import scipy.io
# from debug import debug_view
class PhysiBoSSTab(object):
def __init__(self):
# tab_height = '520px'
# tab_layout = Layout(width='900px', # border='2px solid black',
# height=tab_height, overflow_y='scroll')
self.output_dir = '.'
self.figsize_width = 15.0 # allow extra for colormap
self.figsize_height = 8
constWidth = '180px'
# self.fig = plt.figure(figsize=(6, 6))
# self.fig = plt.figure(figsize=(7, 7))
config_file = "data/PhysiCell_settings.xml"
self.cell_lines = {}
self.cell_lines_by_name = {}
self.cell_lines_array = ["All"]
if os.path.isfile(config_file):
try:
tree = ET.parse(config_file)
except:
print("Cannot parse",config_file, "- check it's XML syntax.")
return
root = tree.getroot()
uep = root.find('.//cell_definitions') # find unique entry point (uep)
for child in uep.findall('cell_definition'):
self.cell_lines[int(child.attrib["ID"])] = child.attrib["name"]
self.cell_lines_by_name[child.attrib["name"]] = int(child.attrib["ID"])
self.cell_lines_array.append(child.attrib["name"])
# print(child.attrib['name'])
else:
print("config.xml does not exist")
max_frames = 0
self.svg_plot = interactive(self.create_area_chart, frame=(0, max_frames), percentage=(0.0, 10.0), total=False, cell_line=self.cell_lines_array, continuous_update=False)
plot_size = '500px' # small: controls the size of the tab height, not the plot (rf. figsize for that)
plot_size = '700px' # medium
plot_size = '750px' # medium
self.svg_plot.layout.width = '1000px'
self.svg_plot.layout.height = '700px'
self.use_defaults = True
self.axes_min = 0.0
self.axes_max = 2000 # hmm, this can change (TODO?)
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='Max',
layout=Layout(width='160px'),
# layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
items_auto = [Label('select slider: drag or left/right arrows'),
self.max_frames,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='900px')
row1 = Box(children=items_auto, layout=box_layout)
self.tab = VBox([row1, self.svg_plot])
self.count_dict = {}
self.file_dict = {}
self.cells_indexes = np.zeros((0))
self.up_to_frame = 0
def update(self, rdir=''):
# with debug_view:
# print("SVG: update rdir=", rdir)
if rdir:
self.output_dir = rdir
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snapshot*.svg')))
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
# self.create_dict(self.max_frames.value, self.output_dir)
# self.state_counter(self.max_frames.value)
# with debug_view:
# print("SVG: added %s files" % len(all_files))
def update_max_frames(self,_b):
self.svg_plot.children[0].max = self.max_frames.value
def create_dict(self, number_of_files, folder):
"create a dictionary with the states file in the folder 'output', half of the dict is used to calculate the percentage of the node, the other half is for the states"
if number_of_files > 0:
for i in range (0, number_of_files):
if "state_step{0}".format(i) not in self.file_dict.keys():
states_dict = {}
with open(os.path.join(self.output_dir, 'states_%08u.csv' % i), newline='') as csvfile:
states_reader = csv.reader(csvfile, delimiter=',')
for row in states_reader:
if row[0] != 'ID':
states_dict[int(row[0])] = row[1]
self.file_dict["state_step{0}".format(i)] = states_dict
def state_counter(self, number_of_files, percentage, cell_indexes, cell_line):
"create a dict with the states of the network, it can be used to print states pie chart"
self.count_dict = {}
temp_dict = {}
max_cell = 0
if number_of_files > 0:
for i in range (0, number_of_files):
state_list = []
for key in self.file_dict["state_step{0}".format(i)]:
if cell_line == 'All' or self.cells_indexes[key] == self.cell_lines_by_name[cell_line]:
state_list.append(self.file_dict["state_step{0}".format(i)][key])
state_counts = Counter(state_list)
max_cell = max_cell + sum(state_counts.values())
temp_dict["state_count{0}".format(i)] = state_counts
self.count_dict = self.filter_states(max_cell, temp_dict, percentage)
def create_cell_indexes(self, frame, cell_line):
for i in range(self.up_to_frame, frame):
fname = "output%08d_cells_physicell.mat" % i
full_fname = os.path.join(self.output_dir, fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
info_dict = {}
scipy.io.loadmat(full_fname, info_dict)
M = info_dict['cells'][[0,5], :].astype(int)
self.cells_indexes.resize((max(self.cells_indexes.shape[0], M[0, :].max(axis=0)+1)))
self.cells_indexes[M[0, :]] = M[1, :]
self.up_to_frame = frame
return self.cells_indexes
def create_area_chart(self, frame=None, total=False, percentage=(0.0, 100.0), cell_line="All"):
"plot an area chart with the evolution of the network states during the simulation"
cells_indexes = None
if cell_line != "All":
cells_indexes = self.create_cell_indexes(frame, cell_line)
if np.sum(cells_indexes == self.cell_lines_by_name[cell_line]) == 0:
print("There are no %s cells." % cell_line)
return
self.create_dict(frame, self.output_dir)
self.state_counter(frame, percentage, cells_indexes, cell_line)
state_list = []
all_state = []
a = []
for k in self.count_dict:
state_list.append([key for key, value in self.count_dict[k].items() if value > 0])
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
for state_count in self.count_dict:
b = []
for states in all_state:
try:
b.append(self.count_dict[state_count][states])
except:
b.append(0)
a.append(b)
a = np.array(a)
#print(a)
a = np.transpose(a)
if not total:
percent = a / a.sum(axis=0).astype(float) * 100
else:
percent = a
x = np.arange(len(self.count_dict))
self.fig = plt.figure(figsize=(self.figsize_width, self.figsize_height))
ax = self.fig.add_subplot(111)
ax.stackplot(x, percent, labels=all_state)
ax.legend(labels=all_state, loc='upper center', bbox_to_anchor=(0.5, -0.05),shadow=True, ncol=2)
# ax.legend(labels=all_state, bbox_to_anchor=(1.05, 1), loc='lower center', borderaxespad=0.)
if not total:
ax.set_ylabel('Percent (%)')
else:
ax.set_ylabel("Total")
ax.margins(0, 0) # Set margins to avoid "whitespace"
# plt.show()
def filter_states(self, max_cell, all_counts, percentage):
"""max_cell = 0
all_counts = {}
for i in range (0, number_of_files):
state_list = []
for key in file_dict["state_step{0}".format(i)]:
state_list.append(file_dict["state_step{0}".format(i)][key])
state_counts = Counter(state_list)
max_cell = max_cell + sum(state_counts.values())
all_counts[i] = state_counts"""
copy_all_counts = copy.deepcopy(all_counts)
state_list = []
all_state = []
for k in all_counts:
state_list.append(list(all_counts[k].keys()))
for l in state_list:
for state in l:
all_state.append(state)
all_state = list(dict.fromkeys(all_state))
banned_list = []
for state in all_state:
a = 0
for i in all_counts.keys():
try:
a = a + all_counts[i][state]
except:
a = a + 0
if (a < (percentage/100) * max_cell):
banned_list.append(state)
for i in all_counts.keys():
del all_counts[i][state]
for i in all_counts.keys():
b = 0
for state in banned_list:
try:
b = b + copy_all_counts[i][state]
except:
b = b + 0
all_counts[i]["others"] = b
return all_counts
| 2.09375
| 2
|
src/compas_assembly/geometry/_geometry.py
|
GeneKao/compas_assembly
| 0
|
12781788
|
<reponame>GeneKao/compas_assembly
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = ['Geometry']
class Geometry(object):
def __init__(self):
pass
def blocks(self):
raise NotImplementedError
def interfaces(self):
raise NotImplementedError
def to_blocks_and_interfaces(self):
"""Convert the geometry to a list of block meshes,
and a list of block index pairs representing connections or interfaces.
Returns
-------
tuple
0. List of meshes representing the block geometries.
            1. List of block index pairs representing the connections or interfaces between blocks.
"""
raise NotImplementedError
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| 2.375
| 2
|
src/harvest/posts.py
|
fhgr/harvest
| 5
|
12781789
|
<filename>src/harvest/posts.py
#!/usr/bin/env python3
# Forum Extraction AI and heuristic
# ---------------------------------
# (C)opyrights 2020 <NAME>
# simplifications:
# ================
# - only consider tags with a class attribute
# - vsm based on the hashing trick
# algorithm
# =========
# - match text to xpath nodes
# - extract the text based on the xpath nodes and determine the best match
# based on the node + its children
# - from the best match that yields multiple results (i.e. forum posts)
# select node parent elements as long as we still get the same number of
# results.
# - constraints
# - blocked tags are not allowed to appear down- or upstream of the selected
# path (e.g. it is not possible that a forum post contains a 'form' or
# 'input' element :)
# - there are forums that are contained in a form tag ....
# cleanup posts
# -------------
# * remove repeated elements
# * appear at the beginning or end of a post
# * may contain information on
# - user
# - date (subscription versus post date) => always compare dates within a page for computing the date extraction rule
# - replies, likes, etc.
import logging
import re
from lxml import etree
from harvest.cleanup.forum_post import remove_boilerplate
from harvest.metadata.date import get_date
from harvest.metadata.link import get_link
from harvest.metadata.username import get_user
from harvest.metadata.usertext import get_text_xpath_pattern
from harvest.post_text import get_cleaned_text
from harvest.similarity_calculator import assess_node
from harvest.utils import (get_xpath_expression, get_html_dom, get_xpath_combinations_for_classes,
get_xpath_tree_text, get_grandparent, elements_have_no_overlap)
CORPUS = "./data/forum/"
# number of characters required for a match
MATCH_PREFIX_SIZE = 30
BLACKLIST_POST_TEXT_TAG = ('h1', 'h2', 'h3', 'h4', 'h5', 'a')
# minimum number of posts we suspect on the page
MIN_POST_COUNT = 3
def _get_matching_element(comment, dom):
"""
returns
-------
the element that matches the given comment
"""
if not comment.strip():
return None
for e in dom.iter():
text = (e.text or "").strip()
min_length_of_text = len(comment[:MATCH_PREFIX_SIZE])
if text and comment.startswith(text[:MATCH_PREFIX_SIZE]) and len(text) >= min_length_of_text and \
e.tag is not etree.Comment:
return e
return None
def _get_xpath_tree(comment, dom, tree):
element = _get_matching_element(comment, dom)
return (None, None) if element is None else (element, tree.getpath(element))
def _remove_trailing_p_element(xpath_score, xpath_element_count, xpath, reference_text, dom):
"""
The p elements at the end can be removed. Some posts have several p elements and some have none at all.
Those without p element can then not be detected. As Example, leading post can not be detected:
https://us.forums.blizzard.com/en/wow/t/layers-and-character-creation-adjustments-on-select-realms/499760
Args:
xpath: the xpath to remove the p element from
Returns:
"""
cleaned_xpath = re.sub(r'(?<!([\/]))\/p$', '', xpath)
if cleaned_xpath != xpath:
xpath_score, xpath_element_count = assess_node(reference_content=reference_text, dom=dom,
xpath=cleaned_xpath)
return xpath_score, xpath_element_count, cleaned_xpath
def _get_xpaths_candidates(text_sections, dom, tree, reference_text):
candidate_xpaths = []
for section_text in text_sections:
element, xpath = _get_xpath_tree(section_text, dom, tree)
logging.debug(f"Processing section of text '{section_text}' with xpath '{xpath}'.")
if not xpath:
continue
element = _get_matching_element(section_text, dom)
if element.tag not in BLACKLIST_POST_TEXT_TAG:
xpath_pattern = get_xpath_expression(element, parent_element=get_grandparent(element),
single_class_filter=True)
xpath_score, xpath_element_count = assess_node(reference_content=reference_text, dom=dom,
xpath=xpath_pattern, reward_classes=True)
if xpath_element_count > 1:
candidate_xpaths.append((xpath_score, xpath_element_count, xpath_pattern))
return candidate_xpaths
def _get_post_frame(xpath_pattern, xpath_score, reference_text, dom):
while True:
new_xpath_pattern = xpath_pattern + "/.."
new_xpath_score, new_xpath_element_count = assess_node(reference_content=reference_text, dom=dom,
xpath=new_xpath_pattern)
if new_xpath_element_count < MIN_POST_COUNT:
return xpath_pattern, xpath_score
xpath_pattern = new_xpath_pattern
xpath_score = new_xpath_score
def _get_combination_of_posts(xpath_pattern, xpath_score, xpath_element_count, reference_text, dom):
"""
Check if combinations of classes result in detecting leading post
Args:
xpath_pattern:
xpath_score:
xpath_element_count:
reference_text:
dom:
Returns:
Combination of classes if they resulting in a better score. Otherwise the parameters xpath_patter, xpath_score and
xpath_element_count are returned.
"""
candidate_xpaths = []
for final_xpath in get_xpath_combinations_for_classes(xpath_pattern):
new_xpath_score, new_xpath_element_count = assess_node(reference_content=reference_text, dom=dom,
xpath=final_xpath)
if (xpath_element_count < new_xpath_element_count <= xpath_element_count + 2 or
xpath_element_count * 2 - new_xpath_element_count in range(-1, 2)) and new_xpath_score > xpath_score:
if elements_have_no_overlap(dom.xpath(final_xpath)):
candidate_xpaths.append((new_xpath_score, new_xpath_element_count, final_xpath))
if candidate_xpaths:
candidate_xpaths.sort()
return candidate_xpaths.pop()
return xpath_score, xpath_element_count, xpath_pattern
def extract_posts(html, url):
dom = get_html_dom(html)
tree = etree.ElementTree(dom)
result = {'url': url, 'dragnet': None, 'url_xpath_pattern': None, 'xpath_pattern': None,
'xpath_score': None, 'forum_posts': None, 'date_xpath_pattern': None, 'user_xpath_pattern': None,
'text_xpath_pattern': None}
text_sections = get_cleaned_text(html)
logging.debug(f"Extracted {len(text_sections)} lines of comments.")
reference_text = " ".join(text_sections)
candidate_xpaths = _get_xpaths_candidates(text_sections, dom, tree, reference_text)
if not candidate_xpaths:
logging.warning("Couldn't identify any candidate posts for forum", url)
return result
# obtain anchor node
candidate_xpaths.sort()
xpath_score, xpath_element_count, xpath_pattern = candidate_xpaths.pop()
xpath_score, xpath_element_count, xpath_pattern = _remove_trailing_p_element(xpath_score, xpath_element_count,
xpath_pattern, reference_text, dom)
xpath_pattern, xpath_score = _get_post_frame(xpath_pattern, xpath_score, reference_text, dom)
xpath_score, xpath_element_count, xpath_pattern = _get_combination_of_posts(xpath_pattern, xpath_score,
xpath_element_count, reference_text,
dom)
logging.info(
f"Obtained most likely forum xpath for forum {url}: {xpath_pattern} with a score of {xpath_score}.")
if xpath_pattern:
forum_posts = get_xpath_tree_text(dom, xpath_pattern)
forum_posts = remove_boilerplate(forum_posts)
result['xpath_pattern'] = xpath_pattern
result['xpath_score'] = xpath_score
result['forum_posts'] = forum_posts
if xpath_pattern:
result['text_xpath_pattern'] = get_text_xpath_pattern(dom, xpath_pattern, forum_posts)
# add the post URL
url_xpath_pattern = get_link(dom, xpath_pattern, url, forum_posts)
if url_xpath_pattern:
result['url_xpath_pattern'] = url_xpath_pattern
# add the post Date
date_xpath_pattern = get_date(dom, xpath_pattern, url, forum_posts)
if date_xpath_pattern:
result['date_xpath_pattern'] = date_xpath_pattern
# add the post user
user_xpath_pattern = get_user(dom, xpath_pattern, url, forum_posts)
if user_xpath_pattern:
result['user_xpath_pattern'] = user_xpath_pattern
return result
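# Usage sketch (illustrative only): `html` is the raw page source, `url` its address.
#   result = extract_posts(html, url)
#   result['xpath_pattern']       # xpath matching the post containers
#   result['forum_posts']         # boilerplate-free post texts
#   result['date_xpath_pattern']  # (and user/url/text patterns) metadata extraction rules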
| 2.046875
| 2
|
python/param_utils.py
|
MeshFEM/MeshFEM
| 19
|
12781790
|
import enum
import mesh
from tri_mesh_viewer import TriMeshViewer
import parametrization
from matplotlib import pyplot as plt
def analysisPlots(m, uvs, figsize=(8,4), bins=200):
plt.figure(figsize=figsize)
plt.subplot(1, 2, 1)
for label, uv in uvs.items():
distortion = parametrization.conformalDistortion(m, uv)
plt.hist(distortion, bins=bins, alpha=0.5, label=label)
plt.title('Quasi-conformal Distortion Error Q - 1')
plt.legend()
plt.subplot(1, 2, 2)
for label, uv in uvs.items():
scaleFactor = parametrization.scaleFactor(m, uv)
plt.hist(scaleFactor, bins=bins, alpha=0.5, label=label)
plt.title('Scale Factors')
plt.legend()
plt.tight_layout()
def analysisPlotsGrid(m, uvs, figsize=(8,6), bins=200):
plt.figure(figsize=figsize)
nrows = len(uvs)
for i, (label, uv) in enumerate(uvs.items()):
plt.subplot(nrows, 2, 1 + 2 * i)
distortion = parametrization.conformalDistortion(m, uv)
plt.hist(distortion, bins=bins, alpha=1.0)
plt.title(f'{label} Quasi-conformal Distortion Q - 1')
plt.subplot(nrows, 2, 2 + 2 * i)
scaleFactor = parametrization.scaleFactor(m, uv)
plt.hist(scaleFactor, bins=bins, alpha=1.0)
plt.title(f'{label} Scale Factors')
plt.tight_layout()
class AnalysisField(enum.Enum):
NONE = 1
SCALE = 2
DISTORTION = 3
class ParametrizationViewer:
def __init__(self, m, uv):
self.m = m
self.view_3d = TriMeshViewer(m, wireframe=True)
self.view_2d = None
self.field = AnalysisField.DISTORTION
self.update_parametrization(uv)
def displayField(self, field, updateModelMatrix=False):
self.field = field
sf = None
if (self.field == AnalysisField.DISTORTION): sf = self.distortion
if (self.field == AnalysisField.SCALE ): sf = self.scaleFactor
self.view_2d.update(preserveExisting=False, updateModelMatrix=updateModelMatrix, mesh=self.mflat, scalarField=sf)
def update_parametrization(self, uv, updateModelMatrix=False):
self.mflat = mesh.Mesh(uv, self.m.elements())
if (self.view_2d is None): self.view_2d = TriMeshViewer(self.mflat, wireframe=True)
self.distortion = parametrization.conformalDistortion(self.m, uv)
self.scaleFactor = parametrization.scaleFactor(self.m, uv)
self.displayField(self.field, updateModelMatrix=updateModelMatrix)
def show(self):
from ipywidgets import HBox
return HBox([self.view_3d.show(), self.view_2d.show()])
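# Usage sketch (illustrative only; `m` is a mesh.Mesh and `uv` a parametrization of it):
#   viewer = ParametrizationViewer(m, uv)
#   viewer.displayField(AnalysisField.SCALE)
#   viewer.show()   # HBox with the 3D mesh view and the flattened 2D view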
| 2.109375
| 2
|
week_0_to_2/tree_analysis/hw0pr2/hw0pr2Answers.py
|
ScriptingBeyondCS/CS-35
| 0
|
12781791
|
import os
import os.path
import shutil
# 1
def countFilesOfType(top, extension):
"""inputs: top: a String directory
extension: a String file extension
returns a count of files with a given extension in the directory
top and its subdirectories"""
count = 0
filenames = [x[2] for x in os.walk(top)]
for fileList in filenames:
for file in fileList:
if file.endswith(extension):
count += 1
return count
# 2 & 3
def findMaxDepth(top):
"""inputs: top: a String directory
    returns a tuple: (maximum directory depth within top, path to the deepest directory)
"""
return findMaxDepthHelper(top, [])
def findMaxDepthHelper(top, pathList):
maxDepth = 0
maxPath = getMaxSlashes(pathList)
depth = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_dir():
nextDepth, maxPath = findMaxDepthHelper(entry.path, pathList)
pathList += [entry.path]
depth = 1 + nextDepth
if depth > maxDepth:
maxDepth = depth
return maxDepth, maxPath
# 4
def countHaveTenDigits(top):
"""inputs: top: a String directory
returns the number of files within top and its subdirectories that
have a 10 digit phone number
"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
if countDigits(data) == 10:
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countHaveTenDigits(entry.path)
else:
pass
return count
# 5
def count909AreaCode(top):
"""inputs: top: a String directory
returns number of files within top directory and its subdirectories
that have a 909 area code"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = makeDigitString(data)
if newData[0:3] == '909':
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += count909AreaCode(entry.path)
else:
pass
return count
# 6
def countLastName(top, name):
"""inputs: top: a String directory
name: a last name
returns a count of files within top and subdirectories
that have a given last name"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if newData.startswith(name):
count += 1
else:
if newData.endswith(name):
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countLastName(entry.path, name)
else:
pass
return count
# 7
def countFirstName(top, name):
"""inputs: top: a String directory
name: a first name
returns a count of files within top and its subdirectories
that have a given first name"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if newData.endswith(name):
count += 1
else:
if newData.startswith(name):
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countFirstName(entry.path, name)
else:
pass
return count
# 8
def countInitials(top, firstInit, lastInit):
"""inputs: top: a String directory
firstInit: the name's first initial
lastInit: the name's last initial
returns a count of files within top and its subdirectories
that have a name with the given initials"""
count = 0
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
newData = getName(data)
if ',' in newData:
if getOneAfterSpace(newData) == firstInit and newData[0] == lastInit:
count += 1
else:
if getOneAfterSpace(newData) == lastInit and newData[0] == firstInit:
count += 1
elif entry.is_dir(): # repeat if entry is a directory
count += countInitials(entry.path, firstInit, lastInit)
else:
pass
return count
# 9
def diffFirstName(top):
"""inputs: top: a String directory
returns a number of unique first names in
files of top and its subdirectories"""
return diffFirstNameHelper(top, [])
def diffFirstNameHelper(top, nameList):
allEntries = os.scandir(top) # all files and directories in top
for entry in allEntries:
if entry.is_file():
try:
f = open(entry.path, 'r') # open and read file (used .path to prevent changing directories)
data = f.read()
f.close()
except PermissionError:
data = ""
except UnicodeDecodeError:
data = ""
except TypeError:
data = ""
firstName = getFirstName(data)
if firstName not in nameList and firstName != None:
nameList += [firstName]
elif entry.is_dir(): # repeat if entry is a directory
diffFirstNameHelper(entry.path, nameList)
else:
pass
return len(nameList)
# HELPER FUNCTIONS
def getMaxSlashes(L):
maxCount = 0
index = 0
if L == []:
return ''
else:
for i in range(len(L)):
count = 0
for char in L[i]:
if char == '/':
count += 1
if count > maxCount:
maxCount = count
index = i
return L[index]
def countDigits(string):
"""return number of digits in a string (Helper for countHaveTenDigits)"""
count = 0
for char in string:
if char == '0' or char == '1' or char == '2' or char == '3' or char == '4' or \
char == '5' or char == '6' or char == '7' or char == '8' or char == '9':
count += 1
return count
def makeDigitString(string):
"""Gathers all digits and returns them in a continuous string
(Helper for count909AreaCode)"""
newString = ''
for char in string:
if char in '0123456789':
newString += char
return newString
def getOneAfterSpace(string):
"""returns next character after a space in given string
(Helper for countInitials)"""
for i in range(len(string)):
if string[i] == ' ':
return string[i+1]
return ''
def getAllAfterSpace(string):
"""returns all characters after a space in given string
(Helper for getFirstName)"""
result = ''
for i in range(len(string)):
if string[i] == ' ':
return string[i+1:]
return ''
def getAllBeforeSpace(string):
"""returns all characters before a space in given string
(Helper for getFirstName)"""
result = ''
for i in range(len(string)):
if string[i] == ' ':
return string[:i]
def getName(string):
"""Grab the name as written in files (Helper)"""
newString = ''
reachedLetter = False
for char in string:
if char in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
reachedLetter = True
if reachedLetter == True and char == '\n':
break
if reachedLetter == True:
newString += char
return newString
def getFirstName(string):
"""return the first name (Helper for diffFirstName)"""
name = getName(string)
if ',' in name:
return getAllAfterSpace(name)
return getAllBeforeSpace(name)
# MAIN
def main():
print(countFilesOfType('phone_files', '.txt')) # 9893
print(findMaxDepth('phone_files')) # 4, 'phone_files/86/Hidden/Deeper/Deepest'
print(countHaveTenDigits('phone_files')) # 3988
print(count909AreaCode('phone_files')) # 17
print(countLastName('phone_files', 'DAVIS')) # 224
print(countFirstName('phone_files', 'DAVIS'))# 3
print(countInitials('phone_files', 'J', 'S')) # 105
print(diffFirstName('phone_files'))# 224
# got some inconsistent answers first Name, last Name not working
if __name__ == "__main__":
main()
| 3.90625
| 4
|
PCRC-MCDR.py
|
MiKayule/PCRC-MCDR
| 0
|
12781792
|
# -*- coding: utf-8 -*-
import sys
import importlib
import time
sys.path.append('plugins/')
PCRC = None
PREFIX = '!!PCRC'
# 0=guest 1=user 2=helper 3=admin
Permission = 1
def permission(server, info, perm):
if info.is_user:
if info.source == 1:
return True
elif server.get_permission_level(info) >= perm:
return True
return False
def load_PCRC():
global PCRC
PCRC = importlib.import_module('PCRC-MCDR.PCRC')
def on_info(server, info):
if permission(server, info, Permission) and info.content == '!!PCRC start':
server.reply(info, 'Starting PCRC')
if PCRC.is_working():
server.reply(info, 'PCRC is already running!')
else:
PCRC.start()
if info.source == 1 and info.content == '!!PCRC stop':
if PCRC.is_working():
PCRC.stop()
else:
server.reply(info, 'PCRC is not running!')
def on_load(server, old):
global PCRC
try:
if old is not None and old.PCRC is not None and old.PCRC.is_working():
PCRC = old.PCRC
else:
load_PCRC()
except:
load_PCRC()
def on_mcdr_stop(server):
global PCRC
if PCRC is None:
return
if PCRC.is_working():
PCRC.stop()
else:
for i in range(600):
if not PCRC.is_stopped():
server.logger.info('Waiting for PCRC to stop')
for i in range(10):
if not PCRC.is_stopped():
time.sleep(0.1)
if not PCRC.is_stopped():
server.logger.info('PCRC took too long to stop (more than 10min)! Exit anyway')
| 2.34375
| 2
|
pratik.py
|
Tesmi-ui/Tesmi-ux
| 0
|
12781793
|
<reponame>Tesmi-ui/Tesmi-ux<filename>pratik.py
import random
import math
def main():
user = int(input("inter a no.:" ))
print("your entered number is " user)
| 3.109375
| 3
|
untws/position.py
|
maanbsat/untws
| 11
|
12781794
|
<gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
# position.py
#
# Created by <NAME> on 2013-09-02.
# Copyright (c) 2013 <NAME>. All rights reserved.
from untws.instrument import create_instrument
class Position(object):
"""Represents a single position"""
def __init__(self, connection, account_name, instrument, quantity, price,
average_cost, market_value, realized_pnl, unrealized_pnl):
self._connection = connection
self._account_name = account_name
self._instrument = create_instrument(connection, instrument)
self._quantity = quantity
self._price = price
self._average_cost = average_cost
self._market_value = market_value
self._realized_pnl = realized_pnl
self._unrealized_pnl = unrealized_pnl
def __repr__(self):
return "<Position(%s, %s, %f)>" % \
(self.account_name, self.instrument.ticker, self.quantity)
@property
def connection(self):
"""Returns the connection object"""
return self._connection
@property
def account_name(self):
"""The account name for this position"""
return self._account_name
@property
def instrument(self):
"""The instrument object"""
return self._instrument
@property
def quantity(self):
"""The position quantity"""
return self._quantity
@property
def price(self):
"""The current price"""
return self._price
@property
def average_cost(self):
"""The average cose"""
return self._average_cost
@property
def market_value(self):
"""The market value"""
return self._market_value
@property
def realized_pnl(self):
"""The realized P&L"""
return self._realized_pnl
@property
def unrealized_pnl(self):
"""The unrealized P&L"""
return self._unrealized_pnl
| 2.6875
| 3
|
output/copilot/python/timeout/house-robber.py
|
nhtnhan/CMPUT663-copilot-eval
| 0
|
12781795
|
<gh_stars>0
# https://leetcode.com/problems/house-robber/
class Solution(object):
def rob(self, nums):
"""
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed, the only constraint stopping you from robbing each of them is that adjacent houses have security systems connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given an integer array nums representing the amount of money of each house, return the maximum amount of money you can rob tonight without alerting the police.
:type nums: List[int]
:rtype: int
"""
| 3.65625
| 4
|
carla/evaluation/benchmark.py
|
jayanthyetukuri/CARLA
| 0
|
12781796
|
import timeit
from typing import Union
import numpy as np
import pandas as pd
import copy
from carla.evaluation.distances import get_distances
from carla.evaluation.nearest_neighbours import yNN, yNN_prob, yNN_dist
from carla.evaluation.manifold import yNN_manifold, sphere_manifold
from carla.evaluation.process_nans import remove_nans
from carla.evaluation.redundancy import redundancy
from carla.evaluation.success_rate import success_rate, individual_success_rate
from carla.evaluation.diversity import individual_diversity, avg_diversity
from carla.evaluation.violations import constraint_violation
from carla.evaluation.recourse_time import recourse_time_taken
from carla.models.api import MLModel
from carla.models.catalog import MLModelCatalog
from carla.recourse_methods.api import RecourseMethod
from carla.recourse_methods.processing import get_drop_columns_binary
class Benchmark:
"""
The benchmarking class contains all measurements.
It is possible to run only individual evaluation metrics or all via one single call.
For every given factual, the benchmark object will generate one counterfactual example with
the given recourse method.
Parameters
----------
mlmodel: carla.models.MLModel
Black Box model we want to explain
    recourse_method: carla.recourse_methods.RecourseMethod
        Recourse method we want to benchmark
    factuals: pd.DataFrame
        Instances for which we want to find counterfactuals
Methods
-------
compute_ynn:
Computes y-Nearest-Neighbours for generated counterfactuals
compute_average_time:
Computes average time for generated counterfactual
compute_distances:
Calculates the distance measure and returns it as dataframe
compute_constraint_violation:
Computes the constraint violation per factual as dataframe
compute_redundancy:
Computes redundancy for each counterfactual
compute_success_rate:
Computes success rate for the whole recourse method.
run_benchmark:
Runs every measurement and returns every value as dict.
"""
def __init__(
self,
mlmodel: Union[MLModel, MLModelCatalog],
recourse_method: RecourseMethod,
factuals: pd.DataFrame,
dataset: pd.DataFrame = None
) -> None:
self._mlmodel = mlmodel
self._recourse_method = recourse_method
self._full_dataset = dataset
start = timeit.default_timer()
self._counterfactuals = recourse_method.get_counterfactuals(factuals)
stop = timeit.default_timer()
self._timer = stop - start
# Avoid using scaling and normalizing more than once
if isinstance(mlmodel, MLModelCatalog):
self._mlmodel.use_pipeline = False # type: ignore
self._factuals = copy.deepcopy(factuals)
# Normalizing and encoding factual for later use
self._enc_norm_factuals = recourse_method.encode_normalize_order_factuals(
factuals, with_target=True
)
def compute_ynn(self) -> pd.DataFrame:
"""
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours"]
return pd.DataFrame([[ynn]], columns=columns)
def compute_average_time(self) -> pd.DataFrame:
"""
Computes average time for generated counterfactual
Returns
-------
pd.DataFrame
"""
avg_time = self._timer / self._counterfactuals.shape[0]
columns = ["Average_Time"]
return pd.DataFrame([[avg_time]], columns=columns)
def compute_distances(self) -> pd.DataFrame:
"""
Calculates the distance measure and returns it as dataframe
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
columns = ["Distance_1", "Distance_2", "Distance_3", "Distance_4"]
if counterfactuals_without_nans.empty:
return pd.DataFrame(columns=columns)
if self._mlmodel.encoder.drop is None:
# To prevent double count of encoded features without drop if_binary
binary_columns_to_drop = get_drop_columns_binary(
self._mlmodel.data.categoricals,
counterfactuals_without_nans.columns.tolist(),
)
counterfactuals_without_nans = counterfactuals_without_nans.drop(
binary_columns_to_drop, axis=1
)
factual_without_nans = factual_without_nans.drop(
binary_columns_to_drop, axis=1
)
arr_f = factual_without_nans.to_numpy()
arr_cf = counterfactuals_without_nans.to_numpy()
distances = get_distances(arr_f, arr_cf)
output = pd.DataFrame(distances, columns=columns)
return output
def compute_constraint_violation(self) -> pd.DataFrame:
"""
Computes the constraint violation per factual as dataframe
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
violations = []
else:
violations = constraint_violation(
self._mlmodel, counterfactuals_without_nans, factual_without_nans
)
columns = ["Constraint_Violation"]
return pd.DataFrame(violations, columns=columns)
def compute_time_taken(self) -> pd.DataFrame:
"""
TODO
Computes time taken for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
time_taken = []
else:
time_taken = recourse_time_taken(
self._recourse_method, self._factuals
)
columns = ["Time_taken"]
return pd.DataFrame(time_taken, columns=columns)
def compute_individual_diversity(self) -> pd.DataFrame:
"""
TODO
        Computes instance-wise diversity for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
            diversity = []
        else:
            diversity = individual_diversity(
                counterfactuals_without_nans, factual_without_nans
            )
        columns = ["Individual_Diversity"]
        return pd.DataFrame(diversity, columns=columns)
def compute_avg_diversity(self) -> pd.DataFrame:
"""
TODO
Computes average diversity for generated counterfactual
Returns
-------
pd.DataFrame
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
diversity = []
else:
diversity = avg_diversity(
counterfactuals_without_nans, factual_without_nans
)
columns = ["Average_Diversity"]
return pd.DataFrame(diversity, columns=columns)
def compute_ynn_dist(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_dist(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_ynn(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals with respect to positive class
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
columns = ["y-Nearest-Neighbours-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_manifold_sphere(self) -> pd.DataFrame:
"""
TODO
Computes neighbor distance for generated counterfactuals with respect to positive class within sphere
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = sphere_manifold(
counterfactuals_without_nans, self._recourse_method, self._mlmodel
)
columns = ["Sphere-Manifold-Distance"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_ynn_prob(self) -> pd.DataFrame:
"""
TODO
Computes y-Nearest-Neighbours for generated counterfactuals
Returns
-------
pd.DataFrame
"""
_, counterfactuals_without_nans = remove_nans(
self._factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
ynn = np.nan
else:
ynn = yNN_prob(
counterfactuals_without_nans, self._recourse_method, self._mlmodel, 5
)
print(ynn)
columns = ["y-Nearest-Neighbours-Probability"]
output = pd.DataFrame(ynn, columns=columns)
return output
def compute_redundancy(self) -> pd.DataFrame:
"""
Computes redundancy for each counterfactual
Returns
-------
pd.Dataframe
"""
factual_without_nans, counterfactuals_without_nans = remove_nans(
self._enc_norm_factuals, self._counterfactuals
)
if counterfactuals_without_nans.empty:
redundancies = []
else:
redundancies = redundancy(
factual_without_nans, counterfactuals_without_nans, self._mlmodel
)
columns = ["Redundancy"]
return pd.DataFrame(redundancies, columns=columns)
def compute_success_rate(self) -> pd.DataFrame:
"""
Computes success rate for the whole recourse method.
Returns
-------
pd.Dataframe
"""
rate = success_rate(self._counterfactuals)
columns = ["Success_Rate"]
return pd.DataFrame([[rate]], columns=columns)
def compute_individual_success_rate(self) -> pd.DataFrame:
"""
Computes success rate for the whole recourse method.
Returns
-------
pd.Dataframe
"""
rate = individual_success_rate(self._counterfactuals)
columns = ["Individual_Success_Rate"]
return pd.DataFrame([[rate]], columns=columns)
def run_benchmark(self) -> pd.DataFrame:
"""
Runs every measurement and returns all values in a single DataFrame.
Returns
-------
pd.DataFrame
"""
pipeline = [
self.compute_distances(),
self.compute_constraint_violation(),
self.compute_redundancy(),
self.compute_ynn_prob(),
self.compute_ynn_dist(),
#self.compute_individual_success_rate(),
#self.compute_individual_diversity(),
self.compute_time_taken(),
self.compute_manifold_ynn(),
self.compute_manifold_sphere(),
self.compute_success_rate(),
self.compute_average_time(),
self.compute_ynn()
#self.compute_avg_diversity()
]
output = pd.concat(pipeline, axis=1)
return output
| 2.421875
| 2
|
config/appdaemon/apps/kodi_ambient_lights.py
|
azogue/hassio_config
| 18
|
12781797
|
# -*- coding: utf-8 -*-
"""
Automation task as a AppDaemon App for Home Assistant
This little app controls the ambient light when Kodi plays video,
dimming some lights and turning off others, and returning to the
initial state when the playback is finished.
In addition, it also sends notifications when starting the video playback,
reporting the video info in the message.
For that, it talks with Kodi through its JSONRPC API by HA service calls.
"""
import datetime as dt
from urllib import parse
import appdaemon.plugins.hass.hassapi as hass
LOG_LEVEL = "DEBUG"
LOG_LEVEL_HIGH = "WARNING"
LOGGER = "event_log"
EVENT_KODI_CALL_METHOD_RESULT = "kodi_call_method_result"
METHOD_GET_PLAYERS = "Player.GetPlayers"
METHOD_GET_ITEM = "Player.GetItem"
PARAMS_GET_ITEM = {
"playerid": 1,
"properties": [
"title",
"artist",
"albumartist",
"genre",
"year",
"rating",
"album",
"track",
"duration",
"playcount",
"fanart",
"plot",
"originaltitle",
"lastplayed",
"firstaired",
"season",
"episode",
"showtitle",
"thumbnail",
"file",
"tvshowid",
"watchedepisodes",
"art",
"description",
"theme",
"dateadded",
"runtime",
"starttime",
"endtime",
],
}
TYPE_ITEMS_NOTIFY = ["movie", "episode"]
TYPE_HA_ITEMS_NOTIFY = ["tvshow", "movie"]
TELEGRAM_KEYBOARD_KODI = ["/luceson, /ambilighttoggle"]
TELEGRAM_INLINEKEYBOARD_KODI = [
[("Luces ON", "/luceson"), ("Switch Ambilight", "/ambilighttoggle")],
]
# noinspection PyClassHasNoInit
class KodiAssistant(hass.Hass):
"""App for Ambient light control when playing video with KODI."""
_lights = None
_light_states = {}
_media_player = None
_is_playing_video = False
_item_playing = None
_last_play = None
_notifier_bot = "telegram_bot"
_target_sensor = None
_ios_notifier = None
def initialize(self):
"""AppDaemon required method for app init."""
_lights_dim_on = self.args.get("lights_dim_on", "").split(",")
_lights_dim_off = self.args.get("lights_dim_off", "").split(",")
_lights_off = self.args.get("lights_off", "").split(",")
self._lights = {
"dim": {"on": _lights_dim_on, "off": _lights_dim_off},
"off": _lights_off,
"state": "off",
}
self._media_player = self.config["media_player"]
self._ios_notifier = self.config["notifier"].replace(".", "/")
self._target_sensor = self.config["chatid_sensor"]
# Listen for Kodi changes:
self._last_play = self.datetime()
self.listen_state(self.kodi_state, self._media_player)
self.listen_event(
self._receive_kodi_result, EVENT_KODI_CALL_METHOD_RESULT
)
def _get_max_brightness_ambient_lights(self):
if self.now_is_between("09:00:00", "19:00:00"):
return 200
elif self.now_is_between("19:00:00", "22:00:00"):
return 150
elif self.now_is_between("22:00:00", "04:00:00"):
return 75
return 25
def _ask_for_playing_item(self):
self.call_service(
"kodi/call_method",
entity_id=self._media_player,
method=METHOD_GET_ITEM,
**PARAMS_GET_ITEM,
)
# noinspection PyUnusedLocal
def _receive_kodi_result(self, event_id, payload_event, *args):
result = payload_event["result"]
method = payload_event["input"]["method"]
if (
event_id == EVENT_KODI_CALL_METHOD_RESULT
and method == METHOD_GET_ITEM
):
if "item" in result:
item = result["item"]
self._is_playing_video = item["type"] in TYPE_ITEMS_NOTIFY
title = message = img_url = ""
if self._is_playing_video:
title, message, img_url = self._get_kodi_info_params(item)
new_video = (
self._item_playing is None or self._item_playing != title
)
now = self.datetime()
delta = now - self._last_play
self._last_play = now
if self._is_playing_video and (
new_video or delta > dt.timedelta(minutes=30)
):
self._adjust_kodi_lights(play=True)
self._item_playing = title
# Notifications
self._notify_ios_message(title, message, img_url)
self._notify_telegram_message(title, message, img_url)
else:
self.log(
"RECEIVED BAD KODI RESULT: {}".format(result),
level=LOG_LEVEL_HIGH,
log=LOGGER,
)
elif (
event_id == EVENT_KODI_CALL_METHOD_RESULT
and method == METHOD_GET_PLAYERS
):
self.log(
"KODI GET_PLAYERS RECEIVED: {}".format(result), log=LOGGER
)
def _get_kodi_info_params(self, item):
"""
media_content_id: {
"unknown": "304004"
}
entity_picture: /api/media_player_proxy/media_player.kodi?token=...
media_duration: 1297
media_title: The One Where Chandler Takes A Bath
media_album_name:
media_season: 8
media_episode: 13
is_volume_muted: false
media_series_title: Friends
media_content_type: tvshow
"""
if item["type"] == "episode":
title = "{} S{:02d}E{:02d} {}".format(
item["showtitle"],
item["season"],
item["episode"],
item["title"],
)
else:
title = "Playing: {}".format(item["title"])
if item["year"]:
title += " [{}]".format(item["year"])
message = "{}\n∆T: {}.".format(
item["plot"], dt.timedelta(hours=item["runtime"] / 3600)
)
img_url = None
try:
if "thumbnail" in item:
raw_img_url = item["thumbnail"]
elif "thumb" in item:
raw_img_url = item["thumb"]
elif "poster" in item["art"]:
raw_img_url = item["art"]["poster"]
elif "season.poster" in item["art"]:
raw_img_url = item["art"]["season.poster"]
else:
self.log(f"No poster in item[art]={item['art']}", log=LOGGER)
k = list(item["art"].keys())[0]
raw_img_url = item["art"][k]
img_url = parse.unquote_plus(raw_img_url).rstrip("/")
# str.lstrip strips characters, not a prefix, so remove the "image://" prefix explicitly
if img_url.startswith("image://"):
img_url = img_url[len("image://"):]
if ("192.168." not in img_url) and img_url.startswith("http://"):
img_url = img_url.replace("http:", "https:")
url_valid = self._valid_image_url(img_url)
self.log(
"MESSAGE: T={}, URL={}, ok={}".format(title, message, img_url, url_valid),
log=LOGGER,
level=LOG_LEVEL,
)
if not url_valid:
img_url = None
except KeyError as e:
self.log(
"MESSAGE KeyError: {}; item={}".format(e, item), log=LOGGER
)
return title, message, img_url
def _valid_image_url(self, img_url):
if (img_url is not None) and img_url.startswith("http"):
return True
if img_url is not None:
self.log(
"BAD IMAGE URL: {}".format(img_url),
level=LOG_LEVEL_HIGH,
log=LOGGER,
)
return False
def _notify_ios_message(self, title, message, img_url=None):
data_msg = {
"title": title,
"message": message,
"data": {"push": {"category": "kodiplay"}},
}
if img_url is not None:
data_msg["data"]["attachment"] = {"url": img_url}
self.call_service(self._ios_notifier, **data_msg)
def _notify_telegram_message(self, title, message, img_url=None):
target = int(self.get_state(self._target_sensor))
if img_url is not None:
data_photo = {
"url": img_url,
"keyboard": TELEGRAM_KEYBOARD_KODI,
"disable_notification": True,
}
self.call_service(
"{}/send_photo".format(self._notifier_bot),
target=target,
**data_photo,
)
message + "\n{}\nEND".format(img_url)
data_msg = {
"message": message,
"title": "*{}*".format(title),
"inline_keyboard": TELEGRAM_INLINEKEYBOARD_KODI,
"disable_notification": True,
}
self.call_service(
"{}/send_message".format(self._notifier_bot),
target=target,
**data_msg,
)
def _adjust_kodi_lights(self, play=True):
k_l = self._lights["dim"][self._lights["state"]] + self._lights["off"]
for light_id in k_l:
if play:
light_state = self.get_state(light_id)
attrs_light = self.get_state(light_id, attribute="attributes")
if attrs_light:
attrs_light.update({"state": light_state})
self._light_states[light_id] = attrs_light
max_brightness = self._get_max_brightness_ambient_lights()
if light_id in self._lights["off"]:
self.log(
"Apagando light {} para KODI PLAY".format(
light_id
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_off", entity_id=light_id, transition=2
)
elif ("brightness" in attrs_light.keys()) and (
attrs_light["brightness"] > max_brightness
):
self.log(
"Atenuando light {} para KODI PLAY".format(
light_id
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_on",
entity_id=light_id,
transition=2,
brightness=max_brightness,
)
else:
try:
state_before = self._light_states[light_id]
except KeyError:
state_before = {}
if ("state" in state_before) and (
state_before["state"] == "on"
):
try:
new_state_attrs = {
"xy_color": state_before["xy_color"],
"brightness": state_before["brightness"],
}
except KeyError:
new_state_attrs = {
"color_temp": state_before["color_temp"],
"brightness": state_before["brightness"],
}
self.log(
"Reponiendo light {}, con state_before={}".format(
light_id, new_state_attrs
),
level=LOG_LEVEL,
log=LOGGER,
)
self.call_service(
"light/turn_on",
entity_id=light_id,
transition=2,
**new_state_attrs,
)
else:
self.log(
"Doing nothing with light {}, state_before={}".format(
light_id, state_before
),
level=LOG_LEVEL,
log=LOGGER,
)
# noinspection PyUnusedLocal
def kodi_state(self, entity, attribute, old, new, kwargs):
"""Kodi state change main control."""
if new == "playing":
kodi_attrs = self.get_state(
self._media_player, attribute="attributes"
)
self._is_playing_video = (
"media_content_type" in kodi_attrs
and kodi_attrs["media_content_type"] in TYPE_HA_ITEMS_NOTIFY
)
if self._is_playing_video:
self._ask_for_playing_item()
elif (new == "idle") and self._is_playing_video:
self._is_playing_video = False
self._last_play = self.datetime()
self.log(
"KODI STOP. old:{}, new:{}, type_lp={}".format(
old, new, type(self._last_play)
),
level=LOG_LEVEL,
log=LOGGER,
)
self._adjust_kodi_lights(play=False)
elif new == "off":
self._is_playing_video = False
self.log(
"KODI turned off. old:{}, new:{}, type_lp={}".format(
old, new, type(self._last_play)
),
level=LOG_LEVEL,
log=LOGGER,
)
self._light_states = {}
| 2.640625
| 3
|
fp-cloud-compute/fpstackutils/cmd/nova.py
|
2535463841/fluent-python
| 0
|
12781798
|
import sys
from fp_lib.common import cliparser
from fp_lib.common import log
from fpstackutils.commands import nova
LOG = log.getLogger(__name__)
def main():
cli_parser = cliparser.SubCliParser('Python Nova Utils')
cli_parser.register_clis(nova.ResourcesInit,
nova.VMCleanup, nova.VMTest, nova.VMEvacuateTest)
try:
cli_parser.call()
return 0
except KeyboardInterrupt:
LOG.error("user interrupt")
return 1
if __name__ == '__main__':
sys.exit(main())
| 2.015625
| 2
|
src/fhdc/preprocessing.py
|
middlec000/fast_heirarchical_document_clustering
| 0
|
12781799
|
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# Note: the nltk corpora used below must be downloaded once beforehand, e.g.
# nltk.download('stopwords') and nltk.download('wordnet')
import re
import string
from typing import Dict
from data_classes import *
def preprocess(docs: Dict[str,str], **kwargs):
"""
Takes a dictionary of documents and transforms them into the compact corpus and Vocabulary data structures in preparation for clustering.
This process turns each document from a string of words into a dictionary with (k, v) of (word id, TF-IDF score).
Args:
docs (Dict[str,str]): Documents to preprocess. Must be in format {doc_id (str): doc_text (str)}
**kwargs:
corpus_min_frequency (optional): Minimum corpus wide frequency each word needs to meet in order to be retained in clustering. Defaults to 2.
doc_min_frequency (optional): Minimum document frequency each word needs to meet in order to be retained in each document. Defaults to 2.
tfidf_digits (optional): Number of decimal places to round TF-IDF scores. Defaults to 4.
stop_words (optional): Words to remove from the corpus. If none is provided, nltk's English stopwords are used.
lemmatizer (optional): Lemmatizer to be used. Must have a .lemmatize(word) method. If none is provided, nltk's WordNetLemmatizer is used.
Returns:
corpus (Dict[int, ClusterContents]): The document corpus where each document is represented as a dictionary of word_id's and the corresponding TF-IDF scores.
doc_name_map (DocumentNameMap): Two-way mapping between passed document names and cluster (document) ids.
vocabulary (Vocabulary): The vocabulary for the given corpus.
"""
# Establish parameter values
params = {'corpus_min_frequency':2, 'doc_min_frequency':2, 'tfidf_digits':4, 'stop_words': set(stopwords.words('english')), 'lemmatizer': WordNetLemmatizer()}
if kwargs is not None:
for k,v in kwargs.items():
params[k] = v
# print(params)
for doc in docs:
# Lowercase
current_doc = docs[doc].lower()
# Remove punctuation and symbols
regex = re.compile(f"[{re.escape(string.punctuation)}]")
current_doc = regex.sub('', current_doc)
# Remove numbers
current_doc = re.sub(r'\d', '', current_doc)
# Tokenize
current_doc = current_doc.split(' ')
# Remove stopwords and empty strings
current_doc = [word for word in current_doc if word not in params['stop_words'] and word]
# Lemmatize
current_doc = [params['lemmatizer'].lemmatize(word) for word in current_doc]
# Transform to vector format {word: frequency}
transformed_doc = {}
for word in current_doc:
if word not in transformed_doc:
transformed_doc[word] = 1
else:
transformed_doc[word] += 1
# Remove low frequency words from doc
transformed_doc = {k:v for (k,v) in transformed_doc.items() if v >= params['doc_min_frequency']}
# Replace the original doc with transformed_doc
docs[doc] = transformed_doc
# Create vocabulary
vocabulary = Vocabulary({}, {}, {})
current_word_id = 0
for doc in docs:
for word in docs[doc]:
if word in vocabulary.word_id:
existing_id = vocabulary.word_id[word]
vocabulary.id_freq[existing_id] += docs[doc][word]
else:
vocabulary.word_id[word] = current_word_id
vocabulary.id_word[current_word_id] = word
vocabulary.id_freq[current_word_id] = docs[doc][word]
current_word_id += 1
# Find corpus-wide low-frequency words
infrequent_corpus_word_ids = []
for word_id in vocabulary.id_freq:
if vocabulary.id_freq[word_id] < params['corpus_min_frequency']:
infrequent_corpus_word_ids.append(word_id)
# Remove corpus-wide low-frequency words from vocabulary
for word_id_to_drop in infrequent_corpus_word_ids:
vocabulary.id_freq.pop(word_id_to_drop)
word_to_drop = vocabulary.id_word[word_id_to_drop]
vocabulary.id_word.pop(word_id_to_drop)
vocabulary.word_id.pop(word_to_drop)
# Remove corpus-wide low-frequency words from corpus
# Change words to word_ids
# Transform word frequencies to TF-IDF scores
# Create clusters, cluster_ids
doc_name_map = DocumentNameMap({}, {})
cluster_id = 0
new_docs = {}
for doc in docs:
cluster_contents = {}
for word in docs[doc]:
if word in vocabulary.word_id:
word_id = vocabulary.word_id[word]
word_tfidf = float(docs[doc][word]) / float(vocabulary.id_freq[word_id])
cluster_contents[word_id] = round(word_tfidf, ndigits=params['tfidf_digits'])
new_docs[cluster_id] = ClusterContents(cluster_id=cluster_id, contents=cluster_contents)
doc_name_map.name_id[doc] = cluster_id
doc_name_map.id_name[cluster_id] = doc
cluster_id += 1
return new_docs, doc_name_map, vocabulary
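# --- Hedged usage sketch (added example, not part of the original module) ---
# Minimal illustration of how preprocess() can be called. The document ids,
# texts and keyword arguments below are made up; it also assumes the nltk
# 'stopwords' and 'wordnet' corpora have already been downloaded.
if __name__ == '__main__':
    sample_docs = {
        'doc_a': 'Cats chase mice and cats sleep while cats purr.',
        'doc_b': 'Dogs chase cats and dogs bark while dogs sleep.',
    }
    corpus, doc_name_map, vocabulary = preprocess(
        sample_docs, doc_min_frequency=1, corpus_min_frequency=1
    )
    print(doc_name_map.name_id)   # e.g. {'doc_a': 0, 'doc_b': 1}
    print(vocabulary.word_id)     # word -> word_id mapping
    print(corpus[0].contents)     # {word_id: rounded TF-IDF-style score}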
| 3.25
| 3
|
pretrain.py
|
rlaboulaye/turn-of-phrase
| 0
|
12781800
|
<reponame>rlaboulaye/turn-of-phrase<gh_stars>0
from argparse import ArgumentParser
import os
import random
import time
import warnings
from typing import Callable
from convokit import Corpus, download
import numpy as np
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.cuda.amp import GradScaler, autocast
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from transformers import AdamW, AutoModel, AutoModelForMaskedLM, AutoModelForMultipleChoice, AutoTokenizer, get_linear_schedule_with_warmup
from data import ConversationPathDataset, ConversationPathBatchSampler, add_title_to_root, conversation_path_collate_fn
from model import ConversationClassificationHRNN
from utils import AverageMeter, ProgressMeter, save_checkpoint
WARMUP_RATIO = 0.1
CLIPPING_GRADIENT_NORM = 1.0
MLM_COEF = 0.1
parser = ArgumentParser(description='Turn of Phrase Pretraining')
parser.add_argument('-m', '--model-name', type=str, default='google/bigbird-roberta-base',
help='name of pretrained model to use')
parser.add_argument('-c', '--corpus', type=str, default='subreddit-changemyview',
help='name of convokit corpus used for pretraining')
parser.add_argument('--start-index', type=int, default=None,
help='start index for utterance.json')
parser.add_argument('--end-index', type=int, default=None,
help='end index for utterance.json')
parser.add_argument('-l', '--learning-rate', type=float, default=2e-5,
help='base learning rate used')
parser.add_argument('-b', '--batch-size', type=int, default=16,
help='training data batch size')
parser.add_argument('-t', '--training-steps', type=int, default=100000,
help='number of training steps to run')
parser.add_argument('--loop-steps', type=int, default=1000,
help='number of training steps per train loop')
parser.add_argument('-s', '--seed', type=int, default=None,
help='random seed for reproducibility')
parser.add_argument('--conversation-min', type=int, default=3,
help='minimum conversation length')
parser.add_argument('--conversation-max', type=int, default=6,
help='maximum conversation length')
parser.add_argument('--num-neighbors', type=int, default=3,
help='the number of contrastive examples used')
parser.add_argument('--utterance-max', type=int, default=1024,
help='maximum utterance length')
parser.add_argument('-r', '--resume_path', type=str, default=None,
help='path to model from which you would like to resume')
best_loss = np.inf
def main() -> None:
global best_loss
step = 0
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if args.start_index is not None or args.end_index is not None:
start_index = args.start_index
end_index = args.end_index
if start_index is None:
start_index = 0
if end_index is None:
corpus = Corpus(filename=download(args.corpus), utterance_start_index=start_index)
else:
corpus = Corpus(filename=download(args.corpus), utterance_start_index=start_index, utterance_end_index=end_index)
else:
corpus = Corpus(filename=download(args.corpus))
add_title_to_root(corpus)
conversations = list(corpus.iter_conversations())
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
dataset = ConversationPathDataset(corpus, tokenizer,
min_len=args.conversation_min, max_len=args.conversation_max, n_neighbors=args.num_neighbors, max_tokenization_len=args.utterance_max)
sampler = ConversationPathBatchSampler(args.batch_size, dataset.min_len, dataset.get_indices_by_len())
loader = DataLoader(dataset, batch_sampler=sampler, collate_fn=conversation_path_collate_fn, pin_memory=device.type != 'cpu', num_workers=4)
# utterance_encoder = AutoModel.from_pretrained(args.model_name)
# conversation_encoder = nn.LSTM(utterance_encoder.config.hidden_size, args.hidden, args.num_layers)
# model = ConversationClassificationHRNN(utterance_encoder, conversation_encoder, 1)
# mlm_head = AutoModelForMaskedLM.from_pretrained(args.model_name).predictions
model = AutoModelForMultipleChoice.from_pretrained(args.model_name)
model.to(device)
# mlm_head.to(device)
criterion = nn.CrossEntropyLoss()
# optimizer = AdamW(list(model.parameters()) + list(mlm_head.parameters()), args.learning_rate)
optimizer = AdamW(list(model.parameters()), args.learning_rate)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=WARMUP_RATIO*args.training_steps, num_training_steps=args.training_steps)
scaler = GradScaler()
if args.resume_path is not None:
if os.path.isfile(args.resume_path):
print("=> loading checkpoint '{}'".format(args.resume_path))
checkpoint = torch.load(args.resume_path, map_location=device)
step = checkpoint['step']
best_loss = checkpoint['best_loss']
model.bert.load_state_dict(checkpoint['state_dict'])
# mlm_head.load_state_dict(checkpoint['head_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
print("=> loaded checkpoint '{}' (step {})"
.format(args.resume_path, checkpoint['step']))
else:
print("=> no checkpoint found at '{}'".format(args.resume_path))
while step < args.training_steps:
loop_steps = args.loop_steps if args.training_steps - step > args.loop_steps else args.training_steps - step
# loss = train(loader, model, mlm_head, criterion, optimizer, scheduler, scaler,
# device, loop_steps, step // args.loop_steps)
loss = train(loader, model, criterion, optimizer, scheduler, scaler,
device, loop_steps, step // args.loop_steps)
step += loop_steps
# checkpoint model every k training loops
k = 2
if step % (k * args.loop_steps) == 0 or step == args.training_steps:
is_best = loss < best_loss
best_loss = min(loss, best_loss)
run_name = '{}.{}.{}.{}.{}'.format(args.model_name.split('/')[-1], args.corpus, args.conversation_max, args.num_neighbors, args.utterance_max)
# save_checkpoint({
# 'step': step,
# 'model': args.model_name,
# 'state_dict': model.state_dict(),
# 'head_state_dict': mlm_head.state_dict(),
# 'best_loss': best_loss,
# 'optimizer': optimizer.state_dict(),
# 'scheduler': scheduler.state_dict()
# }, is_best, run_name)
save_checkpoint({
'step': step,
'model': args.model_name,
'state_dict': model.bert.state_dict(),
'best_loss': best_loss,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict()
}, is_best, run_name)
# def train(loader: DataLoader, model: nn.Module, mlm_head: nn.Module, criterion: Callable, optimizer: Optimizer, scheduler: object, scaler: GradScaler, device: torch.device, loop_steps: int, step: int):
def train(loader: DataLoader, model: nn.Module, criterion: Callable, optimizer: Optimizer, scheduler: object, scaler: GradScaler, device: torch.device, loop_steps: int, step: int):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
# path_losses = AverageMeter('Path Loss', ':.4e')
# mlm_losses = AverageMeter('MLM Loss', ':.4e')
accuracies = AverageMeter('Accuracy', ':6.2f')
# progress = ProgressMeter(
# loop_steps,
# [batch_time, data_time, losses, path_losses, mlm_losses, accuracies],
# prefix="Epoch: [{}]".format(step))
progress = ProgressMeter(
loop_steps,
[batch_time, data_time, losses, accuracies],
prefix="Epoch: [{}]".format(step))
model.train()
end = time.time()
for i, (path, attention_mask, targets, _) in enumerate(loader):
data_time.update(time.time() - end)
non_blocking = device.type != 'cpu'
path = path.to(device, non_blocking=non_blocking)
attention_mask = attention_mask.to(device, non_blocking=non_blocking)
targets = targets.to(device, non_blocking=non_blocking)
#
# mask_indices = []
# for input_ids_batch, attention_mask_batch in zip(path, attention_masks):
# sequence_lengths_batch = attention_mask_batch.shape[1] - attention_mask_batch.flip(1).argmax(1)
# attention_mask_batch = attention_mask_batch.detach().clone()
# # remove masking for sequence length
# for batch_idx in range(input_ids_batch.shape[0]):
# attention_mask_batch[batch_idx, sequence_lengths_batch[batch_idx]:] = 1
# mask_indices_batch = (attention_mask_batch == 0).nonzero(as_tuple=True)
# mask_indices.append(mask_indices_batch)
#
with autocast():
# logits, mask_encodings = model(input_ids=path, attention_mask=attention_mask)
output = model(input_ids=path, attention_mask=attention_mask, labels=targets)
logits = output.logits
loss = output.loss.mean()
# # mlm_loss = torch.zeros(1).to(device, non_blocking=non_blocking)
# for mask_encoding_batch, input_ids_batch, mask_indices_batch in zip(mask_encodings, path, mask_indices):
# if len(mask_indices_batch[0]) == 0:
# continue
# mask_targets = input_ids_batch[mask_indices_batch]
# mask_logits = mlm_head(mask_encoding_batch)
# # gradually take mean
# mlm_loss += criterion(mask_logits, mask_targets) * (1. / len(path))
# loss = path_loss + MLM_COEF * mlm_loss
accuracy = (logits.argmax(1) == targets).float().mean().item()
losses.update(loss.item(), targets.shape[0])
# path_losses.update(path_loss.item(), targets.shape[0])
# mlm_losses.update(mlm_loss.item(), targets.shape[0])
accuracies.update(accuracy, targets.shape[0])
optimizer.zero_grad()
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), CLIPPING_GRADIENT_NORM)
scaler.step(optimizer)
scaler.update()
scheduler.step()
batch_time.update(time.time() - end)
end = time.time()
if i % (loop_steps // 50) == 0:
progress.display(i)
if i == loop_steps - 1:
break
return losses.avg
if __name__ == '__main__':
main()
| 2.15625
| 2
|
degmo/vae/__init__.py
|
IcarusWizard/Deep-Generative-Models
| 2
|
12781801
|
<gh_stars>1-10
from .vae import VAE
from .fvae import FVAE
from .vqvae import VQ_VAE
| 0.976563
| 1
|
remind.py
|
ksu-hmi/Remind_Rx
| 0
|
12781802
|
#Importing all the necessary libraries:
from tkinter import *
import datetime
import time
import sys
import signal
import os
import webbrowser
import re
import winsound  # Windows-only; used to play the alarm sound below
# Log into the app
print ("RemindRx, your guide to better health")
print("Enter Email address(Username): ")
email_address = input()
while "@" not in email_address:
email_address = input("Your email address must have '@' in it\nPlease write your email address again: ")
if len(email_address) <= 6 :
email_address = input("Your email address is too short\nPlease write your email address again: ")
if "." not in email_address:
email_address = input("Your email address must have '.' in it\nPlease write your email address again: ")
while "." not in email_address:
email_address = input("Your email address must have '.' in it\nPlease write your email address again: ")
if len(email_address) <= 6 :
email_address = input("Your email address is too short\nPlease write your email address again: ")
if "@" not in email_address:
email_address = input("Your email address must have '@' in it\nPlease write your email address again: ")
# Create password
def checkPassword(password):
"""
Validate the password
"""
if len(password) < 8:
# Password to short
print("Your password must be 8 characters long.")
return False
elif not re.findall(r'\d+', password):
# Missing a number
print("You need a number in your password.")
return False
elif not re.findall(r'[A-Z]+', password):
# Missing at least one capital letter
print("You need a capital letter in your password.")
return False
else:
# All good
print("All good")
return True
# Prompt the user to enter a valid password
passwordValid = False
while not passwordValid:
create_password = input( "Create your password:\n (Password must contain at least 8 characters, one number,\n one capital letter, and one lowercase letter)")
passwordValid = checkPassword(create_password)
print("Enter your first name: ")
first_name=input()
print("Enter your last name: ")
last_name=input()
#Now log in with new password
print("Welcome", first_name + "!", "Your profile is set up")
print("Log in to access the application\n You have 3 attempts or application quits")
attempts = 0
username = email_address
password = create_password
while True:
usern = input("Enter Username: ")
print()
userp = input("Enter Password: ")
attempts += 1
if attempts == 3:
print("Too many incorrect attempts. Please try again in few minutes")
exit()
else:
if usern == username and userp == password:
print("\nAccess Granted. Welcome " + first_name)
break
else:
print("\nIncorrect credentials. Try again")
def print_menu():
print()
print("Welcome to RemindRx! Press enter to select option")
print()
choice = input("""
1. Add A Medication
2. Delete A Medication
3. Review Your Medication List
4. Set an Alarm for Medication
5. Exit
""")
print_menu()
choice = input("Select the menu item that you want edit [1-5]: ")
choice = int(choice)
medication_name=[]
while choice != 5:
if choice == 1:
med_add = input("Enter the medication Name to add to your list: ")
medication_name.append(med_add)
print("Updated Medication List: ", medication_name)
med_direction = input()
break
elif choice == 2:
print ("Delete A Medication")
med_remove = input("Enter the medication that you are removing from your list: ")
medication_name.remove(med_remove)
print("Updated medication list: ", medication_name)
continue
elif choice == 3:
print ("Review Your Medication List")
print("Current medication list: ", "\n", medication_name)
break
elif choice == 4:
print ("Set an Alarm for Medication")
alarm_HH = input("Enter the hour you want to take the medication - in 24 hour format: ")
alarm_MM = input("Enter the minute you want to take the medication: ")
print(medication_name)
print("Enter the directions for medication : ")
med_direction = input()
print(med_direction)
#need input format for the time so end user will know how to enter it!
print("Enter time of day medication is to be taken : ")
time_take=input('Please input the time for the alarm in format HHMM: \n ')
print(time_take)
print("Hello", first_name,"!", "Remember to take", med_direction, "at", time_take)
def alarm(set_alarm_timer):
while True:
time.sleep(1)
current_time = datetime.datetime.now()
now = current_time.strftime("%H:%M:%S")
date = current_time.strftime("%d/%m/%Y")
print("The Set Date is:",date)
print(now)
if now == set_alarm_timer:
print("Time to take med")
winsound.PlaySound("sound.wav",winsound.SND_ASYNC)
break
def actual_time():
set_alarm_timer = f"{hour.get()}:{min.get()}:{sec.get()}"
alarm(set_alarm_timer)
# Create GUI in tinker
clock = Tk()
clock.title("RemindRx")
clock.geometry("400x200")
time_format=Label(clock, text= "Enter time in 24 hour format!", fg="red",bg="black",font="Arial").place(x=60,y=120)
addTime = Label(clock,text = "Hour Min Sec",font=60).place(x = 110)
setYourAlarm = Label(clock,text = "Remember to take your med",fg="blue",relief = "solid",font=("Helevetica",7,"bold")).place(x=0, y=29)
# The Variables we require to set the alarm(initialization):
hour = StringVar()
min = StringVar()
sec = StringVar()
#Time required to set the alarm clock:
hourTime= Entry(clock,textvariable = hour,bg = "pink",width = 15).place(x=110,y=30)
minTime= Entry(clock,textvariable = min,bg = "pink",width = 15).place(x=150,y=30)
secTime = Entry(clock,textvariable = sec,bg = "pink",width = 15).place(x=200,y=30)
#To take the time input by user:
submit = Button(clock,text = "Set Alarm",fg="red",width = 10,command = actual_time).place(x =110,y=70)
clock.mainloop()
#Execution of the window.
| 4
| 4
|
main.py
|
Katreque/trab-means
| 0
|
12781803
|
from sklearn.cluster import KMeans
from random import randint
import numpy as np
import csv
import matplotlib.pyplot as plt
matriz = []
arrayCriacaoCentroides = []
with open('dataset_iris.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
largPetala = (row['larguraPetala'])
compSepala = (row['comprimentoSepala'])
matriz.append([float(largPetala), float(compSepala)])
matriz = np.array(matriz)
def criacaoCentroideRandom():
array = [[randint(0, 9), randint(0, 9)], [randint(0, 9), randint(0, 9)], [randint(0, 9), randint(0, 9)]]
array = np.array(array)
global arrayCriacaoCentroides
arrayCriacaoCentroides = array
return array
def avaliacaoAcertos(arrayAnalise):
g1 = 0
g2 = 0
g3 = 0
acertos = 0
for i in range(0, len(arrayAnalise)):
if (arrayAnalise[i] == 0):
g1+=1
if (arrayAnalise[i] == 1):
g2+=1
if (arrayAnalise[i] == 2):
g3+=1
if (i == 49) or (i == 99) or (i == 149):
print("Agrupamento:", g1, g2, g3)
acertos += max(g1, g2, g3)
g1 = 0
g2 = 0
g3 = 0
return round(acertos/150*100, 2)
for i in range(1, 4):
if (i != 3):
#My own random centroid generation;
trabmeans = KMeans(n_clusters=3, init=criacaoCentroideRandom(), n_init=1).fit(matriz)
else:
#Optimized centroid generation from the library itself;
trabmeans = KMeans(n_clusters=3).fit(matriz)
plt.figure(i)
plt.scatter(matriz[:, 0], matriz[:, 1], s = 100, c = trabmeans.labels_)
if (i != 3):
plt.scatter(arrayCriacaoCentroides[:, 0], arrayCriacaoCentroides[:, 1], s = 100, c = 'green', label = 'Centroides Iniciais')
plt.scatter(trabmeans.cluster_centers_[:, 0], trabmeans.cluster_centers_[:, 1], s = 100, c = 'red', label = 'Centroides Finais')
plt.xlabel('Largura da Petala')
plt.ylabel('Comprimento da Sepala')
plt.legend()
if (i != 3):
print("Centroide inicial - Grupo " + str(i) + ":", arrayCriacaoCentroides[0], arrayCriacaoCentroides[1], arrayCriacaoCentroides[2])
else:
print("Coordenadas do Centroide geradas de maneira otimizada pelo algoritmo.")
print("Porcentagem acerto - Grupo " + str(i) + ":", avaliacaoAcertos(trabmeans.labels_))
print("\n")
plt.show()
| 3.046875
| 3
|
setup.py
|
Jim00000/waterwave-simulator
| 1
|
12781804
|
<reponame>Jim00000/waterwave-simulator
"""
Copyright (C) 2017 the team of Jim00000, ActKz and pityYo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
setup(
ext_modules = cythonize(Extension(
"wave_equation", # the extension name
sources=["wave_equation.pyx", "wave_equation_api.cpp"], # the Cython source and additional C source files
language="c++", # the language
libraries=["m"], # linking libraries
extra_compile_args=["-std=c++11", "-O2", "-Wall", "-Wextra",
"-fopenmp"], # compiler flags
extra_link_args=["-fopenmp", "wave_equation_cuda_api.o",
"-lcuda", "-lcudart", "-lboost_system", "-lboost_thread"], # link arguments
include_dirs=[numpy.get_include()], # included directories
)))
| 1.46875
| 1
|
server/processes/migrations/0082_auto_20191219_0855.py
|
CloudReactor/task_manager
| 0
|
12781805
|
<filename>server/processes/migrations/0082_auto_20191219_0855.py
# Generated by Django 2.2.2 on 2019-12-19 08:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('processes', '0081_auto_20191217_0709'),
]
operations = [
migrations.AddField(
model_name='processtype',
name='aws_ecs_default_assign_public_ip',
field=models.BooleanField(default=False, null=True),
),
migrations.AddField(
model_name='runenvironment',
name='aws_ecs_default_assign_public_ip',
field=models.BooleanField(default=False),
),
]
| 1.445313
| 1
|
stock_info.py
|
play0137/Stock_web_crawler
| 4
|
12781806
|
""" 個股月營收資訊 """
import re
import sys
import pdb
import pandas as pd
from stock_web_crawler import stock_crawler, delete_header, excel_formatting
import global_vars
def main():
global_vars.initialize_proxy()
""" valid input formats """
# inputs = "台積電 聯電"
# inputs = "2330 2314"
# inputs = "台積電, 2314"
inputs = input("輸入要搜尋的公司名稱或股號:\n(press q to exit)\n")
if inputs == "q":
sys.exit(0)
stocks_ID = list()
stock_dict = stock_ID_name_mapping()
delims = r"[\s\t,\.;]+"
inputs = re.split(delims, inputs)
for stock in inputs:
if stock not in stock_dict:
print("Invalid input!", stock, "is not in the stock ticker symbol table")
sys.exit(-1)
if stock.isdigit():
stocks_ID.append(stock)
else: # map company name to stock ID
stocks_ID.append(stock_dict[stock])
print("stocks ID:", stocks_ID)
stock_salemon_file = global_vars.DIR_PATH + "個股月營收.xlsx"
with pd.ExcelWriter(stock_salemon_file, mode='w', engine="xlsxwriter") as writer:
# headers = ["月別", "開盤", "收盤", "最高", "最低", "漲跌(元)", "漲跌(%)", "單月營收(億)", "單月月增(%)", "單月年增(%)", "累計營收(億)", "累計年增(%)", "合併單月營收(億)", "合併單月月增(%)", "合併單月年增(%)", "合併累計營收(億)", "合併累計年增(%)"]
table_ID = "#divDetail"
for stock_ID in stocks_ID:
url = f"https://goodinfo.tw/StockInfo/ShowSaleMonChart.asp?STOCK_ID={stock_ID}"
df = stock_crawler(url, None, table_ID)
# reassign headers
headers = list()
for i in range(len(df.columns)):
headers.append('_'.join(pd.Series(df.columns[i]).drop_duplicates().tolist()))
df.columns = headers
delete_header(df, headers)
sheet_name = f"{stock_dict[stock_ID]}"
df.to_excel(writer, index=False, encoding="UTF-8", sheet_name=sheet_name, freeze_panes=(1,2)) # write to different sheets
excel_formatting(writer, df, sheet_name)
# 1101,台泥,台灣水泥股份有限公司
def stock_ID_name_mapping():
stock_dict = dict()
with open(global_vars.DIR_PATH + "公司股市代號對照表.csv", "r", encoding="UTF-8") as file_r:
file_r.readline()
for line in file_r:
line = line.split(",")
stock_ID = line[0]
stock_name = line[1]
if stock_ID not in stock_dict:
stock_dict[stock_ID] = stock_name
if stock_name not in stock_dict:
stock_dict[stock_name] = stock_ID
return stock_dict
if __name__ == "__main__":
main()
| 2.796875
| 3
|
examples/chrome-dino/main.py
|
robianmcd/keras-mri
| 12
|
12781807
|
<filename>examples/chrome-dino/main.py
from keras.models import Model
from keras.layers import Input, Conv2D, Flatten, Dense, concatenate
import numpy as np
import imageio
import os
import os.path as path
import kmri
base_path = path.dirname(path.realpath(__file__))
weights_path = path.join(base_path, 'model-weights.h5')
labeled_path = path.join(base_path, 'labeled-input.csv')
img_path = path.join(base_path, 'img-input')
# *****************************************************************************
# ******************************** Build model ********************************
# *****************************************************************************
labeled_input_layer = Input(shape=(3,), name='labeledInput')
img_input_layer = Input(shape=(38, 150, 1), name='imgInput')
x = Conv2D(16, kernel_size=8, strides=4, activation='relu')(img_input_layer)
x = Conv2D(32, kernel_size=4, strides=2, activation='relu')(x)
x = Flatten()(x)
x = concatenate([x, labeled_input_layer])
x = Dense(256, activation='relu')(x)
output = Dense(5)(x)
model = Model(inputs=[img_input_layer, labeled_input_layer], outputs=[output])
model.load_weights(weights_path)
# *****************************************************************************
# ******************************** Load inputs ********************************
# *****************************************************************************
labeled_input = np.loadtxt(labeled_path, delimiter=',')
read_img = lambda file_name: imageio.imread(os.path.join(img_path, file_name)).reshape(38,150,1) / 255
img_input = np.array([read_img(file_name) for file_name in os.listdir(img_path)])
# *****************************************************************************
# ****************************** Visualize Model*******************************
# *****************************************************************************
kmri.visualize_model(model, [img_input, labeled_input])
| 2.625
| 3
|
first_setup.py
|
EzraCerpac/SatLink
| 8
|
12781808
|
<gh_stars>1-10
import sys
import subprocess
# package list must follow the installation guide in README.md
package_list = ('itur==0.2.1', 'tqdm==4.56.0', 'pandas==1.2.1', 'pathos==0.2.7', 'astropy==4.2', 'pyqt5==5.15.2',
'matplotlib==3.4.1')
for package in package_list:
# implement pip as a subprocess:
subprocess.check_call([sys.executable, '-m', 'pip', 'install',package])
# process output with an API in the subprocess module:
reqs = subprocess.check_output([sys.executable, '-m', 'pip','freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
print(installed_packages)
print('Process Completed!!!')
| 2.28125
| 2
|
tethys_quotas/models/tethys_app_quota.py
|
msouff/tethys
| 79
|
12781809
|
"""
********************************************************************************
* Name: tethys_app_quota.py
* Author: tbayer, mlebarron
* Created On: April 2, 2019
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import logging
from django.db import models
from tethys_quotas.models.entity_quota import EntityQuota
from tethys_apps.models import TethysApp
log = logging.getLogger('tethys.' + __name__)
class TethysAppQuota(EntityQuota):
"""
entity_id (IntegerField): id of the entity.
"""
class Meta:
verbose_name = 'Tethys App Quota'
entity = models.ForeignKey(TethysApp, on_delete=models.CASCADE)
| 1.695313
| 2
|
python/RawNet2/dataloader.py
|
ishine/RawNet
| 199
|
12781810
|
<reponame>ishine/RawNet<gh_stars>100-1000
import numpy as np
import soundfile as sf
from torch.utils import data
class Dataset_VoxCeleb2(data.Dataset):
def __init__(self, list_IDs, base_dir, nb_samp = 0, labels = {}, cut = True, return_label = True, norm_scale = True):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_samp : integer, the number of timesteps for each mini-batch
cut : (boolean) adjust utterance duration for mini-batch construction
return_label : (boolean)
norm_scale : (boolean) normalize scale alike SincNet github repo
'''
self.list_IDs = list_IDs
self.nb_samp = nb_samp
self.base_dir = base_dir
self.labels = labels
self.cut = cut
self.return_label = return_label
self.norm_scale = norm_scale
if self.cut and self.nb_samp == 0: raise ValueError('when adjusting utterance length, "nb_samp" should be input')
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
try:
X, _ = sf.read(self.base_dir+ID)
X = X.astype(np.float64)
except:
raise ValueError('%s'%ID)
if self.norm_scale:
X = self._normalize_scale(X).astype(np.float32)
X = X.reshape(1,-1) #because of LayerNorm for the input
if self.cut:
nb_time = X.shape[1]
if nb_time > self.nb_samp:
start_idx = np.random.randint(low = 0, high = nb_time - self.nb_samp)
X = X[:, start_idx : start_idx + self.nb_samp][0]
elif nb_time < self.nb_samp:
nb_dup = int(self.nb_samp / nb_time) + 1
X = np.tile(X, (1, nb_dup))[:, :self.nb_samp][0]
else:
X = X[0]
if not self.return_label:
return X
y = self.labels[ID.split('/')[0]]
return X, y
def _normalize_scale(self, x):
'''
Normalize sample scale alike SincNet.
'''
return x/np.max(np.abs(x))
class TA_Dataset_VoxCeleb2(data.Dataset):
def __init__(self, list_IDs, base_dir, nb_samp = 0, window_size = 0, labels = {}, cut = True, return_label = True, norm_scale = True):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_samp : integer, the number of timesteps for each mini-batch
cut : (boolean) adjust utterance duration for mini-batch construction
return_label : (boolean)
norm_scale : (boolean) normalize scale alike SincNet github repo
'''
self.list_IDs = list_IDs
self.window_size = window_size
self.nb_samp = nb_samp
self.base_dir = base_dir
self.labels = labels
self.cut = cut
self.return_label = return_label
self.norm_scale = norm_scale
if self.cut and self.nb_samp == 0: raise ValueError('when adjusting utterance length, "nb_samp" should be input')
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
try:
X, _ = sf.read(self.base_dir+ID)
X = X.astype(np.float64)
except:
raise ValueError('%s'%ID)
if self.norm_scale:
X = self._normalize_scale(X).astype(np.float32)
X = X.reshape(1,-1)
list_X = []
nb_time = X.shape[1]
if nb_time < self.nb_samp:
nb_dup = int(self.nb_samp / nb_time) + 1
list_X.append(np.tile(X, (1, nb_dup))[:, :self.nb_samp][0])
elif nb_time > self.nb_samp:
step = self.nb_samp - self.window_size
iteration = int( (nb_time - self.window_size) / step ) + 1
for i in range(iteration):
if i == 0:
list_X.append(X[:, :self.nb_samp][0])
elif i < iteration - 1:
list_X.append(X[:, i*step : i*step + self.nb_samp][0])
else:
list_X.append(X[:, -self.nb_samp:][0])
else :
list_X.append(X[0])
if not self.return_label:
return list_X
y = self.labels[ID.split('/')[0]]
return list_X, y
def _normalize_scale(self, x):
'''
Normalize sample scale alike SincNet.
'''
return x/np.max(np.abs(x))
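# --- Hedged usage sketch (added example, not part of the original module) ---
# Minimal illustration of Dataset_VoxCeleb2. The directory layout, the
# utterance key 'spk1/utt1.wav', the label mapping and nb_samp=59049 are
# made-up demo values, not real VoxCeleb2 data.
if __name__ == '__main__':
    import os
    import tempfile
    base_dir = tempfile.mkdtemp() + '/'
    os.makedirs(base_dir + 'spk1', exist_ok=True)
    # one second of quiet random noise at 16 kHz as a stand-in utterance
    sf.write(base_dir + 'spk1/utt1.wav', 0.1 * np.random.randn(16000), 16000)
    dataset = Dataset_VoxCeleb2(
        list_IDs=['spk1/utt1.wav'],
        base_dir=base_dir,
        nb_samp=59049,
        labels={'spk1': 0},
    )
    x, y = dataset[0]
    print(x.shape, y)  # expected: (59049,) 0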
| 2.4375
| 2
|
test/test_group.py
|
malramsay64/hoomd-simple-force
| 0
|
12781811
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import hoomd
import hoomd.simple_force
import hoomd.md
import numpy as np
context = hoomd.context.initialize("--notice-level=10 --mode=cpu")
uc = hoomd.lattice.unitcell(
N=1,
a1=[3, 0, 0],
a2=[0, 3, 0],
a3=[0, 0, 3],
dimensions=2,
position=[[0, 0, 0]],
type_name=["R"],
mass=[1.0],
moment_inertia=[[1, 1, 1]],
orientation=[[0.9, 0, 0, 0.2]],
)
system = hoomd.init.create_lattice(unitcell=uc, n=10)
system.particles.types.add("A")
pairs = hoomd.md.pair.lj(2.5, nlist=hoomd.md.nlist.cell())
pairs.pair_coeff.set("A", "A", epsilon=1.0, sigma=1.0)
pairs.pair_coeff.set("R", "A", epsilon=1.0, sigma=1.0)
pairs.pair_coeff.set("R", "R", epsilon=1.0, sigma=1.0)
rigid = hoomd.md.constrain.rigid()
rigid.set_param("R", types=["A"], positions=[(-1, 0, 0)])
rigid.create_bodies()
snap_init = system.take_snapshot()
hoomd.md.update.enforce2d()
hoomd.md.integrate.mode_standard(dt=0.005)
hoomd.md.integrate.nvt(hoomd.group.rigid_center(), kT=1, tau=1)
nmols = min(max(snap_init.particles.body) + 1, snap_init.particles.N)
print(nmols)
rc = hoomd.group.rigid_center()
force = hoomd.simple_force.SimpleForce(rc)
print(rc.cpp_group.getNumMembersGlobal())
hoomd.run(1000)
| 1.898438
| 2
|
website/discovery/views.py
|
gaybro8777/osf.io
| 628
|
12781812
|
<filename>website/discovery/views.py
from framework.flask import redirect
def redirect_activity_to_search(**kwargs):
return redirect('/search/')
| 1.695313
| 2
|
utils/orbits.py
|
tom-boyes-park/n-body-modelling
| 1
|
12781813
|
import logging
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from matplotlib.animation import FuncAnimation
from scipy import integrate
from utils.objects import Body
logger = logging.getLogger(__name__)
# Arbitrary value for G (gravitational constant)
G = 1
def create_initial_conditions(bodies: List[Body]) -> List[int]:
"""
:param bodies: List of Body classes
:return: list of starting x, vx, y, and vy values for each Body in bodies
"""
initial = []
# Loop through bodies and create initial conditions to be passed into the integrator
logger.info(f"Creating initial conditions for the {len(bodies)} bodies")
for body in bodies:
values = [body.x, body.vx, body.y, body.vy]
initial += values
return initial
def calc_2d_distance(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Returns:
Distance between the 2-dimensional co-ordinates supplied.
"""
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def calc_dvel(c1: float, c2: float, r: float, m2: float) -> float:
"""
Calculates the change in velocity on a target body due to the gravitational force
of another body (source body) in a single dimension.
Args:
c1: value for target body position in x or y dimension
c2: value for source body position in x or y dimension
r: distance between 2 bodies
m2: mass of the source body
Returns:
change in target body velocity (float)
"""
return (-G * m2 * (c1 - c2)) * r ** (-3)
def n_body_func(t: int, pos_vel: np.ndarray, bodies: List[Body]) -> np.ndarray:
"""
Function to be passed into the ode integrator. Calculates and stores the changes
in spatial and velocity values.
Args:
t: time step
pos_vel: array containing x, y, vx and vy values for each body
[x1, vx1, y1, vy1, x2, ...]
bodies: list of Body objects
Returns:
array containing change in spatial and velocity values for each body
"""
# Set up array to store updated spatial and velocity values
dpos_dvel = np.zeros(4 * len(bodies))
# Change in x, y is velocity in x, y
dpos_dvel[0 : len(dpos_dvel) : 4] = pos_vel[1 : len(pos_vel) : 4]
dpos_dvel[2 : len(dpos_dvel) : 4] = pos_vel[3 : len(pos_vel) : 4]
# Loop through bodies, calculating change in vx, vy due to all other bodies
for i, body in enumerate(bodies):
# Extract x, y values of body
x1 = pos_vel[i * 4]
y1 = pos_vel[i * 4 + 2]
vx1 = 0
vy1 = 0
for j, other_body in enumerate(bodies):
# Check bodies aren't the same
if i != j:
# Extract x, y & mass of other body
x2 = pos_vel[j * 4]
y2 = pos_vel[j * 4 + 2]
# Distance to other body
r = calc_2d_distance(x1=x1, y1=y1, x2=x2, y2=y2,)
# Change in x, y
vx1 += calc_dvel(c1=x1, c2=x2, r=r, m2=other_body.mass)
vy1 += calc_dvel(c1=y1, c2=y2, r=r, m2=other_body.mass)
# Add resultant change in vel to array
dpos_dvel[i * 4 + 1] = vx1
dpos_dvel[i * 4 + 3] = vy1
return dpos_dvel
def calc_orbits(bodies: List[Body], t0: int, t1: int, dt: int) -> np.ndarray:
"""
Calculate the change in x, y, vx and vy at each time step between t0 and t1 due to
the gravitational forces of all other bodies in the system. The integrator used is
dopri835.
Args:
bodies: List of Body classes that describe the starting conditions and
masses of the bodies
t0: start time
t1: end time
dt: number of time steps between t0 and t1
Returns:
Array containing spatial coordinates and velocities of bodies at each
time step
"""
logger.info(
f"""Orbit settings: n_bodies: {len(bodies)}, t0: {t0}, t1: {t1}, dt: {dt}"""
)
# Initial conditions (x, vx, y, vy)
initial = create_initial_conditions(bodies=bodies)
# Time period over which to calculate orbit paths
t = np.linspace(t0, t1, dt)
# Array for solution
y = np.zeros((len(t), len(bodies) * 4))
y[0, :] = initial
# Setup integrator
integrator = (
integrate.ode(n_body_func)
.set_integrator("dop853", rtol=1e-6, atol=1e-10)
.set_initial_value(initial, t0)
.set_f_params(bodies)
)
# Iterate over time intervals and integrate, storing updated spatial coordinates
# and velocities of bodies
progress_text = st.sidebar.text(f"Iteration: 0/{len(t)}")
progress_bar = st.sidebar.progress(0)
logger.info("Calculating orbits")
for i in range(1, len(t)):
progress_text.text(f"Iteration: {i}/{len(t)-1}")
progress_bar.progress((i + 1) / len(t))
y[i, :] = integrator.integrate(t[i])
progress_text.text("Complete!")
return y
def animate_orbits(orbit_paths: np.ndarray) -> None:
"""
Animates the orbits
Args:
orbit_paths: array containing spatial and velocity values over time
"""
logger.info("Animating orbits")
fig = plt.figure(figsize=(6, 6))
# set size of axis based on max/min spatial values
x_min = orbit_paths[:, 0::4].min() * 1.1
x_max = orbit_paths[:, 0::4].max() * 1.1
y_min = orbit_paths[:, 2::4].min() * 1.1
y_max = orbit_paths[:, 2::4].max() * 1.1
ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max))
n_bodies = int(orbit_paths.shape[1] / 4)
colours = ["red", "blue", "orange", "green", "black"]
lines = []
for index in range(n_bodies * 2):
if index < n_bodies:
lobj = ax.plot([], [], "--", lw=1, color=colours[index % len(colours)])[0]
else:
lobj = ax.plot(
[], [], "o", color=colours[(index - n_bodies) % len(colours)]
)[0]
lines.append(lobj)
def init():
for line in lines:
line.set_data([], [])
return lines
def animate(i):
for j, line in enumerate(lines):
if j < n_bodies:
orbit_tail_length = 30
if i > orbit_tail_length:
x = orbit_paths[i - orbit_tail_length : i, j * 4]
y = orbit_paths[i - orbit_tail_length : i, j * 4 + 2]
else:
x = orbit_paths[:i, j * 4]
y = orbit_paths[:i, j * 4 + 2]
else:
x = orbit_paths[i, (j - n_bodies) * 4]
y = orbit_paths[i, (j - n_bodies) * 4 + 2]
line.set_data(x, y)
return lines
# TODO: ensure a consistent maximum number of frames so animations aren't too slow
# or too fast
anim = FuncAnimation(
fig, animate, init_func=init, frames=orbit_paths.shape[0], interval=1, blit=True
)
plt.show()
def plot_orbits(orbit_paths: np.ndarray, title: str) -> None:
"""
Plots the orbits
Args:
orbit_paths: array containing spatial and velocity values over time
title: title to use for figure
"""
logger.info("Plotting orbits")
fig = plt.figure(figsize=(10, 10))
plt.title(title)
for i in range(int(orbit_paths.shape[1] / 4)):
plt.plot(orbit_paths[:, i * 4], orbit_paths[:, i * 4 + 2])
st.pyplot(fig)
plt.show()
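# --- Hedged worked example (added, not part of the original module) ---------
# A small numeric check of calc_2d_distance and calc_dvel for two bodies,
# using the module's arbitrary G = 1. The positions and mass are made up.
if __name__ == '__main__':
    # target body at (0, 0), source body of mass 4 at (3, 4)
    r = calc_2d_distance(x1=0.0, y1=0.0, x2=3.0, y2=4.0)   # 5.0
    dvx = calc_dvel(c1=0.0, c2=3.0, r=r, m2=4.0)           # (-1*4*(0-3))/5**3 = 0.096
    dvy = calc_dvel(c1=0.0, c2=4.0, r=r, m2=4.0)           # (-1*4*(0-4))/5**3 = 0.128
    print(r, dvx, dvy)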
| 3.28125
| 3
|
sjtwo-c/site_scons/color.py
|
seanlinc/Playmate
| 2
|
12781814
|
<gh_stars>1-10
import sys
import colorama
def _is_command_prompt():
return sys.stdin.isatty() is True
if _is_command_prompt():
colorama.init()
class ColorString(object):
COLORS = {
"red": colorama.Fore.RED,
"green": colorama.Fore.GREEN,
"yellow": colorama.Fore.YELLOW,
"blue": colorama.Fore.BLUE,
"neutral": colorama.Style.RESET_ALL,
}
def __init__(self, string):
self._string = string
def __getattr__(self, name):
if name in self.COLORS:
ret = "{}{}{}".format(self.COLORS[name], self._string, self.COLORS["neutral"])
else:
raise AttributeError(name)
return ret
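# --- Hedged usage sketch (added example, not part of the original module) ---
# Shows how ColorString is meant to be used; colors only render when the
# process is attached to a terminal (see the colorama.init() call above).
if __name__ == '__main__':
    print(ColorString('build passed').green)
    print(ColorString('build failed').red)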
| 2.984375
| 3
|
src/yw2nwlib/nwd_file.py
|
peter88213/yw2nw
| 1
|
12781815
|
"""Provide a generic class for novelWriter item file representation.
Copyright (c) 2022 <NAME>
For further information see https://github.com/peter88213/yw2nw
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
from pywriter.pywriter_globals import ERROR
class NwdFile:
"""abstract novelWriter item file representation.
Public methods:
read() -- read a content file.
write() -- write a content file.
"""
EXTENSION = '.nwd'
def __init__(self, prj, nwItem):
"""Define instance variables.
Positional arguments:
prj -- NwxFile instance: the novelWriter project representation.
nwItem -- NwItem instance associated with the .nwd file.
"""
self._prj = prj
self._nwItem = nwItem
self._filePath = os.path.dirname(self._prj.filePath) + self._prj.CONTENT_DIR + nwItem.nwHandle + self.EXTENSION
self._lines = []
def read(self):
"""Read a content file.
Return a message beginning with the ERROR constant in case of error.
"""
try:
with open(self._filePath, 'r', encoding='utf-8') as f:
self._lines = f.read().split('\n')
return 'Item data read in.'
except:
return f'{ERROR}Can not read "{os.path.normpath(self._filePath)}".'
def write(self):
"""Write a content file.
Return a message beginning with the ERROR constant in case of error.
"""
lines = [f'%%~name: {self._nwItem.nwName}',
f'%%~path: {self._nwItem.nwParent}/{self._nwItem.nwHandle}',
f'%%~kind: {self._nwItem.nwClass}/{self._nwItem.nwLayout}',
]
lines.extend(self._lines)
text = '\n'.join(lines)
try:
with open(self._filePath, 'w', encoding='utf-8') as f:
f.write(text)
return 'nwd file saved.'
except:
return f'{ERROR}Can not write "{os.path.normpath(self._filePath)}".'
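# --- Hedged usage sketch (added example, not part of the original module) ---
# NwdFile normally receives a NwxFile project and a NwItem from yw2nw. The
# stand-in classes below only mimic the attributes this module touches;
# CONTENT_DIR, the handles and the names are made-up demo values.
if __name__ == '__main__':
    import tempfile

    class _FakeProject:
        CONTENT_DIR = '/content/'

    class _FakeItem:
        nwHandle = '000000000000a'
        nwName = 'Scene 1'
        nwParent = '000000000000b'
        nwClass = 'NOVEL'
        nwLayout = 'DOCUMENT'

    prj = _FakeProject()
    tmp = tempfile.mkdtemp()
    prj.filePath = os.path.join(tmp, 'project.nwx')
    os.makedirs(tmp + _FakeProject.CONTENT_DIR, exist_ok=True)
    nwd = NwdFile(prj, _FakeItem())
    print(nwd.write())  # expected: 'nwd file saved.'
    print(nwd.read())   # expected: 'Item data read in.'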
| 2.8125
| 3
|
2-hard/computer-terminal/main.py
|
mpillar/codeeval
| 21
|
12781816
|
<reponame>mpillar/codeeval<filename>2-hard/computer-terminal/main.py
import sys
class Screen:
_SCREEN_ROWS = 10
_SCREEN_COLS = 10
def __init__(self, terminal_input):
self.terminal_input = terminal_input
# Rendering variables.
self._in_overwrite_mode = True
self._current_row = 0
self._current_col = 0
self._image = Screen._generate_empty_image()
# Render the screen image.
self._render()
@staticmethod
def _generate_empty_image():
result = []
for i in range(0, Screen._SCREEN_ROWS):
result.append([' '] * Screen._SCREEN_COLS)
return result
def _control_c(self):
self._image = Screen._generate_empty_image()
def _control_h(self):
self._current_row = 0
self._current_col = 0
def _control_b(self):
self._current_col = 0
def _control_d(self):
self._current_row = min(self._current_row+1, Screen._SCREEN_ROWS-1)
def _control_u(self):
self._current_row = max(self._current_row-1, 0)
def _control_l(self):
self._current_col = max(self._current_col-1, 0)
def _control_r(self):
self._current_col = min(self._current_col+1, Screen._SCREEN_COLS-1)
def _control_e(self):
for i in range(self._current_col, Screen._SCREEN_COLS):
self._image[self._current_row][i] = ' '
def _control_i(self):
self._in_overwrite_mode = False
def _control_o(self):
self._in_overwrite_mode = True
def _control_circumflex(self):
self._write('^')
def _move_to(self, row, col):
self._current_row = row
self._current_col = col
def _write(self, char):
# If we are in insert mode, shift the row over first.
if not self._in_overwrite_mode:
for i in range(Screen._SCREEN_COLS-1, self._current_col, -1):
self._image[self._current_row][i] = self._image[self._current_row][i-1]
# Write the char in place (for both modes).
self._image[self._current_row][self._current_col] = char
# Move the cursor right one col (for both modes).
self._control_r()
def _render(self):
i = 0
while i < len(self.terminal_input):
c = self.terminal_input[i]
if c == '^':
if self.terminal_input[i+1].isdigit():
control = self.terminal_input[i+1:i+3]
i += 2
else:
control = self.terminal_input[i+1]
i += 1
# Map control to actionable function.
functions = {
'c': self._control_c,
'h': self._control_h,
'b': self._control_b,
'd': self._control_d,
'u': self._control_u,
'l': self._control_l,
'r': self._control_r,
'e': self._control_e,
'i': self._control_i,
'o': self._control_o,
'^': self._control_circumflex
}
if control in functions:
function = functions[control]
function()
else:
# In this case we have reached a ^DD.
self._move_to(int(control[0]), int(control[1]))
else:
if c != '\n':
self._write(c)
else:
# Do nothing for newlines.
pass
# Loop increment. Don't use for since we modify the looping variable.
i += 1
def __str__(self):
result = []
for i in self._image:
result.append(''.join(i).rstrip())
return '\n'.join(result)
def main():
with open(sys.argv[1], 'r') as input_file:
screen = Screen(input_file.read())
print(screen)
if __name__ == '__main__':
main()
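# --- Hypothetical inline example (the program above normally reads its input from
# the file given on the command line) ---
# '^b' returns the cursor to column 0 and '^i' switches to insert mode, so the
# first rendered row would read "XHello":
# screen = Screen('Hello^b^iX')
# print(screen)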
| 3.109375
| 3
|
utils/utils.py
|
cendaifeng/keras-face-recognition
| 1
|
12781817
|
import sys
from operator import itemgetter
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
# -----------------------------#
#   Compute the scales used to
#   resize the original input image
# -----------------------------#
def calculateScales(img):
copy_img = img.copy()
pr_scale = 1.0
h, w, _ = copy_img.shape
if min(w, h) > 500:
pr_scale = 500.0 / min(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
elif max(w, h) < 500:
pr_scale = 500.0 / max(h, w)
w = int(w * pr_scale)
h = int(h * pr_scale)
scales = []
factor = 0.709
factor_count = 0
minl = min(h, w)
while minl >= 12:
scales.append(pr_scale * pow(factor, factor_count))
minl *= factor
factor_count += 1
return scales
# -------------------------------------#
#   Post-process the PNet outputs
# -------------------------------------#
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
cls_prob = np.swapaxes(cls_prob, 0, 1)
roi = np.swapaxes(roi, 0, 2)
stride = 0
    # the stride is approximately equal to 2
if out_side != 1:
stride = float(2 * out_side - 1) / (out_side - 1)
(x, y) = np.where(cls_prob >= threshold)
boundingbox = np.array([x, y]).T
    # map the boxes back to coordinates in the original image
bb1 = np.fix((stride * (boundingbox) + 0) * scale)
bb2 = np.fix((stride * (boundingbox) + 11) * scale)
# plt.scatter(bb1[:,0],bb1[:,1],linewidths=1)
# plt.scatter(bb2[:,0],bb2[:,1],linewidths=1,c='r')
# plt.show()
boundingbox = np.concatenate((bb1, bb2), axis=1)
dx1 = roi[0][x, y]
dx2 = roi[1][x, y]
dx3 = roi[2][x, y]
dx4 = roi[3][x, y]
score = np.array([cls_prob[x, y]]).T
offset = np.array([dx1, dx2, dx3, dx4]).T
boundingbox = boundingbox + offset * 12.0 * scale
rectangles = np.concatenate((boundingbox, score), axis=1)
rectangles = rect2square(rectangles)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
sc = rectangles[i][4]
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, sc])
return NMS(pick, 0.3)
# -----------------------------#
#   Convert rectangles to squares
# -----------------------------#
def rect2square(rectangles):
w = rectangles[:, 2] - rectangles[:, 0]
h = rectangles[:, 3] - rectangles[:, 1]
l = np.maximum(w, h).T
rectangles[:, 0] = rectangles[:, 0] + w * 0.5 - l * 0.5
rectangles[:, 1] = rectangles[:, 1] + h * 0.5 - l * 0.5
rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([l], 2, axis=0).T
return rectangles
# -------------------------------------#
#   Non-maximum suppression (NMS)
# -------------------------------------#
def NMS(rectangles, threshold):
if len(rectangles) == 0:
return rectangles
boxes = np.array(rectangles)
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
I = np.array(s.argsort())
pick = []
while len(I) > 0:
        xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])  # I[-1] has the highest prob score, I[0:-1] -> the others
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where(o <= threshold)[0]]
result_rectangle = boxes[pick].tolist()
return result_rectangle
# -------------------------------------#
#   Post-process the RNet outputs
# -------------------------------------#
def filter_face_24net(cls_prob, roi, rectangles, width, height, threshold):
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x1 = rectangles[pick, 0]
y1 = rectangles[pick, 1]
x2 = rectangles[pick, 2]
y2 = rectangles[pick, 3]
sc = np.array([prob[pick]]).T
dx1 = roi[pick, 0]
dx2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
w = x2 - x1
h = y2 - y1
x1 = np.array([(x1 + dx1 * w)[0]]).T
y1 = np.array([(y1 + dx2 * h)[0]]).T
x2 = np.array([(x2 + dx3 * w)[0]]).T
y2 = np.array([(y2 + dx4 * h)[0]]).T
rectangles = np.concatenate((x1, y1, x2, y2, sc), axis=1)
rectangles = rect2square(rectangles)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
sc = rectangles[i][4]
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, sc])
return NMS(pick, 0.3)
# -------------------------------------#
#   Post-process the ONet outputs
# -------------------------------------#
def filter_face_48net(cls_prob, roi, pts, rectangles, width, height, threshold):
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x1 = rectangles[pick, 0]
y1 = rectangles[pick, 1]
x2 = rectangles[pick, 2]
y2 = rectangles[pick, 3]
sc = np.array([prob[pick]]).T
dx1 = roi[pick, 0]
dx2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
w = x2 - x1
h = y2 - y1
pts0 = np.array([(w * pts[pick, 0] + x1)[0]]).T
pts1 = np.array([(h * pts[pick, 5] + y1)[0]]).T
pts2 = np.array([(w * pts[pick, 1] + x1)[0]]).T
pts3 = np.array([(h * pts[pick, 6] + y1)[0]]).T
pts4 = np.array([(w * pts[pick, 2] + x1)[0]]).T
pts5 = np.array([(h * pts[pick, 7] + y1)[0]]).T
pts6 = np.array([(w * pts[pick, 3] + x1)[0]]).T
pts7 = np.array([(h * pts[pick, 8] + y1)[0]]).T
pts8 = np.array([(w * pts[pick, 4] + x1)[0]]).T
pts9 = np.array([(h * pts[pick, 9] + y1)[0]]).T
x1 = np.array([(x1 + dx1 * w)[0]]).T
y1 = np.array([(y1 + dx2 * h)[0]]).T
x2 = np.array([(x2 + dx3 * w)[0]]).T
y2 = np.array([(y2 + dx4 * h)[0]]).T
rectangles = np.concatenate((x1, y1, x2, y2, sc, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9),
axis=1)
pick = []
for i in range(len(rectangles)):
x1 = int(max(0, rectangles[i][0]))
y1 = int(max(0, rectangles[i][1]))
x2 = int(min(width, rectangles[i][2]))
y2 = int(min(height, rectangles[i][3]))
if x2 > x1 and y2 > y1:
pick.append([x1, y1, x2, y2, rectangles[i][4],
rectangles[i][5], rectangles[i][6], rectangles[i][7], rectangles[i][8], rectangles[i][9],
rectangles[i][10], rectangles[i][11], rectangles[i][12], rectangles[i][13], rectangles[i][14]])
return NMS(pick, 0.3)
# -------------------------------------#
#   Face alignment
# -------------------------------------#
def Alignment_1(img, landmark):
if landmark.shape[0] == 68:
x = landmark[36, 0] - landmark[45, 0]
y = landmark[36, 1] - landmark[45, 1]
elif landmark.shape[0] == 5:
x = landmark[0, 0] - landmark[1, 0]
y = landmark[0, 1] - landmark[1, 1]
if x == 0:
angle = 0
else:
angle = math.atan(y / x) * 180 / math.pi
center = (img.shape[1] // 2, img.shape[0] // 2)
RotationMatrix = cv2.getRotationMatrix2D(center, angle, 1)
new_img = cv2.warpAffine(img, RotationMatrix, (img.shape[1], img.shape[0]))
RotationMatrix = np.array(RotationMatrix)
new_landmark = []
for i in range(landmark.shape[0]):
pts = []
pts.append(RotationMatrix[0, 0] * landmark[i, 0] + RotationMatrix[0, 1] * landmark[i, 1] + RotationMatrix[0, 2])
pts.append(RotationMatrix[1, 0] * landmark[i, 0] + RotationMatrix[1, 1] * landmark[i, 1] + RotationMatrix[1, 2])
new_landmark.append(pts)
new_landmark = np.array(new_landmark)
return new_img, new_landmark
def Alignment_2(img, std_landmark, landmark):
def Transformation(std_landmark, landmark):
std_landmark = np.matrix(std_landmark).astype(np.float64)
landmark = np.matrix(landmark).astype(np.float64)
c1 = np.mean(std_landmark, axis=0)
c2 = np.mean(landmark, axis=0)
std_landmark -= c1
landmark -= c2
s1 = np.std(std_landmark)
s2 = np.std(landmark)
std_landmark /= s1
landmark /= s2
U, S, Vt = np.linalg.svd(std_landmark.T * landmark)
R = (U * Vt).T
return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)), np.matrix([0., 0., 1.])])
Trans_Matrix = Transformation(std_landmark, landmark) # Shape: 3 * 3
Trans_Matrix = Trans_Matrix[:2]
Trans_Matrix = cv2.invertAffineTransform(Trans_Matrix)
new_img = cv2.warpAffine(img, Trans_Matrix, (img.shape[1], img.shape[0]))
Trans_Matrix = np.array(Trans_Matrix)
new_landmark = []
for i in range(landmark.shape[0]):
pts = []
pts.append(Trans_Matrix[0, 0] * landmark[i, 0] + Trans_Matrix[0, 1] * landmark[i, 1] + Trans_Matrix[0, 2])
pts.append(Trans_Matrix[1, 0] * landmark[i, 0] + Trans_Matrix[1, 1] * landmark[i, 1] + Trans_Matrix[1, 2])
new_landmark.append(pts)
new_landmark = np.array(new_landmark)
return new_img, new_landmark
# ---------------------------------#
#   Image preprocessing:
#   Gaussian (mean/std) normalization
# ---------------------------------#
def pre_process(x):
if x.ndim == 4:
axis = (1, 2, 3)
size = x[0].size
elif x.ndim == 3:
axis = (0, 1, 2)
size = x.size
else:
raise ValueError('Dimension should be 3 or 4')
mean = np.mean(x, axis=axis, keepdims=True)
std = np.std(x, axis=axis, keepdims=True)
std_adj = np.maximum(std, 1.0 / np.sqrt(size))
y = (x - mean) / std_adj
return y
# ---------------------------------#
#   L2 normalization
# ---------------------------------#
def l2_normalize(x, axis=-1, epsilon=1e-10):
output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
return output
# ---------------------------------#
#   Compute the 128-dimensional feature vector
# ---------------------------------#
def calc_128_vec(model, img):
face_img = pre_process(img)
pre = model.predict(face_img)
pre = l2_normalize(np.concatenate(pre))
pre = np.reshape(pre, [128])
return pre
# ---------------------------------#
#   Compute face distances
# ---------------------------------#
def face_distance(face_encodings, face_to_compare):
if len(face_encodings) == 0:
return np.empty((0))
return np.linalg.norm(face_encodings - face_to_compare, axis=1)
# ---------------------------------#
#   Compare faces
# ---------------------------------#
def compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6):
dis = face_distance(known_face_encodings, face_encoding_to_check)
return list(dis <= tolerance)
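# ---------------------------------#
#   Hypothetical usage of the distance
#   helpers (the 128-d encodings below
#   are random stand-ins, not real
#   model outputs)
# ---------------------------------#
if __name__ == '__main__':
    known_encodings = np.random.rand(3, 128)
    probe = known_encodings[0] + 0.01 * np.random.rand(128)
    print(face_distance(known_encodings, probe))       # three Euclidean distances
    print(compare_faces(known_encodings, probe, 0.6))  # e.g. [True, False, False]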
| 2.3125
| 2
|
modules/timer.py
|
DMCTruong/MoosikBot
| 1
|
12781818
|
<filename>modules/timer.py<gh_stars>1-10
##########################################################################################
# Program Name : Discord Bot
# Author : DMCTruong
# Last Updated : August 31, 2017
# License : MIT
# Description : A general purpose bot written for Discord
##########################################################################################
import discord
from discord.ext import commands
import asyncio
import configurations
import datetime
import subprocess
import time
from time import localtime, strftime
bot = commands.Bot(configurations.PREFIX)
class Time:
def __init__(self, bot):
self.bot = bot
@bot.command(pass_context=True)
async def date(self):
"""Returns the current date and time"""
date_time = strftime("The date is %A, %B %d, %Y at %I:%M %p in %Z.", localtime())
print(date_time)
await self.bot.say(date_time)
#async def alarm(self, ctx, *, mins: int, member: discord.Member=None):
@bot.command(pass_context=True)
async def join_date(self, ctx, member: discord.Member = None):
"""Gives user's server join date"""
if member is None:
member = ctx.message.author
await self.bot.say('{0} joined the server on {0.joined_at}'.format(member))
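# Hypothetical extension entry point (not in the original file): discord.py
# conventionally loads cogs through a module-level setup() hook, assuming the
# main bot script calls load_extension('modules.timer').
def setup(bot):
    bot.add_cog(Time(bot))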
| 3.140625
| 3
|
code/traditional/KMM.py
|
Flsahkong/transferlearning
| 9
|
12781819
|
<gh_stars>1-10
# encoding=utf-8
"""
Created on 9:53 2019/4/21
@author: <NAME>
"""
"""
Kernel Mean Matching
# 1. Gretton, Arthur, et al. "Covariate shift by kernel mean matching." Dataset shift in machine learning 3.4 (2009): 5.
# 2. Huang, Jiayuan, et al. "Correcting sample selection bias by unlabeled data." Advances in neural information processing systems. 2006.
"""
import numpy as np
import sklearn.metrics
from cvxopt import matrix, solvers
def kernel(ker, X1, X2, gamma):
K = None
if ker == 'linear':
if X2 is not None:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1), np.asarray(X2))
else:
K = sklearn.metrics.pairwise.linear_kernel(np.asarray(X1))
elif ker == 'rbf':
if X2 is not None:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1), np.asarray(X2), gamma)
else:
K = sklearn.metrics.pairwise.rbf_kernel(np.asarray(X1), None, gamma)
return K
class KMM:
def __init__(self, kernel_type='linear', gamma=1.0, B=1.0, eps=None):
'''
Initialization function
:param kernel_type: 'linear' | 'rbf'
:param gamma: kernel bandwidth for rbf kernel
:param B: bound for beta
:param eps: bound for sigma_beta
'''
self.kernel_type = kernel_type
self.gamma = gamma
self.B = B
self.eps = eps
def fit(self, Xs, Xt):
'''
Fit source and target using KMM (compute the coefficients)
:param Xs: ns * dim
:param Xt: nt * dim
:return: Coefficients (Pt / Ps) value vector (Beta in the paper)
'''
ns = Xs.shape[0]
nt = Xt.shape[0]
        if self.eps is None:
self.eps = self.B / np.sqrt(ns)
K = kernel(self.kernel_type, Xs, None, self.gamma)
kappa = np.sum(kernel(self.kernel_type, Xs, Xt, self.gamma) * float(ns) / float(nt), axis=1)
K = matrix(K)
kappa = matrix(kappa)
G = matrix(np.r_[np.ones((1, ns)), -np.ones((1, ns)), np.eye(ns), -np.eye(ns)])
h = matrix(np.r_[ns * (1 + self.eps), ns * (self.eps - 1), self.B * np.ones((ns,)), np.zeros((ns,))])
sol = solvers.qp(K, -kappa, G, h)
beta = np.array(sol['x'])
return beta
if __name__ == '__main__':
Xs = [[1, 2, 3], [4, 7, 4], [3, 3, 3], [4, 4, 4], [5, 5, 5], [3, 4, 5], [1, 2, 3], [4, 7, 4], [3, 3, 3], [4, 4, 4], [5, 5, 5], [3, 4, 5], [1, 2, 3], [4, 7, 4], [3, 3, 3], [4, 4, 4], [5, 5, 5], [3, 4, 5], [1, 2, 3], [4, 7, 4], [3, 3, 3], [4, 4, 4], [5, 5, 5], [3, 4, 5]]
Xt = [[5, 9, 10], [4, 5, 6], [10, 20, 30], [1, 2, 3], [3, 4, 5], [5, 6, 7], [7, 8, 9], [100, 100, 100], [11, 22, 33], [12, 11, 5], [5, 9, 10], [4, 5, 6], [10, 20, 30], [1, 2, 3], [3, 4, 5], [5, 6, 7], [7, 8, 9], [100, 100, 100], [11, 22, 33], [12, 11, 5]]
Xs, Xt = np.asarray(Xs), np.asarray(Xt)
kmm = KMM(kernel_type='rbf', B=10)
beta = kmm.fit(Xs, Xt)
print(beta)
print(beta.shape)
| 2.328125
| 2
|
RelayHandler/plugin.py
|
nyuszika7h/limnoria-plugins
| 3
|
12781820
|
<reponame>nyuszika7h/limnoria-plugins<gh_stars>1-10
# Copyright 2017, nyuszika7h <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
from supybot import callbacks, commands, i18n, ircmsgs, ircutils
_ = i18n.PluginInternationalization('RelayHandler')
class RelayHandler(callbacks.Plugin):
"""
This plugin handles commands from authorized relays.
"""
def __init__(self, irc):
self.__parent = super(RelayHandler, self)
self.__parent.__init__(irc)
def inFilter(self, irc, msg):
if msg.prefix.endswith('!relayhandler@relayhandler'):
# Don't parse our own injected messages
return msg
if msg.command != "PRIVMSG":
# Don't parse non-PRIVMSGs
return msg
channel = msg.args[0]
if not irc.isChannel(channel):
return msg
trustedRelayHostmask = re.compile(self.registryValue('trustedRelayHostmask', channel))
relayPrefixes = self.registryValue('relayPrefix', channel).split()
if not trustedRelayHostmask:
self.log.debug('No authorized relays configured for %s',
channel)
m = None
for relayPrefix in relayPrefixes:
m = re.match(relayPrefix, ircutils.stripFormatting(msg.args[1]))
if m is not None:
break
if m is None:
self.log.debug('Message does not match relay pattern')
return msg
nick = re.sub(r'\s', '', ircutils.stripFormatting(m.group('nick')))
message = m.group('message')
assert(nick is not None)
assert(message is not None)
if not re.match(trustedRelayHostmask, msg.prefix):
self.log.debug('Ignored message from unauthorized relay %s to '
'%s: %r', msg.prefix, channel, msg.args[1])
return msg
msg.args = (channel, message)
self.log.debug('Handling relay message from %s in %s (relay: %s): %r',
nick, channel, msg.prefix, message)
prefix = '@%s!%s@relayhandler' % (nick, nick)
return ircmsgs.IrcMsg(prefix=prefix,
command='PRIVMSG',
args=(channel, message))
Class = RelayHandler
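# Hypothetical configuration note (the pattern below is illustrative, not from the
# source): supybot.plugins.RelayHandler.relayPrefix must expose 'nick' and 'message'
# named groups, since inFilter reads both via m.group(); a "<nick> message" style
# relay could use:
#
#     ^<(?P<nick>[^>]+)> (?P<message>.*)$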
# vim: set tabstop=4 shiftwidth=4 expandtab textwidth=79:
| 1.570313
| 2
|
poseidon/dags/city_docs/ob_testdocs_dags.py
|
panda-tech/poseidon-airflow
| 0
|
12781821
|
<filename>poseidon/dags/city_docs/ob_testdocs_dags.py
""" OnBase web tables _dags file"""
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from trident.operators.s3_file_transfer_operator import S3FileTransferOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from airflow.models import DAG
from trident.util import general
from trident.util.notifications import notify
from dags.city_docs.city_docs_jobs import *
from datetime import datetime
import os
# All times in Airflow UTC. Set Start Time in PST?
args = general.args
conf = general.config
schedule = general.schedule['onbase_test']
start_date = general.start_date['onbase_test']
#: Dag spec
dag = DAG(dag_id='obdocs_test', catchup=False, default_args=args, start_date=start_date, schedule_interval=schedule)
#: Get onbase tables
get_doc_tables = PythonOperator(
task_id='get_onbase_tables',
python_callable=get_onbase_test,
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
dag=dag)
files = [f for f in os.listdir(conf['prod_data_dir'])]
for f in files:
file_name = f.split('.')[0]
name_parts = file_name.split('_')
if name_parts[0] == "onbase" and name_parts[1] == "test":
#: Upload onbase prod files to S3
upload_doc_tables = S3FileTransferOperator(
task_id='upload_' + file_name,
source_base_path=conf['prod_data_dir'],
source_key='{}.csv'.format(file_name),
dest_s3_conn_id=conf['default_s3_conn_id'],
dest_s3_bucket=conf['dest_s3_bucket'],
dest_s3_key='city_docs/{}.csv'.format(file_name),
on_failure_callback=notify,
on_retry_callback=notify,
on_success_callback=notify,
replace=True,
dag=dag)
#: get_doc_tables must run before upload_doc_tables
upload_doc_tables.set_upstream(get_doc_tables)
| 2.125
| 2
|
tests_scripts/Sprint1-f19/Sprint1-f19/tests/loginpageui.py
|
uno-isqa-8950/uno-cpi
| 13
|
12781822
|
<gh_stars>10-100
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class LoginPageUi:
## URL
URL = 'https://uno-cpi-dev.herokuapp.com/'
# Locators for Login
Login_link = (By.XPATH, '//*[@id="target"]/ul/li[4]/a')
Username_field = (By.XPATH, '/html/body/div/div/div/div/div/div[2]/div[2]/form/input[2]')
Password_field = (By.XPATH, '/html/body/div/div/div/div/div/div[2]/div[2]/form/input[3]')
Login_button = (By.XPATH, '/html/body/div/div/div/div/div/div[2]/div[2]/form/div[1]/p/button')
Login_asserttoggle = (By.XPATH, '//*[@id="target"]/ul/li[6]/a')
## Locators for Logout
Logout_dropdown = (By.XPATH, '//*[@id="target"]/ul/li[6]/a')
Logout_click = (By.XPATH, '//*[@id="target"]/ul/li[6]/div/a')
Successful_logout = (By.XPATH,'/html/body/div/div/div/div[1]/h4')
# Initializer
def __init__(self, browser):
self.browser = browser
# Interaction Methods
def load(self):
self.browser.get(self.URL)
## login link on home page
def login_link_click(self):
login_link = self.browser.find_element(*self.Login_link)
login_link.click()
## username field
def login_username(self,username):
login_username = self.browser.find_element(*self.Username_field)
login_username.send_keys(username + Keys.RETURN)
## password field
def login_password(self, password):
login_password = self.browser.find_element(*self.Password_field)
login_password.send_keys(password + Keys.RETURN)
## Login Button
def login_button(self):
login_button = self.browser.find_element(*self.Login_button)
login_button.click()
## Login assertion
def login_assertion(self):
login_assert = self.browser.find_element(*self.Login_asserttoggle)
value = login_assert.get_attribute('class')
return value
## Logout dropdown
def logout_dropdown(self):
logout_dropdown = self.browser.find_element(*self.Logout_dropdown)
logout_dropdown.click()
## Logout Click
def logout_click(self):
        logout_click = self.browser.find_element(*self.Logout_click)
logout_click.click()
## Logout Assertion
def logout_assertion(self):
successful_logout = self.browser.find_element(*self.Successful_logout)
success = successful_logout.get_attribute('text') ##Logged Out
return success
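# Hypothetical usage sketch (requires a live browser session and valid test
# credentials, neither of which ship with this page object):
#
# from selenium import webdriver
# browser = webdriver.Chrome()
# page = LoginPageUi(browser)
# page.load()
# page.login_link_click()
# page.login_username('some_user')
# page.login_password('some_password')
# page.login_button()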
| 2.3125
| 2
|
pyyadisk/__init__.py
|
ndrwpvlv/pyyadisk
| 1
|
12781823
|
<filename>pyyadisk/__init__.py
from .yandexdisk import YandexDisk
| 1.039063
| 1
|
lunch/models.py
|
pythondev0101/django_eats_easy_ordering_system
| 0
|
12781824
|
<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Order(models.Model):
"""Blueprint for Order object"""
weekorder = models.ForeignKey('human_resource.OrderForWeek',on_delete=models.SET_NULL,null=True)
name = models.CharField(max_length=255,default='')
user = models.ForeignKey(User, on_delete=models.SET_NULL,null=True)
total = models.DecimalField(max_digits=9,decimal_places=2,verbose_name='Total')
ORDER_STATUS = (('new', 'New'),('received', 'Received'),('ordered', 'Ordered'),('cancelled', 'Cancelled'))
status = models.CharField(max_length=10,choices=ORDER_STATUS,blank=True, verbose_name='Status',default='new')
date = models.DateField(null=True)
def get_absolute_url(self):
return reverse('order-detail', args=[str(self.id)])
class OrderLine(models.Model):
order = models.ForeignKey('Order',on_delete=models.SET_NULL,null=True)
product = models.ForeignKey('core.Product',on_delete=models.SET_NULL,null=True)
date = models.DateField(verbose_name="Date",null=True)
day = models.CharField(max_length=10,default="",blank=True)
| 2.375
| 2
|
server/server/settings.py
|
dcat52/interop
| 0
|
12781825
|
<gh_stars>0
"""
Django settings for the interop server.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = DEBUG
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = ['*']
# Public IP addresses given access to Django Debug Toolbar
# Add your IP here, if not localhost.
INTERNAL_IPS = ['127.0.0.1']
# Path to jQuery for the Django Debug Toolbar to use.
JQUERY_URL = '/static/admin/js/jquery.js'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'auvsi_suas',
'auvsi_suas.views.auvsi_admin',
) # yapf: disable
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'auvsi_suas.views.middleware.LoggingMiddleware',
) # yapf: disable
# Add a '?debug' parameter to API endpoints, which wraps them in an HTML
# response, allowing the use of Django Debug Toolbar with the endpoints.
if DEBUG:
import debug
INSTALLED_APPS += 'debug_toolbar',
MIDDLEWARE_CLASSES += debug.middleware
# All of the default panels, plus the profiling panel.
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
DEBUG_TOOLBAR_CONFIG = {'PROFILER_MAX_DEPTH': 50}
ROOT_URLCONF = 'server.urls'
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'auvsi_suas_db',
'USER': 'postgresql_user',
'PASSWORD': '<PASSWORD>',
'CONN_MAX_AGE': None,
'HOST': 'localhost',
'PORT': '5432',
'TEST': {
'NAME': 'test_auvsi_suas_db',
},
}
}
# Caches
# https://docs.djangoproject.com/en/1.6/topics/cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 30,
'KEY_PREFIX': 'suas',
}
}
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format':
'%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(module)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'py.warnings': {
'handlers': ['file'],
},
'django': {
'handlers': ['file'],
},
'django.request': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
'django.security': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
'auvsi_suas.views': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'auvsi_suas/static')
# User uploaded files
MEDIA_URL = '/media/'
MEDIA_ROOT = '/var/www/media'
# Send with X-SENDFILE in apache
SENDFILE_BACKEND = 'sendfile.backends.xsendfile'
# Login URL
LOGIN_URL = '/admin/login/?next=/'
# Migrations
MIGRATION_MODULES = {
'auvsi_suas.models': 'auvsi_suas.models.migrations',
} # yapf: disable
# Custom test runner.
TEST_RUNNER = 'auvsi_suas.test_runner.AuvsiSuasTestRunner'
# Whether tests can/should generate plots (requires window access)
TEST_ENABLE_PLOTTING = False
# Whether to perform load tests (slower)
TEST_ENABLE_LOADTEST = True
# The time to execute each loadtest for
TEST_LOADTEST_TIME = 2.0
# The minimum rate of an individual interop interface
# (1.5x safety factor, 10Hz, 4 interfaces)
TEST_LOADTEST_INTEROP_MIN_RATE = 1.5 * 10.0 * 4
# The time window (in seconds) in which a plane cannot be counted as going out
# of bounds multiple times. This prevents noisy input data from recording
# significantly more violations than a human observer.
OUT_OF_BOUNDS_DEBOUNCE_SEC = 10.0
# The max distance for a waypoint to be considered satisfied.
SATISFIED_WAYPOINT_DIST_MAX_FT = 100
# The time between interop telemetry posts that's a prereq for other tasks.
INTEROP_TELEM_THRESHOLD_TIME_SEC = 1.0
# Ratio of object points to lose for every extra unmatched object submitted.
EXTRA_OBJECT_PENALTY_RATIO = 0.05
# The weight of classification accuracy when calculating a odlc match score.
CHARACTERISTICS_WEIGHT = 0.2
# The lowest allowed location accuracy (in feet)
TARGET_LOCATION_THRESHOLD = 150
# The weight of geolocation accuracy when calculating a odlc match score.
GEOLOCATION_WEIGHT = 0.3
# The weight of actionable intelligence when calculating a odlc match score.
ACTIONABLE_WEIGHT = 0.3
# The weight of autonomy when calculating a odlc match score.
AUTONOMY_WEIGHT = 0.2
# Weight of timeline points for mission time.
MISSION_TIME_WEIGHT = 0.8
# Weight of timeline points for not taking a timeout.
TIMEOUT_WEIGHT = 0.2
# Max mission time.
MISSION_MAX_TIME_SEC = 45.0 * 60.0
# Points for flight time in mission time score.
FLIGHT_TIME_SEC_TO_POINTS = 5.0 / 60.0
# Points for post-processing time in mission time score.
PROCESS_TIME_SEC_TO_POINTS = 1.0 / 60.0
# Total points possible for mission time.
MISSION_TIME_TOTAL_POINTS = MISSION_MAX_TIME_SEC * max(
FLIGHT_TIME_SEC_TO_POINTS, PROCESS_TIME_SEC_TO_POINTS)
# Mission time points lost due for every second over time.
MISSION_TIME_PENALTY_FROM_SEC = 0.03
# Ratio of points lost per takeover.
AUTONOMOUS_FLIGHT_TAKEOVER = 0.10
# Ratio of points lost per out of bounds.
BOUND_PENALTY = 0.1
SAFETY_BOUND_PENALTY = 0.1
# Ratio of points lost for TFOA and crash.
TFOA_PENALTY = 0.25
CRASH_PENALTY = 0.35
# Weight of flight points to all autonomous flight.
AUTONOMOUS_FLIGHT_FLIGHT_WEIGHT = 0.4
# Weight of capture points to all autonomous flight.
WAYPOINT_CAPTURE_WEIGHT = 0.1
# Weight of accuracy points to all autonomous flight.
WAYPOINT_ACCURACY_WEIGHT = 0.5
# Weight of stationary obstacle avoidance.
STATIONARY_OBST_WEIGHT = 0.5
# Weight of moving obstacle avoidance.
MOVING_OBST_WEIGHT = 0.5
# Air delivery accuracy threshold.
AIR_DELIVERY_THRESHOLD_FT = 150.0
# Scoring weights.
TIMELINE_WEIGHT = 0.1
AUTONOMOUS_WEIGHT = 0.3
OBSTACLE_WEIGHT = 0.2
OBJECT_WEIGHT = 0.2
AIR_DELIVERY_WEIGHT = 0.1
OPERATIONAL_WEIGHT = 0.1
# Max aircraft airspeed in ft/s. Rules specify 70 KIAS.
MAX_AIRSPEED_FT_PER_SEC = 118.147
# Maximum interval between telemetry logs allowed for interpolation.
MAX_TELMETRY_INTERPOLATE_INTERVAL_SEC = 1.5
| 1.835938
| 2
|
tests/test_physics/test_fock/test_fock.py
|
sylviemonet/MrMustard
| 33
|
12781826
|
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hypothesis import settings, given, strategies as st
import pytest
import numpy as np
from scipy.special import factorial
from thewalrus.quantum import total_photon_number_distribution
from mrmustard.lab import *
from mrmustard.physics.fock import dm_to_ket, ket_to_dm
# helper strategies
st_angle = st.floats(min_value=0, max_value=2 * np.pi)
@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock(n_mean, phi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state
Note that this is consistent with the Strawberryfields convention"""
cutoff = 4
r = np.arcsinh(np.sqrt(n_mean))
circ = Circuit(ops=[S2gate(r=r, phi=phi)])
amps = (Vacuum(num_modes=2) >> circ).ket(cutoffs=[cutoff, cutoff])
diag = (1 / np.cosh(r)) * (np.exp(1j * phi) * np.tanh(r)) ** np.arange(cutoff)
expected = np.diag(diag)
assert np.allclose(amps, expected)
@given(n_mean=st.floats(0, 3), phi=st_angle, varphi=st_angle)
def test_hong_ou_mandel(n_mean, phi, varphi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
cutoff = 2
r = np.arcsinh(np.sqrt(n_mean))
ops = [
S2gate(r=r, phi=phi)[0, 1],
S2gate(r=r, phi=phi)[2, 3],
BSgate(theta=np.pi / 4, phi=varphi)[1, 2],
]
circ = Circuit(ops)
amps = (Vacuum(4) >> circ).ket(cutoffs=[cutoff, cutoff, cutoff, cutoff])
assert np.allclose(amps[1, 1, 1, 1], 0.0, atol=1e-6)
@given(alpha=st.complex_numbers(min_magnitude=0, max_magnitude=2))
def test_coherent_state(alpha):
"""Test that coherent states have the correct photon number statistics"""
cutoff = 10
amps = Coherent(x=alpha.real, y=alpha.imag).ket(cutoffs=[cutoff])
expected = np.exp(-0.5 * np.abs(alpha) ** 2) * np.array(
[alpha**n / np.sqrt(factorial(n)) for n in range(cutoff)]
)
assert np.allclose(amps, expected, atol=1e-6)
@given(r=st.floats(0, 2), phi=st_angle)
def test_squeezed_state(r, phi):
"""Test that squeezed states have the correct photon number statistics
Note that we use the same sign with respect to SMSV in https://en.wikipedia.org/wiki/Squeezed_coherent_state"""
cutoff = 10
amps = SqueezedVacuum(r=r, phi=phi).ket(cutoffs=[cutoff])
assert np.allclose(amps[1::2], 0.0)
non_zero_amps = amps[0::2]
len_non_zero = len(non_zero_amps)
amp_pairs = (
1
/ np.sqrt(np.cosh(r))
* np.array(
[
(-np.exp(1j * phi) * np.tanh(r)) ** n
* np.sqrt(factorial(2 * n))
/ (2**n * factorial(n))
for n in range(len_non_zero)
]
)
)
assert np.allclose(non_zero_amps, amp_pairs)
@given(n_mean=st.floats(0, 3), phi=st_angle)
def test_two_mode_squeezing_fock_mean_and_covar(n_mean, phi):
"""Tests that perfect number correlations are obtained for a two-mode squeezed vacuum state"""
r = np.arcsinh(np.sqrt(n_mean))
state = Vacuum(num_modes=2) >> S2gate(r=r, phi=phi)
meanN = state.number_means
covN = state.number_cov
expectedN = np.array([n_mean, n_mean])
expectedCov = n_mean * (n_mean + 1) * np.ones([2, 2])
assert np.allclose(meanN, expectedN)
assert np.allclose(covN, expectedCov)
@given(n_mean=st.floats(0, 2), phi=st_angle, eta=st.floats(min_value=0, max_value=1))
def test_lossy_squeezing(n_mean, phi, eta):
"""Tests the total photon number distribution of a lossy squeezed state"""
r = np.arcsinh(np.sqrt(n_mean))
cutoff = 40
ps = (SqueezedVacuum(r=r, phi=phi) >> Attenuator(transmissivity=eta)).fock_probabilities(
[cutoff]
)
expected = np.array([total_photon_number_distribution(n, 1, r, eta) for n in range(cutoff)])
assert np.allclose(ps, expected, atol=1e-6)
@given(n_mean=st.floats(0, 2), phi=st_angle, eta_0=st.floats(0, 1), eta_1=st.floats(0, 1))
def test_lossy_two_mode_squeezing(n_mean, phi, eta_0, eta_1):
"""Tests the photon number distribution of a lossy two-mode squeezed state"""
cutoff = 40
n = np.arange(cutoff)
L = Attenuator(transmissivity=[eta_0, eta_1])
state = TMSV(r=np.arcsinh(np.sqrt(n_mean)), phi=phi) >> L
ps0 = state.get_modes(0).fock_probabilities([cutoff])
ps1 = state.get_modes(1).fock_probabilities([cutoff])
mean_0 = np.sum(n * ps0)
mean_1 = np.sum(n * ps1)
assert np.allclose(mean_0, n_mean * eta_0, atol=1e-5)
assert np.allclose(mean_1, n_mean * eta_1, atol=1e-5)
@given(num_modes=st.integers(1, 3))
def test_density_matrix(num_modes):
"""Tests the density matrix of a pure state is equal to |psi><psi|"""
modes = list(range(num_modes))
cutoffs = [num_modes + 1] * num_modes
G = Ggate(num_modes=num_modes)
L = Attenuator(transmissivity=1.0)
rho_legit = (Vacuum(num_modes) >> G >> L[modes]).dm(cutoffs=cutoffs)
rho_made = (Vacuum(num_modes) >> G).dm(cutoffs=cutoffs)
# rho_legit = L[modes](G(Vacuum(num_modes))).dm(cutoffs=cutoffs)
# rho_built = G(Vacuum(num_modes=num_modes)).dm(cutoffs=cutoffs)
assert np.allclose(rho_legit, rho_made)
@pytest.mark.parametrize(
"state",
[
Vacuum(num_modes=2),
Fock(4),
Coherent(x=0.1, y=-0.4, cutoffs=[15]),
Gaussian(num_modes=2, cutoffs=[15]),
],
)
def test_dm_to_ket(state):
"""Tests pure state density matrix conversion to ket"""
dm = state.dm()
ket = dm_to_ket(dm)
# check if ket is normalized
assert np.allclose(np.linalg.norm(ket), 1)
# check kets are equivalent
assert np.allclose(ket, state.ket())
dm_reconstructed = ket_to_dm(ket)
# check ket leads to same dm
assert np.allclose(dm, dm_reconstructed)
def test_dm_to_ket_error():
"""Test dm_to_ket raises an error when state is mixed"""
state = Coherent(x=0.1, y=-0.4, cutoffs=[15]) >> Attenuator(0.5)
with pytest.raises(ValueError):
dm_to_ket(state)
| 2.125
| 2
|
tests/core/test_api_options.py
|
webbcam/tiflash
| 5
|
12781827
|
import pytest
import tiflash
class TestOptionsApi():
# Getters
def test_basic_get_option(self, tdev):
"""Tests basic get_option function"""
result = tiflash.get_option(tdev['option'],
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
if 'option-val' in tdev.keys():
assert result == tdev['option-val']
def test_get_option_with_preop(self, tdev):
"""Tests get_option with a preop"""
if 'preop' not in tdev.keys():
pytest.skip("No preop provided for device")
result = tiflash.get_option(tdev['preop-option'],
pre_operation=tdev['preop'],
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
def test_get_invalid_option(self, tdev):
"""Tests get_option throws error when invalid option id provided"""
with pytest.raises(tiflash.TIFlashError):
result = tiflash.get_option("InvalidOption",
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
def test_get_option_invalid_preop(self, tdev):
"""Tests get_option raises error when invalid preop provided"""
with pytest.raises(tiflash.TIFlashError):
result = tiflash.get_option(tdev['preop-option'],
pre_operation="InvalidOperation",
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
# Setters
<EMAIL>
def test_basic_set_option(self, tdev):
"""Tests basic set_option function"""
tiflash.set_option(
option_id=tdev['option'],
option_val=tdev['option-val'],
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
# List
def test_list_options(self, tdev):
"""Tests all options returned in list are valid"""
options = tiflash.list_options(
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(options) > 1
def test_list_single_option(self, tdev):
"""Tests listing of one specified option"""
options = tiflash.list_options(
option_id=tdev['option'],
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(options) == 1
assert tdev['option'] in options.keys()
def test_list_single_nonexistant_option(self, tdev):
"""Tests listing of specified option that does not exist"""
options = tiflash.list_options(
option_id="InvalidOption",
serno=tdev['serno'],
connection=tdev['connection'],
devicetype=tdev['devicetype'])
assert len(options) == 0
| 2.046875
| 2
|
plugins/module_utils/plugins/plugin_base.py
|
sma-de/ansible-collections-base
| 0
|
12781828
|
<filename>plugins/module_utils/plugins/plugin_base.py
#!/usr/bin/env python
# TODO: copyright, owner license
#
"""
TODO module / file doc string
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import collections
import copy
import re
from ansible.errors import AnsibleError, AnsibleInternalError, AnsibleOptionsError
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from ansible.utils.display import Display
from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert, detemplate
display = Display()
KWARG_UNSET = object()
MAGIC_ARGSPECKEY_META = '___args_meta'
def default_param_value(pname, defcfg, ans_varspace, templater):
def get_space_val(space, key, templater=templater):
res = space[key]
if templater:
## note: at least sometimes when we get values
## from ansible varspace, they are still template
## string, not the templated value
res = detemplate(res, templater)
return res
if not defcfg:
raise AnsibleOptionsError(
"Must set mandatory param '{}'".format(pname)
)
# check if we have a match in for ansvars
ansvars = defcfg.get('ansvar', [])
for av in ansvars:
if av in ans_varspace:
return get_space_val(ans_varspace, av)
# check if we have a matching envvar
envvars = defcfg.get('env', [])
envspace = ans_varspace.get('ansible_env', {})
for e in envvars:
if e in envspace:
return get_space_val(envspace, e)
# use hardcoded fallback
if 'fallback' in defcfg:
return defcfg['fallback']
raise AnsibleOptionsError(
"No hardcoded fallback for param '{}', set it either directly or"
" by specifying one of these ansible variables (=> {}) or one of"
" these environment variables (=> {})".format(pname, ansvars, envvars)
)
def check_paramtype(param, value, typespec, errmsg):
if typespec == []:
# no type restriction ==> noop
return
if callable(typespec):
return typespec(value)
type_match = False
display.vvv(
"[PLUGIN] :: handle args, do type check: {}".format(typespec)
)
for xt in typespec:
display.vvv(
"[PLUGIN] :: handle args, type test: {}".format(xt)
)
sub_types = None
if isinstance(xt, list):
            # remember the allowed element types before collapsing xt to `list`
            sub_types = xt
            xt = list
if isinstance(value, xt):
type_match = True
break
if not type_match:
if not errmsg:
errmsg = "Must be one of the following types: {}".format(typespec)
raise AnsibleOptionsError(
"Value '{}' for param '{}' failed its type"
" check: {}".format(value, param, errmsg)
)
if isinstance(value, list):
ansible_assert(sub_types, 'bad typespec')
display.vvv(
"[PLUGIN] :: handle args, do subtype check: {}".format(sub_types)
)
for vx in value:
check_paramtype(param, vx, sub_types, errmsg)
class ArgsPlugin():
''' TODO '''
def __init__(self):
self._taskparams = {}
self._ansible_varspace = {}
@property
def argspec(self):
return {}
@property
def error_prefix(self):
return ''
def get_taskparam(self, name):
return self._taskparams[name]
def _handle_taskargs(self, argspec, args_in, args_out):
display.vvv(
"[PLUGIN] :: handle args, argspec: {}".format(argspec)
)
argspec = copy.deepcopy(argspec)
args_set = copy.deepcopy(args_in)
args_found = {}
args_meta = argspec.pop(MAGIC_ARGSPECKEY_META, {})
for (k, v) in iteritems(argspec):
display.vv(
"[PLUGIN] :: handle args, do param '{}'".format(k)
)
## first normalize argspec
# convert convenience short forms to norm form
if isinstance(v, collections.abc.Mapping):
display.vvv(
"[PLUGIN] :: handle args, argspec is dict,"\
" nothing to normalize"
)
pass # noop
elif isinstance(v, tuple):
tmp = {}
display.vvv(
"[PLUGIN] :: handle args, argspec is short form,"\
" normalizing ..."
)
for i in range(0, len(v)):
vx = v[i]
if i == 0:
tmp['type'] = vx
elif i == 1:
tmp['defaulting'] = { 'fallback': vx }
elif i == 2:
if isinstance(vx, collections.abc.Mapping):
tmp['subspec'] = vx
else:
tmp['choice'] = vx
else:
raise AnsibleInternalError(
"Unsupported short form argspec tuple: '{}'".format(v)
)
v = tmp
else:
## assume a single value for arg type
v = { 'type': v }
# normalize norm form
ansible_assert('type' in v,
"Bad argspec for param '{}': Mandatory type field missing".format(k)
)
vdef = v.get('defaulting', None)
mandatory = not vdef
## TODO: min and max sizes for collection types
# get param
key_hits = []
aliases = v.get('aliases', [])
display.vvv(
"[PLUGIN] :: handle args, get set val / handle"\
" aliasing: {}".format(aliases)
)
for x in [k] + aliases:
ansible_assert(x not in args_found,
"Bad argspec for param '{}': duplicate alias"
" name '{}'".format(k, x)
)
if x in args_set:
key_hits.append(x)
pval = args_set.pop(x)
args_found[k] = True
if len(key_hits) > 1:
raise AnsibleOptionsError(
"Bad param '{}': Use either key or one of its aliases"
" '{}', but not more than one at a time".format(k, aliases)
)
if len(key_hits) == 0:
display.vv("[PLUGIN] :: handle args, do defaulting")
# param unset, do defaulting
pval = default_param_value(
k, vdef, self._ansible_varspace,
getattr(self, '_templar', None)
)
display.vv(
"[PLUGIN] :: handle args, final pvalue: |{}|".format(pval)
)
display.vv(
"[PLUGIN] :: handle args, check param"\
" type: {}".format(v['type'])
)
## at this point param is either set explicitly or by
## defaulting mechanism, proceed with value tests
check_paramtype(k, pval, v['type'], v.get('type_err', None))
## optionally handle choice
choice = v.get('choice', None)
if choice:
display.vvv(
"[PLUGIN] :: handle args, handle choice: {}".format(choice)
)
ansible_assert(isinstance(choice, list),
"bad argspec[{}]: choice must be list,"\
" but was '{}': {}".format(k, type(choice), choice)
)
ansible_assert(
not isinstance(pval, (list, collections.abc.Mapping)),
"bad argspec[{}]: if choice is specified, param"\
" cannot be collection type, it must be scalar".format(k)
)
if pval not in choice:
raise AnsibleOptionsError(
"Bad param '{}': given value was '{}' but it"\
" must be one of these: {}".format(k, pval, choice)
)
args_out[k] = pval
subspec = v.get('subspec', None)
if isinstance(pval, collections.abc.Mapping) and subspec:
display.vvv(
"[PLUGIN] :: handle args, do subspec: {}".format(subspec)
)
self._handle_taskargs(subspec, pval, pval)
if args_set:
raise AnsibleOptionsError(
"Unsupported parameters given: {}".format(list(args_set.keys()))
)
## check mutual exclusions:
for exlst in args_meta.get('mutual_exclusions', []):
tmp = []
for x in exlst:
if x in args_found:
tmp.append(x)
if len(tmp) > 1:
raise AnsibleOptionsError(
"It is not allowed to set mutual exclusive"
" params '{}' and '{}' together".format(*tmp)
)
## @abc.abstractmethod
## def run_specific(self, *args, **kwargs):
## pass
def run_wrapper(self, *args, **kwargs):
try:
return self.run_specific(*args, **kwargs)
except AnsibleError as e:
raise type(e)("{}{}".format(self.error_prefix, e))
except ModuleNotFoundError as e:
bad_lib = re.search(r"(?i)module named '(.*?)'", e.msg).group(1)
raise AnsibleError("{}{}".format(
self.error_prefix, missing_required_lib(bad_lib))
)
except Exception as e:
raise AnsibleInternalError("{}{}".format(self.error_prefix,
to_native("Unhandled native error {}: {}".format(type(e), e))
)) from e
class AnsSpaceAndArgsPlugin(ArgsPlugin):
''' TODO '''
def __init__(self, *args, **kwargs):
super(AnsSpaceAndArgsPlugin, self).__init__(*args, **kwargs)
def get_ansible_var(self, var, default=KWARG_UNSET):
if default != KWARG_UNSET:
return detemplate(
self._ansible_varspace.get(var, default), self._templar
)
return detemplate(self._ansible_varspace[var], self._templar)
def get_ansible_fact(self, fact, default=KWARG_UNSET):
facts = self.get_ansible_var('ansible_facts')
if default == KWARG_UNSET:
return facts[fact]
return facts.get(fact, default)
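# Hypothetical argspec sketch (names and values invented for illustration; only the
# structure mirrors what _handle_taskargs above accepts):
#
# EXAMPLE_ARGSPEC = {
#     'state': ([str], 'present', ['present', 'absent']),   # short form: (type, fallback, choice)
#     'retries': {                                           # normal form mapping
#         'type': [int],
#         'defaulting': {'env': ['MY_RETRIES'], 'fallback': 3},
#     },
#     'name': [str],                                         # bare value: type restriction only
# }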
| 2.21875
| 2
|
lib/models/mixformer/__init__.py
|
SangbumChoi/MixFormer
| 103
|
12781829
|
from .mixformer import build_mixformer
from .mixformer_online import build_mixformer_online_score
| 1
| 1
|
examples/cpu_usage/models.py
|
reesezxf/infi.clickhouse_orm
| 3
|
12781830
|
<filename>examples/cpu_usage/models.py
from infi.clickhouse_orm import Model, DateTimeField, UInt16Field, Float32Field, Memory
class CPUStats(Model):
timestamp = DateTimeField()
cpu_id = UInt16Field()
cpu_percent = Float32Field()
engine = Memory()
| 1.984375
| 2
|
mappgene/subscripts/ivar.py
|
aavilaherrera/mappgene
| 7
|
12781831
|
#!/usr/bin/env python3
import os,sys,glob,multiprocessing,time,csv,math,pprint
from parsl.app.app import python_app
from os.path import *
from mappgene.subscripts import *
@python_app(executors=['worker'])
def run_ivar(params):
subject_dir = params['work_dir']
subject = basename(subject_dir)
input_reads = params['input_reads']
variant_frequency = params['variant_frequency']
read_cutoff_bp = params['read_cutoff_bp']
primers_bp = params['primers_bp']
depth_cap = params['depth_cap']
stdout = params['stdout']
ivar_dir = join(subject_dir, 'ivar')
output_dir = join(subject_dir, 'ivar_outputs')
alignments_dir = join(output_dir, 'alignments')
raw_dir = join(ivar_dir, 'raw_data')
smart_remove(raw_dir)
smart_remove(output_dir)
smart_mkdir(raw_dir)
smart_mkdir(output_dir)
smart_mkdir(alignments_dir)
reads = []
start_time = time.time()
start_str = f'''
=====================================
Starting iVar with subject: {subject}
{get_time_date()}
Arguments:
{pprint.pformat(params, width=1)}
=====================================
'''
write(stdout, start_str)
print(start_str)
update_permissions(ivar_dir, params)
update_permissions(output_dir, params)
# Run fixq.sh
for input_read in input_reads:
tmp_f = join(raw_dir, 'tmp_' + basename(input_read))
f = join(raw_dir, basename(input_read))
smart_copy(input_read, f)
run(f'zcat {f} | awk \'NR%4 == 0 {{ gsub(\\"F\\", \\"?\\"); gsub(\\":\\", \\"5\\") }}1\'' +
f' | gzip -c > {tmp_f}', params)
if exists(tmp_f):
smart_remove(f)
os.rename(tmp_f, f)
reads.append(f)
# Deinterleave if only a single FASTQ was found
# fasta = join(ivar_dir, 'references/PS_1200bp.fasta')
fasta = join(ivar_dir, 'references/NC_045512.2.fasta')
if len(reads) == 1:
f = reads[0]
read1 = replace_extension(f, '_R1.fastq.gz')
read2 = replace_extension(f, '_R2.fastq.gz')
run(f'reformat.sh in={f} out1={read1} out2={read2}', params)
smart_remove(f)
elif len(reads) == 2:
reads.sort()
read1 = reads[0]
read2 = reads[1]
else:
raise Exception(f'Invalid reads: {reads}')
align_prefix = join(alignments_dir, subject)
bam = replace_extension(align_prefix, '.bam')
trimmed = replace_extension(align_prefix, '.trimmed')
trimmed_sorted = replace_extension(align_prefix, '.trimmed.sorted.bam')
variants = replace_extension(align_prefix, '.variants')
noinsertions = replace_extension(align_prefix, '.noins.variants')
masked = replace_extension(align_prefix, '.masked.txt')
trimmed_masked = replace_extension(align_prefix, '.trimmed.masked.bam')
trimmed_masked_bedgraph = join(output_dir, f'{subject}.ivar.bedgraph')
final_masked = replace_extension(align_prefix, '.final.masked.variants')
lofreq_bam = replace_extension(align_prefix, '.lofreq.bam')
lofreq_bedgraph = join(output_dir, f'{subject}.ivar.lofreq.bedgraph')
vcf_s0 = replace_extension(align_prefix, '.vcf')
tsv = replace_extension(align_prefix, '.final.masked.variants.tsv')
output_vcf = join(alignments_dir, f'{subject}.ivar.vcf')
output_tsv = join(output_dir, f'{subject}.ivar.tsv')
output_fa = join(output_dir, f'{subject}.ivar.consensus')
run(f'bwa index {fasta}', params)
run(f'bwa mem -t 8 {fasta} {read1} {read2} | samtools sort -o {bam}', params)
run(f'ivar trim -m {read_cutoff_bp} -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.scheme.bed -p {trimmed} -i {bam} -e', params)
run(f'samtools sort {trimmed}.bam -o {trimmed_sorted}', params)
# call variants with ivar (produces {subject}.variants.tsv)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {trimmed_sorted} | ' +
f'ivar variants -p {variants} -q 20 -t {variant_frequency} -r {fasta} ' +
f'-g {ivar_dir}/GCF_009858895.2_ASM985889v3_genomic.gff', params)
# remove low quality insertions because we want to ignore most mismatches
# to primers that are insertions (produces {subject}.noins.variants.tsv)
run(f"awk \'! (\\$4 ~ /^\\+/ && \\$10 >= 20) {{ print }}\' < {variants}.tsv > {noinsertions}.tsv", params)
# get primers with mismatches to reference (produces {subject}.masked.txt)
run(f'ivar getmasked -i {noinsertions}.tsv -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.bed ' +
f'-f {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.tsv -p {masked}', params)
# remove reads with primer mismatches (produces {subject}.trimmed.masked.bam)
run(f'ivar removereads -i {trimmed_sorted} -p {trimmed_masked} ' +
f'-t {masked} -b {ivar_dir}/primers_{primers_bp}bp/nCoV-2019.bed', params)
# call variants with reads with primer mismatches removed (produces {subject}.final.masked.variants.tsv)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {trimmed_masked} | ' +
f'ivar variants -p {final_masked} -q 20 -t {variant_frequency} -r {fasta} ' +
f'-g {ivar_dir}/GCF_009858895.2_ASM985889v3_genomic.gff', params)
smart_copy(tsv, output_tsv)
# convert ivar output to vcf (produces {subject}.final.masked.variants.vcf)
run(f'python /opt/ivar_variants_to_vcf.py {output_tsv} {output_vcf}', params)
# use lofreq to call variants (produces {subject}.lofreq.bam and {subject}.vcf)
run(f'lofreq indelqual --dindel -f {fasta} -o {lofreq_bam} --verbose {trimmed_masked}', params)
run(f'samtools index {lofreq_bam}', params)
run(f'lofreq call -d {depth_cap} --verbose --call-indels -f {fasta} -o {vcf_s0} --verbose {lofreq_bam}', params)
# create consensus sequence for comparing to reference genome (produces {subject}.consensus.fa)
run(f'samtools mpileup -aa -A -d 0 -B -Q 0 {lofreq_bam} | ' +
f'ivar consensus -p {output_fa}', params)
# create bedgraphs of gene coverage (produces {subject}.lofreq.bedgraph and {subject}.trimmed.masked.bedgraph)
# https://bedtools.readthedocs.io/en/latest/content/tools/genomecov.html
run(f'bedtools genomecov -ibam {lofreq_bam} -bga > {lofreq_bedgraph}', params)
run(f'bedtools genomecov -ibam {trimmed_masked} -bga > {trimmed_masked_bedgraph}', params)
# Run snpEff postprocessing
vcf_s1 = join(output_dir, f'{subject}.ivar.lofreq.vcf')
vcf_s2 = join(output_dir, f'{subject}.ivar.lofreq.snpEFF.vcf')
vcf_s3 = join(output_dir, f'{subject}.ivar.lofreq.snpSIFT.txt')
run(f'sed "s/MN908947.3/NC_045512.2/g" {vcf_s0} > {vcf_s1}', params)
run(f'java -Xmx8g -jar /opt/snpEff/snpEff.jar NC_045512.2 {vcf_s1} > {vcf_s2}', params)
run(f'cat {vcf_s2} | /opt/snpEff/scripts/vcfEffOnePerLine.pl | java -jar /opt/snpEff/SnpSift.jar ' +
f' extractFields - CHROM POS REF ALT AF DP "ANN[*].IMPACT" "ANN[*].FEATUREID" "ANN[*].EFFECT" ' +
f' "ANN[*].HGVS_C" "ANN[*].HGVS_P" "ANN[*].CDNA_POS" "ANN[*].AA_POS" "ANN[*].GENE" > {vcf_s3}', params)
    # TODO: make this DRY
i_vcf_s1 = join(output_dir, f'{subject}.ivar.vcf')
i_vcf_s2 = join(output_dir, f'{subject}.ivar.snpEFF.vcf')
i_vcf_s3 = join(output_dir, f'{subject}.ivar.snpSIFT.txt')
run(f'sed "s/MN908947.3/NC_045512.2/g" {output_vcf} > {i_vcf_s1}', params)
run(f'java -Xmx8g -jar /opt/snpEff/snpEff.jar NC_045512.2 -noStats {i_vcf_s1} > {i_vcf_s2}', params)
run(f'cat {i_vcf_s2} | /opt/snpEff/scripts/vcfEffOnePerLine.pl | java -jar /opt/snpEff/SnpSift.jar ' +
f' extractFields - CHROM POS REF ALT "GEN[0].ALT_FREQ" DP "ANN[*].IMPACT" "ANN[*].FEATUREID" "ANN[*].EFFECT" ' +
f' "ANN[*].HGVS_C" "ANN[*].HGVS_P" "ANN[*].CDNA_POS" "ANN[*].AA_POS" "ANN[*].GENE" ' +
f' FILTER "GEN[0].ALT_QUAL" | ' +
f' awk \'/^CHROM/ {{ sub(\\"GEN\\\\[0\\\\].ALT_FREQ\\", \\"AF\\"); \
sub(\\"GEN\\\\[0\\\\].ALT_QUAL\\", \\"ALT_QUAL\\") }}1\' > {i_vcf_s3}', params)
# Clear extra files
smart_remove('snpEff_genes.txt')
smart_remove('snpEff_summary.html')
update_permissions(ivar_dir, params)
update_permissions(output_dir, params)
finish_str = f'''
=====================================
Finished iVar with subject: {subject}
{get_time_date()}
Arguments:
{pprint.pformat(params, width=1)}
Total time: {get_time_string(time.time() - start_time)} (HH:MM:SS)
=====================================
'''
write(stdout, finish_str)
print(finish_str)
| 2.15625
| 2
|
pypadb/decorator/select.py
|
ChenzDNA/pypadb
| 1
|
12781832
|
<gh_stars>1-10
import functools
from types import GenericAlias
from typing import Callable, Any
from ..connection_pool import cursor_type, connection
from ..exception import RequireReturnTypeAnnotation
from ..utils import inspect_util
def select(sql: str, data_type: Any) -> Callable:
def deco(fun: Callable):
@functools.wraps(fun)
def wrapper(*args):
conn = connection()
with conn:
cur = conn.cursor(cursor_type())
cur.execute(sql, dict(zip([a.name for a in inspect_util.arg_list(fun)], args)))
func_returns = inspect_util.returns_type(fun)
if not func_returns:
raise RequireReturnTypeAnnotation('require return type annotation')
first_data = cur.fetchone()
if first_data is None:
return None
if func_returns == GenericAlias(list, data_type):
return [data_type(**first_data), *[data_type(**i) for i in cur.fetchall()]]
else:
return data_type(**first_data)
return wrapper
return deco
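# Hypothetical usage sketch (table, columns and dataclass are illustrative; the
# named SQL placeholder is assumed to match the wrapped function's parameter name):
#
# from dataclasses import dataclass
#
# @dataclass
# class User:
#     id: int
#     name: str
#
# @select('select id, name from users where id = %(user_id)s', User)
# def get_user(user_id: int) -> User: ...
#
# @select('select id, name from users', User)
# def list_users() -> list[User]: ...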
| 2.375
| 2
|
simulation/utilities/util.py
|
0xDBFB7/ionprinter
| 0
|
12781833
|
import math
import unittest
epsilon = 8.85*(10**-12.0) #vacuum permittivity
def electron_charge():
return 1.602*10**-19
def scharge_efield(beam_current,beam_velocity,beam_radius,sample_radius=None):
"""Calculate the electric field at the edge of a beam
Eq. 44 at https://arxiv.org/pdf/1401.3951.pdf
returns one value: V/m
Beam velocity is in m/s
"""
if(sample_radius == None):
sample_radius=beam_radius
return ((beam_current/(2.0*(math.pi)*epsilon*beam_velocity)) * (sample_radius/(beam_radius**2.0)))
def scharge_force(beam_current,beam_velocity,beam_radius):
scharge_force = scharge_efield(beam_current,beam_velocity,beam_radius)*electron_charge()
return scharge_force
def scharge_bfield(beam_current,beam_velocity,beam_radius):
"""Calculate the minimum B field required to counteract scharge beam dispersion
Return value is in teslas
"""
    force = scharge_force(beam_current,beam_velocity,beam_radius)
    required_bfield = force/(electron_charge()*beam_velocity)
    return required_bfield, force
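# Hedged usage sketch (parameters mirror the unit test below; illustrative only):
#
#     e_field = scharge_efield(0.05, 9990, 0.0025)          # ~3.6e7 V/m at the beam edge
#     b_field, force = scharge_bfield(0.05, 9990, 0.0025)   # minimum B in teslas, force on one charge in newtons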
#
# def einzel_focus_efield(V_0,V_1,focusing_period,focus_geometry_radius,y_position):
# """Calculate
# from Page 7 of https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19650023758.pdf
# """
# focusing_period_v = focusing_period*2.0*(math.pi)
# K = V_1/(V_0*math.cosh(focusing_period_v))
# return -0.5*electron_charge()*V_0*(K**2.0)*(focusing_period_v**2.0)*y_position
#print(scharge_efield(0.178,9990,0.005)/10**6)
#0.00001963495
# print(einzel_focus_efield(20000,10000,0.001,0.001,0.0005)/10**6)
class TestAll(unittest.TestCase):
def test_scharge_efield(self):
ef = scharge_efield(0.05,9990,0.0025)
self.assertAlmostEqual(ef/10**6, 36.0032524, places=3)
# def test_einzel_efield(self):
# ef = einzel_focus_efield(20000,10000,0.001,0.001,0.0005)
# self.assertAlmostEqual(ef/10**6, 36.0032524, places=3)
#
# def scharge_efield_test(self):
# assertAlmostEqual(, second, places=4, msg=None, delta=None)
# assertAlmostEqual(, second, places=4, msg=None, delta=None)
# assertAlmostEqual(, second, places=4, msg=None, delta=None)
# unittest.main()
| 2.8125
| 3
|
Markov.py
|
timestocome/AliceInWonderland
| 0
|
12781834
|
# http://github.com/timestocome/
# build a markov chain and use it to predict Alice In Wonderland/Through the Looking Glass text
import numpy as np
import pickle
from collections import Counter
import markovify # https://github.com/jsvine/markovify
#######################################################################
# read in text and break into words and sentences
#####################################################################
# open file and read in text
#file = open('AliceInWonderland.txt', 'r')
file = open('BothBooks.txt', encoding='utf-8')
data = file.read()
file.close()
# create markov model
model_3 = markovify.Text(data, state_size=3)
# generate text from model
print("*******************************")
for i in range(10):
print("__________________________")
print(model_3.make_sentence())
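# Hedged extension sketch (not part of the original script): markovify models
# can be serialized with to_json()/from_json(), which avoids rebuilding the
# chain on every run. The file name below is an assumption.
#
#     with open('alice_markov.json', 'w') as f:
#         f.write(model_3.to_json())
#     # later:
#     # with open('alice_markov.json') as f:
#     #     model_3 = markovify.Text.from_json(f.read())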
| 3.640625
| 4
|
test/schema.py
|
fanngyuan/vitess
| 0
|
12781835
|
#!/usr/bin/python
import os
import socket
import utils
import tablet
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
def setup():
utils.zk_setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_replica1.init_mysql(),
shard_0_replica2.init_mysql(),
shard_0_rdonly.init_mysql(),
shard_0_backup.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica1.init_mysql(),
]
utils.wait_procs(setup_procs)
def teardown():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_replica1.teardown_mysql(),
shard_0_replica2.teardown_mysql(),
shard_0_rdonly.teardown_mysql(),
shard_0_backup.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
utils.zk_teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica1.remove_tree()
shard_0_replica2.remove_tree()
shard_0_rdonly.remove_tree()
shard_0_backup.remove_tree()
shard_1_master.remove_tree()
shard_1_replica1.remove_tree()
# statements to create the table
create_vt_select_test = [
('''create table vt_select_test%d (
id bigint not null,
msg varchar(64),
primary key (id)
) Engine=InnoDB''' % x).replace("\n", "")
for x in xrange(4)]
def check_tables(tablet, expectedCount):
tables = tablet.mquery('vt_test_keyspace', 'show tables')
if len(tables) != expectedCount:
raise utils.TestError('Unexpected table count on %s (not %u): %s' %
(tablet.tablet_alias, expectedCount, str(tables)))
@utils.test_case
def run_test_complex_schema():
utils.run_vtctl('CreateKeyspace test_keyspace')
shard_0_master.init_tablet( 'master', 'test_keyspace', '0')
shard_0_replica1.init_tablet('replica', 'test_keyspace', '0')
shard_0_replica2.init_tablet('replica', 'test_keyspace', '0')
shard_0_rdonly.init_tablet( 'rdonly', 'test_keyspace', '0')
shard_0_backup.init_tablet( 'backup', 'test_keyspace', '0')
shard_1_master.init_tablet( 'master', 'test_keyspace', '1')
shard_1_replica1.init_tablet('replica', 'test_keyspace', '1')
utils.run_vtctl('RebuildShardGraph test_keyspace/0', auto_log=True)
utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)
# run checks now before we start the tablets
utils.validate_topology()
# create databases
shard_0_master.create_db('vt_test_keyspace')
shard_0_replica1.create_db('vt_test_keyspace')
shard_0_replica2.create_db('vt_test_keyspace')
shard_0_rdonly.create_db('vt_test_keyspace')
shard_0_backup.create_db('vt_test_keyspace')
shard_1_master.create_db('vt_test_keyspace')
shard_1_replica1.create_db('vt_test_keyspace')
# start the tablets
shard_0_master.start_vttablet()
shard_0_replica1.start_vttablet()
shard_0_replica2.start_vttablet()
shard_0_rdonly.start_vttablet()
shard_0_backup.start_vttablet(wait_for_state="NOT_SERVING")
shard_1_master.start_vttablet()
shard_1_replica1.start_vttablet()
# make sure all replication is good
utils.run_vtctl('ReparentShard -force test_keyspace/0 ' + shard_0_master.tablet_alias, auto_log=True)
utils.run_vtctl('ReparentShard -force test_keyspace/1 ' + shard_1_master.tablet_alias, auto_log=True)
utils.run_vtctl('ValidateKeyspace -ping-tablets test_keyspace')
# check after all tablets are here and replication is fixed
utils.validate_topology(ping_tablets=True)
# shard 0: apply the schema using a complex schema upgrade, no
# reparenting yet
utils.run_vtctl(['ApplySchemaShard',
'-sql='+create_vt_select_test[0],
'test_keyspace/0'],
auto_log=True)
# check all expected hosts have the change:
# - master won't have it as it's a complex change
check_tables(shard_0_master, 0)
check_tables(shard_0_replica1, 1)
check_tables(shard_0_replica2, 1)
check_tables(shard_0_rdonly, 1)
check_tables(shard_0_backup, 1)
check_tables(shard_1_master, 0)
check_tables(shard_1_replica1, 0)
# shard 0: apply schema change to just master directly
# (to test its state is not changed)
utils.run_vtctl(['ApplySchema',
'-stop-replication',
'-sql='+create_vt_select_test[0],
shard_0_master.tablet_alias],
auto_log=True)
check_tables(shard_0_master, 1)
# shard 0: apply new schema change, with reparenting
utils.run_vtctl(['ApplySchemaShard',
'-new-parent='+shard_0_replica1.tablet_alias,
'-sql='+create_vt_select_test[1],
'test_keyspace/0'],
auto_log=True)
check_tables(shard_0_master, 1)
check_tables(shard_0_replica1, 2)
check_tables(shard_0_replica2, 2)
check_tables(shard_0_rdonly, 2)
check_tables(shard_0_backup, 2)
# verify GetSchema --tables works
out, err = utils.run_vtctl('GetSchema --tables=vt_select_test0 ' +
shard_0_replica1.tablet_alias,
log_level='INFO', trap_output=True)
if not "vt_select_test0" in err or "vt_select_test1" in err:
raise utils.TestError('Unexpected GetSchema --tables=vt_select_test0 output: %s' % err)
# keyspace: try to apply a keyspace-wide schema change, should fail
# as the preflight would be different in both shards
out, err = utils.run_vtctl(['ApplySchemaKeyspace',
'-sql='+create_vt_select_test[2],
'test_keyspace'],
log_level='INFO', trap_output=True,
raise_on_error=False)
if err.find('ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1:
raise utils.TestError('Unexpected ApplySchemaKeyspace output: %s' % err)
utils.run_vtctl('PurgeActions /zk/global/vt/keyspaces/test_keyspace/action')
# shard 1: catch it up with simple updates
utils.run_vtctl(['ApplySchemaShard',
'-simple',
'-sql='+create_vt_select_test[0],
'test_keyspace/1'],
auto_log=True)
utils.run_vtctl(['ApplySchemaShard',
'-simple',
'-sql='+create_vt_select_test[1],
'test_keyspace/1'],
auto_log=True)
check_tables(shard_1_master, 2)
check_tables(shard_1_replica1, 2)
# keyspace: apply a keyspace-wide simple schema change, should work now
utils.run_vtctl(['ApplySchemaKeyspace',
'-simple',
'-sql='+create_vt_select_test[2],
'test_keyspace'],
auto_log=True)
# check all expected hosts have the change
check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
check_tables(shard_0_replica1, 3) # current master
check_tables(shard_0_replica2, 3)
check_tables(shard_0_rdonly, 3)
check_tables(shard_0_backup, 3)
check_tables(shard_1_master, 3) # current master
check_tables(shard_1_replica1, 3)
# keyspace: apply a keyspace-wide complex schema change, should work too
utils.run_vtctl(['ApplySchemaKeyspace',
'-sql='+create_vt_select_test[3],
'test_keyspace'],
auto_log=True)
# check all expected hosts have the change:
# - master won't have it as it's a complex change
# - backup won't have it as IsReplicatingType is false
check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
check_tables(shard_0_replica1, 3) # current master
check_tables(shard_0_replica2, 4)
check_tables(shard_0_rdonly, 4)
check_tables(shard_0_backup, 4)
check_tables(shard_1_master, 3) # current master
check_tables(shard_1_replica1, 4)
# now test action log pruning
oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
oldCount = len(oldLines)
if utils.options.verbose:
print "I have %u actionlog before" % oldCount
if oldCount <= 5:
raise utils.TestError('Not enough actionlog before: %u' % oldCount)
utils.run_vtctl('PruneActionLogs -keep-count=5 /zk/*/vt/tablets/*/actionlog', auto_log=True)
newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
newCount = len(newLines)
if utils.options.verbose:
print "I have %u actionlog after" % newCount
if newCount != 5:
raise utils.TestError('Unexpected actionlog count after: %u' % newCount)
if oldLines[-5:] != newLines:
raise utils.TestError('Unexpected actionlog values:\n%s\n%s' %
(' '.join(oldLines[-5:]), ' '.join(newLines)))
utils.pause("Look at schema now!")
shard_0_master.kill_vttablet()
shard_0_replica1.kill_vttablet()
shard_0_replica2.kill_vttablet()
shard_0_rdonly.kill_vttablet()
shard_0_backup.kill_vttablet()
shard_1_master.kill_vttablet()
shard_1_replica1.kill_vttablet()
def run_all():
run_test_complex_schema()
def main():
args = utils.get_args()
try:
if args[0] != 'teardown':
setup()
if args[0] != 'setup':
for arg in args:
globals()[arg]()
print "GREAT SUCCESS"
except KeyboardInterrupt:
pass
except utils.Break:
utils.options.skip_teardown = True
finally:
teardown()
if __name__ == '__main__':
main()
| 2.09375
| 2
|
keskiarvo_vs_mediaani/Python/keskiarvo_vs_mediaani.py
|
samuntiede/valokuvamatikka
| 0
|
12781836
|
# We compare the median and the mean. The images (30 in total) show students
# at random positions, and the camera was on a tripod in a classroom. We take
# on the one hand the means and on the other hand the medians of the pixel values.
# The end results are very different!
#
# <NAME> April 2021
# Matlab -> Python Ville Tilvis June 2021
import numpy as np
import matplotlib.pyplot as plt
# Number of images
Nim = 30
# Initialize the matrices in which the means and medians are stored
im_ave = np.zeros([2000,2997,3])
im_median = np.zeros([2000,2997,3])
im_4D = np.zeros([2000,2997,3,Nim])
print("Loading images:")
# Open the images one at a time
for iii in range (0,Nim):
fname = '../_kuvat/IMGP'+str(1423+iii)+'.jpg'
im_orig = plt.imread(fname,'jpg');
    # Add the current image to the stack
im_4D[:,:,:,iii] = im_orig;
    # Print progress
print(iii+1,"/",Nim)
print("Computing the mean and the median...")
im_ave = np.mean(im_4D,axis=3)/255;
im_median = np.median(im_4D,axis=3)/255;
print("Done!")
print("")
print("Displaying images...")
# Subtract the average image and
# the median image from the
# empty reference image (red channel)
im0 = np.array(plt.imread('../_kuvat/IMGP1444.jpg','jpg'))/255
error1 = np.abs(im_ave-im0)
error2 = np.abs(im_median-im0)
errorpic = np.concatenate((error1,error2),axis=1)
errorpic = errorpic/np.max(errorpic[:,:,0])
errorpic = np.power(errorpic,0.3)
# View the images
plt.subplot(2,1,1)
plt.imshow(np.concatenate((im_ave,im_median),axis=1))
plt.axis('off')
plt.gcf().set_dpi(600)
plt.subplot(2,1,2)
plt.imshow(errorpic[:,:,0],cmap='gray', interpolation='none')
plt.axis('off')
plt.gcf().set_dpi(600)
plt.show()
print("Done!")
print("")
print("Saving images...")
# Save the images
plt.imsave('../_kuvat/im_average.jpg',im_ave,);
plt.imsave('../_kuvat/im_median.jpg',im_median);
print("Done!")
| 2.890625
| 3
|
src/arghgreet.py
|
CaptSolo/LU_PySem_2020_1
| 1
|
12781837
|
<filename>src/arghgreet.py
import argh
def main(name="Valdis", count=3):
"""
Printing a greeting message
For our friends
"""
for _ in range(count):
print(f'Hello {name}')
return None
if __name__ == "__main__":
argh.dispatch_command(main)
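# Hedged usage sketch: argh maps the function signature to CLI options, so the
# defaulted parameters become optional flags. An invocation might look like:
#
#     $ python arghgreet.py --name Anna --count 2
#     Hello Anna
#     Hello Anna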
| 3.171875
| 3
|
examples/acreps/pendulum_cart.py
|
hanyas/reps
| 8
|
12781838
|
import numpy as np
import gym
from reps.acreps import acREPS
np.random.seed(1337)
env = gym.make('Pendulum-RL-v1')
env._max_episode_steps = 250
env.unwrapped.dt = 0.05
env.unwrapped.sigma = 1e-4
# env.seed(1337)
acreps = acREPS(env=env, kl_bound=0.1, discount=0.985, lmbda=0.95,
scale=[1., 1., 8.0, 2.5], mult=0.5,
nb_vfeat=75, nb_pfeat=75, vf_reg=1e-12)
acreps.run(nb_iter=15, nb_train_samples=5000,
nb_eval_rollouts=25, nb_eval_steps=100)
# evaluate
rollouts, _ = acreps.evaluate(nb_rollouts=25, nb_steps=100)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=1, ncols=acreps.state_dim + acreps.act_dim, figsize=(12, 4))
for roll in rollouts:
for k, col in enumerate(ax[:-1]):
col.plot(roll['x'][:, k])
ax[-1].plot(roll['uc'])
plt.show()
| 2.125
| 2
|
agent_sched/packages/schedMain.py
|
mishahmadian/HPC_Provenance
| 0
|
12781839
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
The main module that keeps listening for any incoming RPC calls in order to
run the appropriate method to respond to the RPC request.
<NAME> (<EMAIL>)
"""
from subprocess import run, CalledProcessError, DEVNULL
from schedConfig import SchedConfig, ConfigReadExcetion
from schedComm import SchedConnection, CommunicationExp
from ugeSchedService import UGEAccountingInfo
from schedLogger import log, Mode
from threading import Event
import signal
import json
import sys
class SchedMain:
def __init__(self):
try:
self._config = SchedConfig()
self._rpc_queue = self._config.getRPC_queue()
self._rpc_vhost = self._config.getVhost()
self._rpc_proc = None
signal.signal(signal.SIGINT, self.agent_exit)
signal.signal(signal.SIGTERM, self.agent_exit)
#self._cleanup_RPC_que()
except ConfigReadExcetion as confExp:
log(Mode.MAIN_SCHED, confExp.getMessage())
# Handle the SIGINT and SIGTERM signals in order to shutdown
# the Collector agent
def agent_exit(self, sig, frame):
raise SchedExitExp
# Start the Sched Service
def run_sched_service(self):
try:
log(Mode.APP_START, "***************** Provenance Sched Service Started *****************")
# Make Sure the rpc_queue has been defined
if not self._rpc_queue:
raise ConfigReadExcetion("'rpc_queue' is not defined in 'sched.conf' file.")
# Initialize RPC Communication
schedComm = SchedConnection(is_rpc=True)
# Start listening and replying to RPC requests
schedComm.start_RPC_server(self._on_rpc_callback)
except SchedExitExp:
log(Mode.APP_EXIT, "***************** Provenance Sched Service Stopped *****************")
if self._rpc_proc and self._rpc_proc.is_alive():
self._rpc_proc.terminate()
except ConfigReadExcetion as confExp:
log(Mode.MAIN_SCHED, confExp.getMessage())
except CommunicationExp as commExp:
log(Mode.MAIN_SCHED, commExp.getMessage())
except Exception as exp:
log(Mode.MAIN_SCHED, str(exp))
#
# Receiving RPC request and handling the response that has to be sent
# back to the RPC client
#
def _on_rpc_callback(self, request: str, response: list, event: 'Event') -> None:
request = json.loads(request)
if request['action'] == 'uge_acct':
ugeAcctInfo = UGEAccountingInfo(self._config)
ugeAcctLst = ugeAcctInfo.getAcctountingInfo(request['data'], event)
if ugeAcctLst:
response.append('[^@]'.join(ugeAcctLst))
response.append('NONE')
# else return nothing
response.append('NONE')
#
# Cleanup the RabbitMQ RPC Queue
#
def _cleanup_RPC_que(self):
try:
run(f"rabbitmqctl -p {self._rpc_vhost} purge_queue {self._rpc_queue}",
shell=True, check=True, stdout=DEVNULL)
except CalledProcessError as runExp:
log(Mode.MAIN_SCHED, str(runExp))
sys.exit(-1)
#
# Exception will be raised when SIGINT or SIGTERM are called
#
class SchedExitExp(Exception):
pass
if __name__ == "__main__":
schedMain = SchedMain()
schedMain.run_sched_service()
| 2.234375
| 2
|
surveillance/stats.py
|
bkmeneguello/surveillance
| 1
|
12781840
|
<reponame>bkmeneguello/surveillance
import os
import statsd
client = statsd.StatsClient(prefix='surveillance',
host=os.environ.get('STATSD_HOST', 'localhost'),
port=int(os.environ.get('STATSD_PORT', 8125)),
maxudpsize=int(os.environ.get('STATSD_MAX_UDP_SIZE', 512)),
ipv6=bool(os.environ.get('STATSD_IPV6', False)))
incr = client.incr
decr = client.decr
timer = client.timer
timing = client.timing
gauge = client.gauge
set = client.set
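# Hedged usage sketch (call sites below are hypothetical): the module re-exports
# bound client methods, so callers can do e.g.
#
#     from surveillance import stats
#     stats.incr('frames.processed')
#     with stats.timer('frame.detection'):
#         detect(frame)
#
# Note that `set` above shadows the Python builtin inside this module.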
| 2.203125
| 2
|
tests/loading.py
|
ecoinvent/bw_processing
| 1
|
12781841
|
<reponame>ecoinvent/bw_processing
# from bw_processing.loading import load_bytes
# from bw_processing import create_package
# from io import BytesIO
# from pathlib import Path
# import json
# import numpy as np
# import tempfile
# def test_load_package_in_directory():
# with tempfile.TemporaryDirectory() as td:
# td = Path(td)
# resources = [
# {
# "name": "first-resource",
# "path": "some-array.npy",
# "matrix": "technosphere",
# "data": [
# tuple(list(range(11)) + [False, False]),
# tuple(list(range(12, 23)) + [True, True]),
# ],
# }
# ]
# with tempfile.TemporaryDirectory() as td:
# fp = create_package(
# name="test-package", resources=resources, path=td, replace=False
# )
# # Test data in fp
# def test_load_json():
# with tempfile.TemporaryDirectory() as td:
# td = Path(td)
# data = [{'foo': 'bar', }, 1, True]
# json.dump(data, open(td / "data.json", "w"))
# assert mapping["json"](open(td / "data.json")) == data
# # def test_load_numpy():
# # with tempfile.TemporaryDirectory() as td:
# # td = Path(td)
# # data = np.arange(10)
# # np.save(td / "array.npy", data)
# # assert np.allclose(mapping["npy"](open(td / "array.npy")), data)
# # def
# # resources = [
# # {
# # "name": "first-resource",
# # "path": "some-array.npy",
# # "matrix": "technosphere",
# # "data": [
# # tuple(list(range(11)) + [False, False]),
# # tuple(list(range(12, 23)) + [True, True]),
# # ],
# # }
# # ]
# # with tempfile.TemporaryDirectory() as td:
# # fp = create_package(
# # name="test-package", resources=resources, path=td, compress=False
# # )
# # # Test data in fp
| 2.25
| 2
|
momentproblems/intersections.py
|
mwageringel/momentproblems
| 0
|
12781842
|
from sage.all import RDF, CDF, matrix, prod
import scipy.linalg
import numpy as np
def column_space_intersection(*As, tol, orthonormal=False):
r"""
Return a matrix with orthonormal columns spanning the intersection of the
column spaces of the given matrices.
INPUT:
- ``*As`` -- matrices with a fixed number of rows and linearly independent
(or orthonormal) columns each
- ``tol`` -- tolerance for truncating the singular values to determine the
rank of the intersection
- ``orthonormal`` -- boolean (default: ``False``); if ``True``, the columns
of each matrix are assumed to be orthonormal
ALGORITHM: <NAME> -- Algorithm 12.4.3
"""
if len(As) < 1:
raise ValueError("at least one matrix required")
n = As[0].nrows()
for A in As:
if A.nrows() != n:
raise ValueError("matrices must have same number of rows")
if all(A.base_ring().is_exact() for A in As):
V = As[0].column_space()
for A in As[1:]:
V = V.intersection(A.column_space())
return V.basis_matrix().T
for A in As:
if A.base_ring() not in (RDF, CDF):
raise ValueError("only matrices over RDF/CDF or exact fields supported")
if any(A.ncols() == 0 for A in As):
return matrix(As[0].base_ring(), n, 0)
Qs = As if orthonormal else [A.QR()[0][:,:A.ncols()] for A in As]
if len(As) == 1:
return Qs[0]
# for better performance, we switch to numpy
# Taking slices or hermitian transposes is a bottleneck with double dense matrices in Sage.
Qs = [Q.numpy() for Q in Qs]
# C = prod([Qs[0].H] + [Q*Q.H for Q in Qs[1:-1]] + [Qs[-1]])
# sort Qs such that smallest matrix is last, second smallest first
Q_last = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
Q_first = Qs.pop(min(range(len(Qs)), key=lambda j: Qs[j].shape[1]))
C = Q_last
for Q in Qs: # without Q_last and Q_first
C = Q @ (Q.conj().T @ C) # this should be faster than (Q * Q.H) * C, since Q*Q.H is very large
C = Q_first.conj().T @ C
Σ, Vh = scipy.linalg.svd(C, overwrite_a=True)[1:] # we can overwrite, since C involves at least 1 multiplication
rk = np.sum(1-Σ < tol)
return matrix(Q_last @ Vh.T[:,:rk].conj())
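# Hedged usage sketch (illustrative only): intersect the column spaces of two
# RDF matrices whose spans share exactly one direction.
#
#     from sage.all import matrix, RDF
#     A = matrix(RDF, [[1, 0], [0, 1], [0, 0]])   # columns span e1, e2
#     B = matrix(RDF, [[1, 1], [0, 0], [0, 1]])   # columns span e1, e3
#     W = column_space_intersection(A, B, tol=1e-10)
#     # W has a single column spanning e1 (up to sign)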
def null_space_intersection(*As, tol):
r"""
Return a matrix with orthonormal columns spanning the intersection of the
null spaces of the given matrices.
INPUT:
- ``*As`` -- matrices with a fixed number of columns
- ``tol`` -- tolerance for truncating the singular values to determine the
rank of intermediate results
ALGORITHM: <NAME> -- Algorithm 12.4.2
"""
if len(As) < 1:
raise ValueError("at least one matrix required")
n = As[0].ncols()
if all(A.base_ring().is_exact() for A in As):
ker = As[0].right_kernel()
for A in As[1:]:
ker = ker.intersection(A.right_kernel())
# TODO document that this does not have orthonormal columns
return ker.basis_matrix().T
for A in As:
if A.base_ring() not in (RDF, CDF):
raise ValueError("only matrices over RDF/CDF or exact rings supported")
if A.ncols() != n:
raise ValueError("matrices must have same number of columns")
Y = None
for A in As:
if A.nrows() == 0:
continue
C = A * Y if Y is not None else A
Σ, V = C.SVD()[1:]
q = len([s for s in Σ.diagonal() if s > tol])
if q >= C.ncols():
return matrix(As[0].base_ring(), n, 0)
X = V[:, q:]
Y = Y * X if Y is not None else X
if Y is None:
# all the matrices have 0 rows
return matrix.identity(As[0].base_ring(), n)
else:
return Y
def null_space(A, tol):
import numpy
import scipy.linalg
if A.nrows() == 0:
return matrix.identity(A.base_ring(), A.ncols())
return matrix(numpy.ascontiguousarray(scipy.linalg.null_space(A, rcond=tol)))
def _tests_sage():
"""
TESTS::
sage: from momentproblems import intersections
sage: TestSuite(intersections._tests_sage()).run(skip='_test_pickling')
"""
from sage.all import SageObject, matrix, RDF, ZZ
import numpy
import numpy.linalg
import scipy.linalg
class Tests(SageObject):
def matrices(self):
# test data
for _ in range(5):
for num in range(1, 5):
# generate some matrices with few rows, so we can intersect their kernels
matrices = [matrix.random(RDF, ZZ.random_element(0, 4), 9) for _ in range(num)]
yield matrices
def matrices2(self):
# test data
for _ in range(5):
for num in range(1, 5):
# generate some matrices with few rows, so we can intersect their kernels
matrices = [matrix.random(RDF, 9, 9 - ZZ.random_element(0, 4)) for _ in range(num)]
yield matrices
def equal_spaces(self, A, B, tol):
from numpy.linalg import matrix_rank
return matrix_rank(A.augment(B), tol) == matrix_rank(A, tol) == matrix_rank(B, tol)
def _test_null_space_intersection(self, **kwds):
tol = 1e-10
for As in self.matrices():
ker = null_space_intersection(*As, tol=tol)
assert all([ker.ncols() == 0 or A.nrows() == 0 or (A * ker).norm() < tol for A in As])
assert max(0, As[0].ncols() - sum([A.nrows() for A in As])) == ker.ncols() # generically the correct dimension
# the intersection is also simply the null space of the augmented matrix
ker2 = null_space(matrix(RDF, [v for A in As for v in A.rows()], ncols=As[0].ncols()), tol)
assert self.equal_spaces(ker, ker2, tol)
def _test_column_space_intersection(self, **kwds):
tol = 1e-10
for As in self.matrices2():
B = column_space_intersection(*As, tol=tol)
assert B.ncols() == max(0, As[0].nrows() - sum([A.nrows() - A.ncols() for A in As])) # generically the correct dimension
for A in As:
assert self.equal_spaces(A.augment(B), A, tol) # B is contained in A
def _test_compatibilty(self, **kwds):
tol = 1e-10
for As in self.matrices():
# computing null space intersection is the same as computing
# column space intersection of null spaces
ker = null_space_intersection(*As, tol=tol)
ker2 = column_space_intersection(*[null_space(A, tol) for A in As], tol=tol, orthonormal=True)
assert self.equal_spaces(ker, ker2, tol)
return Tests()
| 3.0625
| 3
|
settings/__init__.py
|
SublimeText/InactivePanes
| 28
|
12781843
|
<gh_stars>10-100
"""Provides a settings abstraction class.
Exported classes:
* Settings
"""
class Settings(object):
"""ST settings abstraction that helps with default values and running a callback when changed.
The main purpose is to always provide the correct value of a setting or a default,
if set, under the same identifier (here: attribute).
The settings auto-update by default
and a custom callback may be specified
that is called whenever one of the tracked settings value changes.
Note that this is different to Sublimes `settings.add_on_change`
as that will be called when any containing setting *could* have changed,
while we only want it if the specified setting actually changed.
Methods:
* __init__(settings_obj, settings, callback=None, auto_update=True):
* update()
* has_changed()
* get_state()
* get_real_state()
* set_callback(callback, auto_update=True)
* clear_callback(clear_auto_update=False)
"""
# Static class variables
KEY = "__settings_abstr"
# Instance variables
_sobj = None
_settings = None
_callback = None
_registered = False
def __init__(self, settings_obj, settings, callback=None, auto_update=True):
"""Create a new instance.
`settings` should be provided as a dict of tuples,
and attribute names should not be one of the existing functions.
And of course they should be valid attribute names.
Example call:
Settings(
sublime.load_settings("Preferences.sublime-settings"),
settings=dict(
attr_name_to_save_as=('settings_key_to_read_from', 'default_value'),
attr_name_to_save_as2='settings_key_to_read_from_with_default_None',
attr_name_and_settings_key_with_default_None=None
#, ...
),
callback=on_settings_changed, # optional, callback
auto_update=True # optional, bool (whether the attributes should be kept up to date)
)
For the callback and auto_update parameters, refer to `set_callback`.
"""
self._sobj = settings_obj
for k, v in settings.items():
if v is None:
# Use the attr name as settings key and `None` as default
settings[k] = (k, None)
if isinstance(v, str):
# Set default to `None` if a string was supplied
settings[k] = (v, None)
self._settings = settings
self.update()
self.set_callback(callback, auto_update)
def __del__(self):
"""Deregister callback when destructing."""
self.clear_callback(True)
def update(self):
"""Read all the settings and save them in their respective attributes."""
for attr, (name, def_value) in self._settings.items():
setattr(self, attr, self._sobj.get(name, def_value))
def _on_change(self):
"""Test if the tracked settings have changed and run a callback if specified."""
if self.has_changed():
self.update()
if self._callback:
self._callback()
def _register(self, callback):
self._registered = True
# Use a 100% unique identifier (because ids are never equal for non-primitives)
self._sobj.add_on_change(self.KEY + str(id(self)), callback)
def _unregister(self):
self._registered = False
self._sobj.clear_on_change(self.KEY + str(id(self)))
def has_changed(self):
"""Return a boolean whether the cached settings differ from the settings object."""
return self.get_state() != self.get_real_state()
def get_state(self):
"""Return a dict with the tracked settings and their cached values.
Does NOT use the attribute names but the setting keys.
With the example from __init__: `{"settings_key_to_read_from": 'current_value'}`.
"""
return dict((name, getattr(self, attr))
for attr, (name, _) in self._settings.items())
def get_real_state(self):
"""Return a dict with the tracked settings and their actual values from the settings obj.
Does NOT use the attribute names but the setting keys.
With the example from __init__: `{"settings_key_to_read_from": 'current_value'}`.
"""
return dict((name, self._sobj.get(name, def_value))
for name, def_value in self._settings.values())
def set_callback(self, callback=None, auto_update=True):
"""Register `callback` to be called whenever a tracked setting's value changes.
If `auto_update` is true, automatically update the attributes when the settings change.
This always happens when a callback is set,
thus resulting in the values being up-to-date
when the callback is called.
Return the previous callback if any.
"""
if callback is not None and not callable(callback):
raise TypeError("callback must be callable or None")
register = bool(auto_update or callback)
old_cb = self.clear_callback(not register)
self._callback = callback
if not self._registered and register:
self._register(self._on_change)
return old_cb
def clear_callback(self, clear_auto_update=False):
"""Clear the callback set with set_callback and return it in the process.
clear_auto_update=True will also remove auto-updating the attributes and `get_state`,
if previously enabled.
"""
old_cb = self._callback
self._callback = None
if self._registered and clear_auto_update:
self._unregister()
return old_cb
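# Hedged usage sketch (Sublime Text plugin context; the settings file name and
# keys below are assumptions for illustration):
#
#     def plugin_loaded():
#         global settings
#         settings = Settings(
#             sublime.load_settings("Example.sublime-settings"),
#             settings=dict(opacity=('fade_opacity', 0.9)),
#             callback=lambda: print("opacity is now", settings.opacity),
#         )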
| 2.84375
| 3
|
setup.py
|
mikiec84/pyneurovault_upload
| 1
|
12781844
|
try:
    from setuptools import setup
except ImportError:
from distutils.core import setup
__version__ = '0.1.0'
setup(
name='pyneurovault_upload',
    version=__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/ljchang/pyneurovault_upload',
packages=['pyneurovault_upload'],
license='MIT',
install_requires=['requests>=2.10.0'],
description='A Python library for interfacing with http://neurovault.org upload API',
keywords=['neuroimaging', 'neurovault'],
classifiers=[
"Programming Language :: Python",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
]
)
| 1.398438
| 1
|
texttractor/__init__.py
|
walkr/texttractor
| 0
|
12781845
|
<filename>texttractor/__init__.py
# -*- coding: utf-8 -*-
from .core import TextTractor
from .core import TextCleaner
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
__all__ = ['TextTractor', 'TextCleaner']
| 1.1875
| 1
|
authentik/stages/user_write/api.py
|
BeryJu/passbook
| 15
|
12781846
|
"""User Write Stage API Views"""
from rest_framework.viewsets import ModelViewSet
from authentik.core.api.used_by import UsedByMixin
from authentik.flows.api.stages import StageSerializer
from authentik.stages.user_write.models import UserWriteStage
class UserWriteStageSerializer(StageSerializer):
"""UserWriteStage Serializer"""
class Meta:
model = UserWriteStage
fields = StageSerializer.Meta.fields + ["create_users_as_inactive", "create_users_group"]
class UserWriteStageViewSet(UsedByMixin, ModelViewSet):
"""UserWriteStage Viewset"""
queryset = UserWriteStage.objects.all()
serializer_class = UserWriteStageSerializer
filterset_fields = "__all__"
search_fields = ["name"]
ordering = ["name"]
| 2.21875
| 2
|
shadow/static.py
|
f1uzz/shadow
| 1
|
12781847
|
<gh_stars>1-10
from itertools import chain
from enum import Enum
from shadow.models import Queue, QueueList, Role, RoleList, Shard, ShardList, Ability, AbilityList
# queue types
QUEUES = QueueList(queues=[
Queue(
lcu_queue_name="Summoner's Rift",
ugg_queue_name="ranked_solo_5x5",
rank="platinum_plus",
roles=RoleList(roles=[
Role(display_role_name="Top", display_short_role_name="Top", lcu_role_name="top", ugg_role_name="top"),
Role(display_role_name="Jungle", display_short_role_name="JG", lcu_role_name="jungle", ugg_role_name="jungle"),
Role(display_role_name="Middle", display_short_role_name="Mid", lcu_role_name="middle", ugg_role_name="mid"),
Role(display_role_name="ADC", display_short_role_name="ADC", lcu_role_name="bottom", ugg_role_name="adc"),
Role(display_role_name="Support", display_short_role_name="Sup", lcu_role_name="utility", ugg_role_name="supp")
])
),
Queue(
lcu_queue_name="Howling Abyss",
ugg_queue_name="normal_aram",
rank="overall",
roles=RoleList(roles=[
Role(display_role_name="ARAM", display_short_role_name="ARAM", lcu_role_name="", ugg_role_name="none")
])
),
Queue(
lcu_queue_name="Nexus Blitz",
ugg_queue_name="nexus_blitz",
rank="overall",
roles=RoleList(roles=[
Role(display_role_name="Nexus Blitz", display_short_role_name="NB", lcu_role_name="", ugg_role_name="none")
])
)
])
# list of all roles
ALL_ROLES = chain.from_iterable([queue.roles.roles for queue in QUEUES])
ALL_ROLES = RoleList(roles=[role for role in ALL_ROLES])
# ability types
ABILITIES = AbilityList(abilities=[
Ability(key="Q"),
Ability(key="W"),
Ability(key="E"),
Ability(key="R"),
])
BASIC_ABILITIES = AbilityList(abilities=[
ABILITIES.get_ability_by_key("Q"),
ABILITIES.get_ability_by_key("W"),
ABILITIES.get_ability_by_key("E"),
])
# rune shards
SHARDS = ShardList(shards=[
Shard(ugg_shard_name="Adaptive Force", shard_id=5008),
Shard(ugg_shard_name="Attack Speed", shard_id=5005),
Shard(ugg_shard_name="Scaling Cooldown Reduction", shard_id=5007),
Shard(ugg_shard_name="Armor", shard_id=5002),
Shard(ugg_shard_name="Magic Resist", shard_id=5003),
Shard(ugg_shard_name="Scaling Health", shard_id=5001)
])
# gameflow phases
class GAMEFLOW_PHASE(Enum):
NONE = "None"
LOBBY = "Lobby"
MATCHMAKING = "Matchmaking"
READY_CHECK = "ReadyCheck"
CHAMP_SELECT = "ChampSelect"
IN_PROGRESS = "InProgress"
RECONNECT = "Reconnect"
PRE_END_OF_GAME = "PreEndOfGame"
END_OF_GAME = "EndOfGame"
WAITING_FOR_STATS = "WaitingForStats"
UAS = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36" # user agent string
SLEEP_TIME = 0.5 # time to sleep between polls
MIN_ACCEPTABLE_PATCH_MATCH_RATIO = 0.15 # ratio of games on current patch to previous patch required to use current patch's data
FLASH = 4 # id for flash summoner
CONFIG_FILENAME = "config.json" # name of config file
DEFAULT_CONFIG = { # default config file contents
"flash_on_f": True,
"revert_patch": True,
"preferred_item_slots": dict(),
"small_items": list()
}
| 2.046875
| 2
|
Lipniacki2004/plot_func.py
|
okadalabipr/cancer_signaling
| 1
|
12781848
|
<reponame>okadalabipr/cancer_signaling
from matplotlib import pyplot as plt
def timecourse(sim):
plt.figure(figsize=(16,13))
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['font.size'] = 15
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['lines.linewidth'] = 1.5
plt.subplots_adjust(wspace=0.4, hspace=0.5)
plt.subplot(3,4,1)
plt.plot(sim.t,1*(sim.t>1),'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,1.5)
plt.title('TNF activity')
plt.subplot(3,4,2)
plt.plot(sim.t,sim.Neutral_IKK,'k',clip_on=False)
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.2)
plt.title('Neutral IKK (IKKn)')
plt.subplot(3,4,3)
plt.plot(sim.t,sim.Active_IKK,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.1)
plt.title('Active IKK (IKKa)')
plt.subplot(3,4,4)
plt.plot(sim.t,sim.Inactive_IKK,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.2)
plt.title('Inactive IKK (IKKi)')
plt.subplot(3,4,5)
plt.plot(sim.t,sim.Free_cyt_IkBa,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.04)
plt.title('Free cyt. IkBa')
plt.subplot(3,4,6)
plt.plot(sim.t,sim.Cyt,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.06)
plt.title('Cyt. (IkBa|NFkB)')
plt.subplot(3,4,7)
plt.plot(sim.t,sim.Free_nuclear_IkBa,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.02)
plt.title('Free nuclear IkBa')
plt.subplot(3,4,8)
plt.plot(sim.t,sim.Free_nuclear_NFkB,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.4)
plt.title('Free nuclear NFkB')
plt.subplot(3,4,9)
plt.plot(sim.t,sim.IkBa_mRNA*1e4,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.title(r'× 10$^4$ IkBa mRNA')
plt.subplot(3,4,10)
plt.plot(sim.t,sim.A20_mRNA*1e4,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.title(r'× 10$^4$ A20 mRNA')
plt.subplot(3,4,11)
plt.plot(sim.t,sim.A20_protein,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.ylim(0,0.2)
plt.title('A20 protein')
plt.subplot(3,4,12)
plt.plot(sim.t,sim.cgen_mRNA*1e4,'k')
plt.xlim(0,7)
plt.xticks([0,2,4,6])
plt.title(r'× 10$^4$ cgen mRNA')
plt.savefig('wild_type.png',bbox_inches='tight')
| 2.234375
| 2
|
interpretation/deepseismic_interpretation/dutchf3/tests/test_dataloaders.py
|
elmajdma/seismic-deeplearning
| 270
|
12781849
|
<filename>interpretation/deepseismic_interpretation/dutchf3/tests/test_dataloaders.py<gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Tests for TrainLoader and TestLoader classes when overriding the file names of the seismic and label data.
"""
import tempfile
import numpy as np
from deepseismic_interpretation.dutchf3.data import (
get_test_loader,
TrainPatchLoaderWithDepth,
TrainSectionLoaderWithDepth,
)
import pytest
import yacs.config
import os
# npy files dimensions
IL = 5
XL = 10
D = 8
N_CLASSES = 2
CONFIG_FILE = "./experiments/interpretation/dutchf3_patch/configs/unet.yaml"
with open(CONFIG_FILE, "rt") as f_read:
config = yacs.config.load_cfg(f_read)
def generate_npy_files(path, data):
np.save(path, data)
def assert_dimensions(test_section_loader):
assert test_section_loader.labels.shape[0] == IL
assert test_section_loader.labels.shape[1] == XL
assert test_section_loader.labels.shape[2] == D
# Because add_section_depth_channels method add
# 2 extra channels to a 1 channel section
assert test_section_loader.seismic.shape[0] == IL
assert test_section_loader.seismic.shape[2] == XL
assert test_section_loader.seismic.shape[3] == D
def test_TestSectionLoader_should_load_data_from_test1_set():
with open(CONFIG_FILE, "rt") as f_read:
config = yacs.config.load_cfg(f_read)
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "test_once"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test1_seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test1_labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_test1.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(config, split="test1")
assert_dimensions(test_set)
def test_TestSectionLoader_should_load_data_from_test2_set():
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "test_once"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test2_seismic.npy"), seimic)
A = np.load(os.path.join(data_dir, "test_once", "test2_seismic.npy"))
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "test_once", "test2_labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_test2.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(config, split="test2")
assert_dimensions(test_set)
def test_TestSectionLoader_should_load_data_from_path_override_data():
with tempfile.TemporaryDirectory() as data_dir:
os.makedirs(os.path.join(data_dir, "volume_name"))
os.makedirs(os.path.join(data_dir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "volume_name", "seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(data_dir, "volume_name", "labels.npy"), labels)
txt_path = os.path.join(data_dir, "splits", "section_volume_name.txt")
open(txt_path, "a").close()
TestSectionLoader = get_test_loader(config)
config.merge_from_list(["DATASET.ROOT", data_dir])
test_set = TestSectionLoader(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(data_dir, "volume_name", "seismic.npy"),
label_path=os.path.join(data_dir, "volume_name", "labels.npy"),
)
assert_dimensions(test_set)
def test_TrainPatchLoaderWithDepth_should_fail_on_missing_seismic_file(tmpdir):
"""
Check for exception when training param is empty
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "labels.npy"), labels)
txt_path = os.path.join(tmpdir, "splits", "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
with pytest.raises(Exception) as excinfo:
_ = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert "does not exist" in str(excinfo.value)
def test_TrainPatchLoaderWithDepth_should_fail_on_missing_label_file(tmpdir):
"""
Check for exception when training param is empty
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "seismic.npy"), seimic)
txt_path = os.path.join(tmpdir, "splits", "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
with pytest.raises(Exception) as excinfo:
_ = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert "does not exist" in str(excinfo.value)
def test_TrainPatchLoaderWithDepth_should_load_with_one_train_and_label_file(tmpdir):
"""
Check for successful class instantiation w/ single npy file for train & label
"""
# Setup
os.makedirs(os.path.join(tmpdir, "volume_name"))
os.makedirs(os.path.join(tmpdir, "splits"))
seimic = np.zeros([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "seismic.npy"), seimic)
labels = np.ones([IL, XL, D])
generate_npy_files(os.path.join(tmpdir, "volume_name", "labels.npy"), labels)
txt_dir = os.path.join(tmpdir, "splits")
txt_path = os.path.join(txt_dir, "patch_volume_name.txt")
open(txt_path, "a").close()
config.merge_from_list(["DATASET.ROOT", str(tmpdir)])
# Test
train_set = TrainPatchLoaderWithDepth(
config,
split="volume_name",
is_transform=True,
augmentations=None,
seismic_path=os.path.join(tmpdir, "volume_name", "seismic.npy"),
label_path=os.path.join(tmpdir, "volume_name", "labels.npy"),
)
assert train_set.labels.shape == (IL, XL, D + 2 * config.TRAIN.PATCH_SIZE)
assert train_set.seismic.shape == (IL, XL, D + 2 * config.TRAIN.PATCH_SIZE)
| 2.078125
| 2
|
preprocess_optimized.py
|
OEMarshall/REU2016-Supervised-Experiments
| 0
|
12781850
|
import numpy
import sys
import pickle
f = open("kddcup.names","r")
counter = 1
labels = {}; features = {}
for line in f:
    line = line.strip() # strip whitespace and the trailing newline
if counter == 1:
s = line.split(",") #split first line on comma
for label in s:
labels[label] = 0 #each CSV in first line is label, add to
#label dictionary as key with value 0
else:
s = line.split(":") #split second line on ':' add name of feature
#as key to features dictionary
features[s[0]]=0
counter += 1
f.close()
#print labels.keys(), len(labels.keys())
#print features.keys(), len(features.keys())
f = open("kddcup.data.corrected","r")
data_dict = {}
durations = 0
src_bytes = 0
dest_bytes = 0
wrong_fragments = 0
urgent = 0
hot = 0
fail = 0
compromised = 0
root = 0
file_create = 0
shells = 0
access = 0
outbound_cmds = 0
count = 0
srv_count = 0
#serror = []
dest_hosts = 0
dest_host_srv = 0
for label in labels:
data_dict[label] = []
#make each value of label a list
print "Reading the data file..."
line_counter = 0
for line in f:
line_counter += 1
if line_counter % 200000 == 0:
print str(line_counter)+" lines processed."
line = line.strip();
line = line[:-1] #get rid of period at end
s = line.split(",") #split line on commas
try:
labels[s[-1]] += 1 #increment label everytime it occurs in file
data_dict[s[-1]].append(s[:-1]) #append line to value of correct label
#durations.append(float(s[0]))
if (float(s[0]) > durations):
durations = float(s[0])
#src_bytes.append(float(s[4]))
if (float(s[4]) > src_bytes):
src_bytes = float(s[4])
#dest_bytes.append(float(s[5]))
if (float(s[5]) > dest_bytes):
dest_bytes = float(s[5])
#wrong_fragments.append(float(s[7]))
if (float(s[7]) > wrong_fragments):
wrong_fragments = float(s[7])
#urgent.append(float(s[8]))
if (float(s[8]) > urgent):
urgent = float(s[8])
#hot.append(float(s[9]))
if (float(s[9]) > hot):
hot = float(s[9])
#fail.append(float(s[10]))
if (float(s[10]) > fail):
fail = float(s[10])
#compromised.append(float(s[12]))
if (float(s[12]) > compromised):
compromised = float(s[12])
#root.append(float(s[15]))
if (float(s[15]) > root):
root = float(s[15])
#file_create.append(float(s[16]))
if (float(s[16]) > file_create):
file_create = float(s[16])
#shells.append(float(s[17]))
if (float(s[17]) > shells):
shells = float(s[17])
#access.append(float(s[18]))
if (float(s[18]) > access):
access = float(s[18])
#outbound_cmds.append(float(s[19]))
if (float(s[19]) > outbound_cmds):
outbound_cmds = float(s[19])
#count.append(float(s[22]))
if (float(s[22]) > count):
count = float(s[22])
#srv_count.append(float(s[23]))
if (float(s[23]) > srv_count):
srv_count = float(s[23])
#dest_hosts.append(float(s[31]))
if (float(s[31]) > dest_hosts):
dest_hosts = float(s[31])
#dest_host_srv.append(float(s[32]))
if (float(s[32]) > dest_host_srv):
dest_host_srv = float(s[32])
#for i in range(len(s)-1):
#features[s[i]]+=1
except KeyError:
pass
f.close()
print str(line_counter)+ " lines processed, start quantifying..."
dur_max = durations
src_bytes_max = src_bytes
dest_bytes_max = dest_bytes
wrong_fragments_max = wrong_fragments
urgent_max = urgent
hot_max = hot
fail_max = fail
compromised_max = compromised
root_max = root
file_create_max = file_create
shell_max = shells
access_max = access
outbound_max = outbound_cmds
count_max = count
srv_count_max = srv_count
dest_host_max = dest_hosts
dest_host_srv_max = dest_host_srv
min_items = 10000 #Minimum number of examples per class (very important variable)
random_dict = {}
for label in labels:
    print "here", label, labels[label]
if(labels[label] >= min_items): # for each label, if it has more than min_items
# create a key for random_dict and list as value
random_dict[label] = []
rng = numpy.random.RandomState(11)
for label in random_dict:
indice = rng.choice(range(0,len(data_dict[label])),min_items,replace = False)
# generate random list of indices of size min_items
for elem in indice:
random_dict[label].append(data_dict[label][elem])
#from each label in random_dict, append to random_dict[label] the random indices
#for label in random_dict:
# for line in random_dict[label]: # remove unwanted features
# print line[0]
# line.pop(1)
for label in random_dict:
for line in random_dict[label]:
if line[1] == "tcp":
line[1]= 1/6.0
elif line[1] == "udp":
line[1]= 2/6.0 # assign values to second column of value lists (protocol type)
else:
line[1]= .5
services = ['http', 'smtp', 'finger', 'domain_u', 'auth',\
'telnet', 'ftp', 'eco_i', 'ntp_u', 'ecr_i', \
'other', 'private', 'pop_3', 'ftp_data', \
'rje', 'time', 'mtp', 'link', 'remote_job', \
'gopher', 'ssh', 'name', 'whois', 'domain', \
'login', 'imap4', 'daytime', 'ctf', 'nntp', \
'shell', 'IRC', 'nnsp', 'http_443', 'exec', \
'printer', 'efs', 'courier', 'uucp', 'klogin', \
'kshell', 'echo', 'discard', 'systat', 'supdup', \
'iso_tsap', 'hostnames', 'csnet_ns', 'pop_2', \
'sunrpc', 'uucp_path', 'netbios_ns', 'netbios_ssn', \
'netbios_dgm', 'sql_net', 'vmnet', 'bgp', 'Z39_50', \
'ldap', 'netstat', 'urh_i', 'X11', 'urp_i', 'pm_dump', \
'tftp_u', 'tim_i', 'red_i','SF', 'aol','http_8001', 'harvest']
flags = ['S1', 'SF', 'REJ', 'S2', 'S0', 'S3', 'RSTO', 'RSTR', 'RSTOS0', 'OTH', 'SH']
for label in random_dict:
for List in random_dict[label]:
for i in range(len(List)):
if i == 0:
List[i] = float(List[i]) / float(dur_max) # assign values to first column of value lists (duration)
elif i == 2:
List[i]= float(services.index(List[i]))/float(len(services)) # assign values to third column of value lists (service)
elif i == 3:
List[i] = float(flags.index(List[i]))/float(len(flags))# assign values to fourth column of value lists (flags)
elif i == 4:
List[i] = float(List[i]) / float(src_bytes_max) # assign values to fifth column of value lists (src_bytes)
elif i == 5:
List[i] = float(List[i]) / float(dest_bytes_max) # assign values to sixth column of value lists (dest_bytes)
elif i == 6:
List[i] = float(List[i]) # convert seventh column of value lists to float (land)
elif i == 7:
List[i] = float(List[i]) / float(wrong_fragments_max) # assign values to eighth column of value lists (wrong_fragments)
elif i == 8:
List[i] = float(List[i]) / float(urgent_max) # assign values to ninth column of value lists (urgent)
elif i == 9:
List[i] = float(List[i]) / float(hot_max) # assign values to 10th column of value lists (hot)
elif i == 10:
List[i] = float(List[i]) / float(fail_max) # assign values to 11th column of value lists (failed_logins)
elif i == 11:
List[i] = float(List[i]) # convert 12th column (successful login) to float
elif i == 12:
List[i] = float(List[i]) / float(compromised_max) # assign values to 13th column of value lists (num_comprimesed)
elif i == 13:
List[i] = float(List[i]) # convert 14th column (root shell) to float
elif i == 14:
List[i] = float(List[i]) # convert 15th column (su_attemptedl) to float
elif i == 15:
List[i] = float(List[i]) / float(root_max) # assign values to 16th column of value lists (num_root)
elif i == 16:
List[i] = float(List[i]) / float(file_create_max) # assign values to 17th column of value lists (num_file creations)
elif i == 17:
List[i] = float(List[i]) / float(shell_max) # assign values to 18th column of value lists (num_shells)
elif i == 18:
List[i] = float(List[i]) / float(access_max) # assign values to 19th column of value lists (num_access_files)
elif i == 19:
                # could be dropped because num_outbound_cmds is all zeros
List[i] = float(List[i])/ float(sys.maxint) # assign values to 20th column of value lists (num_outbound_commands)
elif i == 20:
List[i] = float(List[i]) # convert 21st column (is_host_login) to float
elif i == 21:
List[i] = float(List[i]) # convert 22nd column (is_guest_login) to float
elif i == 22:
List[i] = float(List[i]) / float(count_max) # assign values to 23rd column of value lists (count)
elif i == 23:
List[i] = float(List[i]) / float(srv_count_max) # assign values to 24th column of value lists (srv_count)
elif i == 24:
List[i] = float(List[i]) # convert 25th column (serror_rate) to float
elif i == 25:
List[i] = float(List[i]) # convert 26th column (srv_serror_rate) to float
elif i == 26:
List[i] = float(List[i]) # convert 27th column (rerror_rate) to float
elif i == 27:
List[i] = float(List[i]) # convert 28th column (srv_rerror_rate) to float
elif i == 28:
List[i] = float(List[i]) # convert 29th column (same_srv_rate) to float
elif i == 29:
List[i] = float(List[i]) # convert 30th column (diff_srv_rate) to float
elif i == 30:
List[i] = float(List[i]) # convert 31st column (srv_diff_host_rate) to float
elif i == 31:
List[i] = float(List[i]) / float(dest_host_max) # assign values to 32nd column of value lists (dest_host_count)
elif i == 32:
List[i] = float(List[i]) / float(dest_host_srv_max) # assign values to 33rd column of value lists (dst_host_srv_count)
elif i == 33:
List[i] = float(List[i]) # convert 34th column (dst_host_same_srv_rate) to float
elif i == 34:
List[i] = float(List[i]) # convert 35th column (dst_host_diff_srv_rate) to float
elif i == 35:
List[i] = float(List[i]) # convert 36th column (dst_host_same_src_port_rate) to float
elif i == 36:
List[i] = float(List[i]) # convert 37th column (dst_host_srv_diff_host_rate) to float
elif i == 37:
List[i] = float(List[i]) # convert 38th column (dst_host_serror_rate) to float
elif i == 38:
List[i] = float(List[i]) # convert 39th column (dst_host_srv_serror_rate) to float
elif i == 39:
List[i] = float(List[i]) # convert 40th column (dst_host_rerror_rate) to float
elif i == 40:
List[i] = float(List[i]) # convert 41st column (dst_host_srv_rerror_rate) to float
#print random_dict
file2 = open("labeling.txt","w")
print "Generating training set..."
all_data = []; labels = random_dict.keys()[:]
for k in labels:
file2.write(str(k))
for data in random_dict[k]:
all_data.append((data,labels.index(k)))
print "Pickling..."
with open('all_data_' + str(min_items) +'.pickle','w') as f:
pickle.dump(all_data,f)
file2.close()
| 2.546875
| 3
|