hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
befd00f2e7be9ee982348d730c3ed4d4bbdd8988 | 177 | py | Python | iris_sdk/models/data/tn_status.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/data/tn_status.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/data/tn_status.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.tn_status import TnStatusMap | 25.285714 | 54 | 0.813559 |
befd8dcdbdb6d9ed65837be1a16b79168d010d75 | 8,437 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/plugins/modules/bigip_device_group_member.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
type: str
required: True
device_group:
description:
- The device group to which you want to add the member.
type: str
required: True
state:
description:
- When C(present), ensures the device group member exists.
- When C(absent), ensures the device group member is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
provider:
password: secret
server: lb.mydomain.com
user: admin
loop: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
def main():
    """Entry point of the Ansible module.

    Builds the AnsibleModule from the module's argument spec, delegates
    execution to the module manager, and reports results back to Ansible.
    Any F5ModuleError raised during execution is converted into a module
    failure message instead of a raw traceback.

    NOTE(review): ArgumentSpec and ModuleManager are defined elsewhere in
    this file (not visible in this excerpt) -- confirm in the full source.
    """
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Translate known F5 errors into an Ansible failure result
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
| 28.6 | 94 | 0.6174 |
befdd813dce9c8916652b3514805d92fc7258e7d | 793 | py | Python | comrade/blueprints/rest.py | sp3c73r2038/elasticsearch-comrade | ed0c94e071d2fe701a14429981390b9a89df79a7 | [
"MIT"
] | 256 | 2019-09-09T10:09:34.000Z | 2022-03-28T04:15:21.000Z | comrade/blueprints/rest.py | sp3c73r2038/elasticsearch-comrade | ed0c94e071d2fe701a14429981390b9a89df79a7 | [
"MIT"
] | 503 | 2019-07-31T17:01:12.000Z | 2022-03-28T13:19:26.000Z | comrade/blueprints/rest.py | nmeisels/elasticsearch-comrade | 57dc600e5ffd7f9d4c055b584124bef9365e538c | [
"MIT"
] | 25 | 2019-08-30T13:04:31.000Z | 2022-03-09T09:50:32.000Z | from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from ..connections import get_client
rest_bp = Blueprint('rest')
| 28.321429 | 78 | 0.693569 |
befebe8c408a00b9be09490e9fa3fb8d41c06ce6 | 1,081 | py | Python | tests/test_utils.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 128 | 2015-01-11T10:29:40.000Z | 2021-06-25T05:27:45.000Z | tests/test_utils.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 52 | 2015-01-02T15:24:28.000Z | 2020-08-07T04:49:49.000Z | tests/test_utils.py | tedeler/pyexchange | 58042f473cbd4f00769249ce9ca20c6a376eddb6 | [
"Apache-2.0"
] | 96 | 2015-01-02T15:16:20.000Z | 2021-12-25T01:37:46.000Z | from datetime import datetime
from pytz import timezone, utc
from pytest import mark
from pyexchange.utils import convert_datetime_to_utc
| 43.24 | 121 | 0.781684 |
befed480f20eb883fd15d6235756ef7750bbee56 | 786 | py | Python | vidpub/__main__.py | gary9630/session-video-publisher | 6602f53d722af8e569c82b7de8ef79a63293c766 | [
"0BSD"
] | null | null | null | vidpub/__main__.py | gary9630/session-video-publisher | 6602f53d722af8e569c82b7de8ef79a63293c766 | [
"0BSD"
] | 5 | 2020-11-15T12:45:03.000Z | 2021-12-07T08:29:40.000Z | vidpub/__main__.py | gary9630/session-video-publisher | 6602f53d722af8e569c82b7de8ef79a63293c766 | [
"0BSD"
] | 4 | 2018-06-23T16:48:03.000Z | 2021-04-18T09:51:29.000Z | import argparse
from .upload_video import upload_video
from .generate_playlist import generate_playlist
if __name__ == "__main__":
main()
| 23.818182 | 99 | 0.675573 |
beff85e9c6691647f15d3bfe260f151e7cc2041f | 3,275 | py | Python | ally/utils/option.py | rjfranssen/PyAlly | f24d4d449dd0578f52e75365ad0ba69a572d3237 | [
"MIT"
] | 53 | 2019-08-11T20:39:16.000Z | 2022-02-01T02:05:12.000Z | ally/utils/option.py | rjfranssen/PyAlly | f24d4d449dd0578f52e75365ad0ba69a572d3237 | [
"MIT"
] | 53 | 2019-12-11T06:39:59.000Z | 2022-02-13T05:06:44.000Z | ally/utils/option.py | rjfranssen/PyAlly | f24d4d449dd0578f52e75365ad0ba69a572d3237 | [
"MIT"
] | 31 | 2019-10-05T02:28:16.000Z | 2022-02-03T03:41:42.000Z | # MIT License
#
# Copyright (c) 2020 Brett Graves
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import math
from .utils import *
############################################################################
def option_format(symbol="", exp_date="1970-01-01", strike=0, direction=""):
    """Build the OCC standardized option name.

    Parameters
    ----------
    symbol : str
        Underlying ticker, case insensitive.
    exp_date : str
        Expiration date in 'YYYY-MM-DD' form.
    strike : int or float
        Strike price of the option.
    direction : str
        Any string containing 'c'/'C' means call; anything else is put.

    Returns
    -------
    str
        OCC symbol such as 'IBM201231C00301000', or the empty string
        when any input fails validation.
    """
    inputs_ok = (
        check(symbol)
        and check(exp_date)
        and check(str(strike))
        and check(direction)
    )
    if not inputs_ok:
        return ""
    # Normalize the direction to the single OCC letter
    side = "C" if "C" in direction.upper() else "P"
    # Expiration is encoded as YYMMDD
    expiry = datetime.datetime.strptime(exp_date, "%Y-%m-%d").strftime("%y%m%d")
    # format_strike zero-pads the strike to the 8-digit OCC field
    return "".join([str(symbol).upper(), expiry, side, format_strike(strike)])
def option_strike(name):
    """Extract the strike price from an OCC standardized option name.

    The last 8 digits encode the strike in thousandths; the result is
    returned as a float.
    """
    strike_millis = name[-8:]
    return int(strike_millis) / 1000.0
def option_maturity(name):
    """Return the expiration date ('YYYY-MM-DD') encoded in an OCC
    standardized option name."""
    raw_date = name[-15:-9]
    parsed = datetime.datetime.strptime(raw_date, "%y%m%d")
    return parsed.strftime("%Y-%m-%d")
def option_callput(name):
    """Return 'call' or 'put' for an OCC standardized option name,
    based on the direction letter preceding the strike digits."""
    if name.upper()[-9] == "C":
        return "call"
    return "put"
def option_symbol(name):
    """Return the underlying ticker of an OCC standardized option name
    (everything before the 15-character date/direction/strike suffix)."""
    suffix_len = 15
    return name[: len(name) - suffix_len]
| 31.796117 | 90 | 0.635725 |
8300d2d4159d348f8f2a81357e0afeb556ced95e | 460 | py | Python | examples/104-python3-9-pipeline.py | marviniter/argo-dataflow | 89a060b1c6ea70f7c26bc58a01ba675c3acc1c06 | [
"Apache-2.0"
] | null | null | null | examples/104-python3-9-pipeline.py | marviniter/argo-dataflow | 89a060b1c6ea70f7c26bc58a01ba675c3acc1c06 | [
"Apache-2.0"
] | null | null | null | examples/104-python3-9-pipeline.py | marviniter/argo-dataflow | 89a060b1c6ea70f7c26bc58a01ba675c3acc1c06 | [
"Apache-2.0"
] | null | null | null | from argo_dataflow import pipeline, kafka
if __name__ == '__main__':
(pipeline("104-python3-9")
.owner('argoproj-labs')
.describe("""This example is of the Python 3.9 handler.
[Learn about handlers](../docs/HANDLERS.md)""")
.step(
(kafka('input-topic')
.code('main', handler)
.kafka('output-topic')
))
.save())
| 23 | 60 | 0.582609 |
8300f1e857cc9e2e0c3bf9685d4664e9e4c8faa9 | 2,195 | py | Python | djangur.py | JerryPopi/djangur-py | 0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51 | [
"MIT"
] | null | null | null | djangur.py | JerryPopi/djangur-py | 0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51 | [
"MIT"
] | null | null | null | djangur.py | JerryPopi/djangur-py | 0ba76a1a9c0f77ded014f0f3a0b3a98bf7835f51 | [
"MIT"
] | null | null | null | import asyncio
import discord
from commands import Commands, Guild_Instance, leave, play_search
import os
from pymongo import MongoClient
from dotenv import load_dotenv
load_dotenv()
CONNECTION_STRING = f"mongodb+srv://{os.environ['mongo_user']}:{os.environ['mongo_pass']}@djangur.erogd.mongodb.net/djangur?retryWrites=true&w=majority"
db_client = MongoClient(CONNECTION_STRING)
db = db_client['djangur']
client = discord.Client()
client.run(os.environ['token'])
| 29.662162 | 152 | 0.653303 |
830374b559d44b39454687ae70bffd40d78c9944 | 44,236 | py | Python | membership/models.py | str4nd/sikteeri | 34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5 | [
"MIT"
] | 22 | 2015-03-30T19:33:15.000Z | 2022-01-10T03:52:43.000Z | membership/models.py | str4nd/sikteeri | 34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5 | [
"MIT"
] | 66 | 2015-05-15T13:54:59.000Z | 2021-05-27T20:28:39.000Z | membership/models.py | str4nd/sikteeri | 34dd5a4dc35558cdba9e6f97fd38fb661a36b8a5 | [
"MIT"
] | 13 | 2015-03-09T18:59:29.000Z | 2022-01-10T04:08:38.000Z | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from decimal import Decimal
import logging
from django.core.files.storage import FileSystemStorage
from membership.billing.pdf_utils import get_bill_pdf, create_reminder_pdf
from membership.reference_numbers import barcode_4, group_right,\
generate_membership_bill_reference_number
import traceback
from io import StringIO, BytesIO
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db import transaction
from django.db.models import Q, Sum, Count
from django.utils.translation import ugettext_lazy as _
import django.utils.timezone
from django.conf import settings
from django.template.loader import render_to_string
from django.forms import ValidationError
from django.db.models.query import QuerySet
from django.contrib.contenttypes.models import ContentType
from .utils import log_change, tupletuple_to_dict
from membership.signals import send_as_email, send_preapprove_email, send_duplicate_payment_notice
from .email_utils import bill_sender, preapprove_email_sender, duplicate_payment_sender, format_email
logger = logging.getLogger("membership.models")
MEMBER_TYPES = (('P', _('Person')),
('J', _('Junior')),
('S', _('Supporting')),
('O', _('Organization')),
('H', _('Honorary')))
MEMBER_TYPES_DICT = tupletuple_to_dict(MEMBER_TYPES)
STATUS_NEW = 'N'
STATUS_PREAPPROVED = 'P'
STATUS_APPROVED = 'A'
STATUS_DIS_REQUESTED = 'S'
STATUS_DISASSOCIATED = 'I'
STATUS_DELETED = 'D'
MEMBER_STATUS = ((STATUS_NEW, _('New')),
(STATUS_PREAPPROVED, _('Pre-approved')),
(STATUS_APPROVED, _('Approved')),
(STATUS_DIS_REQUESTED, _('Dissociation requested')),
(STATUS_DISASSOCIATED, _('Dissociated')),
(STATUS_DELETED, _('Deleted')))
MEMBER_STATUS_DICT = tupletuple_to_dict(MEMBER_STATUS)
BILL_EMAIL = 'E'
BILL_PAPER = 'P'
BILL_SMS = 'S'
BILL_TYPES = (
(BILL_EMAIL, _('Email')),
(BILL_PAPER, _('Paper')),
(BILL_SMS, _('SMS'))
)
BILL_TYPES_DICT = tupletuple_to_dict(BILL_TYPES)
def _get_logs(self):
    '''Gets the log entries related to this object.

    Getter to be used as property instead of GenericRelation.
    Resolves the ContentType of the concrete model class and returns
    the admin LogEntry queryset filtered to this instance's id.'''
    my_class = self.__class__
    ct = ContentType.objects.get_for_model(my_class)
    object_logs = ct.logentry_set.filter(object_id=self.id)
    return object_logs
def __str__(self):
    # NOTE(review): this method appears detached from its enclosing class
    # in this excerpt; presumably it belongs to a model holding
    # `organization` and `person` contacts (e.g. Membership) -- confirm
    # against the full source.
    # Prefer the organization name, then the person name, then the id.
    if self.organization:
        return str(self.organization)
    else:
        if self.person:
            return str(self.person)
        else:
            return "#%d" % self.id
class Fee(models.Model):
    """Membership fee amount valid for one member type from a start date on."""
    # Member type this fee applies to (single char key of MEMBER_TYPES)
    type = models.CharField(max_length=1, choices=MEMBER_TYPES, verbose_name=_('Fee type'))
    # Date from which this fee is in effect
    start = models.DateTimeField(_('Valid from date'))
    # Fee amount (max_digits=6 limits the sum to 9999.99)
    sum = models.DecimalField(_('Sum'), max_digits=6, decimal_places=2)
    # VAT percentage included in the sum
    vat_percentage = models.IntegerField(_('VAT percentage'))
class BillingCycleManager(models.Manager):
class BillingCycleQuerySet(QuerySet):
class BillingCycle(models.Model):
    """One billing period (one year) of a membership.

    Stores the period boundaries, the billed sum and the bank reference
    number used to match incoming payments to this cycle.
    """
    membership = models.ForeignKey('Membership', verbose_name=_('Membership'), on_delete=models.PROTECT)
    start = models.DateTimeField(default=django.utils.timezone.now, verbose_name=_('Start'))
    # Exclusive end timestamp; see end_date() for the logical last day
    end = models.DateTimeField(verbose_name=_('End'))
    sum = models.DecimalField(_('Sum'), max_digits=6, decimal_places=2) # This limits sum to 9999,99
    is_paid = models.BooleanField(default=False, verbose_name=_('Is paid'))
    # NOT an integer since it can begin with 0 XXX: format
    reference_number = models.CharField(max_length=64, verbose_name=_('Reference number'))
    logs = property(_get_logs)
    objects = BillingCycleManager()
    def get_rf_reference_number(self):
        """
        Get reference number in international RFXX format (ISO 11649).
        For example 218012 is formatted as RF28218012 where 28 is checksum
        :return: RF formatted reference number
        """
        # Magic 2715 is "RF" in number encoded format and
        # zeros are placeholders for the mod-97 checksum calculation.
        ref_digits = ''.join(self.reference_number.split())
        checksum_input = int(ref_digits + '271500')
        modulo = checksum_input % 97
        # Bug fix: append only the bare reference digits after the RF
        # checksum. Previously the '271500' placeholder suffix was
        # included in the output (e.g. 'RF28218012271500' instead of
        # 'RF28218012'), contradicting the docstring and ISO 11649.
        return "RF%02d%s" % (98 - modulo, ref_digits)
    def end_date(self):
        """Logical end date
        This is one day before actual end since actual end is a timestamp.
        The end date is the previous day.
        E.g. 2015-01-01 -- 2015-12-31
        """
        day = timedelta(days=1)
        return self.end.date()-day
    def __str__(self):
        return str(self.start.date()) + "--" + str(self.end_date())
    def save(self, *args, **kwargs):
        """Fill in defaults (end, reference number, sum) before saving."""
        if not self.end:
            # Default cycle length is one year from start
            self.end = self.start + timedelta(days=365)
            if (self.end.day != self.start.day):
                # Leap day
                self.end += timedelta(days=1)
        if not self.reference_number:
            self.reference_number = generate_membership_bill_reference_number(self.membership.id, self.start.year)
        if not self.sum:
            # get_fee() is defined elsewhere in this class (not visible here)
            self.sum = self.get_fee()
        super(BillingCycle, self).save(*args, **kwargs)
# Storage backend for cached generated documents (e.g. bill PDFs)
cache_storage = FileSystemStorage(location=settings.CACHE_DIRECTORY)
# Register change logging for all membership/billing models.
# NOTE(review): logging_log_change is defined elsewhere in this module
# (not visible in this excerpt).
models.signals.post_save.connect(logging_log_change, sender=Membership)
models.signals.post_save.connect(logging_log_change, sender=Contact)
models.signals.post_save.connect(logging_log_change, sender=BillingCycle)
models.signals.post_save.connect(logging_log_change, sender=Bill)
models.signals.post_save.connect(logging_log_change, sender=Fee)
models.signals.post_save.connect(logging_log_change, sender=Payment)
# These are registered here due to import madness and general clarity
send_as_email.connect(bill_sender, sender=Bill, dispatch_uid="email_bill")
send_preapprove_email.connect(preapprove_email_sender, sender=Membership,
                              dispatch_uid="preapprove_email")
send_duplicate_payment_notice.connect(duplicate_payment_sender, sender=Payment,
                                      dispatch_uid="duplicate_payment_notice")
| 41.149767 | 128 | 0.628651 |
830421c0eef174df1951cc79db82af6869f9e1bc | 177 | py | Python | napari_imc/io/__init__.py | neuromusic/napari-imc | ce2ff998b33b49f19a786585cc2cb8e59db74c24 | [
"MIT"
] | 4 | 2021-01-29T15:11:37.000Z | 2021-03-01T02:04:24.000Z | napari_imc/io/__init__.py | neuromusic/napari-imc | ce2ff998b33b49f19a786585cc2cb8e59db74c24 | [
"MIT"
] | 25 | 2021-01-19T01:49:13.000Z | 2022-02-09T10:46:41.000Z | napari_imc/io/__init__.py | neuromusic/napari-imc | ce2ff998b33b49f19a786585cc2cb8e59db74c24 | [
"MIT"
] | 3 | 2021-01-29T17:31:05.000Z | 2022-03-25T10:23:32.000Z | from .imaxt import ImaxtFileReader
from .mcd import McdFileReader
from .txt import TxtFileReader
__all__ = [
'ImaxtFileReader',
'McdFileReader',
'TxtFileReader',
]
| 17.7 | 34 | 0.734463 |
83043d6bcc47235264f0457736e61baf87cbac95 | 2,449 | py | Python | eval.py | ldzhangyx/TCN-for-beat-tracking | 8e09ba5b2f222a4944a8bd039987a01240ae778d | [
"BSD-3-Clause"
] | 3 | 2021-03-22T01:59:52.000Z | 2022-01-22T11:08:56.000Z | eval.py | ldzhangyx/TCN-for-beat-tracking | 8e09ba5b2f222a4944a8bd039987a01240ae778d | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:14:35.000Z | 2021-06-21T19:14:35.000Z | eval.py | ldzhangyx/TCN-for-beat-tracking | 8e09ba5b2f222a4944a8bd039987a01240ae778d | [
"BSD-3-Clause"
] | 1 | 2021-03-22T01:59:57.000Z | 2021-03-22T01:59:57.000Z | import torch
from torch.utils.data import Dataset
import numpy as np
import os
import pickle
from madmom.features import DBNBeatTrackingProcessor
import torch
from model import BeatTrackingNet
from utils import init_single_spec
from mir_eval.beat import evaluate
from data import BallroomDataset
from beat_tracker import predict_beats_from_spectrogram
import yaml
import sys
import pdb
# import config
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
def evaluate_model(
        model_checkpoint,
        spectrogram,
        ground_truth):
    """Score a single example against its ground truth.

    Runs the beat tracker stored at *model_checkpoint* on one
    spectrogram and compares the predicted beat times with
    *ground_truth*, using every beat-tracking metric offered by
    mir_eval.beat.
    """
    predicted_beats = predict_beats_from_spectrogram(
        spectrogram,
        model_checkpoint)
    return evaluate(ground_truth, predicted_beats)
def evaluate_model_on_dataset(
        model_checkpoint,
        dataset,
        ground_truths):
    """
    Run through a whole instance of torch.utils.data.Dataset and compare the
    model's predictions to the given ground truths.

    Parameters
    ----------
    model_checkpoint : str
        Path to the trained model checkpoint.
    dataset : torch.utils.data.Dataset
        Dataset whose items are dicts holding a "spectrogram" tensor.
    ground_truths : iterable
        Ground-truth beat annotations, one entry per dataset example.
        May be a generator; it is materialized once up front.

    Returns
    -------
    dict
        {"total_examples": <int>, "scores": <dict of mean metric values>}
    """
    # Materialize up front: callers (e.g. this file's script section)
    # pass a generator, which is not subscriptable.
    ground_truths = list(ground_truths)
    running_scores = {}
    total_examples = 0
    for i in range(len(dataset)):
        spectrogram = dataset[i]["spectrogram"].unsqueeze(0)
        ground_truth = ground_truths[i]
        beat_scores = evaluate_model(
            model_checkpoint,
            spectrogram,
            ground_truth)
        for metric in beat_scores:
            running_scores[metric] = \
                running_scores.get(metric, 0.0) + beat_scores[metric]
        total_examples += 1
        # Each iteration, report the current index and running totals.
        print(f"{i}, {str(running_scores)}")
    # Mean over all iterated examples. Using an explicit counter also
    # guards against an empty dataset, which previously raised a
    # NameError on the loop variable `i`.
    mean_scores = {
        metric: score_sum / total_examples
        for metric, score_sum in running_scores.items()
    }
    # Return a dictionary of helpful information
    return {
        "total_examples": total_examples,
        "scores": mean_scores
    }
# Build the Ballroom dataset and its per-example beat annotations.
dataset = BallroomDataset()
# NOTE(review): this is a generator; evaluate_model_on_dataset indexes
# ground_truths[i], so make sure it accepts / materializes
# non-subscriptable iterables before running.
ground_truths = (dataset.get_ground_truth(i) for i in range(len(dataset)))
# Run evaluation
evaluate_model_on_dataset(config['default_checkpoint_path'],
                          dataset,
                          ground_truths)
830448984e5a77e90d22cacc683d54197d1adc44 | 130,468 | py | Python | pycity_calc/cities/scripts/city_generator/city_generator.py | RWTH-EBC/pyCity_calc | 99fd0dab7f9a9030fd84ba4715753364662927ec | [
"MIT"
] | 4 | 2020-06-22T14:14:25.000Z | 2021-11-08T11:47:01.000Z | pycity_calc/cities/scripts/city_generator/city_generator.py | RWTH-EBC/pyCity_calc | 99fd0dab7f9a9030fd84ba4715753364662927ec | [
"MIT"
] | 4 | 2019-08-28T19:42:28.000Z | 2019-08-28T19:43:44.000Z | pycity_calc/cities/scripts/city_generator/city_generator.py | RWTH-EBC/pyCity_calc | 99fd0dab7f9a9030fd84ba4715753364662927ec | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Script to generate city object.
"""
from __future__ import division
import os
import numpy as np
import pickle
import warnings
import random
import datetime
import shapely.geometry.point as point
import pycity_base.classes.Weather as weath
import pycity_base.classes.demand.SpaceHeating as SpaceHeating
import pycity_base.classes.demand.ElectricalDemand as ElectricalDemand
import pycity_base.classes.demand.Apartment as Apartment
import pycity_base.classes.demand.DomesticHotWater as DomesticHotWater
import pycity_base.classes.demand.Occupancy as occup
import pycity_calc.environments.timer as time
# import pycity_calc.environments.market as price
import pycity_calc.environments.germanmarket as germanmarket
import pycity_calc.environments.environment as env
import pycity_calc.environments.co2emissions as co2
import pycity_calc.buildings.building as build_ex
import pycity_calc.cities.city as city
import pycity_calc.visualization.city_visual as citvis
import pycity_calc.toolbox.modifiers.slp_th_manipulator as slpman
import pycity_calc.toolbox.teaser_usage.teaser_use as tusage
import pycity_calc.toolbox.mc_helpers.user.user_unc_sampling as usunc
try:
import teaser.logic.simulation.VDI_6007.weather as vdiweather
except: # pragma: no cover
msg = 'Could not import teaser.logic.simulation.VDI_6007.weather. ' \
'If you need to use it, install ' \
'it via pip "pip install TEASER". Alternatively, you might have ' \
'run into trouble with XML bindings in TEASER. This can happen ' \
'if you try to re-import TEASER within an active Python console.' \
'Please close the active Python console and open another one. Then' \
' try again. You might also be on the wrong TEASER branch ' \
'(without VDI 6007 core).'
warnings.warn(msg)
def load_data_file_with_spec_demand_data(filename):
    """
    Load a specific-demand dataset from
    .../src/data/BaseData/Specific_Demand_Data/<filename>.

    The file is expected to be tab separated with one header row.
    Non-numeric cells (e.g. strings) are parsed as 'nan'.

    Parameters
    ----------
    filename : str
        Name of the data file, e.g. 'district_data.txt'

    Returns
    -------
    dataset : numpy array
        Numpy array holding the parsed data
    """
    # Climb four directory levels from this module up to the src root
    src_path = os.path.abspath(__file__)
    for _ in range(4):
        src_path = os.path.dirname(src_path)
    input_data_path = os.path.join(src_path, 'data', 'BaseData',
                                   'Specific_Demand_Data', filename)
    return np.genfromtxt(input_data_path, delimiter='\t', skip_header=1)
def convert_th_slp_int_and_str(th_slp_int):
    """
    Map a thermal SLP type number to its string tag.

    Parameters
    ----------
    th_slp_int : int
        SLP type integer number (0 - 13)

    Returns
    -------
    th_slp_tag : str
        SLP type string tag (None, if th_slp_int is None)

    Annotations
    -----------
    - `HEF` : Single family household
    - `HMF` : Multi family household
    - `GBA` : Bakeries
    - `GBD` : Other services
    - `GBH` : Accomodations
    - `GGA` : Restaurants
    - `GGB` : Gardening
    - `GHA` : Retailers
    - `GHD` : Summed load profile business, trade and services
    - `GKO` : Banks, insurances, public institutions
    - `GMF` : Household similar businesses
    - `GMK` : Automotive
    - `GPD` : Paper and printing
    - `GWA` : Laundries
    """
    if th_slp_int is None:
        warnings.warn('th_slp_int is None. Going to return None.')
        return None
    # Tags in the order of their integer codes (0 - 13)
    ordered_tags = ['HEF', 'HMF', 'GMF', 'GMK', 'GPD', 'GHA', 'GBD',
                    'GKO', 'GBH', 'GGA', 'GBA', 'GWA', 'GGB', 'GHD']
    return dict(enumerate(ordered_tags))[th_slp_int]
def convert_el_slp_int_and_str(el_slp_int):
    """
    Map an electrical SLP type number to its string tag.

    Parameters
    ----------
    el_slp_int : int
        SLP type integer number (0 - 10)

    Returns
    -------
    el_slp_tag : str
        SLP type string tag (None, if el_slp_int is None)

    Annotations
    -----------
    # 0: H0 : Residential
    # 1: G0 : Commercial
    # 2: G1 : Commercial Mo-Sa 08:00 to 18:00
    # 3: G2 : Commercial, mainly evening hours
    # 4: G3 : Commercial 24 hours
    # 5: G4 : Shop / hairdresser
    # 6: G5 : Backery
    # 7: G6 : Commercial, weekend
    # 8: L0 : Farm
    # 9: L1 : Farm, mainly cattle and milk
    # 10: L2 : Other farming
    """
    if el_slp_int is None:
        warnings.warn('el_slp_int is None. Going to return None.')
        return None
    # Tags in the order of their integer codes (0 - 10)
    ordered_tags = ['H0', 'G0', 'G1', 'G2', 'G3', 'G4',
                    'G5', 'G6', 'L0', 'L1', 'L2']
    return dict(enumerate(ordered_tags))[el_slp_int]
def convert_method_3_nb_into_str(method_3_nb):
    """
    Map a method-3 type number to its string label.

    Parameters
    ----------
    method_3_nb : int
        Number of method 3 (0 - 4)

    Returns
    -------
    method_3_str : str
        String of method 3 (None, if method_3_nb is None)
    """
    if method_3_nb is None:
        warnings.warn('method_3_nb is None. Going to return None.')
        return None
    # Labels in the order of their integer codes (0 - 4)
    ordered_labels = ['food_pro', 'metal', 'rest', 'sports', 'repair']
    return dict(enumerate(ordered_labels))[method_3_nb]
def convert_method_4_nb_into_str(method_4_nb):
    """
    Map a method-4 type number to its string label.

    Parameters
    ----------
    method_4_nb : int
        Number of method 4 (0 - 2)

    Returns
    -------
    method_4_str : str
        String of method 4 (None, if method_4_nb is None)
    """
    if method_4_nb is None:
        warnings.warn('method_4_nb is None. Going to return None.')
        return None
    # Labels in the order of their integer codes (0 - 2)
    ordered_labels = ['metal_1', 'metal_2', 'warehouse']
    return dict(enumerate(ordered_labels))[method_4_nb]
def conv_build_type_nb_to_name(build_type):
    """
    Map a building type number to a human readable name / explanation.

    Parameters
    ----------
    build_type : int
        Building type number (0 - 45), based on Spec_demands_non_res.txt

    Returns
    -------
    build_name : str
        Building name / explanation (None, if build_type is None)
    """
    if build_type is None:
        warnings.warn('build_type is None. Going to return None for '
                      'build_name.')
        return None
    # Names in the order of their integer codes (0 - 45)
    ordered_names = [
        'Residential',
        'Office (simulation)',
        'Main construction work',
        'Finishing trade construction work',
        'Bank and insurance',
        'Public institution',
        'Non profit organization',
        'Small office buildings',
        'Other services',
        'Metal',
        'Automobile',
        'Wood and timber',
        'Paper',
        'Small retailer for food',
        'Small retailer for non-food',
        'Large retailer for food',
        'Large retailer for non-food',
        'Primary school',
        'School for physically handicapped',
        'High school',
        'Trade school',
        'University',
        'Hotel',
        'Restaurant',
        'Childrens home',
        'Backery',
        'Butcher',
        'Laundry',
        'Farm primary agriculture ',
        'Farm with 10 - 49 cattle units',
        'Farm with 50 - 100 cattle units',
        'Farm with more than 100 cattle units',
        'Gardening',
        'Hospital',
        'Library',
        'Prison',
        'Cinema',
        'Theater',
        'Parish hall',
        'Sports hall',
        'Multi purpose hall',
        'Swimming hall',
        'Club house',
        'Fitness studio',
        'Train station smaller 5000m2',
        'Train station equal to or larger than 5000m2',
    ]
    return dict(enumerate(ordered_names))[build_type]
def constrained_sum_sample_pos(n, total):
    """
    Draw a uniformly random list of n positive integers summing to total.

    Uses the classic 'stars and bars' construction: n - 1 distinct cut
    points are sampled from [1, total - 1] and the gaps between
    consecutive cut points become the list entries, so every valid list
    is equally likely.

    Parameters
    ----------
    n : int
        Number of chosen integers
    total : int
        Sum of all entries of result list

    Returns
    -------
    results_list : list (of int)
        List with result integers, which sum up to value 'total'
    """
    cut_points = sorted(random.sample(range(1, int(total)), int(n - 1)))
    uppers = cut_points + [total]
    lowers = [0] + cut_points
    return [int(hi - lo) for hi, lo in zip(uppers, lowers)]
def redistribute_occ(occ_list):
    """
    Redistribute occupants in occ_list, so that each apartment is having at
    least 1 person and maximal 5 persons.

    The input list is not modified; a copy is redistributed and returned.

    Parameters
    ----------
    occ_list : list (of int)
        Number of occupants per apartment before redistribution

    Returns
    -------
    occ_list_new : list
        List holding number of occupants per apartment

    NOTE(review): overfull apartments are capped at 5 first, and empty
    apartments are only filled from that surplus. An input such as
    [6, 0, 0] therefore returns [5, 1, 0], leaving one apartment empty --
    confirm whether such inputs can occur in practice.
    """
    # Work on a copy; the caller's list stays untouched
    occ_list_new = occ_list[:]
    # Richardson occupancy profiles support at most 5 persons/apartment
    if sum(occ_list_new) / len(occ_list_new) > 5: # pragma: no cover
        msg = 'Average number of occupants per apartment is higher than 5.' \
              ' This is not valid for usage of Richardson profile generator.'
        raise AssertionError(msg)
    # Number of occupants to be redistributed
    nb_occ_redist = 0
    # Find remaining occupants
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] > 5:
            # Add remaining occupants to nb_occ_redist
            nb_occ_redist += occ_list_new[i] - 5
            # Set occ_list_new entry to 5 persons
            occ_list_new[i] = 5
    if nb_occ_redist == 0:
        # Return original list
        return occ_list_new
    # Identify empty apartments and add single occupant
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] == 0:
            # Add single occupant
            occ_list_new[i] = 1
            # Remove occupant from nb_occ_redist
            nb_occ_redist -= 1
            if nb_occ_redist == 0:
                # Return original list
                return occ_list_new
    # Redistribute remaining occupants
    # ###############################################################
    for i in range(len(occ_list_new)):
        if occ_list_new[i] < 5:
            # Fill occupants up with remaining occupants
            for j in range(5 - occ_list_new[i]):
                # Add single occupant
                occ_list_new[i] += 1
                # Remove single occupant from remaining sum
                nb_occ_redist -= 1
                if nb_occ_redist == 0:
                    # Return original list
                    return occ_list_new
    if nb_occ_redist: # pragma: no cover
        raise AssertionError('Not all occupants could be distributed.'
                             'Check inputs and/or redistribute_occ() call.')
def generate_environment(timestep=3600,
                         year_timer=2017,
                         year_co2=2017,
                         try_path=None,
                         location=(51.529086, 6.944689),
                         altitude=55,
                         new_try=False):
    """
    Returns environment object. Total number of timesteps is automatically
    generated for one year.

    Parameters
    ----------
    timestep : int
        Timestep in seconds
    year_timer : int, optional
        Chosen year of analysis (default: 2017)
        (influences initial day for profile generation)
    year_co2 : int, optional
        Chose year with specific emission factors (default: 2017)
    try_path : str, optional
        Path to TRY weather file (default: None)
        If set to None, uses default weather TRY file (2010, region 5)
    location : Tuple, optional
        (latitude , longitude) of the simulated system's position,
        (default: (51.529086, 6.944689) for Bottrop, Germany.
    altitude : float, optional
        Altitute of location in m (default: 55 - City of Bottrop)
    new_try : bool, optional
        Defines, if TRY dataset have been generated after 2017 (default: False)
        If False, assumes that TRY dataset has been generated before 2017.
        If True, assumes that TRY dataset has been generated after 2017 and
        belongs to the new TRY classes. This is important for extracting
        the correct values from the TRY dataset!

    Returns
    -------
    environment : object
        Environment object
    """
    # Create environment
    # Timer defines discretization and number of timesteps for one year
    timer = time.TimerExtended(timestep=timestep, year=year_timer)
    # Weather object loads TRY dataset (default TRY, if try_path is None)
    weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,
                            location=location, altitude=altitude,
                            new_try=new_try)
    # German energy market prices
    market = germanmarket.GermanMarket()
    # CO2 emission factors for chosen year
    co2em = co2.Emissions(year=year_co2)
    # Bundle timer, weather, prices and emissions into one environment
    environment = env.EnvironmentExtended(timer=timer,
                                          weather=weather,
                                          prices=market,
                                          location=location,
                                          co2em=co2em)
    return environment
def generate_res_building_single_zone(environment, net_floor_area,
                                      spec_th_demand,
                                      th_gen_method,
                                      el_gen_method,
                                      annual_el_demand=None,
                                      el_random=False,
                                      use_dhw=False,
                                      dhw_method=1, number_occupants=None,
                                      build_year=None, mod_year=None,
                                      build_type=None, pv_use_area=None,
                                      height_of_floors=None, nb_of_floors=None,
                                      neighbour_buildings=None,
                                      residential_layout=None, attic=None,
                                      cellar=None, construction_type=None,
                                      dormer=None, dhw_volumen=None,
                                      do_normalization=True,
                                      slp_manipulate=True,
                                      curr_central_ahu=None,
                                      dhw_random=False, prev_heat_dev=True,
                                      season_mod=None):
    """
    Function generates and returns extended residential building object
    with single zone.

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    th_gen_method : int
        Thermal load profile generation method
        1 - Use SLP
        2 - Load Modelica simulation output profile (only residential)
        Method 2 is only used for residential buildings. For non-res.
        buildings, SLPs are generated instead
    el_gen_method : int, optional
        Electrical generation method (default: 1)
        1 - Use SLP
        2 - Generate stochastic load profile (only valid for residential
        building)
    annual_el_demand : float, optional
        Annual electrical energy demand in kWh/a (default: None)
    el_random : bool, optional
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value. (default: False)
    use_dhw : bool, optional
        Boolean to define, if domestic hot water profile should be generated
        (default: False)
        True - Generate dhw profile
    dhw_method : int, optional
        Domestic hot water profile generation method (default: 1)
        1 - Use Annex 42 profile
        2 - Use stochastic profile
    number_occupants : int, optional
        Number of occupants (default: None)
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    height_of_floors : float
        average height of single floor
    nb_of_floors : int
        Number of floors above the ground
    neighbour_buildings : int
        neighbour (default = 0)
        0: no neighbour
        1: one neighbour
        2: two neighbours
    residential_layout : int
        type of floor plan (default = 0)
        0: compact
        1: elongated/complex
    attic : int
        type of attic (default = 0)
        0: flat roof
        1: non heated attic
        2: partly heated attic
        3: heated attic
    cellar : int
        type of cellar (default = 0)
        0: no cellar
        1: non heated cellar
        2: partly heated cellar
        3: heated cellar
    construction_type : str
        construction type (default = "heavy")
        heavy: heavy construction
        light: light construction
    dormer : str
        construction type
        0: no dormer
        1: dormer
    dhw_volumen : float, optional
        Volume of domestic hot water in liter per capita and day
        (default: None).
    do_normalization : bool, optional
        Defines, if stochastic profile (el_gen_method=2) should be
        normalized to given annualDemand value (default: True).
        If set to False, annual el. demand depends on stochastic el. load
        profile generation. If set to True, does normalization with
        annualDemand
    slp_manipulate : bool, optional
        Defines, if thermal space heating SLP profile should be modified
        (default: True). Only used for residential buildings!
        Only relevant, if th_gen_method == 1
        True - Do manipulation
        False - Use original profile
        Sets thermal power to zero in time spaces, where average daily outdoor
        temperature is equal to or larger than 12 C. Rescales profile to
        original demand value.
    curr_central_ahu : bool, optional
        Defines, if building has air handling unit (AHU)
        (default: False)
    dhw_random : bool, optional
        Defines, if hot water volume per person and day value should be
        randomized by choosing value from gaussian distribution (20 %
        standard deviation) (default: False)
        If True: Randomize value
        If False: Use reference value
    prev_heat_dev : bool, optional
        Defines, if heating devices should be prevented within chosen
        appliances (default: True). If set to True, DESWH, E-INST,
        Electric shower, Storage heaters and Other electric space heating
        are set to zero. Only relevant for el_gen_method == 2
    season_mod : float, optional
        Float to define rescaling factor to rescale annual lighting power curve
        with cosine wave to increase winter usage and decrease summer usage.
        Reference is maximum lighting power (default: None). If set to None,
        do NOT perform rescaling with cosine wave

    Returns
    -------
    extended_building : object
        BuildingExtended object
    """
    # Input validation: either annual_el_demand or number_occupants must
    # be given (demand can be derived from occupancy statistics)
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    if annual_el_demand is not None:
        assert annual_el_demand >= 0
    else:
        assert number_occupants is not None
        assert number_occupants > 0
    # Define SLP profiles for residential building with single zone
    # (HEF: single-family household thermal SLP; H0: household el. SLP)
    th_slp_type = 'HEF'
    el_slp_type = 'H0'
    if number_occupants is not None:
        assert number_occupants > 0
        assert number_occupants <= 5  # Max 5 occupants for stochastic profile
        if el_gen_method == 2 or (dhw_method == 2 and use_dhw == True):
            # Generate occupancy profile (necessary for stochastic, el. or
            # dhw profile)
            occupancy_object = occup.Occupancy(environment,
                                               number_occupants=number_occupants)
        else:  # Generate occupancy object without profile generation
            # Just used to store information about number of occupants
            occupancy_object = occup.Occupancy(environment,
                                               number_occupants=number_occupants,
                                               do_profile=False)
    else:
        occupancy_object = None  # Dummy object to prevent error with
        # apartment usage
        if el_gen_method == 2:
            # Stochastic profile requires occupants; fall back to SLP
            warnings.warn('Stochastic el. profile cannot be generated ' +
                          'due to missing number of occupants. ' +
                          'SLP is used instead.')
            # Set el_gen_method to 1 (SLP)
            el_gen_method = 1
        elif dhw_method == 2:
            raise AssertionError('DHW profile cannot be generated' +
                                 'for residential building without' +
                                 'occupants (stochastic mode).' +
                                 'Please check your input file ' +
                                 '(missing number of occupants) ' +
                                 'or disable dhw generation.')
    if (number_occupants is None and dhw_method == 1 and use_dhw == True):
        # Set number of occupants to 2 to enable dhw usage
        # NOTE(review): occupancy_object stays None in this case; only
        # the scalar number_occupants is used for Annex 42 dhw sizing
        number_occupants = 2
    # Create space heating demand
    if th_gen_method == 1:
        # Use SLP
        heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                     method=1,
                                                     profile_type=th_slp_type,
                                                     livingArea=net_floor_area,
                                                     specificDemand=spec_th_demand)
        if slp_manipulate:  # Do SLP manipulation
            # Zero out heating during warm periods, rescale to demand
            timestep = environment.timer.timeDiscretization
            temp_array = environment.weather.tAmbient
            mod_curve = \
                slpman.slp_th_manipulator(timestep,
                                          th_slp_curve=heat_power_curve.loadcurve,
                                          temp_array=temp_array)
            heat_power_curve.loadcurve = mod_curve
    elif th_gen_method == 2:
        # Use Modelica result profile
        heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                     method=3,
                                                     livingArea=net_floor_area,
                                                     specificDemand=spec_th_demand)
    # Calculate el. energy demand for apartment, if no el. energy
    # demand is given for whole building to rescale
    if annual_el_demand is None:
        # Generate annual_el_demand_ap from occupancy statistics (sfh)
        annual_el_demand = calc_el_dem_ap(nb_occ=number_occupants,
                                          el_random=el_random,
                                          type='sfh')
    print('Annual electrical demand in kWh: ', annual_el_demand)
    if number_occupants is not None:
        print('El. demand per person in kWh: ')
        print(annual_el_demand / number_occupants)
        print()
    # Create electrical power curve
    if el_gen_method == 2:
        # Enable lighting-season rescaling only when a factor is given
        if season_mod is not None:
            season_light_mod = True
        else:
            season_light_mod = False
        el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                           method=2,
                                                           total_nb_occupants=number_occupants,
                                                           randomizeAppliances=True,
                                                           lightConfiguration=0,
                                                           annualDemand=annual_el_demand,
                                                           occupancy=occupancy_object.occupancy,
                                                           do_normalization=do_normalization,
                                                           prev_heat_dev=prev_heat_dev,
                                                           season_light_mod=season_light_mod,
                                                           light_mod_fac=season_mod)
    else:  # Use el. SLP
        el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                           method=1,
                                                           annualDemand=annual_el_demand,
                                                           profileType=el_slp_type)
    # Create domestic hot water demand
    if use_dhw:
        if dhw_volumen is None or dhw_random:
            # Sample/average annual dhw energy demand per apartment
            dhw_kwh = calc_dhw_dem_ap(nb_occ=number_occupants,
                                      dhw_random=dhw_random,
                                      type='sfh')
            # Reconvert kWh/a to Liters per day
            # (rho=955 kg/m3, c_p=4182 J/kgK, delta_T=35 K)
            dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (955 * 4182 * 35 * 365)
            # DHW volume per person and day
            dhw_volumen = dhw_vol_ap / number_occupants
        if dhw_method == 1:  # Annex 42
            dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
                                                                tFlow=60,
                                                                thermal=True,
                                                                method=1,
                                                                # Annex 42
                                                                dailyConsumption=dhw_volumen * number_occupants,
                                                                supplyTemperature=25)
        else:  # Stochastic profile
            dhw_power_curve = DomesticHotWater.DomesticHotWater(environment,
                                                                tFlow=60,
                                                                thermal=True,
                                                                method=2,
                                                                supplyTemperature=25,
                                                                occupancy=occupancy_object.occupancy)
            # Rescale to reference dhw volume (liters per person
            # and day)
            curr_dhw_vol_flow = dhw_power_curve.water
            # Water volume flow in Liter/hour
            curr_volume_year = sum(curr_dhw_vol_flow) * \
                environment.timer.timeDiscretization / \
                3600
            curr_vol_day = curr_volume_year / 365
            curr_vol_day_and_person = curr_vol_day / \
                occupancy_object.number_occupants
            print('Curr. volume per person and day: ',
                  curr_vol_day_and_person)
            dhw_con_factor = dhw_volumen / curr_vol_day_and_person
            print('Conv. factor of hot water: ', dhw_con_factor)
            print('New volume per person and day: ',
                  curr_vol_day_and_person * dhw_con_factor)
            # Normalize water flow and power load
            dhw_power_curve.water *= dhw_con_factor
            dhw_power_curve.loadcurve *= dhw_con_factor
    # Create apartment
    apartment = Apartment.Apartment(environment, occupancy=occupancy_object,
                                    net_floor_area=net_floor_area)
    # Add demands to apartment
    if th_gen_method == 1 or th_gen_method == 2:
        if use_dhw:
            apartment.addMultipleEntities([heat_power_curve, el_power_curve,
                                           dhw_power_curve])
        else:
            apartment.addMultipleEntities([heat_power_curve, el_power_curve])
    else:
        if use_dhw:
            apartment.addMultipleEntities([el_power_curve,
                                           dhw_power_curve])
        else:
            apartment.addEntity(el_power_curve)
    # Create extended building object
    extended_building = \
        build_ex.BuildingExtended(environment,
                                  build_year=build_year,
                                  mod_year=mod_year,
                                  build_type=build_type,
                                  roof_usabl_pv_area=pv_use_area,
                                  net_floor_area=net_floor_area,
                                  height_of_floors=height_of_floors,
                                  nb_of_floors=nb_of_floors,
                                  neighbour_buildings=neighbour_buildings,
                                  residential_layout=residential_layout,
                                  attic=attic,
                                  cellar=cellar,
                                  construction_type=construction_type,
                                  dormer=dormer,
                                  with_ahu=
                                  curr_central_ahu)
    # Add apartment to extended building
    extended_building.addEntity(entity=apartment)
    return extended_building
def generate_res_building_multi_zone(environment,
                                     net_floor_area,
                                     spec_th_demand,
                                     th_gen_method,
                                     el_gen_method,
                                     nb_of_apartments,
                                     annual_el_demand=None,
                                     el_random=False,
                                     use_dhw=False,
                                     dhw_method=1,
                                     total_number_occupants=None,
                                     build_year=None, mod_year=None,
                                     build_type=None, pv_use_area=None,
                                     height_of_floors=None, nb_of_floors=None,
                                     neighbour_buildings=None,
                                     residential_layout=None, attic=None,
                                     cellar=None, construction_type=None,
                                     dormer=None, dhw_volumen=None,
                                     do_normalization=True,
                                     slp_manipulate=True,
                                     curr_central_ahu=False,
                                     dhw_random=False, prev_heat_dev=True,
                                     season_mod=None):
    """
    Function generates and returns extended residential building object
    with multiple apartments. Occupants are randomly distributed over
    number of apartments.

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    annual_el_demand : float, optional
        Annual electrical energy demand in kWh/a (default: None)
    el_random : bool, optional
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value. (default: False)
    th_gen_method : int
        Thermal load profile generation method
        1 - Use SLP
        2 - Load Modelica simulation output profile (only residential)
        Method 2 is only used for residential buildings. For non-res.
        buildings, SLPs are generated instead
    el_gen_method : int, optional
        Electrical generation method (default: 1)
        1 - Use SLP
        2 - Generate stochastic load profile (only valid for residential
        building)
    nb_of_apartments : int
        Number of apartments within building
    use_dhw : bool, optional
        Boolean to define, if domestic hot water profile should be generated
        (default: False)
        True - Generate dhw profile
    dhw_method : int, optional
        Domestic hot water profile generation method (default: 1)
        1 - Use Annex 42 profile
        2 - Use stochastic profile
    total_number_occupants : int, optional
        Total number of occupants in all apartments (default: None)
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    height_of_floors : float
        average height of the floors
    nb_of_floors : int
        Number of floors above the ground
    neighbour_buildings : int
        neighbour (default = 0)
        0: no neighbour
        1: one neighbour
        2: two neighbours
    residential_layout : int
        type of floor plan (default = 0)
        0: compact
        1: elongated/complex
    attic : int
        type of attic (default = 0)
        0: flat roof
        1: non heated attic
        2: partly heated attic
        3: heated attic
    cellar : int
        type of cellar (default = 0)
        0: no cellar
        1: non heated cellar
        2: partly heated cellar
        3: heated cellar
    construction_type : str
        construction type (default = "heavy")
        heavy: heavy construction
        light: light construction
    dormer : str
        construction type
        0: no dormer
        1: dormer
    dhw_volumen : float, optional
        Volume of domestic hot water in liter per capita and day
        (default: None).
    do_normalization : bool, optional
        Defines, if stochastic profile (el_gen_method=2) should be
        normalized to given annualDemand value (default: True).
        If set to False, annual el. demand depends on stochastic el. load
        profile generation. If set to True, does normalization with
        annualDemand
    slp_manipulate : bool, optional
        Defines, if thermal space heating SLP profile should be modified
        (default: True). Only used for residential buildings!
        Only relevant, if th_gen_method == 1
        True - Do manipulation
        False - Use original profile
        Sets thermal power to zero in time spaces, where average daily outdoor
        temperature is equal to or larger than 12 C. Rescales profile to
        original demand value.
    curr_central_ahu : bool, optional
        Defines, if building has air handling unit (AHU)
        (default: False)
    dhw_random : bool, optional
        Defines, if hot water volume per person and day value should be
        randomized by choosing value from gaussian distribution (20 %
        standard deviation) (default: False)
        If True: Randomize value
        If False: Use reference value
    prev_heat_dev : bool, optional
        Defines, if heating devices should be prevented within chosen
        appliances (default: True). If set to True, DESWH, E-INST,
        Electric shower, Storage heaters and Other electric space heating
        are set to zero. Only relevant for el_gen_method == 2
    season_mod : float, optional
        Float to define rescaling factor to rescale annual lighting power curve
        with cosine wave to increase winter usage and decrease summer usage.
        Reference is maximum lighting power (default: None). If set to None,
        do NOT perform rescaling with cosine wave

    Returns
    -------
    extended_building : object
        BuildingExtended object

    Annotation
    ----------
    Raise assertion error when share of occupants per apartment is higher
    than 5 (necessary for stochastic, el. profile generation)
    """
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    if annual_el_demand is not None:
        assert annual_el_demand >= 0
    if total_number_occupants is not None:
        assert total_number_occupants > 0
        assert total_number_occupants / nb_of_apartments <= 5, (
            'Number of occupants per apartment is ' +
            'at least once higher than 5.')
        # Distribute occupants to different apartments
        occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
                                                    total=total_number_occupants)
        # While not all values are smaller or equal to 5, return run
        # This while loop might lead to large runtimes for buildings with a
        # large number of apartments (not finding a valid solution, see
        # issue #147). Thus, we add a counter to exit the loop
        count = 0
        while all(i <= 5 for i in occupancy_list) is not True:
            occupancy_list = constrained_sum_sample_pos(n=nb_of_apartments,
                                                        total=total_number_occupants)
            if count == 100000:
                # Take current occupancy_list and redistribute occupants
                # manually until valid distribution is found
                occupancy_list = redistribute_occ(occ_list=occupancy_list)
                # Exit while loop
                break
            count += 1
        print('Current list of occupants per apartment: ', occupancy_list)
    else:
        # NOTE(review): without occupants, occupancy_list does not exist;
        # the loop below relies on total_number_occupants being None, too
        msg = 'Number of occupants is None for current building!'
        warnings.warn(msg)
    # Define SLP profiles for residential building with multiple zone
    # (HMF: multi-family household thermal SLP; H0: household el. SLP)
    th_slp_type = 'HMF'
    el_slp_type = 'H0'
    # Create extended building object
    extended_building = \
        build_ex.BuildingExtended(environment,
                                  build_year=build_year,
                                  mod_year=mod_year,
                                  build_type=build_type,
                                  roof_usabl_pv_area=pv_use_area,
                                  net_floor_area=net_floor_area,
                                  height_of_floors=height_of_floors,
                                  nb_of_floors=nb_of_floors,
                                  neighbour_buildings=
                                  neighbour_buildings,
                                  residential_layout=
                                  residential_layout,
                                  attic=attic,
                                  cellar=cellar,
                                  construction_type=
                                  construction_type,
                                  dormer=dormer,
                                  with_ahu=curr_central_ahu)
    if annual_el_demand is not None:
        # Distribute el. demand equally to apartments
        annual_el_demand_ap = annual_el_demand / nb_of_apartments
    else:
        annual_el_demand_ap = None
    # Loop over apartments
    # #---------------------------------------------------------------------
    for i in range(int(nb_of_apartments)):
        # Dummy init of number of occupants
        curr_number_occupants = None
        # Check number of occupants
        if total_number_occupants is not None:
            # Get number of occupants
            curr_number_occupants = occupancy_list[i]
            # Generate occupancy profiles for stochastic el. and/or dhw
            if el_gen_method == 2 or (dhw_method == 2 and use_dhw):
                # Generate occupancy profile (necessary for stochastic, el. or
                # dhw profile)
                occupancy_object = occup.Occupancy(environment,
                                                   number_occupants=
                                                   curr_number_occupants)
            else:  # Generate occupancy object without profile
                occupancy_object = occup.Occupancy(environment,
                                                   number_occupants=
                                                   curr_number_occupants,
                                                   do_profile=False)
        else:
            # NOTE(review): occupancy_object is not assigned in this branch;
            # Apartment creation below would then use a previously bound or
            # undefined name -- confirm intended behavior
            if el_gen_method == 2:
                warnings.warn('Stochastic el. profile cannot be generated ' +
                              'due to missing number of occupants. ' +
                              'SLP is used instead.')
                # Set el_gen_method to 1 (SLP)
                el_gen_method = 1
            elif dhw_method == 2:
                raise AssertionError('DHW profile cannot be generated' +
                                     'for residential building without' +
                                     'occupants (stochastic mode).' +
                                     'Please check your input file ' +
                                     '(missing number of occupants) ' +
                                     'or disable dhw generation.')
        if (curr_number_occupants is None and dhw_method == 1 and
                use_dhw == True):
            # If dhw profile should be generated, but current number of
            # occupants is None, number of occupants is samples from
            # occupancy distribution for apartment
            # NOTE(review): calc_sampling_occ_per_app presumably returns a
            # sequence for nb_samples=1 -- verify a scalar is assigned here
            curr_number_occupants = usunc.calc_sampling_occ_per_app(
                nb_samples=1)
        # Assumes equal area share for all apartments
        apartment_area = net_floor_area / nb_of_apartments
        # Create space heating demand (for apartment)
        if th_gen_method == 1:
            # Use SLP
            heat_power_curve = \
                SpaceHeating.SpaceHeating(environment,
                                          method=1,
                                          profile_type=th_slp_type,
                                          livingArea=apartment_area,
                                          specificDemand=spec_th_demand)
            if slp_manipulate:  # Do SLP manipulation
                timestep = environment.timer.timeDiscretization
                temp_array = environment.weather.tAmbient
                mod_curve = \
                    slpman.slp_th_manipulator(timestep,
                                              th_slp_curve=heat_power_curve.loadcurve,
                                              temp_array=temp_array)
                heat_power_curve.loadcurve = mod_curve
        elif th_gen_method == 2:
            # Use Modelica result profile
            heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                         method=3,
                                                         livingArea=apartment_area,
                                                         specificDemand=spec_th_demand)
        # Calculate el. energy demand for apartment, if no el. energy
        # demand is given for whole building to rescale
        # NOTE(review): annual_el_demand_ap is only computed once (first
        # apartment with None); later apartments reuse the same value --
        # confirm this is intended for el_random sampling
        if annual_el_demand_ap is None:
            # Generate annual_el_demand_ap
            annual_el_demand_ap = calc_el_dem_ap(nb_occ=curr_number_occupants,
                                                 el_random=el_random,
                                                 type='mfh')
        print('Annual el. demand (apartment) in kWh: ', annual_el_demand_ap)
        if curr_number_occupants is not None:
            print('El. demand per person in kWh: ')
            print(annual_el_demand_ap / curr_number_occupants)
            print()
        # Create electrical power curve
        if el_gen_method == 2:
            # Enable lighting-season rescaling only when a factor is given
            if season_mod is not None:
                season_light_mod = True
            else:
                season_light_mod = False
            el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                               method=2,
                                                               total_nb_occupants=curr_number_occupants,
                                                               randomizeAppliances=True,
                                                               lightConfiguration=0,
                                                               annualDemand=annual_el_demand_ap,
                                                               occupancy=occupancy_object.occupancy,
                                                               do_normalization=do_normalization,
                                                               prev_heat_dev=prev_heat_dev,
                                                               season_light_mod=season_light_mod,
                                                               light_mod_fac=season_mod)
        else:  # Use el. SLP
            el_power_curve = ElectricalDemand.ElectricalDemand(environment,
                                                               method=1,
                                                               annualDemand=annual_el_demand_ap,
                                                               profileType=el_slp_type)
        # Create domestic hot water demand
        if use_dhw:
            # NOTE(review): dhw_volumen is overwritten here and then reused
            # by subsequent loop iterations (the "is None" condition no
            # longer triggers unless dhw_random is set) -- confirm intent
            if dhw_volumen is None or dhw_random:
                dhw_kwh = calc_dhw_dem_ap(nb_occ=curr_number_occupants,
                                          dhw_random=dhw_random,
                                          type='mfh')
                # Reconvert kWh/a to Liters per day
                # (rho=955 kg/m3, c_p=4182 J/kgK, delta_T=35 K)
                dhw_vol_ap = dhw_kwh * 1000 * 3600 * 1000 / (
                        955 * 4182 * 35 * 365)
                # DHW volume per person and day
                dhw_volumen = dhw_vol_ap / curr_number_occupants
            if dhw_method == 1:  # Annex 42
                dhw_power_curve = DomesticHotWater.DomesticHotWater(
                    environment,
                    tFlow=60,
                    thermal=True,
                    method=1,
                    # Annex 42
                    dailyConsumption=dhw_volumen * curr_number_occupants,
                    supplyTemperature=25)
            else:  # Stochastic profile
                dhw_power_curve = DomesticHotWater.DomesticHotWater(
                    environment,
                    tFlow=60,
                    thermal=True,
                    method=2,
                    supplyTemperature=25,
                    occupancy=occupancy_object.occupancy)
                # Rescale to reference dhw volume (liters per person
                # and day)
                curr_dhw_vol_flow = dhw_power_curve.water
                # Water volume flow in Liter/hour
                curr_volume_year = sum(curr_dhw_vol_flow) * \
                    environment.timer.timeDiscretization / \
                    3600
                curr_vol_day = curr_volume_year / 365
                curr_vol_day_and_person = curr_vol_day / \
                    occupancy_object.number_occupants
                print('Curr. volume per person and day: ',
                      curr_vol_day_and_person)
                dhw_con_factor = dhw_volumen / curr_vol_day_and_person
                print('Conv. factor of hot water: ', dhw_con_factor)
                print('New volume per person and day: ',
                      curr_vol_day_and_person * dhw_con_factor)
                # Normalize water flow and power load
                dhw_power_curve.water *= dhw_con_factor
                dhw_power_curve.loadcurve *= dhw_con_factor
        # Create apartment
        apartment = Apartment.Apartment(environment,
                                        occupancy=occupancy_object,
                                        net_floor_area=apartment_area)
        # Add demands to apartment
        if th_gen_method == 1 or th_gen_method == 2:
            if use_dhw:
                apartment.addMultipleEntities([heat_power_curve,
                                               el_power_curve,
                                               dhw_power_curve])
            else:
                apartment.addMultipleEntities([heat_power_curve,
                                               el_power_curve])
        else:
            if use_dhw:
                apartment.addMultipleEntities([el_power_curve,
                                               dhw_power_curve])
            else:
                apartment.addEntity(el_power_curve)
        # Add apartment to extended building
        extended_building.addEntity(entity=apartment)
    return extended_building
def generate_nonres_building_single_zone(environment,
                                         net_floor_area, spec_th_demand,
                                         annual_el_demand, th_slp_type,
                                         el_slp_type=None,
                                         build_year=None, mod_year=None,
                                         build_type=None, pv_use_area=None,
                                         method_3_type=None,
                                         method_4_type=None,
                                         height_of_floors=None,
                                         nb_of_floors=None):
    """
    Function generates and returns extended nonresidential building object
    with single zone.

    Parameters
    ----------
    environment : object
        Environment object
    net_floor_area : float
        Net floor area of building in m2
    spec_th_demand : float
        Specific thermal energy demand in kWh/m2*a
    annual_el_demand : float
        Annual electrical energy demand in kWh/a
    th_slp_type : str
        Thermal SLP type (for non-residential buildings)
        - `GBA` : Bakeries
        - `GBD` : Other services
        - `GBH` : Accomodations
        - `GGA` : Restaurants
        - `GGB` : Gardening
        - `GHA` : Retailers
        - `GHD` : Summed load profile business, trade and services
        - `GKO` : Banks, insurances, public institutions
        - `GMF` : Household similar businesses
        - `GMK` : Automotive
        - `GPD` : Paper and printing
        - `GWA` : Laundries
    el_slp_type : str, optional (default: None)
        Electrical SLP type. Required, if neither method_3_type nor
        method_4_type is given.
        - L0 : Farms
        - L1 : Farms with breeding / cattle
        - L2 : Farms without cattle
        - G0 : Business (general)
        - G1 : Business (workingdays 8:00 AM - 6:00 PM)
        - G2 : Business with high loads in the evening
        - G3 : Business (24 hours)
        - G4 : Shops / Barbers
        - G5 : Bakery
        - G6 : Weekend operation
    build_year : int, optional
        Building year of construction (default: None)
    mod_year : int, optional
        Last year of modernization of building (default: None)
    build_type : int, optional
        Building type (default: None)
    pv_use_area : float, optional
        Usable pv area in m2 (default: None)
    method_3_type : str, optional
        Defines type of profile for method=3 (default: None)
        Options:
        - 'food_pro': Food production
        - 'metal': Metal company
        - 'rest': Restaurant (with large cooling load)
        - 'sports': Sports hall
        - 'repair': Repair / metal shop
    method_4_type : str, optional
        Defines type of profile for method=4 (default: None)
        - 'metal_1' : Metal company with smooth profile
        - 'metal_2' : Metal company with fluctuation in profile
        - 'warehouse' : Warehouse
    height_of_floors : float
        average height of the floors
    nb_of_floors : int
        Number of floors above the ground

    Returns
    -------
    extended_building : object
        BuildingExtended object
    """
    assert net_floor_area > 0
    assert spec_th_demand >= 0
    assert annual_el_demand >= 0
    # Residential-only SLP types are invalid for non-res. buildings
    assert th_slp_type != 'HEF', ('HEF thermal slp profile only valid for ' +
                                  'residential buildings.')
    assert th_slp_type != 'HMF', ('HMF thermal slp profile only valid for ' +
                                  'residential buildings.')
    assert el_slp_type != 'H0', ('H0 electrical slp profile only valid for ' +
                                 'residential buildings.')
    # Create space heating demand (thermal SLP, scaled to spec. demand)
    heat_power_curve = SpaceHeating.SpaceHeating(environment,
                                                 method=1,
                                                 profile_type=th_slp_type,
                                                 livingArea=net_floor_area,
                                                 specificDemand=spec_th_demand)
    # Create electrical demand. Priority: measured weekly profile
    # (method 3) > measured annual profile (method 4) > el. SLP (method 1)
    if method_3_type is not None:
        el_power_curve = \
            ElectricalDemand.ElectricalDemand(environment,
                                              method=3,
                                              annualDemand=annual_el_demand,
                                              do_normalization=True,
                                              method_3_type=method_3_type)
    elif method_4_type is not None:
        el_power_curve = \
            ElectricalDemand.ElectricalDemand(environment,
                                              method=4,
                                              annualDemand=annual_el_demand,
                                              do_normalization=True,
                                              method_4_type=method_4_type)
    else:
        # Use el. SLP for el. power load generation
        assert el_slp_type is not None, 'el_slp_type is required!'
        el_power_curve = \
            ElectricalDemand.ElectricalDemand(environment,
                                              method=1,
                                              annualDemand=annual_el_demand,
                                              profileType=el_slp_type)
    # Create apartment (single zone holding both demand curves)
    apartment = Apartment.Apartment(environment)
    # Add demands to apartment
    apartment.addMultipleEntities([heat_power_curve, el_power_curve])
    # Create extended building object
    extended_building = build_ex.BuildingExtended(environment,
                                                  net_floor_area=net_floor_area,
                                                  build_year=build_year,
                                                  mod_year=mod_year,
                                                  build_type=build_type,
                                                  roof_usabl_pv_area=pv_use_area,
                                                  height_of_floors=height_of_floors,
                                                  nb_of_floors=nb_of_floors,
                                                  )
    # Add apartment to extended building
    extended_building.addEntity(entity=apartment)
    return extended_building
def get_district_data_from_txt(path, delimiter='\t'):
    """
    Load city district data from txt file (see annotations below for further
    information of required inputs).
    naN are going to be replaced with Python None.

    Parameters
    ----------
    path : str
        Path to txt file
    delimiter : str, optional
        Defines delimiter for txt file (default: '\t')

    Returns
    -------
    district_data : ndarray
        Numpy 2d-array with city district data (each column represents
        different parameter, see annotations)

    Annotations
    -----------
    File structure
    Columns:
    1:  id (int)
    2:  x in m (float)
    3:  y in m (float)
    4:  building_type (int, e.g. 0 for residential building)
    5:  net floor area in m2 (float)
    6:  Year of construction (int, optional)
    7:  Year of modernization (int, optional)
    8:  Annual (final) thermal energy demand in kWh (float, optional)
    9:  Annual electrical energy demand in kWh (float, optional)
    10: Usable pv roof area in m2 (float, optional)
    11: Number of apartments (int, optional)
    12: Total number of occupants (int, optional)
    13: Number of floors above the ground (int, optional)
    14: Average Height of floors (float, optional)
    15: If building has a central AHU or not (boolean, optional)
    16: Residential layout (int, optional, e.g. 0 for compact)
    17: Neighbour Buildings (int, optional) (0 - free standing)
    (1 - double house) (2 - row house)
    18: Type of attic (int, optional, e.g. 0 for flat roof) (1 - regular roof;
    unheated) (2 - regular roof; partially heated) (3 - regular roof; fully
    heated)
    19: Type of cellar (int, optional, e.g. 1 for non heated cellar)
    (0 - no basement) (1 - non heated) (2 - partially heated) (3 - fully heated)
    20: Dormer (int, optional, 0: no dormer/ 1: dormer)
    21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
    22: Method_3_nb (for usage of measured, weekly non-res. el. profile
    (optional)
    23: Method_4_nb (for usage of measured, annual non-res. el. profile
    (optional)
    """
    # Parse file, dropping the single header row
    raw_data = np.genfromtxt(path, delimiter=delimiter, skip_header=1)
    # Missing entries are parsed as nan; map them to Python None so that
    # downstream code can use simple "is None" checks
    nan_mask = np.isnan(raw_data)
    return np.where(nan_mask, None, raw_data)
def calc_el_dem_ap(nb_occ, el_random, type):
    """
    Calculate electric energy demand per apartment per year
    in kWh/a (residential buildings, only)

    Parameters
    ----------
    nb_occ : int
        Number of occupants
    el_random : bool
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. el_random == True means,
        use random value.
    type : str
        Define residential building type (single family or multi-
        family)
        Options:
        - 'sfh' : Single family house
        - 'mfh' : Multi family house

    Returns
    -------
    el_dem : float
        Electric energy demand per apartment in kWh/a
    """
    assert nb_occ > 0
    assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
    assert type in ['sfh', 'mfh']

    if el_random:
        # Draw a single sample from the statistical distribution
        return usunc.calc_sampling_el_demand_per_apartment(
            nb_samples=1,
            nb_persons=nb_occ,
            type=type)[0]

    # Average annual demand values per occupant count
    # Class D without hot water (Stromspiegel 2017)
    ref_demands = {'sfh': {1: 2500,
                           2: 3200,
                           3: 3900,
                           4: 4200,
                           5: 5400},
                   'mfh': {1: 1500,
                           2: 2200,
                           3: 2800,
                           4: 3200,
                           5: 4000}}
    return ref_demands[type][nb_occ]
def calc_dhw_dem_ap(nb_occ, dhw_random, type, delta_t=35, c_p_water=4182,
                    rho_water=995):
    """
    Calculate hot water energy demand per apartment per year
    in kWh/a (residential buildings, only)

    Parameters
    ----------
    nb_occ : int
        Number of occupants
    dhw_random : bool
        Defines, if random value should be chosen from statistics
        or if average value should be chosen. dhw_random == True means,
        use random value.
    type : str
        Define residential building type (single family or multi-
        family)
        Options:
        - 'sfh' : Single family house
        - 'mfh' : Multi family house
    delta_t : float, optional
        Temperature split of heated up water in Kelvin (default: 35)
    c_p_water : float, optional
        Specific heat capacity of water in J/kgK (default: 4182)
    rho_water : float, optional
        Density of water in kg/m3 (default: 995)

    Returns
    -------
    dhw_dem : float
        Hot water energy demand per apartment in kWh/a
    """
    assert nb_occ > 0
    assert nb_occ <= 5, 'Number of occupants cannot exceed 5 per ap.'
    assert type in ['sfh', 'mfh']

    if dhw_random:
        # Sample a daily hot water volume in liters per apartment from
        # the occupancy-dependent statistics
        vol_per_day = usunc.calc_sampling_dhw_per_apartment(
            nb_samples=1,
            nb_persons=nb_occ,
            b_type=type)[0]
        # Convert liters/day to kWh/a:
        # volume * 365 days * rho * c_p * delta_t, rescaled from
        # (liters, Joule) to (m3, kWh) via the 1000 * 3600 * 1000 factor
        return vol_per_day * 365 * rho_water * c_p_water * delta_t \
            / (1000 * 3600 * 1000)

    # Deterministic path: reference average annual hot water energy
    # demands in kWh/a, keyed by building type and indexed by number of
    # occupants (1..5)
    ref_dhw_demands = {'sfh': (500, 800, 1000, 1300, 1600),
                       'mfh': (500, 900, 1300, 1400, 2000)}

    return ref_dhw_demands[type][nb_occ - 1]
def run_city_generator(generation_mode, timestep,
year_timer, year_co2,
location,
th_gen_method,
el_gen_method, district_data, use_dhw=False,
dhw_method=1, try_path=None,
pickle_city_filename=None, do_save=True,
path_save_city=None, eff_factor=0.85,
show_city=False, altitude=55, dhw_volumen=None,
do_normalization=True, slp_manipulate=True,
call_teaser=False, teaser_proj_name='pycity',
do_log=True, log_path=None,
project_name='teaser_project',
air_vent_mode=1, vent_factor=0.5,
t_set_heat=20,
t_set_cool=70,
t_night=16,
vdi_sh_manipulate=False, city_osm=None,
el_random=False, dhw_random=False, prev_heat_dev=True,
season_mod=None, merge_windows=False, new_try=False):
"""
Function generates city district for user defined input. Generated
buildings consist of only one single zone!
Parameters
----------
generation_mode : int
Integer to define method to generate city district
(so far, only csv/txt file import has been implemented)
generation_mode = 0: Load data from csv/txt file (tab seperated)
timestep : int
Timestep in seconds
year_timer : int
Chosen year of analysis
(influences initial day for profile generation)
year_co2 : int, optional
Chose year with specific emission factors
location : Tuple
(latitude, longitude) of the simulated system's position.
th_gen_method : int
Thermal load profile generation method
1 - Use SLP
2 - Load Modelica simulation output profile (only residential)
Method 2 is only used for residential buildings. For non-res.
buildings, SLPs are generated instead
3 - Use TEASER VDI 6007 core to simulate thermal loads
el_gen_method : int
Electrical generation method
1 - Use SLP
2 - Generate stochastic load profile (only valid for residential
building). Requires number of occupants.
district_data : ndarray
Numpy 2d-array with city district data (each column represents
different parameter, see annotations)
use_dhw : bool, optional
Defines if domestic hot water profiles should be generated.
(default: False)
dhw_method : int, optional
Defines method for dhw profile generation (default: 1)
Only relevant if use_dhw=True. Options:
- 1: Generate profiles via Annex 42
- 2: Generate stochastic dhw profiles
try_path : str, optional
Path to TRY weather file (default: None)
If set to None, uses default weather TRY file (2010, region 5)
pickle_city_filename : str, optional
Name for file, which should be pickled and saved, if no path is
handed over to save object to(default: None)
do_save : bool, optional
Defines, if city object instance should be saved as pickle file
(default: True)
path_save_city : str, optional
Path to save (pickle and dump) city object instance to (default: None)
If None is used, saves file to .../output/...
eff_factor : float, optional
Efficiency factor of thermal boiler system (default: 0.85)
show_city : bool, optional
Boolean to define if city district should be printed by matplotlib
after generation (default: False)
True: Print results
False: Do not print results
altitude : float, optional
Altitude of location in m (default: 55 - City of Bottrop)
dhw_volumen : float, optional
Volume of domestic hot water in liter per capita and day
(default: None).
do_normalization : bool, optional
Defines, if stochastic profile (el_gen_method=2) should be
normalized to given annualDemand value (default: True).
If set to False, annual el. demand depends on stochastic el. load
profile generation. If set to True, does normalization with
annualDemand
slp_manipulate : bool, optional
Defines, if thermal space heating SLP profile should be modified
(default: True). Only used for residential buildings!
Only relevant, if th_gen_method == 1
True - Do manipulation
False - Use original profile
Sets thermal power to zero in time spaces, where average daily outdoor
temperature is equal to or larger than 12 C. Rescales profile to
original demand value.
call_teaser : bool, optional
Defines, if teaser should be called to generate typeBuildings
(currently, residential typeBuildings only).
(default: False)
If set to True, generates typeBuildings and add them to building node
as attribute 'type_building'
teaser_proj_name : str, optional
TEASER project name (default: 'pycity'). Only relevant, if call_teaser
is set to True
do_log : bool, optional
Defines, if log file of inputs should be generated (default: True)
log_path : str, optional
Path to log file (default: None). If set to None, saves log to
.../output
air_vent_mode : int
Defines method to generation air exchange rate for VDI 6007 simulation
Options:
0 : Use constant value (vent_factor in 1/h)
1 : Use deterministic, temperature-dependent profile
2 : Use stochastic, user-dependent profile
vent_factor : float, optional
Ventilation rate factor in 1/h (default: 0.5). Only used, if
array_vent_rate is None (otherwise, array_vent_rate array is used)
t_set_heat : float, optional
Heating set temperature in degree Celsius. If temperature drops below
t_set_heat, model is going to be heated up. (default: 20)
(Related to constraints for res. buildings in DIN V 18599)
t_set_cool : float, optional
Cooling set temperature in degree Celsius. If temperature rises above
t_set_cool, model is going to be cooled down. (default: 70)
t_night : float, optional
Night set back temperature in degree Celsius (default: 16)
(Related to constraints for res. buildings in DIN V 18599)
project_name : str, optional
TEASER project name (default: 'teaser_project')
vdi_sh_manipulate : bool, optional
Defines, if VDI 6007 thermal space heating load curve should be
normalized to match given annual space heating demand in kWh
(default: False)
el_random : bool, optional
Defines, if annual, eletrical demand value for normalization of
el. load profile should randomly diverge from reference value
within specific boundaries (default: False).
If False: Use reference value for normalization
If True: Allow generating values that is different from reference value
dhw_random : bool, optional
Defines, if hot water volume per person and day value should be
randomized by choosing value from gaussian distribution (20 %
standard deviation) (default: False)
If True: Randomize value
If False: Use reference value
prev_heat_dev : bool, optional
Defines, if heating devices should be prevented within chosen
appliances (default: True). If set to True, DESWH, E-INST,
Electric shower, Storage heaters and Other electric space heating
are set to zero. Only relevant for el_gen_method == 2
season_mod : float, optional
Float to define rescaling factor to rescale annual lighting power curve
with cosine wave to increase winter usage and decrease summer usage.
Reference is maximum lighting power (default: None). If set to None,
do NOT perform rescaling with cosine wave
merge_windows : bool, optional
Defines TEASER project setting for merge_windows_calc
(default: False). If set to False, merge_windows_calc is set to False.
If True, Windows are merged into wall resistances.
new_try : bool, optional
Defines, if TRY dataset have been generated after 2017 (default: False)
If False, assumes that TRY dataset has been generated before 2017.
If True, assumes that TRY dataset has been generated after 2017 and
belongs to the new TRY classes. This is important for extracting
the correct values from the TRY dataset!
Returns
-------
city_object : object
City object of pycity_calc
Annotations
-----------
Non-residential building loads are automatically generated via SLP
(even if el_gen_method is set to 2). Furthermore, dhw profile generation
is automatically neglected (only valid for residential buildings)
Electrical load profiles of residential buildings without occupants
are automatically generated via SLP (even if el_gen_method is set to 2)
File structure (district_data np.array)
Columns:
1: id (int)
2: x in m (float)
3: y in m (float)
4: building_type (int, e.g. 0 for residential building)
5: net floor area in m2 (float)
6: Year of construction (int, optional)
7: Year of modernization (int, optional)
8: Annual (final) thermal energy demand in kWh (float, optional)
For residential: space heating, only!
For non-residential: Space heating AND hot water! (SLP usage)
9: Annual electrical energy demand in kWh (float, optional)
10: Usable pv roof area in m2 (float, optional)
11: Number of apartments (int, optional)
12: Total number of occupants (int, optional)
13: Number of floors above the ground (int, optional)
14: Average Height of floors (float, optional)
15: If building has a central AHU or not (boolean, optional)
16: Residential layout (int, optional, e.g. 0 for compact)
17: Neighbour Buildings (int, optional); 0 - free standing; 1 - Double house; 2 - Row house;
18: Type of attic (int, optional, e.g. 0 for flat roof); 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
19: Type of basement (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
20: Dormer (int, optional, 0: no dormer/ 1: dormer)
21: Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
22: Method_3_nb (for usage of measured, weekly non-res. el. profile
(optional) (0 to 4)
23: Method_4_nb (for usage of measured, annual non-res. el. profile
(optional) (0 - 2)
method_3_type : str, optional
Defines type of profile for method=3 (default: None)
Options:
0 - 'food_pro': Food production
1 - 'metal': Metal company
2 - 'rest': Restaurant (with large cooling load)
3 - 'sports': Sports hall
4 - 'repair': Repair / metal shop
method_4_type : str, optional
Defines type of profile for method=4 (default: None)
0 - 'metal_1' : Metal company with smooth profile
1 - 'metal_2' : Metal company with fluctuation in profile
2 - 'warehouse' : Warehouse
"""
assert eff_factor > 0, 'Efficiency factor has to be larger than zero.'
assert eff_factor <= 1, 'Efficiency factor cannot increase value 1.'
if dhw_volumen is not None: # pragma: no cover
assert dhw_volumen >= 0, 'Hot water volume cannot be below zero.'
if generation_mode == 1: # pragma: no cover
assert city_osm is not None, 'Generation mode 1 requires city object!'
if vdi_sh_manipulate is True and th_gen_method == 3: # pragma: no cover
msg = 'Simulated profiles of VDI 6007 call (TEASER --> ' \
'space heating) is going to be normalized with annual thermal' \
' space heating demand values given by user!'
warnings.warn(msg)
if do_log: # pragma: no cover
# Write log file
# ################################################################
# Log file path
if log_path is None:
# If not existing, use default path
this_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(this_path, 'output', 'city_gen_log.txt')
log_file = open(log_path, mode='w')
log_file.write('PyCity_Calc city_generator.py log file')
log_file.write('\n############## Time and location ##############\n')
log_file.write('Date: ' + str(datetime.datetime.now()) + '\n')
log_file.write('generation_mode: ' + str(generation_mode) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('Year for timer: ' + str(year_timer) + '\n')
log_file.write('Year for CO2 emission factors: '
+ str(year_co2) + '\n')
log_file.write('Location: ' + str(location) + '\n')
log_file.write('altitude: ' + str(altitude) + '\n')
if generation_mode == 0:
log_file.write('Generation mode: csv/txt input, only.\n')
elif generation_mode == 1:
log_file.write('Generation mode: csv/txt plus city osm object.\n')
log_file.write('\n############## Generation methods ##############\n')
log_file.write('th_gen_method: ' + str(th_gen_method) + '\n')
if th_gen_method == 1:
log_file.write('Manipulate SLP: ' + str(slp_manipulate) + '\n')
elif th_gen_method == 3:
log_file.write('t_set_heat: ' + str(t_set_heat) + '\n')
log_file.write('t_set_night: ' + str(t_night) + '\n')
log_file.write('t_set_cool: ' + str(t_set_cool) + '\n')
log_file.write('air_vent_mode: ' + str(air_vent_mode) + '\n')
log_file.write('vent_factor: ' + str(vent_factor) + '\n')
log_file.write('el_gen_method: ' + str(el_gen_method) + '\n')
log_file.write(
'Normalize el. profile: ' + str(do_normalization) + '\n')
log_file.write(
'Do random el. normalization: ' + str(el_random) + '\n')
log_file.write(
'Prevent el. heating devices for el load generation: '
'' + str(prev_heat_dev) + '\n')
log_file.write(
'Rescaling factor lighting power curve to implement seasonal '
'influence: ' + str(season_mod) + '\n')
log_file.write('use_dhw: ' + str(use_dhw) + '\n')
log_file.write('dhw_method: ' + str(dhw_method) + '\n')
log_file.write('dhw_volumen: ' + str(dhw_volumen) + '\n')
log_file.write(
'Do random dhw. normalization: ' + str(dhw_random) + '\n')
log_file.write('\n############## Others ##############\n')
log_file.write('try_path: ' + str(try_path) + '\n')
log_file.write('eff_factor: ' + str(eff_factor) + '\n')
log_file.write('timestep in seconds: ' + str(timestep) + '\n')
log_file.write('call_teaser: ' + str(call_teaser) + '\n')
log_file.write('teaser_proj_name: ' + str(teaser_proj_name) + '\n')
# Log file is closed, after pickle filename has been generated
# (see code below)
if generation_mode == 0 or generation_mode == 1:
# ##################################################################
# Load specific demand files
# Load specific thermal demand input data
spec_th_dem_res_building = load_data_file_with_spec_demand_data(
'RWI_res_building_spec_th_demand.txt')
start_year_column = (spec_th_dem_res_building[:, [0]])
# Reverse
start_year_column = start_year_column[::-1]
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
spec_el_dem_res_building = load_data_file_with_spec_demand_data(
'AGEB_res_building_spec_e_demand.txt')
"""
Columns:
1. Start year (int)
2. Final year (int)
3. Spec. thermal energy demand in kWh/m2*a (float)
"""
# ##################################################################
# Load specific electrical demand input data
# (depending on number of occupants)
spec_el_dem_res_building_per_person = \
load_data_file_with_spec_demand_data(
'Stromspiegel2017_spec_el_energy_demand.txt')
"""
Columns:
1. Number of persons (int) ( 1 - 5 SFH and 1 - 5 MFH)
2. Annual electrical demand in kWh/a (float)
3. Specific electrical demand per person in kWh/person*a (float)
"""
# ###################################################################
# Load specific demand data and slp types for
# non residential buildings
spec_dem_and_slp_non_res = load_data_file_with_spec_demand_data(
'Spec_demands_non_res.txt')
"""
Columns:
1. type_id (int)
2. type_name (string) # Currently 'nan', due to expected float
3. Spec. thermal energy demand in kWh/m2*a (float)
4. Spec. electrical energy demand in kWh/m2*a (float)
5. Thermal SLP type (int)
6. Electrical SLP type (int)
"""
# ###################################################################
# Generate city district
# Generate extended environment of pycity_calc
environment = generate_environment(timestep=timestep,
year_timer=year_timer,
year_co2=year_co2,
location=location,
try_path=try_path,
altitude=altitude,
new_try=new_try)
print('Generated environment object.\n')
if generation_mode == 0:
# Generate city object
# ############################################################
city_object = city.City(environment=environment)
print('Generated city object.\n')
else:
# Overwrite city_osm environment
print('Overwrite city_osm.environment with new environment')
city_osm.environment = environment
city_object = city_osm
# Check if district_data only holds one entry for single building
# In this case, has to be processed differently
if district_data.ndim > 1:
multi_data = True
else: # Only one entry (single building)
multi_data = False
# If multi_data is false, loop below is going to be exited with
# a break statement at the end.
# Generate dummy node id and thermal space heating demand dict
dict_id_vdi_sh = {}
# Loop over district_data
# ############################################################
for i in range(len(district_data)):
if multi_data:
# Extract data out of input file
curr_id = int(
district_data[i][0]) # id / primary key of building
curr_x = district_data[i][1] # x-coordinate in m
curr_y = district_data[i][2] # y-coordinate in m
curr_build_type = int(
district_data[i][3]) # building type nb (int)
curr_nfa = district_data[i][4] # Net floor area in m2
curr_build_year = district_data[i][5] # Year of construction
curr_mod_year = district_data[i][
6] # optional (last year of modernization)
curr_th_e_demand = district_data[i][
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[i][
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[i][
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[i][
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[i][
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[i][
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[i][
13] # optional (Average Height of floors)
curr_central_ahu = district_data[i][
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[i][
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[i][
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[i][
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
curr_type_cellar = district_data[i][
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
curr_dormer = district_data[i][
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[i][
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[i][
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[i][
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
else: # Single entry
# Extract data out of input file
curr_id = int(district_data[0]) # id / primary key of building
curr_x = district_data[1] # x-coordinate in m
curr_y = district_data[2] # y-coordinate in m
curr_build_type = int(
district_data[3]) # building type nb (int)
curr_nfa = district_data[4] # Net floor area in m2
curr_build_year = district_data[5] # Year of construction
curr_mod_year = district_data[
6] # optional (last year of modernization)
curr_th_e_demand = district_data[
7] # optional: Final thermal energy demand in kWh
# For residential buildings: Space heating only!
# For non-residential buildings: Space heating AND hot water! (SLP)
curr_el_e_demand = district_data[
8] # optional (Annual el. energy demand in kWh)
curr_pv_roof_area = district_data[
9] # optional (Usable pv roof area in m2)
curr_nb_of_apartments = district_data[
10] # optional (Number of apartments)
curr_nb_of_occupants = district_data[
11] # optional (Total number of occupants)
curr_nb_of_floors = district_data[
12] # optional (Number of floors above the ground)
curr_avg_height_of_floors = district_data[
13] # optional (Average Height of floors)
curr_central_ahu = district_data[
14] # optional (If building has a central air handling unit (AHU) or not (boolean))
curr_res_layout = district_data[
15] # optional Residential layout (int, optional, e.g. 0 for compact)
curr_nb_of_neighbour_bld = district_data[
16] # optional Neighbour Buildings (int, optional)
curr_type_attic = district_data[
17] # optional Type of attic (int, optional, e.g. 0 for flat roof);
# 1 - Roof, non heated; 2 - Roof, partially heated; 3- Roof, fully heated;
curr_type_cellar = district_data[
18] # optional Type of basement
# (int, optional, e.g. 1 for non heated basement 0 - No basement; 1 - basement, non heated; 2 - basement, partially heated; 3- basement, fully heated;
curr_dormer = district_data[
19] # optional Dormer (int, optional, 0: no dormer/ 1: dormer)
curr_construction_type = district_data[
20] # optional Construction Type(heavy/light, optional) (0 - heavy; 1 - light)
curr_method_3_nb = district_data[
21] # optional Method_3_nb (for usage of measured, weekly non-res. el. profile
curr_method_4_nb = district_data[
22] # optional Method_4_nb (for usage of measured, annual non-res. el. profile
print('Process building', curr_id)
print('########################################################')
# Assert functions
# ############################################################
assert curr_build_type >= 0
assert curr_nfa > 0
for m in range(5, 9):
if multi_data:
if district_data[i][m] is not None:
assert district_data[i][m] > 0
else:
if district_data[m] is not None:
assert district_data[m] > 0
if curr_nb_of_apartments is not None:
assert curr_nb_of_apartments > 0
# Convert to int
curr_nb_of_apartments = int(curr_nb_of_apartments)
if curr_nb_of_occupants is not None:
assert curr_nb_of_occupants > 0
# Convert curr_nb_of_occupants from float to int
curr_nb_of_occupants = int(curr_nb_of_occupants)
if (curr_nb_of_occupants is not None
and curr_nb_of_apartments is not None):
assert curr_nb_of_occupants / curr_nb_of_apartments <= 5, (
'Average share of occupants per apartment should ' +
'not exceed 5 persons! (Necessary for stochastic, el.' +
'profile generation.)')
if curr_method_3_nb is not None:
curr_method_3_nb >= 0
if curr_method_4_nb is not None:
curr_method_4_nb >= 0
if curr_build_type == 0 and curr_nb_of_apartments is None: # pragma: no cover
# Define single apartment, if nb of apartments is unknown
msg = 'Building ' + str(curr_id) + ' is residential, but' \
' does not have a number' \
' of apartments. Going' \
' to set nb. to 1.'
warnings.warn(msg)
curr_nb_of_apartments = 1
if (curr_build_type == 0 and curr_nb_of_occupants is None
and use_dhw and dhw_method == 2):
raise AssertionError('DHW profile cannot be generated' +
'for residential building without' +
'occupants (stochastic mode).' +
'Please check your input file ' +
'(missing number of occupants) ' +
'or disable dhw generation.')
# Check if TEASER inputs are defined
if call_teaser or th_gen_method == 3:
if curr_build_type == 0: # Residential
assert curr_nb_of_floors is not None
assert curr_avg_height_of_floors is not None
assert curr_central_ahu is not None
assert curr_res_layout is not None
assert curr_nb_of_neighbour_bld is not None
assert curr_type_attic is not None
assert curr_type_cellar is not None
assert curr_dormer is not None
assert curr_construction_type is not None
if curr_nb_of_floors is not None:
assert curr_nb_of_floors > 0
if curr_avg_height_of_floors is not None:
assert curr_avg_height_of_floors > 0
if curr_central_ahu is not None:
assert 0 <= curr_central_ahu <= 1
if curr_res_layout is not None:
assert 0 <= curr_res_layout <= 1
if curr_nb_of_neighbour_bld is not None:
assert 0 <= curr_nb_of_neighbour_bld <= 2
if curr_type_attic is not None:
assert 0 <= curr_type_attic <= 3
if curr_type_cellar is not None:
assert 0 <= curr_type_cellar <= 3
if curr_dormer is not None:
assert 0 <= curr_dormer <= 1
if curr_construction_type is not None:
assert 0 <= curr_construction_type <= 1
# Check building type (residential or non residential)
# #-------------------------------------------------------------
if curr_build_type == 0: # Is residential
print('Residential building')
# Get spec. net therm. demand value according to last year
# of modernization or build_year
# If year of modernization is defined, use curr_mod_year
if curr_mod_year is not None:
use_year = int(curr_mod_year)
else: # Use year of construction
use_year = int(curr_build_year)
# Get specific, thermal energy demand (based on use_year)
for j in range(len(start_year_column)):
if use_year >= start_year_column[j]:
curr_spec_th_demand = spec_th_dem_res_building[len(
spec_th_dem_res_building) - 1 - j][2]
break
# # Get spec. electr. demand
# if curr_nb_of_occupants is None:
# # USE AGEB values, if no number of occupants is given
# # Set specific demand value in kWh/m2*a
# curr_spec_el_demand = spec_el_dem_res_building[1]
# # Only valid for array like [2012 38.7]
# else:
# # Use Stromspiegel 2017 values
# # Calculate specific electric demand values depending
# # on number of occupants
#
# if curr_nb_of_apartments == 1:
# btype = 'sfh'
# elif curr_nb_of_apartments > 1:
# btype = 'mfh'
#
# # Average occupancy number per apartment
# curr_av_occ_per_app = \
# curr_nb_of_occupants / curr_nb_of_apartments
# print('Average number of occupants per apartment')
# print(round(curr_av_occ_per_app, ndigits=2))
#
# if curr_av_occ_per_app <= 5 and curr_av_occ_per_app > 0:
# # Correctur factor for non-int. av. number of
# # occupants (#19)
#
# # Divide annual el. energy demand with net floor area
# if btype == 'sfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1
# row_idx_high = math.floor(curr_av_occ_per_app) - 1
# elif btype == 'mfh':
# row_idx_low = math.ceil(curr_av_occ_per_app) - 1 \
# + 5
# row_idx_high = math.floor(curr_av_occ_per_app) - 1 \
# + 5
#
# cur_spec_el_dem_per_occ_high = \
# spec_el_dem_res_building_per_person[row_idx_high][2]
# cur_spec_el_dem_per_occ_low = \
# spec_el_dem_res_building_per_person[row_idx_low][2]
#
# print('Chosen reference spec. el. demands per person '
# 'in kWh/a (high and low value):')
# print(cur_spec_el_dem_per_occ_high)
# print(cur_spec_el_dem_per_occ_low)
#
# delta = round(curr_av_occ_per_app, 0) - \
# curr_av_occ_per_app
#
# if delta < 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# elif delta > 0:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_low + \
# (cur_spec_el_dem_per_occ_high -
# cur_spec_el_dem_per_occ_low) * delta
# else:
# curr_spec_el_dem_occ = cur_spec_el_dem_per_occ_high
#
# # print('Calculated spec. el. demand per person in '
# # 'kWh/a:')
# # print(round(curr_spec_el_dem_occ, ndigits=2))
#
# # Specific el. demand per person (dependend on av.
# # number of occupants in each apartment)
# # --> Multiplied with number of occupants
# # --> Total el. energy demand in kWh
# # --> Divided with net floor area
# # --> Spec. el. energy demand in kWh/a
#
# curr_spec_el_demand = \
# curr_spec_el_dem_occ * curr_nb_of_occupants \
# / curr_nfa
#
# # print('Spec. el. energy demand in kWh/m2:')
# # print(curr_spec_el_demand)
#
# else:
# raise AssertionError('Invalid number of occupants')
# if el_random:
# if curr_nb_of_occupants is None:
# # Randomize curr_spec_el_demand with normal distribution
# # with curr_spec_el_demand as mean and 10 % standard dev.
# curr_spec_el_demand = \
# np.random.normal(loc=curr_spec_el_demand,
# scale=0.10 * curr_spec_el_demand)
# else:
# # Randomize rounding up and down of curr_av_occ_per_ap
# if round(curr_av_occ_per_app) > curr_av_occ_per_app:
# # Round up
# delta = round(curr_av_occ_per_app) - \
# curr_av_occ_per_app
# prob_r_up = 1 - delta
# rnb = random.random()
# if rnb < prob_r_up:
# use_occ = math.ceil(curr_av_occ_per_app)
# else:
# use_occ = math.floor(curr_av_occ_per_app)
#
# else:
# # Round down
# delta = curr_av_occ_per_app - \
# round(curr_av_occ_per_app)
# prob_r_down = 1 - delta
# rnb = random.random()
# if rnb < prob_r_down:
# use_occ = math.floor(curr_av_occ_per_app)
# else:
# use_occ = math.ceil(curr_av_occ_per_app)
#
# sample_el_per_app = \
# usunc.calc_sampling_el_demand_per_apartment(nb_samples=1,
# nb_persons=use_occ,
# type=btype)[0]
#
# # Divide sampled el. demand per apartment through
# # number of persons of apartment (according to
# # Stromspiegel 2017) and multiply this value with
# # actual number of persons in building to get
# # new total el. energy demand. Divide this value with
# # net floor area to get specific el. energy demand
# curr_spec_el_demand = \
# (sample_el_per_app / curr_av_occ_per_app) * \
# curr_nb_of_occupants / curr_nfa
# conversion of the construction_type from int to str
if curr_construction_type == 0:
new_curr_construction_type = 'heavy'
elif curr_construction_type == 1:
new_curr_construction_type = 'light'
else:
new_curr_construction_type = 'heavy'
# #-------------------------------------------------------------
else: # Non-residential
print('Non residential')
# Get spec. demands and slp types according to building_type
curr_spec_th_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][2]
curr_spec_el_demand = \
spec_dem_and_slp_non_res[curr_build_type - 2][3]
curr_th_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][4]
curr_el_slp_type = \
spec_dem_and_slp_non_res[curr_build_type - 2][5]
# Convert slp type integers into strings
curr_th_slp_type = convert_th_slp_int_and_str(curr_th_slp_type)
curr_el_slp_type = convert_el_slp_int_and_str(curr_el_slp_type)
# If curr_el_e_demand is not known, calculate it via spec.
# demand
if curr_el_e_demand is None:
curr_el_e_demand = curr_spec_el_demand * curr_nfa
# #-------------------------------------------------------------
# If curr_th_e_demand is known, recalc spec e. demand
if curr_th_e_demand is not None:
# Calc. spec. net thermal energy demand with efficiency factor
curr_spec_th_demand = eff_factor * curr_th_e_demand / curr_nfa
else:
# Spec. final energy demand is given, recalculate it to
# net thermal energy demand with efficiency factor
curr_spec_th_demand *= eff_factor
# # If curr_el_e_demand is not known, calculate it via spec. demand
# if curr_el_e_demand is None:
# curr_el_e_demand = curr_spec_el_demand * curr_nfa
if th_gen_method == 1 or th_gen_method == 2 or curr_build_type != 0:
print('Used specific thermal demand value in kWh/m2*a:')
print(curr_spec_th_demand)
# #-------------------------------------------------------------
# Generate BuildingExtended object
if curr_build_type == 0: # Residential
if curr_nb_of_apartments > 1: # Multi-family house
building = generate_res_building_multi_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
nb_of_apartments=curr_nb_of_apartments,
use_dhw=use_dhw,
dhw_method=dhw_method,
total_number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
elif curr_nb_of_apartments == 1: # Single-family house
building = generate_res_building_single_zone(environment,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
th_gen_method=th_gen_method,
el_gen_method=el_gen_method,
use_dhw=use_dhw,
dhw_method=dhw_method,
number_occupants=curr_nb_of_occupants,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors,
neighbour_buildings=curr_nb_of_neighbour_bld,
residential_layout=curr_res_layout,
attic=curr_type_attic,
cellar=curr_type_cellar,
construction_type=new_curr_construction_type,
dormer=curr_dormer,
dhw_volumen=dhw_volumen,
do_normalization=do_normalization,
slp_manipulate=slp_manipulate,
curr_central_ahu=curr_central_ahu,
dhw_random=dhw_random,
prev_heat_dev=prev_heat_dev,
season_mod=season_mod)
else:
raise AssertionError('Wrong number of apartments')
else: # Non-residential
method_3_str = None
method_4_str = None
# Convert curr_method numbers, if not None
if curr_method_3_nb is not None:
method_3_str = \
convert_method_3_nb_into_str(int(curr_method_3_nb))
if curr_method_4_nb is not None:
method_4_str = \
convert_method_4_nb_into_str(int(curr_method_4_nb))
building = generate_nonres_building_single_zone(environment,
th_slp_type=curr_th_slp_type,
net_floor_area=curr_nfa,
spec_th_demand=curr_spec_th_demand,
annual_el_demand=curr_el_e_demand,
el_slp_type=curr_el_slp_type,
build_year=curr_build_year,
mod_year=curr_mod_year,
build_type=curr_build_type,
pv_use_area=curr_pv_roof_area,
method_3_type=method_3_str,
method_4_type=method_4_str,
height_of_floors=curr_avg_height_of_floors,
nb_of_floors=curr_nb_of_floors
)
# Generate position shapely point
position = point.Point(curr_x, curr_y)
if generation_mode == 0:
# Add building to city object
id = city_object.add_extended_building(
extended_building=building,
position=position, name=curr_id)
elif generation_mode == 1:
# Add building as entity to corresponding building node
# Positions should be (nearly) equal
assert position.x - city_object.nodes[int(curr_id)][
'position'].x <= 0.1
assert position.y - city_object.nodes[int(curr_id)][
'position'].y <= 0.1
city_object.nodes[int(curr_id)]['entity'] = building
id = curr_id
# Save annual thermal net heat energy demand for space heating
# to dict (used for normalization with VDI 6007 core)
dict_id_vdi_sh[id] = curr_spec_th_demand * curr_nfa
print('Finished processing of building', curr_id)
print('#######################################################')
print()
# If only single building should be processed, break loop
if multi_data is False:
break
# #-------------------------------------------------------------
print('Added all buildings with data to city object.')
# VDI 6007 simulation to generate space heating load curves
# Overwrites existing heat load curves (and annual heat demands)
if th_gen_method == 3:
print('Perform VDI 6007 space heating load simulation for every'
' building')
if el_gen_method == 1:
# Skip usage of occupancy and electrial load profiles
# as internal loads within VDI 6007 core
requ_profiles = False
else:
requ_profiles = True
tusage.calc_and_add_vdi_6007_loads_to_city(city=city_object,
air_vent_mode=air_vent_mode,
vent_factor=vent_factor,
t_set_heat=t_set_heat,
t_set_cool=t_set_cool,
t_night=t_night,
alpha_rad=None,
project_name=project_name,
requ_profiles=requ_profiles)
# Set call_teaser to False, as it is already included
# in calc_and_add_vdi_6007_loads_to_city
call_teaser = False
if vdi_sh_manipulate:
# Normalize VDI 6007 load curves to match given annual
# thermal space heating energy demand
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
# If node_type is building
if city_object.nodes[n]['node_type'] == 'building':
# If entity is kind building
if city_object.nodes[n][
'entity']._kind == 'building':
# Given value (user input)
ann_sh = dict_id_vdi_sh[n]
# Building pointer
curr_b = city_object.nodes[n]['entity']
# Current value on object
curr_sh = curr_b.get_annual_space_heat_demand()
norm_factor = ann_sh / curr_sh
# Do normalization
# Loop over apartments
for apart in curr_b.apartments:
# Normalize apartment space heating load
apart.demandSpaceheating.loadcurve \
*= norm_factor
print('Generation results:')
print('###########################################')
for n in city_object.nodes():
if 'node_type' in city_object.nodes[n]:
if city_object.nodes[n]['node_type'] == 'building':
if 'entity' in city_object.nodes[n]:
if city_object.nodes[n]['entity']._kind == 'building':
print('Results of building: ', n)
print('################################')
print()
curr_b = city_object.nodes[n]['entity']
sh_demand = curr_b.get_annual_space_heat_demand()
el_demand = curr_b.get_annual_el_demand()
dhw_demand = curr_b.get_annual_dhw_demand()
nfa = curr_b.net_floor_area
print('Annual space heating demand in kWh:')
print(sh_demand)
if nfa is not None and nfa != 0:
print(
'Specific space heating demand in kWh/m2:')
print(sh_demand / nfa)
print()
print('Annual electric demand in kWh:')
print(el_demand)
if nfa is not None and nfa != 0:
print('Specific electric demand in kWh/m2:')
print(el_demand / nfa)
nb_occ = curr_b.get_number_of_occupants()
if nb_occ is not None and nb_occ != 0:
print('Specific electric demand in kWh'
' per person and year:')
print(el_demand / nb_occ)
print()
print('Annual hot water demand in kWh:')
print(dhw_demand)
if nfa is not None and nfa != 0:
print('Specific hot water demand in kWh/m2:')
print(dhw_demand / nfa)
volume_year = dhw_demand * 1000 * 3600 / (
4200 * 35)
volume_day = volume_year / 365
if nb_occ is not None and nb_occ != 0:
v_person_day = \
volume_day / nb_occ
print('Hot water volume per person and day:')
print(v_person_day)
print()
# Create and add TEASER type_buildings to every building node
if call_teaser:
# Create TEASER project
project = tusage.create_teaser_project(name=teaser_proj_name,
merge_windows=merge_windows)
# Generate typeBuildings and add to city
tusage.create_teaser_typecity(project=project,
city=city_object,
generate_Output=False)
if do_save: # pragma: no cover
if path_save_city is None:
if pickle_city_filename is None:
msg = 'If path_save_city is None, pickle_city_filename' \
'cannot be None! Instead, filename has to be ' \
'defined to be able to save city object.'
raise AssertionError
this_path = os.path.dirname(os.path.abspath(__file__))
path_save_city = os.path.join(this_path, 'output',
pickle_city_filename)
try:
# Pickle and dump city objects
pickle.dump(city_object, open(path_save_city, 'wb'))
print('Pickled and dumped city object to: ')
print(path_save_city)
except:
warnings.warn('Could not pickle and save city object')
if do_log: # pragma: no cover
if pickle_city_filename is not None:
log_file.write('pickle_city_filename: ' +
str(pickle_city_filename)
+ '\n')
print('Wrote log file to: ' + str(log_path))
# Close log file
log_file.close()
# Visualize city
if show_city: # pragma: no cover
# Plot city district
try:
citvis.plot_city_district(city=city_object,
plot_street=False)
except:
warnings.warn('Could not plot city district.')
return city_object
if __name__ == '__main__':
    # Script entry point: collect the user configuration below, load the
    # district input file and (optionally) a pickled OSM-based city object,
    # then run the city district generation with the chosen settings.
    this_path = os.path.dirname(os.path.abspath(__file__))
    # User inputs #########################################################
    # Choose generation mode
    # ######################################################
    # 0 - Use csv/txt input to generate city district
    # 1 - Use csv/txt input file to enrich existing city object, based on
    # osm call (city object should hold nodes, but no entities. City
    # generator is going to add building, apartment and load entities to
    # building nodes
    generation_mode = 0
    # Generate environment
    # ######################################################
    year_timer = 2017
    year_co2 = 2017
    timestep = 3600  # Timestep in seconds
    # location = (51.529086, 6.944689)  # (latitude, longitude) of Bottrop
    location = (50.775346, 6.083887)  # (latitude, longitude) of Aachen
    altitude = 266  # Altitude of location in m (Aachen)
    # Weather path
    try_path = None
    # If None, used default TRY (region 5, 2010)
    new_try = False
    # new_try has to be set to True, if you want to use TRY data of 2017
    # or newer! Else: new_try = False
    # Space heating load generation
    # ######################################################
    # Thermal generation method
    # 1 - SLP (standardized load profile)
    # 2 - Load and rescale Modelica simulation profile
    # (generated with TRY region 12, 2010)
    # 3 - VDI 6007 calculation (requires el_gen_method = 2)
    th_gen_method = 3
    # For non-residential buildings, SLPs are generated automatically.
    # Manipulate thermal slp to fit to space heating demand?
    slp_manipulate = False
    # True - Do manipulation
    # False - Use original profile
    # Only relevant, if th_gen_method == 1
    # Sets thermal power to zero in time spaces, where average daily outdoor
    # temperature is equal to or larger than 12 C. Rescales profile to
    # original demand value.
    # Manipulate vdi space heating load to be normalized to given annual net
    # space heating demand in kWh
    vdi_sh_manipulate = False
    # Electrical load generation
    # ######################################################
    # Choose electric load profile generation method (1 - SLP; 2 - Stochastic)
    # Stochastic profile is only generated for residential buildings,
    # which have a defined number of occupants (otherwise, SLP is used)
    el_gen_method = 2
    # If user defindes method_3_nb or method_4_nb within input file
    # (only valid for non-residential buildings), SLP will not be used.
    # Instead, corresponding profile will be loaded (based on measurement
    # data, see ElectricalDemand.py within pycity)
    # Do normalization of el. load profile
    # (only relevant for el_gen_method=2).
    # Rescales el. load profile to expected annual el. demand value in kWh
    do_normalization = True
    # Randomize electrical demand value (residential buildings, only)
    el_random = True
    # Prevent usage of electrical heating and hot water devices in
    # electrical load generation (only relevant if el_gen_method == 2)
    prev_heat_dev = True
    # True: Prevent electrical heating device usage for profile generation
    # False: Include electrical heating devices in electrical load generation
    # Use cosine function to increase winter lighting usage and reduce
    # summer lighting usage in richadson el. load profiles
    # season_mod is factor, which is used to rescale cosine wave with
    # lighting power reference (max. lighting power)
    season_mod = 0.3
    # If None, do not use cosine wave to estimate seasonal influence
    # Else: Define float
    # (only relevant if el_gen_method == 2)
    # Hot water profile generation
    # ######################################################
    # Generate DHW profiles? (True/False)
    use_dhw = True  # Only relevant for residential buildings
    # DHW generation method? (1 - Annex 42; 2 - Stochastic profiles)
    # Choice of Anex 42 profiles NOT recommended for multiple builings,
    # as profile stays the same and only changes scaling.
    # Stochastic profiles require defined nb of occupants per residential
    # building
    dhw_method = 2  # Only relevant for residential buildings
    # Define dhw volume per person and day (use_dhw=True)
    dhw_volumen = None  # Only relevant for residential buildings
    # Randomize choosen dhw_volume reference value by selecting new value
    dhw_random = True
    # Input file names and pathes
    # ######################################################
    # Define input data filename
    filename = 'city_3_buildings.txt'
    # filename = 'city_clust_simple.txt'
    # filename = 'aachen_forsterlinde_mod_6.txt'
    # filename = 'aachen_frankenberg_mod_6.txt'
    # filename = 'aachen_huenefeld_mod_6.txt'
    # filename = 'aachen_kronenberg_mod_8.txt'
    # filename = 'aachen_preusweg_mod_8.txt'
    # filename = 'aachen_tuerme_mod_6.txt'
    # Output filename
    pickle_city_filename = filename[:-4] + '.pkl'
    # For generation_mode == 1:
    # city_osm_input = None
    # city_osm_input = 'aachen_forsterlinde_mod_7.pkl'
    city_osm_input = 'aachen_frankenberg_mod_7.pkl'
    # city_osm_input = 'aachen_huenefeld_mod_7.pkl'
    # city_osm_input = 'aachen_kronenberg_mod_7.pkl'
    # city_osm_input = 'aachen_preusweg_mod_7.pkl'
    # city_osm_input = 'aachen_tuerme_mod_7.pkl'
    # Pickle and dump city object instance?
    do_save = True
    # Path to save city object instance to
    path_save_city = None
    # If None, uses .../output/...
    # Efficiency factor of thermal energy systems
    # Used to convert input values (final energy demand) to net energy demand
    eff_factor = 1
    # For VDI 6007 simulation (th_gen_method == 3)
    # #####################################
    t_set_heat = 20  # Heating set temperature in degree Celsius
    t_set_night = 16  # Night set back temperature in degree Celsius
    t_set_cool = 70  # Cooling set temperature in degree Celsius
    # NOTE(review): a 70 degC cooling set point effectively disables cooling
    # in the simulation - confirm this is intended.
    # Air exchange rate (required for th_gen_method = 3 (VDI 6007 sim.))
    air_vent_mode = 2
    # int; Define mode for air ventilation rate generation
    # 0 : Use constant value (vent_factor in 1/h)
    # 1 : Use deterministic, temperature-dependent profile
    # 2 : Use stochastic, user-dependent profile
    # False: Use static ventilation rate value
    vent_factor = 0.3  # Constant. ventilation rate
    # (only used, if air_vent_mode is 0. Otherwise, estimate vent_factor
    # based on last year of modernization)
    # TEASER typebuilding generation
    # ######################################################
    # Use TEASER to generate typebuildings?
    call_teaser = False
    teaser_proj_name = filename[:-4]
    # Requires additional attributes (such as nb_of_floors, net_floor_area..)
    merge_windows = False
    # merge_windows : bool, optional
    # Defines TEASER project setting for merge_windows_calc
    # (default: False). If set to False, merge_windows_calc is set to False.
    # If True, Windows are merged into wall resistances.
    txt_path = os.path.join(this_path, 'input', filename)
    if generation_mode == 1:
        path_city_osm_in = os.path.join(this_path, 'input', city_osm_input)
    # Path for log file
    # NOTE(review): log_f_name is an unused alias of log_file_name, and the
    # str() call is redundant ('log_' + filename is already a str).
    log_f_name = log_file_name = str('log_' + filename)
    log_f_path = os.path.join(this_path, 'output', log_file_name)
    # End of user inputs ################################################
    print('Run city generator for ', filename)
    # Validate the combination of configuration switches chosen above.
    assert generation_mode in [0, 1]
    if generation_mode == 1:
        assert city_osm_input is not None
    if air_vent_mode == 1 or air_vent_mode == 2:
        assert el_gen_method == 2, 'air_vent_mode 1 and 2 require occupancy' \
                                   ' profiles!'
    # Load district_data file
    district_data = get_district_data_from_txt(txt_path)
    if generation_mode == 1:
        # Load city input file
        city_osm = pickle.load(open(path_city_osm_in, mode='rb'))
    else:
        # Dummy value
        city_osm = None
    # Generate city district
    city = run_city_generator(generation_mode=generation_mode,
                              timestep=timestep,
                              year_timer=year_timer,
                              year_co2=year_co2,
                              location=location,
                              th_gen_method=th_gen_method,
                              el_gen_method=el_gen_method, use_dhw=use_dhw,
                              dhw_method=dhw_method,
                              district_data=district_data,
                              pickle_city_filename=pickle_city_filename,
                              eff_factor=eff_factor, show_city=True,
                              try_path=try_path, altitude=altitude,
                              dhw_volumen=dhw_volumen,
                              do_normalization=do_normalization,
                              slp_manipulate=slp_manipulate,
                              call_teaser=call_teaser,
                              teaser_proj_name=teaser_proj_name,
                              air_vent_mode=air_vent_mode,
                              vent_factor=vent_factor,
                              t_set_heat=t_set_heat,
                              t_set_cool=t_set_cool,
                              t_night=t_set_night,
                              vdi_sh_manipulate=vdi_sh_manipulate,
                              city_osm=city_osm, el_random=el_random,
                              dhw_random=dhw_random,
                              prev_heat_dev=prev_heat_dev,
                              log_path=log_f_path,
                              season_mod=season_mod,
                              merge_windows=merge_windows,
                              new_try=new_try,
                              path_save_city=path_save_city,
                              do_save=do_save)
| 44.482782 | 173 | 0.52153 |
830541d7c666d087b745fabc733309dfe46fdeb0 | 14,092 | py | Python | cpgan_data.py | basilevh/object-discovery-cp-gan | 170cdcf14aa0b5f7258d15e177485ee4fd697afb | [
"MIT"
] | 14 | 2020-06-04T15:50:38.000Z | 2021-10-03T02:59:54.000Z | cpgan_data.py | basilevh/object-discovery-cp-gan | 170cdcf14aa0b5f7258d15e177485ee4fd697afb | [
"MIT"
] | null | null | null | cpgan_data.py | basilevh/object-discovery-cp-gan | 170cdcf14aa0b5f7258d15e177485ee4fd697afb | [
"MIT"
] | 1 | 2021-01-19T15:50:47.000Z | 2021-01-19T15:50:47.000Z | # Basile Van Hoorick, March 2020
# Common code for PyTorch implementation of Copy-Pasting GAN
import copy
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os, platform, time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from torch.utils.data import Dataset
from tqdm import tqdm
def read_image_robust(img_path, monochromatic=False):
    ''' Returns an image that meets conditions along with a success flag, in order to avoid crashing. '''
    try:
        pixels = np.array(Image.open(img_path)).copy()
        valid = True
        if np.any(np.array(pixels.strides) < 0):
            # Array still has a negative stride after the copy => reject.
            valid = False
        elif not monochromatic and (pixels.ndim != 3 or pixels.shape[2] != 3):
            # Caller asked for an RGB image but this one is not HxWx3.
            valid = False
        elif monochromatic:
            # Append a channel axis: one channel <=> only one ground truth.
            pixels = pixels[:, :, np.newaxis]
    except IOError:
        # Unreadable (probably corrupt) file: signal failure instead of raising.
        pixels, valid = None, False
    return pixels, valid
def paint_squares(image, noisy=False, channels=10):
    '''
    Paints one or more 9x9 squares at random locations to create an
    artificial foreground image, plus one ground-truth mask per object.

    Returns (painted_image, masks of shape (H, W, channels), object_count).
    '''
    height, width = image.shape[0], image.shape[1]
    canvas = image.copy()  # leave the caller's background untouched
    object_count = np.random.randint(1, 5)  # 1 to 4 squares, inclusive
    masks = np.zeros((height, width, channels), dtype=np.uint8)
    side = 9
    for idx in range(object_count):
        left = np.random.randint(0, width - side + 1)
        top = np.random.randint(0, height - side + 1)
        right = left + side
        bottom = top + side
        masks[top:bottom, left:right, idx] = 255
        if noisy:
            # Every pixel gets a fully saturated (extremal) channel value.
            canvas[top:bottom, left:right] = np.random.choice([0, 255], (side, side, 3))
        else:
            # One fixed (not necessarily saturated) color for the whole square.
            canvas[top:bottom, left:right] = np.random.randint(0, 256, 3)
    return canvas, masks, object_count
def create_random_gfake_mask(width, height):
    ''' See Appendix D. Samples a random polygon and rasterizes it into a uint8 mask. '''
    center_x, center_y = np.random.rand(2) * 0.8 + 0.1
    num_verts = np.random.randint(4, 7)
    # TODO possible improvement: allow more vertices?
    # TODO possible improvement: encourage convex shapes (currently many "sharp" objects)
    radii = np.random.rand(num_verts) * 0.4 + 0.1
    angles = np.sort(np.random.rand(num_verts)) * 2.0 * np.pi
    # Polar -> cartesian pixel coordinates: [(x1, y1), (x2, y2), ...]
    vertices = [(int(width * (center_x + r * np.cos(a))),
                 int(height * (center_y + r * np.sin(a))))
                for r, a in zip(radii, angles)]
    canvas = Image.new('L', (width, height), 0)
    ImageDraw.Draw(canvas).polygon(vertices, outline=1, fill=255)
    mask = np.array(canvas, dtype='uint8')
    assert mask.shape == (height, width)
    return mask
| 39.92068 | 154 | 0.590477 |
8305a58a05e7a9623ae618b46a183f5331e34e3b | 3,207 | py | Python | provision/env/lib/python3.6/site-packages/ansible/plugins/become/dzdo.py | brightkan/tukole-frontend | 45e1d82a4ae5a65e88e7434f67d4d1a88f462e96 | [
"MIT"
] | 1 | 2020-03-29T18:41:01.000Z | 2020-03-29T18:41:01.000Z | ansible/ansible/plugins/become/dzdo.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 7 | 2020-09-07T17:27:56.000Z | 2022-03-02T06:25:46.000Z | ansible/ansible/plugins/become/dzdo.py | SergeyCherepanov/ansible | 875711cd2fd6b783c812241c2ed7a954bf6f670f | [
"MIT"
] | 1 | 2020-10-30T12:48:24.000Z | 2020-10-30T12:48:24.000Z | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
become: dzdo
short_description: Centrify's Direct Authorize
description:
- This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
author: ansible (@core)
version_added: "2.8"
options:
become_user:
description: User you 'become' to execute the task
ini:
- section: privilege_escalation
key: become_user
- section: dzdo_become_plugin
key: user
vars:
- name: ansible_become_user
- name: ansible_dzdo_user
env:
- name: ANSIBLE_BECOME_USER
- name: ANSIBLE_DZDO_USER
become_exe:
description: Sudo executable
default: dzdo
ini:
- section: privilege_escalation
key: become_exe
- section: dzdo_become_plugin
key: executable
vars:
- name: ansible_become_exe
- name: ansible_dzdo_exe
env:
- name: ANSIBLE_BECOME_EXE
- name: ANSIBLE_DZDO_EXE
become_flags:
description: Options to pass to dzdo
default: -H -S -n
ini:
- section: privilege_escalation
key: become_flags
- section: dzdo_become_plugin
key: flags
vars:
- name: ansible_become_flags
- name: ansible_dzdo_flags
env:
- name: ANSIBLE_BECOME_FLAGS
- name: ANSIBLE_DZDO_FLAGS
become_pass:
description: Options to pass to dzdo
required: False
vars:
- name: ansible_become_password
- name: ansible_become_pass
- name: ansible_dzdo_pass
env:
- name: ANSIBLE_BECOME_PASS
- name: ANSIBLE_DZDO_PASS
ini:
- section: dzdo_become_plugin
key: password
"""
from ansible.plugins.become import BecomeBase
| 32.72449 | 131 | 0.558154 |
830713faff66a018b4d3b736c65a71173ebb4219 | 3,078 | py | Python | templates/php/functionsTest.py | anconaesselmann/LiveUnit | 8edebb49cb02fa898550cbafdf87af7fc22f106b | [
"MIT"
] | null | null | null | templates/php/functionsTest.py | anconaesselmann/LiveUnit | 8edebb49cb02fa898550cbafdf87af7fc22f106b | [
"MIT"
] | null | null | null | templates/php/functionsTest.py | anconaesselmann/LiveUnit | 8edebb49cb02fa898550cbafdf87af7fc22f106b | [
"MIT"
] | null | null | null | import unittest
import os
if __name__ == '__main__' and __package__ is None:
    # Fix: the original `from os import sys, path` only worked because the os
    # module happens to import sys internally; import the real top-level
    # module explicitly instead of relying on os's private re-export.
    import sys
    from os import path
    sys.path.append(path.abspath(path.join(__file__, "..", "..")))
    sys.path.append(path.abspath(path.join(__file__, "..", "..", "..", "classes_and_tests")))
from php.functions import *
from src.mocking.MockFileSystem import MockFileSystem
if __name__ == '__main__':
unittest.main() | 34.58427 | 100 | 0.649448 |
8307eb589ed701e9bef2d35aecb16eec594af392 | 5,629 | py | Python | app.py | opeyemibami/decision_support_system | 15ffdd795c8f2704b577a9c84db9dafb1fcf792d | [
"MIT"
] | 1 | 2021-10-31T13:07:24.000Z | 2021-10-31T13:07:24.000Z | app.py | opeyemibami/decision_support_system | 15ffdd795c8f2704b577a9c84db9dafb1fcf792d | [
"MIT"
] | null | null | null | app.py | opeyemibami/decision_support_system | 15ffdd795c8f2704b577a9c84db9dafb1fcf792d | [
"MIT"
] | 1 | 2022-02-03T13:12:59.000Z | 2022-02-03T13:12:59.000Z | import sys
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import efficientnet.keras as efn
import streamlit as st
import SessionState
from skimage.transform import resize
import skimage
import skimage.filters
import reportgenerator
import style
from keras.models import Model, load_model
st.set_option('deprecation.showPyplotGlobalUse', False)
model = load_model('classifier.h5')
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: {1000}px;
padding-top: {5}rem;
padding-right: {0}rem;
padding-left: {0}rem;
padding-bottom: {0}rem;
}}
.reportview-container .main {{
}}
[data-testid="stImage"] img {{
margin: 0 auto;
max-width: 500px;
}}
</style>
""",
unsafe_allow_html=True,
)
# main panel
logo = Image.open('dss_logo.png')
st.image(logo, width=None)
style.display_app_header(main_txt='Gleason Score Prediction for Prostate Cancer',
sub_txt='The intensity of prostate cancer metastasis in using artificial intelligence', is_sidebar=False)
# session state
ss = SessionState.get(page='home', run_model=False)
st.markdown('**Upload biopsy image to analyze**')
st.write('')
uploaded_file = st.file_uploader("Choose an image...", type=['png', 'jpg'])
med_opinion_list = ["The cancer cells look like healthy cells and PSA levels are low. However, cancer in this early stage is usually slow growing.",
"Well differentiated cells and PSA levels are medium. This stage also includes larger tumors found only in the prostate, as long as the cancer cells are still well differentiated. ",
"Moderately diffentiated cells and the PSA level is medium. The tumor is found only inside the prostate, and it may be large enough to be felt during DRE.",
"Moderately or poorly diffentiated cells and the PSA level is medium. The tumor is found only inside the prostate, and it may be large enough to be felt during DRE.",
"Poorly diffentiated cells. The cancer has spread beyond the outer layer of the prostate into nearby tissues. It may also have spread to the seminal vesicles. The PSA level is high.",
"Poorly diffentiated cells. The tumor has grown outside of the prostate gland and may have invaded nearby structures, such as the bladder or rectum.",
"Poorly diffentiated cells. The cancer cells across the tumor are poorly differentiated, meaning they look very different from healthy cells.",
"Poorly diffentiated cells. The cancer has spread to the regional lymph nodes.",
"Poorly diffentiated cells. The cancer has spread to distant lymph nodes, other parts of the body, or to the bones.",
]
if uploaded_file is not None:
    # uploaded_file.read()
    image = Image.open(uploaded_file)
    st.image(image, caption='Biopsy image', use_column_width=True)
    # Resize to 224x224x3 before feeding the classifier.
    im_resized = image.resize((224, 224))
    im_resized = resize(np.asarray(im_resized), (224, 224, 3))
    # grid section
    col1, col2, col3 = st.columns(3)
    col1.header('Resized Image')
    col1.image(im_resized, caption='Biopsy image', use_column_width=False)
    with col2:
        st.header('Gray Image')
        gray_image = skimage.color.rgb2gray(im_resized)
        st.image(gray_image, caption='preprocessed image',
                 use_column_width=False)
    with col3:
        st.header('Spotted Pattern')
        # sigma = float(sys.argv[2])
        gray_image = skimage.color.rgb2gray(im_resized)
        blur = skimage.filters.gaussian(gray_image, sigma=1.5)
        # perform adaptive thresholding
        t = skimage.filters.threshold_otsu(blur)
        mask = blur > t
        # Keep only the pixels above the Otsu threshold.
        sel = np.zeros_like(im_resized)
        sel[mask] = im_resized[mask]
        st.image(sel, caption='preprocessed image', use_column_width=False)
    # Class scores in percent, rounded to two decimals.
    preds = model.predict(np.expand_dims(im_resized, 0))
    data = (preds[0]*100).round(2)
    # Collapse the 10 Gleason-score classes into the 6 ISUP grade groups.
    isup_data = [data[0], data[1], data[2], data[3],
                 data[4]+data[5]+data[6], data[7]+data[8]+data[9]]
    gleason_label = ['0+0', '3+3', '3+4', '4+3',
                     '4+4', '3+5', '5+3', '4+5', '5+4', '5+5']
    gleason_colors = ['yellowgreen', 'red', 'gold', 'lightskyblue',
                      'cyan', 'lightcoral', 'blue', 'pink', 'darkgreen', 'yellow']
    isup_label = ['0', '1', '2', '3', '4', '5']
    isup_colors = ['gold', 'lightskyblue', 'cyan', 'lightcoral', 'blue']
    col1, col2, = st.columns(2)
    with col1:
        reportgenerator.visualize_confidence_level(data, label=gleason_label, ylabel='GleasonScore Pattern Scale',
                                                   title='GleasonScore Prediction ')
    with col2:
        reportgenerator.pieChart(data, label=gleason_label, colors=gleason_colors,
                                 title='GleasonScore Prediction Distribution', startangle=120)
    col1, col2, = st.columns(2)
    with col1:
        reportgenerator.pieChart(isup_data, label=isup_label, colors=isup_colors,
                                 title='ISUP Pattern Scale Prediction Distribution', startangle=45)
    with col2:
        reportgenerator.visualize_confidence_level(isup_data, label=isup_label, ylabel='ISUP Pattern Scale',
                                                   title='ISUP Prediction')
    # NOTE(review): data has 10 entries but med_opinion_list has only 9, so a
    # top prediction of the last class would raise IndexError here - confirm.
    opinion = list(data).index(max(list(data)))
    style.display_app_header(main_txt='Medical Report Proposition:',
                             sub_txt=med_opinion_list[opinion], is_sidebar=False)
| 45.032 | 203 | 0.650382 |
83080191fabbc152072cd0019bf81fd6f737d375 | 7,129 | py | Python | richardson_extrapolation.py | PrabalChowdhury/CSE330-NUMERICAL-METHODS | aabfea01f4ceaecfbb50d771ee990777d6e1122c | [
"MIT"
] | null | null | null | richardson_extrapolation.py | PrabalChowdhury/CSE330-NUMERICAL-METHODS | aabfea01f4ceaecfbb50d771ee990777d6e1122c | [
"MIT"
] | null | null | null | richardson_extrapolation.py | PrabalChowdhury/CSE330-NUMERICAL-METHODS | aabfea01f4ceaecfbb50d771ee990777d6e1122c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Richardson-Extrapolation.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oNlSL2Vztk9Fc7tMBgPcL82WGaUuCY-A
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
"""
NAME = "Prabal Chowdhury"
COLLABORATORS = ""
"""---
## CSE330 Lab: Richardson Extrapolation
---
## Instructions
Today's assignment is to:
1. Implement Richardson Extrapolation method using Python
## Richardson Extrapolation:
We used central difference method to calculate derivatives of functions last task. In this task we will use Richardson extrapolation to get a more accurate result.
Let,
$$ D_h = \frac{f(x_1+h) -f(x_1-h)}{2h}\tag{5.1}$$
General Taylor Series formula:
$$ f(x) = f(x_1) + f'(x_1)(x - x_1) + \frac{f''(x_1)}{2}(x - x_1)^2+... $$
Using Taylor's theorem to expand we get,
\begin{align}
f(x_1+h) &= f(x_1) + f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 + \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 + \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.2} \\
f(x_1-h) &= f(x_1) - f^{\prime}(x_1)h + \frac{f^{\prime \prime}(x_1)}{2}h^2 - \frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + \frac{f^{(4)}(x_1)}{4!}h^4 - \frac{f^{(5)}(x_1)}{5!}h^5 + O(h^6)\tag{5.3}
\end{align}
Subtracting $5.3$ from $5.2$ we get,
$$ f(x_1+h) - f(x_1-h) = 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7)\tag{5.4}$$
So,
\begin{align}
D_h &= \frac{f(x_1+h) - f(x_1-h)}{2h} \\
&= \frac{1}{2h} \left( 2f^{\prime}(x_1)h + 2\frac{f^{\prime \prime \prime}(x_1)}{3!}h^3 + 2\frac{f^{(5)}(x_1)}{5!}h^5 + O(h^7) \right) \\
&= f^{\prime}(x_1) + \frac{f^{\prime \prime \prime}(x_1)}{6}h^2 + \frac{f^{(5)}(x_1)}{120}h^4 + O(h^6) \tag{5.5}
\end{align}
We get our derivative $f'(x)$ plus some error terms of order $\geq 2$. Now, we want to bring the error order down to 4.
If we use $h, \text{and} \frac{h}{2}$ as step size in $5.5$, we get,
\begin{align}
D_h &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{6} + f^{(5)}(x_1) \frac{h^4}{120} + O(h^6) \tag{5.6} \\
D_{h/2} &= f^{\prime}(x_1) + f^{\prime \prime \prime}(x_1)\frac{h^2}{2^2 . 6} + f^{(5)}(x_1) \frac{h^4}{2^4 . 120} + O(h^6) \tag{5.7}
\end{align}
Multiplying $5.7$ by $4$ and subtracting from $5.6$ we get,
\begin{align}
D_h - 4D_{h/2} &= -3f^{\prime}(x) + f^{(5)}(x_1) \frac{h^4}{160} + O(h^6)\\
\Longrightarrow D^{(1)}_h = \frac{4D_{h/2} - D_h}{3} &= f^{\prime}(x) - f^{(5)}(x_1) \frac{h^4}{480} + O(h^6) \tag{5.8}
\end{align}
Let's calculate the derivative using $5.8$
### 1. Let's import the necessary headers
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.polynomial import Polynomial
"""### 2. Let's create a function named `dh(f, h, x)`
function `dh(f, h, x)` takes three parameters as input: a function `f`, a value `h`, and a set of values `x`. It returns the derivatives of the function at each elements of array `x` using the Central Difference method. This calculates equation $(5.1)$.
"""
def dh(f, h, x):
    """Central-difference approximation of f'(x) (equation 5.1).

    Input:
        f: callable, e.g. np.polynomial.Polynomial.
        h: floating point step size.
        x: np.array of evaluation points (scalars also work).
    Output:
        np.array (or scalar) of approximate slopes at each point of x.
    """
    forward = f(x + h)
    backward = f(x - h)
    return (forward - backward) / (2.0 * h)
"""### 3. Let's create another funtion `dh1(f, h, x)`.
`dh1(f, h, x)` takes the same type of values as `dh(f, h, x)` as input. It calculates the derivative using previously defined `dh(f, h, x)` function and using equation $5.8$ and returns the values.
"""
def dh1(f, h, x):
    """Richardson-extrapolated central difference (equation 5.8).

    Combines the plain central difference dh() evaluated with step
    sizes h and h/2 so that the O(h^2) error terms cancel, leaving an
    O(h^4)-accurate estimate of f'(x).

    Input:
        f: callable, e.g. np.polynomial.Polynomial.
        h: floating point step size.
        x: np.array of evaluation points.
    Output:
        np.array of improved slope estimates at each point of x.
    """
    coarse = dh(f, h, x)
    refined = dh(f, h / 2, x)
    return (4 * refined - coarse) / 3
"""### 4. Now let's create the `error(f, hs, x_i)` function
The `error(f, hs, x_i)` function takes a function `f` as input. It also takes a list of different values of h as `hs` and a specific value as `x_i` as input. It calculates the derivatives as point `x_i` using both functions described in **B** and **C**, i.e. `dh` and `dh1`
"""
def error(f, hs, x_i):
    '''
    Compare the truncation errors of the plain central difference (dh)
    and the Richardson-extrapolated formula (dh1) at a single point.

    For each step size h in hs, the signed error against the exact
    derivative f'(x_i) is recorded; both error lists are then printed
    as a table and returned.

    Input:
        f  : np.polynomial.Polynomial type data (anything with .deriv()).
        hs : np.array type data. list of step sizes h.
        x_i: floating point data. single value of x.
    Output:
        return two lists of errors, one per method (dh, dh1).
    '''
    f_prime = f.deriv(1)        # exact first derivative f^(1)(x)
    y_exact = f_prime(x_i)
    diff_error = []
    diff2_error = []
    for h in hs:
        # BUG FIX: the original passed the whole array `hs` to dh/dh1
        # instead of the current step size `h`, so every appended
        # "error" was an array rather than the scalar error for that h.
        diff_error.append(y_exact - dh(f, h, x_i))
        diff2_error.append(y_exact - dh1(f, h, x_i))
    print(pd.DataFrame({"h": hs, "Diff": diff_error, "Diff2": diff2_error}))
    return diff_error, diff2_error
"""### 5. Finally let's run some tests
function to draw the actual function
"""
"""### Draw the polynomial and it's actual derivative function"""
# Plot the test polynomial and its exact derivative over [-2.4, 1.5].
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')  # horizontal reference line at y = 0
# p(x) = 2 + x - 6x^2 - 2x^3 + 2.5x^4 + x^5 (coefficients low -> high degree)
p = Polynomial([2.0, 1.0, -6.0, -2.0, 2.5, 1.0])
p_prime = p.deriv(1)  # exact first derivative p'(x)
# NOTE(review): draw_graph is not defined in this file; presumably it
# plots the callable over the given x-interval with the given legend
# label -- confirm against the missing helper definition.
draw_graph(p, ax, [-2.4, 1.5], 'Function')
draw_graph(p_prime, ax, [-2.4, 1.5], 'Derivative')
ax.legend()
"""### Draw the actual derivative and richardson derivative using `h=1` and `h=0.1` as step size."""
# Compare the exact derivative with the Richardson estimate (dh1)
# computed at two step sizes over the same interval.
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')  # horizontal reference line at y = 0
# NOTE(review): draw_graph is assumed to be defined elsewhere -- confirm.
draw_graph(p_prime, ax, [-2.4, 1.5], 'actual')
h = 1
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=1')
h = 0.1  # smaller step: estimate should hug the exact curve more closely
x = np.linspace(-2.4, 1.5, 50, endpoint=True)
y = dh1(p, h, x)
ax.plot(x, y, label='Richardson; h=0.1')
ax.legend()
"""### Draw error-vs-h cuve"""
# Error-vs-h curves: e1 is the error of the plain central difference,
# e2 the error of the Richardson formula, both evaluated at x = 2.0
# for a range of decreasing step sizes.
fig, ax = plt.subplots()
ax.axhline(y=0, color='k')  # horizontal reference line at y = 0
hs = np.array([1., 0.55, 0.3, .17, 0.1, 0.055, 0.03, 0.017, 0.01])
e1, e2 = error(p, hs, 2.0)
ax.plot(hs, e1, label='e1')
ax.plot(hs, e2, label='e2')
ax.legend()
| 36.747423 | 273 | 0.591668 |
83092d72acd08ca21db99e040f029c6dead0fb17 | 9,050 | py | Python | src/mlshell/blocks/pipeline/steps.py | nizaevka/mlshell | 36893067f598f6b071b61604423d0fd15c2a7c62 | [
"Apache-2.0"
] | 8 | 2020-10-04T15:33:58.000Z | 2020-11-24T15:10:18.000Z | src/mlshell/blocks/pipeline/steps.py | nizaevka/mlshell | 36893067f598f6b071b61604423d0fd15c2a7c62 | [
"Apache-2.0"
] | 5 | 2020-03-06T18:13:10.000Z | 2022-03-12T00:52:48.000Z | src/mlshell/blocks/pipeline/steps.py | nizaevka/mlshell | 36893067f598f6b071b61604423d0fd15c2a7c62 | [
"Apache-2.0"
] | null | null | null | """The :mod:`mlshell.pipeline.steps` contains unified pipeline steps."""
import inspect
import mlshell
import numpy as np
import pandas as pd
import sklearn
import sklearn.impute
import sklearn.compose
__all__ = ['Steps']
if __name__ == '__main__':
pass
| 43.301435 | 206 | 0.612818 |
830a09b0fe214d145afe8c3a467c3effd538a38b | 2,283 | py | Python | paste/application/repositories.py | Afonasev/Paste | ca1dcb566f15a9cf1aa0e97c6fc4cf4d450ec89d | [
"MIT"
] | null | null | null | paste/application/repositories.py | Afonasev/Paste | ca1dcb566f15a9cf1aa0e97c6fc4cf4d450ec89d | [
"MIT"
] | 1 | 2018-05-07T00:12:59.000Z | 2018-05-07T00:12:59.000Z | paste/application/repositories.py | Afonasev/Paste | ca1dcb566f15a9cf1aa0e97c6fc4cf4d450ec89d | [
"MIT"
] | null | null | null | from datetime import datetime
import peewee
from paste import domain
from . import db
def _by_object(obj):
name = obj.__class__.__name__
fields = ('pk', 'created_at', 'updated_at')
if name == 'User':
return domain.User, db.User, fields + ('name', 'passhash')
if name == 'Snippet':
fields += ('author', 'name', 'syntax', 'raw', 'html')
return domain.Snippet, db.Snippet, fields
raise NotImplementedError
def _entity_to_model(entity):
_, model_cls, fields = _by_object(entity)
attrs = {}
for field in fields:
value = getattr(entity, field)
if isinstance(value, domain.Entity):
value = value.pk
attrs[field] = value
return model_cls(**attrs)
def _model_to_entity(model):
entity_cls, _, fields = _by_object(model)
attrs = {}
for f in fields:
value = getattr(model, f)
if isinstance(value, db.AbstractModel):
value = _model_to_entity(value)
attrs[f] = value
return entity_cls(**attrs)
| 23.78125 | 72 | 0.610162 |
830a2f904f214eab34723ae65f4d0799f4773a77 | 3,278 | py | Python | example/example_nursery.py | airysen/racog | 8751436437e9e82d80d54617a8b39fae5fd0ebdd | [
"MIT"
] | 3 | 2019-03-06T07:58:22.000Z | 2021-03-12T18:10:46.000Z | example/example_nursery.py | airysen/racog | 8751436437e9e82d80d54617a8b39fae5fd0ebdd | [
"MIT"
] | 1 | 2019-08-19T18:51:02.000Z | 2019-08-19T18:51:02.000Z | example/example_nursery.py | airysen/racog | 8751436437e9e82d80d54617a8b39fae5fd0ebdd | [
"MIT"
] | 1 | 2019-08-19T19:07:05.000Z | 2019-08-19T19:07:05.000Z | # Dataset https://archive.ics.uci.edu/ml/datasets/Nursery
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from imblearn.metrics import geometric_mean_score
from sklearn.metrics import mean_squared_error, make_scorer, roc_auc_score, log_loss
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from racog import RACOG
RS = 334
nurseryurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/nursery/nursery.data'
attribute_list = ['parents', 'has_nurs', 'form', 'children',
'housing', 'finance', 'social', 'health', 'target']
nursery = pd.read_csv(nurseryurl, header=None, names=attribute_list)
LE = LabelEncoder()
X = nursery.drop('target', axis=1)
y = nursery['target']
ii = y[y == 'recommend'].index.values
X.drop(ii, inplace=True)
y.drop(ii, inplace=True)
for col in X:
if X[col].dtype == 'object':
X[col] = LE.fit_transform(X[col])
X = X.values
LE = LabelEncoder()
y = LE.fit_transform(y)
rf = RandomForestClassifier()
params = {'class_weight': 'balanced',
'criterion': 'entropy',
'max_depth': 15,
'max_features': 0.9,
'min_samples_leaf': 11,
'min_samples_split': 2,
'min_weight_fraction_leaf': 0,
'n_estimators': 30}
rf.set_params(**params)
gscore = make_scorer(geometric_mean_score, average='multiclass')
strf = StratifiedKFold(n_splits=3, shuffle=True, random_state=RS)
count = 0
for train_index, test_index in strf.split(X, y):
print(Counter(y[test_index]), Counter(y[train_index]))
# swap train/test
X_train, X_test, y_train, y_test = X[test_index], X[train_index], y[test_index], y[train_index]
rf.set_params(**params)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
print('#####################################################')
print('Count', count)
print('')
print('Without oversampling | Gmean:', gmean(y_test, y_pred))
rnd_over = RandomOverSampler(random_state=RS + count)
X_rndo, y_rndo = rnd_over.fit_sample(X_train, y_train)
print('')
rf.fit(X_rndo, y_rndo)
y_pred = rf.predict(X_test)
print('Random oversampling | Gmean:', gmean(y_test, y_pred))
smote = SMOTE(random_state=RS + count, kind='regular', k_neighbors=5, m=None,
m_neighbors=10, n_jobs=1)
X_smote, y_smote = smote.fit_sample(X_train, y_train)
rf.fit(X_smote, y_smote)
y_pred = rf.predict(X_test)
print('')
print('SMOTE oversampling | Gmean:', gmean(y_test, y_pred))
racog = RACOG(categorical_features='all',
warmup_offset=100, lag0=20, n_iter='auto',
threshold=10, eps=10E-5, verbose=0, n_jobs=1)
X_racog, y_racog = racog.fit_sample(X_train, y_train)
rf.fit(X_racog, y_racog)
y_pred = rf.predict(X_test)
print('RACOG oversampling | Gmean:', gmean(y_test, y_pred))
print('')
count = count + 1
| 31.519231 | 99 | 0.682123 |
830a7a30cf722db0418fa36cfcde2cb40ad3323f | 8,187 | py | Python | channels/piratestreaming.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | channels/piratestreaming.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | channels/piratestreaming.py | sodicarus/channels | d77402f4f460ea6daa66959aa5384aaffbff70b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canale piratestreaming
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
import urlparse
from core import config, httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "piratestreaming"
host = "https://www.piratestreaming.watch/"
| 36.386667 | 142 | 0.561011 |
830be7742fbd411e52ef441a27dec4480a075f6e | 5,259 | py | Python | test/IECore/LayeredDictTest.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | test/IECore/LayeredDictTest.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | test/IECore/LayeredDictTest.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
if __name__ == "__main__":
unittest.main()
| 24.460465 | 76 | 0.56969 |
830ca47819b03f644d5fc932f9eb92819146316f | 1,425 | py | Python | nuplan/database/utils/boxes/box.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/boxes/box.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/boxes/box.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import abc
from typing import Any, Dict
| 22.619048 | 77 | 0.562105 |
830e1e09e8a968bc1c2ae3714f7a575834f1f2be | 4,627 | py | Python | tests/test_training.py | Hilly12/masters-code | 60b20a0e5e4c0ab9152b090b679391d8d62ec88a | [
"MIT"
] | null | null | null | tests/test_training.py | Hilly12/masters-code | 60b20a0e5e4c0ab9152b090b679391d8d62ec88a | [
"MIT"
] | null | null | null | tests/test_training.py | Hilly12/masters-code | 60b20a0e5e4c0ab9152b090b679391d8d62ec88a | [
"MIT"
] | null | null | null | import torch
import prifair as pf
N_SAMPLES = 10000
VAL_SAMPLES = 1000
STUDENT_SAMPLES = 5000
INPUTS = 1000
OUTPUTS = 5
BATCH_SIZE = 256
MAX_PHYSICAL_BATCH_SIZE = 128
EPSILON = 2.0
DELTA = 1e-5
MAX_GRAD_NORM = 1.0
N_TEACHERS = 4
N_GROUPS = 10
EPOCHS = 2
X = torch.randn(N_SAMPLES + VAL_SAMPLES, INPUTS)
Y = torch.randint(0, OUTPUTS, (N_SAMPLES + VAL_SAMPLES,))
student = torch.randn(STUDENT_SAMPLES, INPUTS)
groups = torch.randint(0, N_GROUPS, (N_SAMPLES,))
weights = torch.ones(N_SAMPLES) / N_SAMPLES
train_data = torch.utils.data.TensorDataset(X[:N_SAMPLES], Y[:N_SAMPLES])
val_data = torch.utils.data.TensorDataset(X[N_SAMPLES:], Y[N_SAMPLES:])
student_data = torch.utils.data.TensorDataset(student, torch.zeros(STUDENT_SAMPLES))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=BATCH_SIZE)
student_loader = torch.utils.data.DataLoader(student_data, batch_size=BATCH_SIZE)
model_class = MockModel
optim_class = torch.optim.NAdam
criterion = torch.nn.NLLLoss()
| 28.91875 | 84 | 0.690944 |
830e39c22c34be264cb1928c1b6da3f32584283d | 177 | py | Python | problem/01000~09999/02164/2164.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/02164/2164.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/02164/2164.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | from collections import deque
n,x=int(input()),deque()
for i in range(1,n+1): x.append(i)
while len(x)>1:
x.popleft()
if len(x)==1: break
x.append(x.popleft())
print(x.pop()) | 22.125 | 34 | 0.661017 |
830e650277386eb71938c69ac25104bf879b279f | 2,430 | py | Python | craft_ai/timezones.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 14 | 2016-08-26T07:06:57.000Z | 2020-09-22T07:41:21.000Z | craft_ai/timezones.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 94 | 2016-08-02T14:07:59.000Z | 2021-10-06T11:50:52.000Z | craft_ai/timezones.py | craft-ai/craft-ai-client-python | 3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3 | [
"BSD-3-Clause"
] | 8 | 2017-02-07T12:05:57.000Z | 2021-10-14T09:45:30.000Z | import re
_TIMEZONE_REGEX = re.compile(r"^([+-](2[0-3]|[01][0-9])(:?[0-5][0-9])?|Z)$")
TIMEZONES = {
"UTC": "+00:00",
"GMT": "+00:00",
"BST": "+01:00",
"IST": "+01:00",
"WET": "+00:00",
"WEST": "+01:00",
"CET": "+01:00",
"CEST": "+02:00",
"EET": "+02:00",
"EEST": "+03:00",
"MSK": "+03:00",
"MSD": "+04:00",
"AST": "-04:00",
"ADT": "-03:00",
"EST": "-05:00",
"EDT": "-04:00",
"CST": "-06:00",
"CDT": "-05:00",
"MST": "-07:00",
"MDT": "-06:00",
"PST": "-08:00",
"PDT": "-07:00",
"HST": "-10:00",
"AKST": "-09:00",
"AKDT": "-08:00",
"AEST": "+10:00",
"AEDT": "+11:00",
"ACST": "+09:30",
"ACDT": "+10:30",
"AWST": "+08:00",
}
| 26.413043 | 81 | 0.530864 |
830ebcb1b5a538ed7758db2770eff5e0ab51ebf3 | 2,066 | py | Python | gpvdm_gui/gui/json_fdtd.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 12 | 2016-09-13T08:58:13.000Z | 2022-01-17T07:04:52.000Z | gpvdm_gui/gui/json_fdtd.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 3 | 2017-11-11T12:33:02.000Z | 2019-03-08T00:48:08.000Z | gpvdm_gui/gui/json_fdtd.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 6 | 2019-01-03T06:17:12.000Z | 2022-01-01T15:59:00.000Z | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package json_transfer_matrix
# Store the cv domain json data
#
import sys
import os
import shutil
import json
from json_base import json_base
| 31.30303 | 91 | 0.727009 |
830eef9810e77b134c4cc2e988eadd23436bf9ed | 4,637 | py | Python | gru/plugins/base/inventory.py | similarweb/gru | 49ef70c2b5e58302c84dbe7d984a7d49aebc0384 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2016-12-11T19:58:33.000Z | 2020-07-11T08:55:34.000Z | gru/plugins/base/inventory.py | similarweb/gru | 49ef70c2b5e58302c84dbe7d984a7d49aebc0384 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | gru/plugins/base/inventory.py | similarweb/gru | 49ef70c2b5e58302c84dbe7d984a7d49aebc0384 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-12-09T19:31:50.000Z | 2019-12-09T19:31:50.000Z |
from . import BasePlugin
from gru.config import settings
| 31.120805 | 117 | 0.607505 |
83103a011e1bb5e482fa869c43bee2cdb39dd21a | 5,830 | py | Python | app/comic/eyra/tasks.py | EYRA-Benchmark/grand-challenge.org | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 2 | 2019-06-28T09:23:55.000Z | 2020-03-18T05:52:13.000Z | app/comic/eyra/tasks.py | EYRA-Benchmark/comic | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 112 | 2019-08-12T15:13:27.000Z | 2022-03-21T15:49:40.000Z | app/comic/eyra/tasks.py | EYRA-Benchmark/grand-challenge.org | 8264c19fa1a30ffdb717d765e2aa2e6ceccaab17 | [
"Apache-2.0"
] | 1 | 2020-03-19T14:19:57.000Z | 2020-03-19T14:19:57.000Z | import json
from datetime import datetime
import time
from functools import reduce
import boto3
from celery import shared_task
from celery.bin.control import inspect
from django.conf import settings
from comic.container_exec.backends.k8s import K8sJob
from comic.eyra.models import Job, Submission, DataFile, JobInput
| 29.744898 | 95 | 0.686792 |
83105e3ab7b623c4391c6fa5b2af5b5f65241d9a | 1,926 | py | Python | doc/conf.py | djarpin/sagemaker-python-sdk | 157d8670977243f7f77327175d40364c885482b3 | [
"Apache-2.0"
] | 1 | 2018-01-19T22:24:38.000Z | 2018-01-19T22:24:38.000Z | doc/conf.py | djarpin/sagemaker-python-sdk | 157d8670977243f7f77327175d40364c885482b3 | [
"Apache-2.0"
] | null | null | null | doc/conf.py | djarpin/sagemaker-python-sdk | 157d8670977243f7f77327175d40364c885482b3 | [
"Apache-2.0"
] | 2 | 2019-08-06T05:48:25.000Z | 2020-10-04T17:00:55.000Z | # -*- coding: utf-8 -*-
import os
import sys
from datetime import datetime
from unittest.mock import MagicMock
MOCK_MODULES = ['tensorflow', 'tensorflow.core', 'tensorflow.core.framework', 'tensorflow.python',
'tensorflow.python.framework', 'tensorflow_serving', 'tensorflow_serving.apis']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
version = '1.0'
project = u'sagemaker'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.autosummary',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst' # The suffix of source filenames.
master_doc = 'index' # The master toctree document.
copyright = u'%s, Amazon' % datetime.now().year
# The full version, including alpha/beta/rc tags.
release = version
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
pygments_style = 'default'
autoclass_content = "both"
autodoc_default_flags = ['show-inheritance', 'members', 'undoc-members']
autodoc_member_order = 'bysource'
if 'READTHEDOCS' in os.environ:
html_theme = 'default'
else:
html_theme = 'haiku'
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % project
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# autosummary
autosummary_generate = True
| 30.09375 | 98 | 0.704569 |
83109a1fa008110e9e6bc3419abde0778a40c3c3 | 1,081 | py | Python | django_cassiopeia/views.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | 13 | 2020-07-08T17:23:18.000Z | 2022-02-13T09:19:42.000Z | django_cassiopeia/views.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | 16 | 2020-07-19T22:14:20.000Z | 2022-03-24T02:57:45.000Z | django_cassiopeia/views.py | galaddirie/django-cassiopeia | e3e75e6c815cfc96e3b7ef5991aa1265221a2122 | [
"MIT"
] | 6 | 2020-07-21T01:37:54.000Z | 2022-01-01T19:28:54.000Z | from django.shortcuts import render, HttpResponse
from django_cassiopeia import cassiopeia as cass
from time import sleep
import json
# Create your views here. | 30.027778 | 87 | 0.612396 |
8311183712fef6e93100cb2e804d36583b7c35d9 | 962 | py | Python | sender.py | AndrVLDZ/telnet_DAW-master | 4bce486fad0d4ae51ef695ace118df2af2b1c35f | [
"Apache-2.0"
] | null | null | null | sender.py | AndrVLDZ/telnet_DAW-master | 4bce486fad0d4ae51ef695ace118df2af2b1c35f | [
"Apache-2.0"
] | null | null | null | sender.py | AndrVLDZ/telnet_DAW-master | 4bce486fad0d4ae51ef695ace118df2af2b1c35f | [
"Apache-2.0"
] | null | null | null | import telnetlib
print_logo()
port = int(input('\n PORT:'))
ip_1 = str(input(' Host_1 IP: '))
node_1 = telnetlib.Telnet(ip_1, port)
ip_2 = str(input(' Host_2 IP: '))
node_2 = telnetlib.Telnet(ip_2, port)
while True:
symbol = str(input('==> '))
if symbol == 's':
node_1.write(b's\r\n')
node_2.write(b's\r\n')
elif symbol == 'n':
node_1.write(b'n\r\n')
node_2.write(b'n\r\n')
elif symbol == 'b':
node_1.write(b'b\r\n')
node_2.write(b'b\r\n')
else:
node_1.write(bytes(str.encode(symbol)))
node_2.write(bytes(str.encode(symbol))) | 22.904762 | 47 | 0.477131 |
8311c27de6e1db041ba99f1046583892727db0c6 | 43 | py | Python | oed/__init__.py | wgshen/OED | 6928ba31396f2e7dd2bd3701f319e1dad3f91346 | [
"MIT"
] | null | null | null | oed/__init__.py | wgshen/OED | 6928ba31396f2e7dd2bd3701f319e1dad3f91346 | [
"MIT"
] | null | null | null | oed/__init__.py | wgshen/OED | 6928ba31396f2e7dd2bd3701f319e1dad3f91346 | [
"MIT"
] | 1 | 2021-11-10T05:41:02.000Z | 2021-11-10T05:41:02.000Z | from .oed import OED
__all__ = [
"OED"
]
| 7.166667 | 20 | 0.604651 |
831472f4490aeaadae4cd1684594efc22e0edd62 | 14,400 | py | Python | pyperformance/_manifest.py | cappadokes/pyperformance | 60574dad9585eb5622631502296bb8eae143cdfc | [
"MIT"
] | null | null | null | pyperformance/_manifest.py | cappadokes/pyperformance | 60574dad9585eb5622631502296bb8eae143cdfc | [
"MIT"
] | 2 | 2022-03-09T11:14:07.000Z | 2022-03-09T14:07:47.000Z | test/xml_etree/venv/cpython3.11-d52597b1179a-compat-f6a835d45d46-bm-xml_etree/lib/python3.11/site-packages/pyperformance/_manifest.py | sebawild/cpython | 874ba1a9c948af33de2ad229df42e03dc516f0a8 | [
"0BSD"
] | 1 | 2022-01-04T13:08:31.000Z | 2022-01-04T13:08:31.000Z |
__all__ = [
'BenchmarksManifest',
'load_manifest',
'parse_manifest',
]
from collections import namedtuple
import os.path
from . import __version__, DATA_DIR
from . import _benchmark, _utils
DEFAULTS_DIR = os.path.join(DATA_DIR, 'benchmarks')
DEFAULT_MANIFEST = os.path.join(DEFAULTS_DIR, 'MANIFEST')
BENCH_COLUMNS = ('name', 'metafile')
BENCH_HEADER = '\t'.join(BENCH_COLUMNS)
#######################################
# internal implementation
def _parse_metafile(metafile, name):
if not metafile:
return None
elif metafile.startswith('<') and metafile.endswith('>'):
directive, _, extra = metafile[1:-1].partition(':')
if directive == 'local':
if extra:
rootdir = f'bm_{extra}'
basename = f'bm_{name}.toml'
else:
rootdir = f'bm_{name}'
basename = 'pyproject.toml'
# A relative path will be resolved against the manifset file.
return os.path.join(rootdir, basename)
else:
raise ValueError(f'unsupported metafile directive {metafile!r}')
else:
return os.path.abspath(metafile)
def _get_tags(benchmarks):
# Fill in groups from benchmark tags.
tags = {}
for bench in benchmarks:
for tag in getattr(bench, 'tags', ()):
if tag in tags:
tags[tag].append(bench)
else:
tags[tag] = [bench]
return tags
def _resolve_groups(rawgroups, byname):
benchmarks = set(byname.values())
tags = None
groups = {
'all': list(benchmarks),
}
unresolved = {}
for groupname, entries in rawgroups.items():
if groupname == 'all':
continue
if not entries:
if groupname == 'default':
groups[groupname] = list(benchmarks)
else:
if tags is None:
tags = _get_tags(benchmarks)
groups[groupname] = tags.get(groupname, ())
continue
assert entries[0][0] == '+', (groupname, entries)
unresolved[groupname] = names = set()
for op, name in entries:
if op == '+':
if name == '<all>':
names.update(byname)
elif name in byname or name in rawgroups:
names.add(name)
elif op == '-':
if name == '<all>':
raise NotImplementedError((groupname, op, name))
elif name in byname or name in rawgroups:
if name in names:
names.remove(name)
else:
raise NotImplementedError((groupname, op, name))
while unresolved:
for groupname, names in list(unresolved.items()):
benchmarks = set()
for name in names:
if name in byname:
benchmarks.add(byname[name])
elif name in groups:
benchmarks.update(groups[name])
names.remove(name)
elif name == groupname:
names.remove(name)
break
else: # name in unresolved
names.remove(name)
names.extend(unresolved[name])
break
else:
groups[groupname] = benchmarks
del unresolved[groupname]
return groups
| 33.103448 | 89 | 0.555556 |
8314cb28873762113bd7dff276be8513d9a062b7 | 8,543 | py | Python | pimux/function.py | pcpcpc1213/pimux | 6ce9c3a59ac04064d46217bcdad531c7171163da | [
"MIT"
] | null | null | null | pimux/function.py | pcpcpc1213/pimux | 6ce9c3a59ac04064d46217bcdad531c7171163da | [
"MIT"
] | null | null | null | pimux/function.py | pcpcpc1213/pimux | 6ce9c3a59ac04064d46217bcdad531c7171163da | [
"MIT"
] | null | null | null | from . import scrip as t
| 26.780564 | 122 | 0.570057 |
83150604a0fb11e77945d0c0fcad08abbb284ce0 | 342 | py | Python | download_from_link.py | bogdanf555/scripts | 42b7b36c5891da6dcde8f7889bdf0798f91bef12 | [
"MIT"
] | null | null | null | download_from_link.py | bogdanf555/scripts | 42b7b36c5891da6dcde8f7889bdf0798f91bef12 | [
"MIT"
] | null | null | null | download_from_link.py | bogdanf555/scripts | 42b7b36c5891da6dcde8f7889bdf0798f91bef12 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
import sys
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Error: you should pass 2 arguments: [link_to_download_from] [path_to_save_downloaded_file]")
exit(1)
url = sys.argv[1]
r = requests.get(url, allow_redirects=True)
open(sys.argv[2], 'wb').write(r.content)
| 24.428571 | 107 | 0.660819 |
83153ac6624a05f5b11103f7bcc31634fc8bbca3 | 443 | py | Python | vowelsubstring.py | boddulurisrisai/python-practice | bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28 | [
"bzip2-1.0.6"
] | 1 | 2021-04-16T07:12:36.000Z | 2021-04-16T07:12:36.000Z | vowelsubstring.py | boddulurisrisai/python-practice | bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28 | [
"bzip2-1.0.6"
] | null | null | null | vowelsubstring.py | boddulurisrisai/python-practice | bb9dfd8ea4d1fe3e4a3f7950ba63b0469e0bca28 | [
"bzip2-1.0.6"
] | null | null | null | import re
b=input('enter string')
r=[];max=-1;z=-1
for i in range(len(b)):
for j in range(i+1,len(b)+1):
c=b[i:j]
for k in c:
if k=='a' or k=='e' or k=='i' or k=='o' or k=='u':
flag=0
else:
flag=1
break
if flag==0:
r.append(c)
for i in r:
if len(i)>max:
max=len(i)
z=i
print(z)
| 21.095238 | 63 | 0.363431 |
8316bb71d181ce8ce3eff4b2a0a627c1843d8260 | 485 | py | Python | syndata/__init__.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | 6 | 2018-02-07T02:02:00.000Z | 2020-01-22T10:33:01.000Z | syndata/__init__.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | null | null | null | syndata/__init__.py | Menelau/synthetic_datasets | 86fd99042cff6a8bbdfa195fe6eee938a9c9d8f5 | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: MIT
"""
The :mod:`deslib.util` This module includes various utilities. They are divided into three parts:
syndata.synthethic_datasets - Provide functions to generate several 2D classification datasets.
syndata.plot_tools - Provides some routines to easily plot datasets and decision borders of a scikit-learn classifier.
"""
from .plot_tools import *
from .synthetic_datasets import *
| 28.529412 | 118 | 0.785567 |
831850a395edae115c39b123b0382e44942149bf | 644 | py | Python | profiles/migrations/0002_auto_20211214_0825.py | praekeltfoundation/ge-web | 331d22554dfd6b6f6060b1fd7a110f38dd7ddece | [
"BSD-2-Clause"
] | 1 | 2022-03-09T15:11:52.000Z | 2022-03-09T15:11:52.000Z | profiles/migrations/0002_auto_20211214_0825.py | praekeltfoundation/ge-web | 331d22554dfd6b6f6060b1fd7a110f38dd7ddece | [
"BSD-2-Clause"
] | 14 | 2022-01-03T09:49:41.000Z | 2022-03-31T12:53:31.000Z | profiles/migrations/0002_auto_20211214_0825.py | praekeltfoundation/ge-web | 331d22554dfd6b6f6060b1fd7a110f38dd7ddece | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 3.1.14 on 2021-12-14 08:25
import django.db.models.deletion
from django.db import migrations, models
| 30.666667 | 194 | 0.673913 |
8318aea9b693ecf60895b29261a418a03e789bc8 | 4,290 | py | Python | radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py | dlmatra/miao | 71799811b21a4249754390a8ec00972723edab99 | [
"MIT"
] | 1 | 2019-11-23T00:03:40.000Z | 2019-11-23T00:03:40.000Z | radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py | dlmatra/miao | 71799811b21a4249754390a8ec00972723edab99 | [
"MIT"
] | 3 | 2021-05-26T12:54:50.000Z | 2021-05-27T10:58:48.000Z | radmc-3d/version_0.41/examples/run_spher2d_1_nomirror/problem_setup.py | dlmatra/miao | 71799811b21a4249754390a8ec00972723edab99 | [
"MIT"
] | 1 | 2021-12-23T14:09:52.000Z | 2021-12-23T14:09:52.000Z | #
# Import NumPy for array handling
#
import numpy as np
import math
#
# Import plotting libraries (start Python with ipython --matplotlib)
#
#from mpl_toolkits.mplot3d import axes3d
#from matplotlib import pyplot as plt
#
# Some natural constants
#
au = 1.49598e13 # Astronomical Unit [cm]
pc = 3.08572e18 # Parsec [cm]
ms = 1.98892e33 # Solar mass [g]
ts = 5.78e3 # Solar temperature [K]
ls = 3.8525e33 # Solar luminosity [erg/s]
rs = 6.96e10 # Solar radius [cm]
#
# Monte Carlo parameters
#
nphot = 100000
#
# Grid parameters
#
nx = 100
ny = 120
nz = 1
#
# Model parameters
#
rin = 5*au
rout = 100*au
zmaxr = 0.5e0
rho0 = 1e-16 * 10000
prho = -2.e0
hpr = 0.1e0
#
# Star parameters
#
mstar = ms
rstar = rs
tstar = ts
pstar = [0.,0.,0.]
#
# Make the coordinates
#
# Note: The way the xi grid is made is slightly non-standard, but is
# done this way to be consistent with problem_setup.pro (the IDL version)
#
xi = rin * (rout/rin)**(np.linspace(0.,nx,nx+1)/(nx-1.0))
yi = math.pi/2.0 - zmaxr*np.linspace(ny*0.5,-ny*0.5,ny+1)/(ny*0.5)
zi = np.array([0.,math.pi*2])
xc = 0.5e0 * ( xi[0:nx] + xi[1:nx+1] )
yc = 0.5e0 * ( yi[0:ny] + yi[1:ny+1] )
#
# Make the dust density model
#
rr,tt = np.meshgrid(xc,yc,indexing='ij')
zzr = math.pi/2.0 - tt
rhod = rho0 * (rr/au)**prho
rhod = rhod * np.exp(-0.50*(zzr/hpr)**2)
#
# Write the wavelength_micron.inp file
#
lam1 = 0.1e0
lam2 = 7.0e0
lam3 = 25.e0
lam4 = 1.0e4
n12 = 20
n23 = 100
n34 = 30
lam12 = np.logspace(np.log10(lam1),np.log10(lam2),n12,endpoint=False)
lam23 = np.logspace(np.log10(lam2),np.log10(lam3),n23,endpoint=False)
lam34 = np.logspace(np.log10(lam3),np.log10(lam4),n34,endpoint=True)
lam = np.concatenate([lam12,lam23,lam34])
nlam = lam.size
#
# Write the wavelength file
#
with open('wavelength_micron.inp','w+') as f:
f.write('%d\n'%(nlam))
for value in lam:
f.write('%13.6e\n'%(value))
#
#
# Write the stars.inp file
#
with open('stars.inp','w+') as f:
f.write('2\n')
f.write('1 %d\n\n'%(nlam))
f.write('%13.6e %13.6e %13.6e %13.6e %13.6e\n\n'%(rstar,mstar,pstar[0],pstar[1],pstar[2]))
for value in lam:
f.write('%13.6e\n'%(value))
f.write('\n%13.6e\n'%(-tstar))
#
# Write the grid file
#
with open('amr_grid.inp','w+') as f:
f.write('1\n') # iformat
f.write('0\n') # AMR grid style (0=regular grid, no AMR)
f.write('100\n') # Coordinate system
f.write('0\n') # gridinfo
f.write('1 1 0\n') # Include x,y,z coordinate
f.write('%d %d %d\n'%(nx,ny,nz)) # Size of grid
for value in xi:
f.write('%13.6e\n'%(value)) # X coordinates (cell walls)
for value in yi:
f.write('%13.6e\n'%(value)) # Y coordinates (cell walls)
for value in zi:
f.write('%13.6e\n'%(value)) # Z coordinates (cell walls)
#
# Write the density file
#
with open('dust_density.inp','w+') as f:
f.write('1\n') # Format number
f.write('%d\n'%(nx*ny*nz)) # Nr of cells
f.write('1\n') # Nr of dust species
data = rhod.ravel(order='F') # Create a 1-D view, fortran-style indexing
data.tofile(f, sep='\n', format="%13.6e")
f.write('\n')
#
# Dust opacity control file
#
with open('dustopac.inp','w+') as f:
f.write('2 Format number of this file\n')
f.write('1 Nr of dust species\n')
f.write('============================================================================\n')
f.write('1 Way in which this dust species is read\n')
f.write('0 0=Thermal grain\n')
f.write('silicate Extension of name of dustkappa_***.inp file\n')
f.write('----------------------------------------------------------------------------\n')
#
# Write the radmc3d.inp control file
#
with open('radmc3d.inp','w+') as f:
f.write('nphot = %d\n'%(nphot))
f.write('scattering_mode_max = 0\n') # Put this to 1 for isotropic scattering
| 30.642857 | 94 | 0.530536 |
83191aecc9d861bb7dfa42c1c5b079d943885a2f | 5,508 | py | Python | colorprinter/pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 3 | 2015-04-20T08:17:09.000Z | 2020-07-07T15:22:06.000Z | colorprinter/pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 24 | 2015-11-14T14:54:59.000Z | 2017-10-23T15:14:45.000Z | colorprinter/pycolor.py | edonyzpc/toolkitem | 3a09ebf45eee8ecd9ff0e441392d5fc746b996e5 | [
"MIT"
] | 1 | 2017-02-28T06:35:44.000Z | 2017-02-28T06:35:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2017-03-19 21:24
#
# Filename: pycolor.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
from __future__ import print_function
def __str2fmts(self, color_str):
    """
    Convert description of format into format number
    """
    # NOTE(review): this assigns to ``self.format`` while ``colorstr`` reads
    # ``self._format`` -- presumably ``format`` is a property whose setter
    # updates ``_format``; confirm against the full class definition.
    self.format = color_str
def colorstr(self, string, color=None):
    """Wrap *string* in the active terminal format codes.

    If *color* is given, the stored format description is switched to it
    first (via ``__str2fmts``); the current format prefix and the reset
    suffix are then placed around the text.
    """
    if color is not None:
        self.__str2fmts(color)
    return self._format + string + self.reset
def cprint(color, out_str):
    """Colorful print function instead of standard print
    """
    # NOTE(review): ``color`` is unused here and ``printer`` is not defined in
    # this excerpt -- presumably a module-level callable built elsewhere in
    # the original file; verify against the full source before reuse.
    printer(out_str)
| 35.766234 | 93 | 0.396696 |
831a95d5b9d61001fca6140bef2832489872b9e3 | 1,684 | py | Python | launch/velocity_smoother-composed-launch.py | doisyg/velocity_smoother | 5ba998978e324fd0417ea75483d1f5559820459d | [
"BSD-3-Clause"
] | 8 | 2020-02-28T10:40:53.000Z | 2022-01-15T06:42:11.000Z | launch/velocity_smoother-composed-launch.py | doisyg/velocity_smoother | 5ba998978e324fd0417ea75483d1f5559820459d | [
"BSD-3-Clause"
] | 9 | 2020-01-20T16:32:14.000Z | 2022-01-28T13:49:59.000Z | launch/velocity_smoother-composed-launch.py | doisyg/velocity_smoother | 5ba998978e324fd0417ea75483d1f5559820459d | [
"BSD-3-Clause"
] | 3 | 2020-03-19T09:40:35.000Z | 2022-01-11T01:47:41.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Open Source Robotics Foundation, Inc.
#
# Software License Agreement (BSD License 2.0)
# https://raw.githubusercontent.com/kobuki-base/velocity_smoother/license/LICENSE
"""Launch the velocity smoother as a composed node with default configuration."""
import os
import ament_index_python.packages
from launch import LaunchDescription
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
import yaml
| 35.829787 | 92 | 0.694774 |
831b642dcce9a13a8398668c6c09e24217cd6b3c | 3,616 | py | Python | lib/taskstats/controller.py | tijko/IO-Mon | 4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b | [
"MIT"
] | 1 | 2015-12-17T04:58:09.000Z | 2015-12-17T04:58:09.000Z | lib/taskstats/controller.py | tijko/IO-Mon | 4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b | [
"MIT"
] | null | null | null | lib/taskstats/controller.py | tijko/IO-Mon | 4fb43c6c97b22f9a44eb34ef2221f1ed2abb062b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import socket
from netlink import *
# Netlink protocol family numbers.  These mirror the NETLINK_* constants
# declared in the Linux uapi header <linux/netlink.h>.
NETLINK_ROUTE          = 0
NETLINK_UNUSED         = 1
NETLINK_USERSOCK       = 2
NETLINK_FIREWALL       = 3
NETLINK_SOCK_DIAG      = 4
NETLINK_NFLOG          = 5
NETLINK_XFRM           = 6
NETLINK_SELINUX        = 7
NETLINK_ISCSI          = 8
NETLINK_AUDIT          = 9
NETLINK_FIB_LOOKUP     = 10
NETLINK_CONNECTOR      = 11
NETLINK_NETFILTER      = 12
NETLINK_IP6_FW         = 13
NETLINK_DNRTMSG        = 14
NETLINK_KOBJECT_UEVENT = 15
NETLINK_GENERIC        = 16
# 17 is skipped here to match the kernel header numbering.
NETLINK_SCSITRANSPORT  = 18
NETLINK_ECRYPTFS       = 19
NETLINK_RDMA           = 20
NETLINK_CRYPTO         = 21
NETLINK_INET_DIAG      = NETLINK_SOCK_DIAG
# Genetlink Controller command and attribute values
CTRL_CMD_UNSPEC       = 0
CTRL_CMD_NEWFAMILY    = 1
CTRL_CMD_DELFAMILY    = 2
CTRL_CMD_GETFAMILY    = 3
CTRL_CMD_NEWOPS       = 4
CTRL_CMD_DELOPS       = 5
CTRL_CMD_GETOPS       = 6
CTRL_CMD_NEWMCAST_GRP = 7
CTRL_CMD_DELCAST_GRP  = 8
CTRL_CMD_GETMCAST_GRP = 9
__CTRL_CMD_MAX        = 10
# Taskstats genetlink interface version spoken by this client.
TASKSTATS_GENL_VERSION = 0x1
# genlmsghdr layout: cmd (u8), version (u8), two reserved/padding bytes
# ('x' pads), so the generic netlink header length is 4 bytes.
GENL_HDRLEN = struct.calcsize('BBxx')
| 28.472441 | 80 | 0.638274 |
831cac4a9b399f71b7446e06e08d2d1e23c17328 | 1,335 | py | Python | app/marketing/migrations/0002_membership.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | app/marketing/migrations/0002_membership.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | app/marketing/migrations/0002_membership.py | NDevox/website | 76004e667f2295eddd79d500ba21f02a0480412f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 04:25
from __future__ import unicode_literals
from django.db import migrations, models
| 32.560976 | 114 | 0.605243 |
831cd9a75c39325f8b2e668fec868da457fe98e6 | 4,552 | py | Python | Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py | cbgandhi-code/amazon-connect-salesforce-scv | fc5da5445b01295e530b50aa774598e91087c57a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py | cbgandhi-code/amazon-connect-salesforce-scv | fc5da5445b01295e530b50aa774598e91087c57a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | Solutions/VMX2-VoicemailExpress/Code/vmx_transcriber.py | cbgandhi-code/amazon-connect-salesforce-scv | fc5da5445b01295e530b50aa774598e91087c57a | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | # Version: 2022.03.23
"""
**********************************************************************************************************************
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated *
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and *
* to permit persons to whom the Software is furnished to do so. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO *
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS *
* IN THE SOFTWARE. *
**********************************************************************************************************************
"""
import json
import boto3
import os
import logging
# Root logger; severity comes from the ``lambda_logging_level`` environment
# variable and defaults to INFO when unset.
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(os.getenv('lambda_logging_level', 'INFO')))
| 43.352381 | 130 | 0.536028 |
831d6ec37b4d0e0a6e4200545a3b9e01d0fe7f0e | 306 | py | Python | api/permissions.py | soltanoff/simple_file_server | 4e825358341fae0564fc498e8374a3d3cdda199e | [
"MIT"
] | 2 | 2018-06-15T11:39:42.000Z | 2019-08-14T20:55:15.000Z | api/permissions.py | soltanoff/simple_file_server | 4e825358341fae0564fc498e8374a3d3cdda199e | [
"MIT"
] | 7 | 2018-12-04T07:35:24.000Z | 2022-03-11T23:12:10.000Z | api/permissions.py | soltanoff/simple_file_server | 4e825358341fae0564fc498e8374a3d3cdda199e | [
"MIT"
] | null | null | null | from rest_framework import permissions
| 27.818182 | 82 | 0.751634 |
831dc5f3bb8ccadfd806896689571f12c96946bc | 712 | py | Python | capstone/rl/utils/linear_annealing.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | [
"MIT"
] | 2 | 2017-04-13T18:31:39.000Z | 2017-05-06T05:14:12.000Z | capstone/rl/utils/linear_annealing.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | [
"MIT"
] | null | null | null | capstone/rl/utils/linear_annealing.py | davidrobles/mlnd-capstone-code | 19ca88aaa137665af147da9bbd0e510829a14cf1 | [
"MIT"
] | null | null | null | from .callbacks import Callback
| 35.6 | 83 | 0.606742 |
8320400ac8c357808906cc6070706d68af6624bc | 6,466 | py | Python | genTraining_recurr.py | lasinger/-3DVideos2Stereo | 9608654ec37d157133c43531ac0002102e86dbab | [
"MIT"
] | 62 | 2020-01-15T10:27:46.000Z | 2022-03-14T09:23:58.000Z | genTraining_recurr.py | lasinger/-3DVideos2Stereo | 9608654ec37d157133c43531ac0002102e86dbab | [
"MIT"
] | 4 | 2020-03-10T08:13:59.000Z | 2021-12-09T09:35:58.000Z | genTraining_recurr.py | lasinger/-3DVideos2Stereo | 9608654ec37d157133c43531ac0002102e86dbab | [
"MIT"
] | 15 | 2020-01-17T02:06:54.000Z | 2022-02-24T06:32:40.000Z | from __future__ import print_function
import numpy as np
import argparse
import glob
import os
import errno
import math
import cv2
from random import shuffle
from shutil import copyfile
# Command-line interface: selects the video list, the frame rates for the
# single-frame and recurrent extraction passes, the chapter-timing file and
# optional white/black lists ("-1" is the default sentinel for "unset").
parser = argparse.ArgumentParser(
    description="create training/test/validation sets from video list"
)
parser.add_argument("--videoListPath", type=str, help="path to videos", required=True)
parser.add_argument(
    "--fpsSingle", type=int, help="fps for single frame processing", default=2
)
parser.add_argument(
    "--numRecurrent", type=int, help="how many recurent steps", default=3
)
parser.add_argument(
    "--fpsRecurrent", type=int, help="fps for reccurent part", default=24
)
parser.add_argument(
    "--chapterTiming",
    type=str,
    help="start and end timing list for all chapters",
    default="timingChapters.txt",
)
parser.add_argument("--name", type=str, help="run name", default="training")
parser.add_argument("--blacklist", type=str, help="ignore video", default="-1")
parser.add_argument(
    "--whitelist",
    type=str,
    help="specifies list of selected videos, if not set all videos are selected",
    default="-1",
)
args = parser.parse_args()
# NOTE(review): ``main`` is not defined in this excerpt -- it is presumably
# declared earlier in the original file; confirm before running standalone.
main()
| 30.64455 | 86 | 0.558769 |
83207ebe69e3bf9bcd3f660b07c8f5bca9f8663b | 2,038 | py | Python | seeq/addons/clustering/__main__.py | seeq12/seeq-clustering | 220793499d5f9669e7d9dde4820af0eee27f84dc | [
"Apache-2.0"
] | 3 | 2021-10-15T05:32:44.000Z | 2021-12-14T16:33:24.000Z | seeq/addons/clustering/__main__.py | seeq12/seeq-clustering | 220793499d5f9669e7d9dde4820af0eee27f84dc | [
"Apache-2.0"
] | 2 | 2021-11-19T17:46:06.000Z | 2022-01-20T06:54:00.000Z | seeq/addons/clustering/__main__.py | seeq12/seeq-clustering | 220793499d5f9669e7d9dde4820af0eee27f84dc | [
"Apache-2.0"
] | null | null | null | import os
import sys
import argparse
from ._install_addon import install_addon
def cli_interface():
    """ Installs Seeq Add-on Tool """
    parser = argparse.ArgumentParser(description='Install Clustering as a Seeq Add-on Tool')
    # (flag, options) pairs; every help text is identical to the installer's.
    option_specs = [
        ('--username', dict(type=str, default=None,
                            help='Username or Access Key of Seeq admin user installing the tool(s) ')),
        ('--seeq_url', dict(type=str,
                            help="Seeq hostname URL with the format https://my.seeq.com/ or https://my.seeq.com:34216")),
        ('--app_url', dict(type=str,
                           help="URL of clustering app notebook with the format e.g. https://my.seeq.com/data-lab/CBA9A827-35A8-4944-8A74-EE7008DC3ED8/notebooks/hb/seeq/addons/clustering/App.ipynb")),
        ('--users', dict(type=str, nargs='*', default=[],
                         help="List of the Seeq users to will have access to the Correlation Add-on Tool,"
                              " default: %(default)s")),
        ('--groups', dict(type=str, nargs='*', default=['Everyone'],
                          help="List of the Seeq groups to will have access to the Correlation Add-on Tool, "
                               "default: %(default)s")),
        ('--password', dict(type=str, default=None,
                            help="Password of Seeq user installing the tool. Must supply a password if not supplying an accesskey for username")),
        ('--sort_key', dict(type=str, default=None,
                            help="A string, typically one character letter. The sort_key determines the order in which the Add-on Tools are displayed in the tool panel, "
                                 "default: %(default)s")),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse CLI options and install the add-on tool.
    args = cli_interface()
    # NOTE(review): ``seeq_url`` and ``app_url`` are parsed by cli_interface
    # but not forwarded here -- confirm install_addon obtains them elsewhere.
    install_addon(
        sort_key=args.sort_key,
        permissions_group=args.groups,
        permissions_users=args.users,
        username=args.username,
        password=args.password
    )
8321d10093f3ed3b6d58be76b8214f867e414822 | 939 | py | Python | utils/customchecks.py | arielbeje/good-bot-name | de1429ea5b653fd8ee88d649452ebef7e7399e5b | [
"MIT"
] | 10 | 2018-04-08T00:02:18.000Z | 2022-01-25T18:34:06.000Z | utils/customchecks.py | arielbeje/good-bot-name | de1429ea5b653fd8ee88d649452ebef7e7399e5b | [
"MIT"
] | 14 | 2018-01-26T16:55:09.000Z | 2021-09-19T11:35:58.000Z | utils/customchecks.py | arielbeje/Good_Bot_Name | de1429ea5b653fd8ee88d649452ebef7e7399e5b | [
"MIT"
] | 14 | 2018-02-14T01:35:08.000Z | 2021-03-30T12:18:03.000Z | """
Code stolen from https://github.com/Rapptz/discord.py
"""
import functools
import discord
from discord.ext import commands
from . import sql
def is_mod():
    """Return a discord.py command check built from ``predicate``."""
    # NOTE(review): ``predicate`` is not defined in this excerpt -- in the
    # original it is presumably a coroutine declared inside this function
    # (the usual discord.py check pattern); confirm against the full source.
    return commands.check(predicate)
| 24.076923 | 140 | 0.652822 |
83226ea13035cf8a8cc076a6baf244dd22963a78 | 3,107 | py | Python | tests/test_lambda_lapsed.py | BostonDSA/actionnetwork-activist-sync | f4b45ec85d59ac252c5572974381e96ec0107add | [
"MIT"
] | 1 | 2021-12-14T17:34:20.000Z | 2021-12-14T17:34:20.000Z | tests/test_lambda_lapsed.py | BostonDSA/actionnetwork-activist-sync | f4b45ec85d59ac252c5572974381e96ec0107add | [
"MIT"
] | null | null | null | tests/test_lambda_lapsed.py | BostonDSA/actionnetwork-activist-sync | f4b45ec85d59ac252c5572974381e96ec0107add | [
"MIT"
] | null | null | null | import json
import importlib
import os
import unittest
from unittest.mock import Mock
from moto import mock_dynamodb2
import boto3
from lambda_local.context import Context
# Test-run environment: these variables are presumably read by the lambda
# module under test at import time -- set them before importing it.
os.environ['ENVIRONMENT'] = 'TEST'
os.environ['LOG_LEVEL'] = 'CRITICAL'
os.environ['DSA_KEY'] = 'TESTKEY'
| 31.07 | 84 | 0.633408 |
832283ba27d3f56129d5cb0cef3c3b8a60934088 | 2,974 | py | Python | tests/test_motif_finder.py | gaybro8777/RStudio-GitHub-Analysis | 014195c90ca49f64d28c9fcd96d128301ff65157 | [
"BSD-2-Clause"
] | 2 | 2020-09-13T11:55:13.000Z | 2021-05-23T01:29:19.000Z | tests/test_motif_finder.py | gaybro8777/RStudio-GitHub-Analysis | 014195c90ca49f64d28c9fcd96d128301ff65157 | [
"BSD-2-Clause"
] | null | null | null | tests/test_motif_finder.py | gaybro8777/RStudio-GitHub-Analysis | 014195c90ca49f64d28c9fcd96d128301ff65157 | [
"BSD-2-Clause"
] | 2 | 2020-10-17T20:18:37.000Z | 2021-05-23T01:29:25.000Z | """
This script tests the classes and functions from motif_finder.py.
Parameters
----------
None
Returns
-------
Assertion errors if tests fail
"""
import sys
import random
import pickle
import networkx as nx
from github_analysis.big_cloud_scratch import git_graph
from github_analysis.data_layer import getCommitsByProjectIds
from github_analysis.cluster import get_embedding_clusters
from github_analysis.motif_finder import *
# Shared fixtures for the tests below: cluster the embeddings with a fixed
# seed (deterministic runs), build the commit graph for cluster 0, and wrap
# it in the MotifFinder under test.
clusters = get_embedding_clusters(random_state=0)
projects_cluster = getCommitsByProjectIds(clusters[0])
G = git_graph(projects_cluster)
mf = MotifFinder(G)
# Unit tests
def test_sample_initial_node_output_type():
    """MotifFinder.sample_initial_node must yield a plain ``int`` node id."""
    sampled = mf.sample_initial_node()
    assert type(sampled) == int
def test_sample_initial_node_output():
    """The sampled starting node must belong to the fixture graph ``G``."""
    sampled = mf.sample_initial_node()
    assert sampled in G
def test_get_random_child_output_type():
    """MotifFinder.get_random_child must yield an ``int`` node id."""
    child = mf.get_random_child(355738534)
    assert type(child) == int
def test_get_random_child_no_children():
    """A node without successors must map to ``None``."""
    child = mf.get_random_child(139371373)
    assert child is None
def test_get_random_child_output():
    """The returned child must be a successor of the node it was drawn from."""
    parent = mf.sample_initial_node()
    assert mf.get_random_child(parent) in G.successors(parent)
def test_get_sample_motif_bad_input():
    """A non-integer ``k`` must make get_sample_motif raise ``TypeError``."""
    raised = False
    try:
        mf.get_sample_motif('5')
    except TypeError:
        raised = True
    if not raised:
        # No TypeError came out of the call: surface the failure ourselves.
        raise TypeError
    return True
def test_get_sample_motif_output_type():
    """get_sample_motif must return a networkx directed graph."""
    motif = mf.get_sample_motif(5)
    assert type(motif) == nx.classes.digraph.DiGraph
def test_get_sample_motif_output():
    """Every node of the sampled motif must also exist in the fixture graph."""
    motif = mf.get_sample_motif(5)
    for node in motif:
        if node not in G:
            raise ValueError('Subgraph doesnt contain same nodes as graph')
def test_get_motif_samples_bad_input():
    """Non-integer ``k``/``num_samples`` must make get_motif_samples raise ``TypeError``."""
    raised = False
    try:
        mf.get_motif_samples('5', '5')
    except TypeError:
        raised = True
    if not raised:
        # No TypeError came out of the call: surface the failure ourselves.
        raise TypeError
    return True
def test_get_motif_samples_output_type():
    """get_motif_samples must return a ``dict``."""
    samples = mf.get_motif_samples(5, 5)
    assert type(samples) == dict
# def test_get_motifs | 28.596154 | 115 | 0.751849 |
83245e358084afd5d7f959c3a7aebfc9ab55bb73 | 1,107 | py | Python | torrent.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | 4 | 2016-05-09T13:42:23.000Z | 2021-11-29T15:16:11.000Z | torrent.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | null | null | null | torrent.py | fishy/scripts | 91abd0451cae916d885f4ff0fd2f69d335d37cf3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import os
from types import StringType
# get bencode package from http://github.com/fishy/scripts/downloads
from bencode.bencode import bencode, bdecode, BTFailure
# NOTE(review): Python 2 script (print statements, types.StringType); it
# will not run under Python 3 as-is.
# Usage: torrent.py <torrent_file> [tracker_url ...]
# With only a file argument the decoded metainfo is printed; extra
# arguments are tracker URLs appended to the announce list in place.
try :
	torrent = sys.argv[1]
except IndexError :
	print "Usage: \"%s <torrent_file> [tracker_url]\" to show torrent info (without tracker_url), or to add tracker(s)" % sys.argv[0]
	sys.exit()
# Read the whole .torrent file and bdecode it into a metainfo dict.
size = os.stat(torrent).st_size
file = open(torrent, "rb")  # NOTE(review): shadows the py2 builtin ``file``
data = file.read(size)
file.close()
info = bdecode(data)
# No tracker arguments: just dump the decoded metainfo and stop.
if len(sys.argv) == 2 :
	print info
	sys.exit()
if 'announce-list' not in info :
	# Promote the single 'announce' URL into a one-tier announce-list,
	# then append any command-line trackers that are not already present.
	list = [info['announce']]  # NOTE(review): shadows the builtin ``list``
	for i in range(len(sys.argv)-2) :
		tracker = sys.argv[i+2]
		if tracker not in list :
			list.append(tracker)
	print list
	info['announce-list'] = [list]
else :
	# Append the new trackers to the first tier of the existing list,
	# normalising a bare string tier into a one-element list first.
	list = info['announce-list'][0]
	if type(list) == StringType :
		list = [list]
	for i in range(len(sys.argv)-2) :
		tracker = sys.argv[i+2]
		if tracker not in list :
			list.append(tracker)
	print list
	info['announce-list'][0] = list
# Re-encode and overwrite the original torrent file in place.
writedata = bencode(info)
file = open(torrent, "wb")
file.write(writedata)
file.close()
| 23.0625 | 130 | 0.68925 |
8324b2ef51cf900faa05fab3ea2e0b781034e744 | 4,786 | py | Python | test/test_mdsspath.py | jpevans/mdssdiff | 88573bdc89b00b023ce59c9b0fa19c6e6be760ce | [
"Apache-2.0"
] | 1 | 2019-11-05T00:34:20.000Z | 2019-11-05T00:34:20.000Z | test/test_mdsspath.py | jpevans/mdssdiff | 88573bdc89b00b023ce59c9b0fa19c6e6be760ce | [
"Apache-2.0"
] | 13 | 2017-03-08T03:37:43.000Z | 2020-06-19T01:03:04.000Z | test/test_mdsspath.py | jpevans/mdssdiff | 88573bdc89b00b023ce59c9b0fa19c6e6be760ce | [
"Apache-2.0"
] | 2 | 2020-09-14T12:04:43.000Z | 2020-11-29T22:16:13.000Z | #!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Aidan Heerdegen <aidan.heerdegen@anu.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
import sys
import os
import shutil
import shlex
import subprocess
import pdb #; pdb.set_trace()
from mdssdiff import mdsspath
from mdssdiff import mdssdiff
# Fixture layout: a three-level directory tree "1/2/3" with files at each
# level; ``paths`` lists each fixture file as its path components.
dirs = ["1","2","3"]
dirtree = os.path.join(*dirs)
paths = [ ["1","lala"], ["1","po"], ["1","2","Mickey"], ["1","2","Minny"], ["1","2","Pluto"], ["1","2","3","Ren"], ["1","2","3","Stimpy"] ]
remote = "remote"
dirtreeroot = dirs[0]
verbose=False
prefix='test_mdss'
# Directory name assumed not to exist anywhere on the remote side.
dumbname = 'nowayisthereadirectorycalledthis'
# Test if we have a working mdss to connect to
try:
    # Setting DEBUGLOCAL in the environment forces the local-command
    # fallback even when mdss would be reachable.
    if 'DEBUGLOCAL' in os.environ:
        raise ValueError('A very specific bad thing happened')
    project=os.environ['PROJECT']
    mdsspath.mdss_ls(".",project)
except:
    # Monkey-patch to use local file commands if we don't
    # (the bare except is deliberate best-effort here: any failure --
    # missing PROJECT, missing mdss binary, forced DEBUGLOCAL -- selects
    # the local fallback commands below).
    print("\n\n!!! No mdss: Monkey patching to use local commands !!!\n")
    mdsspath._mdss_ls_cmd = 'ls -l --time-style=+"%Y-%m-%d %H:%M ___ "'
    mdsspath._mdss_put_cmd = 'cp'
    mdsspath._mdss_get_cmd = 'cp'
    mdsspath._mdss_mkdir_cmd = 'mkdir'
    mdsspath._mdss_rm_cmd = 'rm'
    mdsspath._mdss_rmdir_cmd = 'rmdir'
    project=''
def test_localmtime():
    """localmtime should zero out the seconds field of the returned datetime."""
    target = os.path.join(*paths[2])
    assert mdsspath.localmtime(target).second == 0
| 31.906667 | 139 | 0.688884 |
8325f8d80722ee18d5ca87486dae7d369fe6e6ee | 1,192 | py | Python | applications/trilinos_application/python_scripts/PressureMultiLevelSolver.py | AndreaVoltan/MyKratos7.0 | e977752722e8ef1b606f25618c4bf8fd04c434cc | [
"BSD-4-Clause"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | applications/TrilinosApplication/python_scripts/PressureMultiLevelSolver.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | applications/TrilinosApplication/python_scripts/PressureMultiLevelSolver.py | Jacklwln/Kratos | 12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from KratosMultiphysics import *
from KratosMultiphysics.TrilinosApplication import *
| 35.058824 | 134 | 0.723993 |
83263868a21483660a3b2d0dc61af080e81df193 | 3,960 | py | Python | Hood/views.py | Gakur/NeiApp | 2a9955a23877de10ed3436fd25d56208bca22887 | [
"MIT"
] | null | null | null | Hood/views.py | Gakur/NeiApp | 2a9955a23877de10ed3436fd25d56208bca22887 | [
"MIT"
] | null | null | null | Hood/views.py | Gakur/NeiApp | 2a9955a23877de10ed3436fd25d56208bca22887 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse ,HttpResponseRedirect, Http404
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from .models import *
from .forms import UserRegisterForm
from django.contrib import messages
from django.contrib.auth import authenticate, login , logout
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from . decorators import unauthenticated_user
from .forms import *
# Create your views here.
# ============ Home Page
# ============ View for list of neighbour hoods to display
# =========== For Each neighbour hood
## ===Add new Business
def search(request):
    """Render the business search page.

    Looks up a single ``Business`` whose name contains the ``business``
    query parameter.  On a hit the match is rendered; on a miss (or an
    invalid value) a friendly message is shown; with no query the bare
    search page is rendered.
    """
    try:
        if 'business' in request.GET and request.GET['business']:
            search_term = request.GET.get('business')
            # NOTE(review): .get() raises MultipleObjectsReturned when several
            # names match the substring; that case is not handled here.
            searched_business = Business.objects.get(name__icontains=search_term)
            return render(request, 'search.html', {'searched_business': searched_business})
    except (ValueError, Business.DoesNotExist):
        message = "Oops! We couldn't find the business you're looking for."
        return render(request, 'search.html', {'message': message})
    # Bug fix: the original fall-through return passed a set containing a
    # dict (``{{...}}``), referenced names that are undefined on this path,
    # and gave render() too many positional arguments -- it always raised.
    # With no (or an empty) search term, just render the bare search page.
    return render(request, 'search.html', {})
| 38.076923 | 146 | 0.693687 |
832672b5a45d6ed1bcae4c5d5f38bb3800726d8c | 3,325 | py | Python | ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 58 | 2015-01-11T09:05:15.000Z | 2022-03-17T23:44:07.000Z | ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 1,467 | 2015-01-01T16:47:44.000Z | 2022-02-28T16:51:20.000Z | ckanext-hdx_package/ckanext/hdx_package/tests/test_metadata_fields.py | OCHA-DAP/hdx-ckan | 202e0c44adc4ea8d0b90141e69365b65cce68672 | [
"Apache-2.0"
] | 17 | 2015-05-06T14:04:21.000Z | 2021-11-11T19:58:16.000Z | '''
Created on May 16, 2014
@author: alexandru-m-g
'''
import json
import webtest
import logging
import ckan.plugins as p
import ckan.lib.create_test_data as ctd
import ckan.lib.search as search
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
import ckan.tests.legacy as legacy_tests
from ckan.config.middleware import make_app
from pylons import config
import ckanext.hdx_theme.tests.hdx_test_base as hdx_test_base
import ckanext.hdx_package.helpers.caching as caching
log = logging.getLogger(__name__)
| 34.635417 | 123 | 0.62797 |
832770b6da611f24d004cf5564b612a2e18401f6 | 524 | py | Python | inject.py | edouardpoitras/process_injection_example | 0b22488a83a5516788411e4974090d1df2bd6494 | [
"MIT"
] | 4 | 2021-05-01T06:56:14.000Z | 2022-01-24T10:00:31.000Z | inject.py | edouardpoitras/process_injection_example | 0b22488a83a5516788411e4974090d1df2bd6494 | [
"MIT"
] | null | null | null | inject.py | edouardpoitras/process_injection_example | 0b22488a83a5516788411e4974090d1df2bd6494 | [
"MIT"
] | 1 | 2021-04-30T16:52:11.000Z | 2021-04-30T16:52:11.000Z | import sys
import psutil
from pyinjector import inject
if len(sys.argv) != 3:
print("Usage: python inject.py <process-name> <shared-library>")
exit()
_, process_name, shared_library = sys.argv
for process in psutil.process_iter():
if process.name() == process_name:
print(f"Found {process_name} - injecting {shared_library} into PID {process.pid}")
inject(process.pid, shared_library)
print("Injected successfully")
exit()
print(f"Unable to find process named {process_name}") | 29.111111 | 90 | 0.696565 |
8327de2cbfa7508d6d7ec9cb75195ac2a23e5a16 | 3,235 | py | Python | infer.py | yanivbl6/deep-griffinlim-iteration | b96165c0c11e00bff1e033f93aeca6fafe9833d3 | [
"MIT"
] | null | null | null | infer.py | yanivbl6/deep-griffinlim-iteration | b96165c0c11e00bff1e033f93aeca6fafe9833d3 | [
"MIT"
] | null | null | null | infer.py | yanivbl6/deep-griffinlim-iteration | b96165c0c11e00bff1e033f93aeca6fafe9833d3 | [
"MIT"
] | 1 | 2020-10-12T15:31:27.000Z | 2020-10-12T15:31:27.000Z |
# noinspection PyUnresolvedReferences
##import matlab.engine
import os
import shutil
from argparse import ArgumentError, ArgumentParser
from torch.utils.data import DataLoader
from dataset import ComplexSpecDataset
from hparams1 import hp
from train import Trainer
from pathlib import Path
from os import listdir
parser = ArgumentParser()
parser.add_argument('-l','--list',action='store_true')
parser.add_argument('-n','--network',type=str)
parser.add_argument('-m','--mel2spec',type=str)
parser.add_argument('-d','--device',type=int, default=0)
parser.add_argument('--dest',type=str, default="../result/inference")
parser.add_argument('--network_results',type=str, default="../result/ngc_degli")
parser.add_argument('--mel2spec_results',type=str, default="../result/mel2spec")
parser.add_argument('-p','--perf', action='store_true')
parser.add_argument('-b','--batch_size', type=int, default=16)
args = parser.parse_args()
##import pdb; pdb.set_trace()
if args.list:
    # List every trained network run (with its saved checkpoint epochs)
    # and every mel2spec inference dump available on disk.
    print('-' * 30)
    print("Available Networks:")
    for entry in listdir(args.network_results):
        run_dir = "%s/%s" % (args.network_results, entry)
        if not os.path.isdir(run_dir):
            continue
        train_dir = "%s/train" % run_dir
        if not os.path.exists(train_dir):
            continue
        # Checkpoints are files named "<epoch>.pt"; collect the epochs.
        epochs = sorted(
            int(name.split('.')[0]) for name in listdir(train_dir) if name[-2:] == "pt"
        )
        if epochs:
            print("%s : %s" % (entry, str(epochs)))
    print('-' * 30)
    print("Available Mel2Spec infered data:")
    for entry in listdir(args.mel2spec_results):
        run_dir = "%s/%s" % (args.mel2spec_results, entry)
        if not os.path.isdir(run_dir):
            continue
        # Inference dumps are directories named "infer_<epoch>".
        epochs = sorted(
            int(name.split('_')[1]) for name in listdir(run_dir) if name.split('_')[0] == "infer"
        )
        if epochs:
            print("%s : %s" % (entry, str(epochs)))
    print('-' * 30)
# NOTE(review): the indentation of this region is inconsistent in the
# source; the structure below follows the data flow (everything after the
# check needs networkDir/networkEpoch, so it belongs inside the branch).
if args.network is not None:  # idiomatic form of `not ... is None` (PEP 8 / E714)
    # --network is given as "<run-dir>:<epoch>".
    net_split = args.network.split(":")
    networkDir = net_split[0]
    networkEpoch = net_split[1]

    # Sub-folder selects between a timing run and a quality run.
    sub = "perf" if args.perf else "quality"

    if args.mel2spec is not None:
        # --mel2spec is given as "<run-dir>:<epoch>".
        mel_split = args.mel2spec.split(":")
        mel2specDir = mel_split[0]
        mel2specEpoch = mel_split[1]
        mel_dest = f"{args.mel2spec_results}/{mel2specDir}/infer_{mel2specEpoch}"
        full_dest = f"{args.dest}/{sub}/{networkDir}_E{networkEpoch}_mel2spec_{mel2specDir}_E{mel2specEpoch}"
    else:
        # No mel2spec run given: fall back to the baseline feature data.
        mel_dest = "~/deep-griffinlim-iteration/mel2spec/baseline_data"
        full_dest = f"{args.dest}/{sub}/{networkDir}_E{networkEpoch}_baseline"

    os.makedirs(args.dest, exist_ok=True)

    command = "test"
    if args.perf:
        # Performance runs encode the batch size in the output folder name.
        full_dest = full_dest + "_B%d" % args.batch_size
        command = "perf"

    # Print the main.py invocation that reproduces this evaluation.
    cmd = f"python main.py --{command} --device {args.device} --from {networkEpoch} --logdir {args.network_results}/{networkDir} --path_feature {mel_dest} --dest_test {full_dest} --batch_size {args.batch_size}"
    print(cmd)
8329042f7336cfa333d46696e6595794b06050cc | 11,603 | py | Python | Disc_train.py | avinsit123/kpgen_GAN | e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3 | [
"MIT"
] | 1 | 2020-05-28T23:18:51.000Z | 2020-05-28T23:18:51.000Z | Disc_train.py | avinsit123/kpgen_GAN | e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3 | [
"MIT"
] | null | null | null | Disc_train.py | avinsit123/kpgen_GAN | e5ca04b9c6e43f8049dcf8e5b8fa44ab4e4702c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 15:10:45 2019
@author: r17935avinash
"""
################################ IMPORT LIBRARIES ###############################################################
import torch
import numpy as np
import pykp.io
import torch.nn as nn
from utils.statistics import RewardStatistics
from utils.time_log import time_since
import time
from sequence_generator import SequenceGenerator
from utils.report import export_train_and_valid_loss, export_train_and_valid_reward
import sys
import logging
import os
from evaluate import evaluate_reward
from pykp.reward import *
import math
EPS = 1e-8
import argparse
import config
import logging
import os
import json
from pykp.io import KeyphraseDataset
from pykp.model import Seq2SeqModel
from torch.optim import Adam
import pykp
from pykp.model import Seq2SeqModel
import train_ml
import train_rl
from utils.time_log import time_since
from utils.data_loader import load_data_and_vocab
from utils.string_helper import convert_list_to_kphs
import time
import numpy as np
import random
from torch import device
from hierarchal_attention_Discriminator import Discriminator
from torch.nn import functional as F
#####################################################################################################
#def Check_Valid_Loss(valid_data_loader,D_model,batch,generator,opt,perturb_std):
##### TUNE HYPERPARAMETERS ##############
## batch_reward_stat, log_selected_token_dist = train_one_batch(batch, generator, optimizer_rl, opt, perturb_std)
#########################################################
######################################
| 46.78629 | 594 | 0.613807 |
832ab6e2559ee453e6521a8fd912db337cc8fa7d | 4,568 | py | Python | VQ3D/camera_pose_estimation/get_median_intrinsics.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 27 | 2021-10-16T02:39:17.000Z | 2022-03-31T11:16:11.000Z | VQ3D/camera_pose_estimation/get_median_intrinsics.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 5 | 2022-03-23T04:53:36.000Z | 2022-03-29T23:39:07.000Z | VQ3D/camera_pose_estimation/get_median_intrinsics.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 13 | 2021-11-25T19:17:29.000Z | 2022-03-25T14:01:47.000Z | import os
import sys
import json
import argparse
import numpy as np
sys.path.append('Camera_Intrinsics_API/')
from get_camera_intrinsics import CameraIntrinsicsHelper
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        type=str,
        default='data/videos_sfm/',
        help="COLMAP output folder of videos",
    )
    parser.add_argument(
        "--input_dir_greedy",
        type=str,
        default='data/videos_sfm_greedy/',
        help="Folder for the COLMAP outputs - greedy.",
    )
    parser.add_argument(
        "--annotation_dir",
        type=str,
        default='data/v1/annotations/',
        help="annotation folder. Must contain the vq3d_<split>.json files.",
    )
    parser.add_argument(
        "--output_filename",
        type=str,
        default='data/v1/scan_to_intrinsics.json',
    )
    args = parser.parse_args()

    # Map each video uid to its scan uid using the VQ3D annotation files.
    dataset = {}
    for split in ['train', 'val']:
        with open(os.path.join(args.annotation_dir,
                               f'vq3d_{split}.json'), 'r') as fp:
            annotations = json.load(fp)
        for video in annotations['videos']:
            dataset[video['video_uid']] = video['scan_uid']

    helper = CameraIntrinsicsHelper()

    cpt = 0  # videos with no cameras.txt in either COLMAP output folder
    # all_intrinsics[scan_uid][(width, height)] -> list of (f, cx, cy, k1, k2)
    all_intrinsics = {}
    for video_uid in os.listdir(args.input_dir):
        scan_uid = dataset[video_uid]
        # Prefer the regular SfM output; fall back to the greedy one.
        intrinsic_txt = os.path.join(args.input_dir,
                                     video_uid,
                                     'sparse',
                                     '0',
                                     'cameras.txt')
        if not os.path.isfile(intrinsic_txt):
            intrinsic_txt = os.path.join(args.input_dir_greedy,
                                         video_uid,
                                         'sparse',
                                         '0',
                                         'cameras.txt')
        if not os.path.isfile(intrinsic_txt):
            cpt += 1
            continue

        # Single parse/append path (the original duplicated this block in
        # both branches of the file-existence check).
        intrinsics = helper.parse_colmap_intrinsics(intrinsic_txt)
        token = (intrinsics['width'], intrinsics['height'])
        all_intrinsics.setdefault(scan_uid, {}).setdefault(token, []).append(
            (
                intrinsics['f'],
                intrinsics['cx'],
                intrinsics['cy'],
                intrinsics['k1'],
                intrinsics['k2'],
            )
        )

    # Per scan and per resolution, take the median of every parameter.
    outputs = {}
    for scan_uid, per_resolution in all_intrinsics.items():
        print(' ')
        print('Scan uid: ', scan_uid)
        outputs[scan_uid] = {}
        for resolution, values in per_resolution.items():
            print(' -- resolution: ', resolution)
            resolution_str = str(resolution)
            outputs[scan_uid][resolution_str] = {
                'f': np.median([float(i[0]) for i in values]),
                'cx': np.median([float(i[1]) for i in values]),
                'cy': np.median([float(i[2]) for i in values]),
                'k1': np.median([float(i[3]) for i in values]),
                'k2': np.median([float(i[4]) for i in values]),
            }
            for i in values:
                print(' -- -- -- : ', i)
            print(' ')
            print(' -- -- -- : ',
                  outputs[scan_uid][resolution_str]['f'],
                  outputs[scan_uid][resolution_str]['cx'],
                  outputs[scan_uid][resolution_str]['cy'],
                  outputs[scan_uid][resolution_str]['k1'],
                  outputs[scan_uid][resolution_str]['k2'],
                  )

    # BUG FIX: the original wrote to the undefined name `output_filename`,
    # raising NameError at runtime; the CLI option is args.output_filename.
    with open(args.output_filename, 'w') as fp:
        json.dump(outputs, fp)
| 34.089552 | 76 | 0.479203 |
832b2f005e0af85ddb6e44118b2f277f3ecf6b06 | 571 | py | Python | Dataset/Leetcode/valid/78/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/78/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/78/455.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null |
undefined
for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); }
| 27.190476 | 139 | 0.576182 |
832b736a0869d3dc222dea9d11955ffc80809ec5 | 1,322 | py | Python | IDS/IDS/urls.py | YashwantChauhan/SDL | 0d48dfa129d72316f35967df98ce2f1e6f949fc5 | [
"MIT"
] | 2 | 2020-12-24T15:13:49.000Z | 2021-06-05T15:43:58.000Z | IDS/IDS/urls.py | YashwantChauhan/SDL | 0d48dfa129d72316f35967df98ce2f1e6f949fc5 | [
"MIT"
] | 2 | 2021-12-28T14:06:20.000Z | 2021-12-28T14:25:44.000Z | IDS/IDS/urls.py | YashwantChauhan/SDL | 0d48dfa129d72316f35967df98ce2f1e6f949fc5 | [
"MIT"
] | null | null | null | """IDS URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from Apps.home import views as home_views
from Apps.Signup import views as Signup_views
from Apps.Dashboard import urls as Dash_urls
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('' , home_views.home , name='home' ),
path('Signin/' , Signup_views.signin , name='Signin' ),
path('Signup/' , Signup_views.signup , name='Signup'),
path('Signout/', Signup_views.logout , name='logout'),
path('Dashboard/', include(Dash_urls.urlpatterns) ),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.882353 | 77 | 0.723147 |
832ba0a49717dd57c782af2a65a1680399effe7f | 1,574 | py | Python | setup.py | preetmishra/nooz | e7ee6958bac7edcc85ab157b6dbe07071fde887c | [
"MIT"
] | 7 | 2020-03-18T06:30:55.000Z | 2021-04-06T16:38:25.000Z | setup.py | preetmishra/nooz | e7ee6958bac7edcc85ab157b6dbe07071fde887c | [
"MIT"
] | 1 | 2020-06-29T16:12:45.000Z | 2020-06-29T16:12:45.000Z | setup.py | preetmishra/nooz | e7ee6958bac7edcc85ab157b6dbe07071fde887c | [
"MIT"
] | 2 | 2021-03-21T02:52:39.000Z | 2021-05-26T08:34:58.000Z | import codecs
import os
from setuptools import find_packages, setup
linting_deps = [
'mypy==0.761',
'pycodestyle==2.5.0',
]
setup(
name='nooz',
version='0.1.0',
description='Trending headlines right in your terminal.',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/preetmishra/nooz',
author='Preet Mishra',
author_email='ipreetmishra@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: End Users/Desktop',
'Topic :: Internet',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Operating System :: OS Independent',
],
python_requires='>=3.5, <=3.8',
keywords='news',
packages=find_packages(),
zip_safe=True,
entry_points={
'console_scripts': [
'nooz = nooz.run:main',
],
},
extras_require={
'linting': linting_deps,
},
install_requires=[
'mypy_extensions>=0.4',
'requests>=2.23.0',
'urwid==2.1.0',
'urllib3>=1.25.8'
],
) | 24.984127 | 77 | 0.584498 |
832d8379190a88d84a40dc951ecd801770c36c11 | 11,454 | py | Python | deeplodocus/callbacks/saver.py | amelgrenier/deeplodocus | 0a017faae098cddc436e82e83b85e66caf18b522 | [
"MIT"
] | null | null | null | deeplodocus/callbacks/saver.py | amelgrenier/deeplodocus | 0a017faae098cddc436e82e83b85e66caf18b522 | [
"MIT"
] | null | null | null | deeplodocus/callbacks/saver.py | amelgrenier/deeplodocus | 0a017faae098cddc436e82e83b85e66caf18b522 | [
"MIT"
] | null | null | null | from decimal import Decimal
import torch
from torch.nn import Module
import os
from deeplodocus.utils.notification import Notification
from deeplodocus.utils.flags.save import *
from deeplodocus.utils.flags.event import *
from deeplodocus.utils.flags.notif import *
from deeplodocus.utils.flags.ext import DEEP_EXT_PYTORCH, DEEP_EXT_ONNX
from deeplodocus.utils.flags.msg import DEEP_MSG_MODEL_SAVED, DEEP_MSG_SAVER_IMPROVED, DEEP_MSG_SAVER_NOT_IMPROVED
from deeplodocus.core.metrics.over_watch_metric import OverWatchMetric
from deeplodocus.brain.signal import Signal
from deeplodocus.brain.thalamus import Thalamus
from deeplodocus.utils.generic_utils import get_corresponding_flag
from deeplodocus.utils.flags.flag_lists import DEEP_LIST_SAVE_SIGNAL, DEEP_LIST_SAVE_FORMATS
| 34.293413 | 115 | 0.527589 |
832debbd59e85b8ca2ff3010595d819d90400d10 | 2,812 | py | Python | mridc/collections/reconstruction/models/cascadenet/ccnn_block.py | jerke123/mridc | 7e22ac50f8df73f2305d61979da2a5d59874546e | [
"Apache-2.0"
] | null | null | null | mridc/collections/reconstruction/models/cascadenet/ccnn_block.py | jerke123/mridc | 7e22ac50f8df73f2305d61979da2a5d59874546e | [
"Apache-2.0"
] | null | null | null | mridc/collections/reconstruction/models/cascadenet/ccnn_block.py | jerke123/mridc | 7e22ac50f8df73f2305d61979da2a5d59874546e | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
__author__ = "Dimitrios Karkalousos"
import torch
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
| 30.565217 | 100 | 0.604908 |
832fa03411fdc8cba2cd96e51a219e3ef9e4283a | 975 | py | Python | main.py | BL-Lac149597870/drugVQA | 604703d66457c958ddc9eeb35268391edb6c4996 | [
"MIT"
] | null | null | null | main.py | BL-Lac149597870/drugVQA | 604703d66457c958ddc9eeb35268391edb6c4996 | [
"MIT"
] | null | null | null | main.py | BL-Lac149597870/drugVQA | 604703d66457c958ddc9eeb35268391edb6c4996 | [
"MIT"
] | null | null | null | '''
Author: QHGG
Date: 2021-02-27 13:42:43
LastEditTime: 2021-03-01 23:26:38
LastEditors: QHGG
Description:
FilePath: /drugVQA/main.py
'''
import torch
from sklearn import metrics
import warnings
warnings.filterwarnings("ignore")
torch.cuda.set_device(0)
print('cuda size == 1')
from trainAndTest import *
import time
def main():
    """
    Train the model and persist the loss, accuracy and test-result
    series to timestamped log files under logs/.
    """
    losses, accs, testResults = train(trainArgs)
    # One log file per metric series, each prefixed with the run timestamp.
    for suffix, series in (("losses.txt", losses),
                           ("accs.txt", accs),
                           ("testResults.txt", testResults)):
        with open("logs/" + timeLable() + suffix, "w") as f:
            f.writelines(str(entry) + '\n' for entry in series)
if __name__ == "__main__":
main()
| 28.676471 | 87 | 0.645128 |
83300c0b6a409b6ab5643fe5a44ff448c026f263 | 4,773 | py | Python | networkx/algorithms/tests/test_cuts.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T12:01:15.000Z | 2021-02-02T03:46:23.000Z | networkx/algorithms/tests/test_cuts.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 1 | 2020-11-15T23:07:09.000Z | 2020-11-15T23:07:09.000Z | networkx/algorithms/tests/test_cuts.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 2 | 2020-12-21T11:41:13.000Z | 2021-01-08T17:09:21.000Z | """Unit tests for the :mod:`networkx.algorithms.cuts` module."""
import networkx as nx
| 29.83125 | 75 | 0.550178 |
8330e631a49e6776f2efa9742d5ed0e6a7e38620 | 6,556 | py | Python | src/utility.py | bbookman/demand | 47101843ab84f4161e618edfa5a8e8fea2e1d955 | [
"MIT"
] | null | null | null | src/utility.py | bbookman/demand | 47101843ab84f4161e618edfa5a8e8fea2e1d955 | [
"MIT"
] | null | null | null | src/utility.py | bbookman/demand | 47101843ab84f4161e618edfa5a8e8fea2e1d955 | [
"MIT"
] | null | null | null | import sys, re, pdb
from bs4 import BeautifulSoup as beautiful
from datetime import datetime
import requests, logging
import timeout_decorator, pandas as pd
import socket, urllib3
def build_site_url(template, title, zipcode='', radius='90', age='60'):
    """Fill a job-site url template with the given query parameters.

    template: str, url template with named placeholders, e.g.
        'http://indeed.com?{title}&zip={zipcode}&r={radius}&age={age}'
    title: str, job title already escaped for the target site,
        e.g. 'software+quality+engineer'
    zipcode: str, ZIP code to search around
    radius: str, search radius in miles, e.g. '50'
    age: str, maximum age of the posting in days, e.g. '30'

    Returns the formatted url string.
    """
    site_url = template.format(title=title, zipcode=zipcode, radius=radius, age=age)
    print_and_log(f'Built site url: {site_url}')
    return site_url
def build_job_title(title, title_separator):
    """Join the words of a job title with a site-specific separator.

    title: str, plain job title, e.g. 'software quality engineer'
    title_separator: str, separator the target site expects between
        words, e.g. '+' or '%20'

    Returns the separator-joined title, e.g. 'software+quality+engineer'.

    Fix: the original appended the separator after every word and then
    stripped a single character with result[:-1], which corrupts the
    output for multi-character separators such as '%20'.  str.join
    inserts the separator only between words, so no stripping is needed.
    """
    return title_separator.join(title.split())
def add_site_id(site_id, ref):
    """Turn a site-relative href into an absolute url on <site_id>.com."""
    print_and_log('Adding site id to href for complete url')
    return 'http://{}.com{}'.format(site_id, ref)
def title_meets_threshold(title, title_word_values, threshold=90):
    """Score *title* against weighted keywords; True if the sum reaches *threshold*."""
    print('Evaluating job title against threshold')
    if not title:
        return False
    # Normalize: lowercase, then turn intra-word '&' and '-' into spaces.
    # NOTE(review): [A-z] also matches the characters between 'Z' and 'a'
    # ([\]^_`); pattern kept as-is to preserve behavior.
    normalized = re.sub(r"(?<=[A-z])\&(?=[A-z])", " ", title.lower())
    normalized = re.sub(r"(?<=[A-z])\-(?=[A-z])", " ", normalized)
    score = sum(
        value for word, value in title_word_values.items()
        if word.lower() in normalized
    )
    if score >= threshold:
        print_and_log(f'Met threshold: {title}')
        return True
    print_and_log(f'Not met threshold: {title}')
    return False
def like(string):
    """
    Return a compiled regular expression that matches the given
    string with any prefix and postfix, e.g. if string = "hello",
    the returned regex matches r".*hello.*"
    """
    text = string if isinstance(string, str) else str(string)
    pattern = MATCH_ALL + re.escape(text) + MATCH_ALL
    return re.compile(pattern, flags=re.DOTALL)
def set_log(filename, level):  # todo: expose named level options
    """Configure the root logger to write records to *filename* at *level*."""
    logging.basicConfig(filename=filename, level=level)
def report(e: Exception):
    """Record the given exception (message plus traceback) in the log."""
    logging.exception(str(e))
def print_and_log(text, level='info'):
    """Echo *text* to stdout and append it to the log at *level*.

    Supported levels are 'debug', 'info' and 'warning'; any other level
    name is printed but not logged (matching the original behavior).
    """
    print(text)
    log_fns = {
        'debug': logging.debug,
        'info': logging.info,
        'warning': logging.warning,
    }
    log_fn = log_fns.get(level)
    if log_fn is not None:
        log_fn(text)
8331c341859f7ceb90f3dad9bbc18d41377413e5 | 1,940 | py | Python | section_11_(api)/dicts_and_lists.py | hlcooll/python_lessons | 3790f98cbc5a0721fcfc9e5f52ba79a64878f362 | [
"MIT"
] | 425 | 2015-01-13T03:19:10.000Z | 2022-03-13T00:34:44.000Z | section_11_(api)/dicts_and_lists.py | Supercodero/python-lessons | 38409c318e7a62d30b2ffd68f8a7a5a5ec00778d | [
"MIT"
] | null | null | null | section_11_(api)/dicts_and_lists.py | Supercodero/python-lessons | 38409c318e7a62d30b2ffd68f8a7a5a5ec00778d | [
"MIT"
] | 178 | 2015-01-08T05:01:05.000Z | 2021-12-02T00:56:58.000Z | # Dictionaries and lists, together
# Loading from https://raw.githubusercontent.com/shannonturner/education-compliance-reports/master/investigations.json
investigations = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-112.073032,
33.453527
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " AZ ",
"name": "Arizona State University"
}
},
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-121.645734,
39.648248
]
},
"properties": {
"marker-symbol": "marker",
"marker-color": "#D4500F",
"address": " CA ",
"name": "Butte-Glen Community College District"
}
},
]
}
# The first level is a dictionary with two keys: type and features
# type's value is a string: FeatureCollection
# features' value is a list of dictionaries
# We're going to focus on the features list.
# Each item in the features list is a dictionary that has three keys: type, geometry, and properties
# If we wanted to access all of the properies for the first map point, here's how:
print investigations['features'][0]['properties']
# list of dictionaries ^ ^ ^
# first map point | | properties
# {
# "marker-symbol": "marker",
# "marker-color": "#D4500F",
# "address": " AZ ",
# "name": "Arizona State University"
# }
# As we see above, properties is itself a dictionary
# To get the name of that map point:
print investigations['features'][0]['properties']['name']
# Arizona State University
# Generally speaking, if what's between the square brackets is a number, you're accessing a list.
# If it's a string, you're accessing a dictionary.
# If you get stuck or are getting errors, try printing out the item and the key or index. | 26.216216 | 118 | 0.625258 |
833213154f6c6064adf75a6066412d88861a6169 | 19,345 | py | Python | stickers/__init__.py | secretisdead/stickers | 5159c637de2c204fdbdc6aafbebca949c492c203 | [
"MIT"
] | null | null | null | stickers/__init__.py | secretisdead/stickers | 5159c637de2c204fdbdc6aafbebca949c492c203 | [
"MIT"
] | null | null | null | stickers/__init__.py | secretisdead/stickers | 5159c637de2c204fdbdc6aafbebca949c492c203 | [
"MIT"
] | 1 | 2021-09-05T06:18:01.000Z | 2021-09-05T06:18:01.000Z | import uuid
import time
import re
from ipaddress import ip_address
from enum import Enum
from datetime import datetime, timezone
from sqlalchemy import Table, Column, PrimaryKeyConstraint, LargeBinary as sqla_binary, Float
from sqlalchemy import Integer, String, MetaData, distinct
from sqlalchemy.dialects.mysql import VARBINARY as mysql_binary
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func, and_, or_
from statement_helper import sort_statement, paginate_statement, id_filter
from statement_helper import time_cutoff_filter, string_like_filter
from statement_helper import string_equal_filter
from statement_helper import bitwise_filter
from idcollection import IDCollection
from parse_id import parse_id, get_id_bytes, generate_or_parse_id
| 27.208158 | 93 | 0.730887 |
833402a878296c2dae40def1c9fff8397df42c38 | 3,035 | py | Python | include/MPE3.py | jhgalino/MPv2 | 2f5e29d67bccc4538c5aaad2e69e817041414199 | [
"MIT"
] | null | null | null | include/MPE3.py | jhgalino/MPv2 | 2f5e29d67bccc4538c5aaad2e69e817041414199 | [
"MIT"
] | null | null | null | include/MPE3.py | jhgalino/MPv2 | 2f5e29d67bccc4538c5aaad2e69e817041414199 | [
"MIT"
] | null | null | null |
OTHER_RECURSIVE_FUNCTIONS = [
"getFirstLevel",
"computeTrig",
"computeExpWithCoeff",
"computeExpWithoutCoeff",
]
print(differentiate("3(x)^3"))
| 30.35 | 79 | 0.524547 |
8334d38451b05f8a06133e98e01f204b3df51a55 | 3,072 | py | Python | obsolete_object_wise_scoring_ben.py | agk2000/catalyst_project | 6bae324f24d6d6382e84dcf1f2fedf0d896371e1 | [
"MIT"
] | null | null | null | obsolete_object_wise_scoring_ben.py | agk2000/catalyst_project | 6bae324f24d6d6382e84dcf1f2fedf0d896371e1 | [
"MIT"
] | null | null | null | obsolete_object_wise_scoring_ben.py | agk2000/catalyst_project | 6bae324f24d6d6382e84dcf1f2fedf0d896371e1 | [
"MIT"
] | 1 | 2021-09-11T14:55:26.000Z | 2021-09-11T14:55:26.000Z | import sys
from mrs_utils import misc_utils, vis_utils
from mrs_utils import eval_utils
import os
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
# Creat object scorer class
osc = eval_utils.ObjectScorer(min_th=0.5, link_r=20, eps=2)
# Define the source
data_dir = '/scratch/sr365/Catalyst_data/2021_03_21_15_C_90/H3_raw'
conf_dir = '/scratch/sr365/Catalyst_data/2021_03_21_15_C_90/save_root/H3_img_H2_model'
save_name = 'H3_img_H2_model'
def get_conf_true_from_img(lbl_file, conf_file):
    """
    Load a label image and a confidence map, group their objects, and
    score the confidence map object-wise against the labels.

    Returns the (conf_list, true_list) pair produced by eval_utils.score.
    """
    # Label: first channel scaled to [0, 1]; confidence map loaded as-is.
    lbl_img = misc_utils.load_file(lbl_file)[:, :, 0] / 255
    conf_img = misc_utils.load_file(conf_file)
    # Group connected objects in both maps.
    lbl_groups = osc.get_object_groups(lbl_img)
    conf_groups = osc.get_object_groups(conf_img)
    # Kept for parity with the original code: display_group may have
    # rendering side effects — TODO confirm; the returned images are unused.
    lbl_group_img = eval_utils.display_group(lbl_groups, lbl_img.shape[:2], need_return=True)
    conf_group_img = eval_utils.display_group(conf_groups, conf_img.shape[:2], need_return=True)
    # Object-wise scoring of the confidence map against the labels.
    conf_list, true_list = eval_utils.score(conf_img, lbl_img, min_th=0.5, link_r=10, iou_th=0.5)
    return conf_list, true_list
def plot_PR_curve(conf_list, true_list, save_name='PR_curve'):
    """
    Plot the precision-recall curve for the given confidence/truth lists
    and save it as ../PR_curves/<save_name>.png.
    """
    ap, precision, recall, _ = eval_utils.get_precision_recall(conf_list, true_list)
    # First point is skipped, as in the original implementation.
    plt.plot(recall[1:], precision[1:])
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.title(f'AP={ap:.2f}')
    plt.tight_layout()
    plt.savefig(f'../PR_curves/{save_name}.png')
if __name__ == '__main__':
    # Accumulate object-wise scores over every confidence map in conf_dir.
    large_conf_list, large_true_list = [], []
    for file in os.listdir(conf_dir):
        print("processing file: ", file)
        # Only model outputs ('*_conf.png') are scored.
        if not file.endswith('_conf.png'):
            continue
        # get the file names (the label file shares the name minus '_conf')
        conf_file = os.path.join(conf_dir, file)
        lbl_file = os.path.join(data_dir, file.replace('_conf',''))
        # get the conf_list and true list
        conf_list, true_list = get_conf_true_from_img(lbl_file, conf_file)
        if len(conf_list) == 0 or len(true_list) == 0:
            print("Either you don't have a true file or a ground truth", file)
            continue
        print("conf_list shape:", np.shape(conf_list))
        print("true_list shape:", np.shape(true_list))
        print("large conf list shape:", np.shape(large_conf_list))
        print("large true list shape:", np.shape(large_true_list))
        # First hit initializes the accumulators; later hits are concatenated.
        if len(large_conf_list) == 0:
            large_conf_list = conf_list
            large_true_list = true_list
        else:
            large_conf_list = np.concatenate((large_conf_list, conf_list), axis=0)
            large_true_list = np.concatenate((large_true_list, true_list), axis=0)
    # Persist the raw score lists, then plot the aggregate PR curve.
    np.save('../PR_curves/conf_list.npy', large_conf_list)
    np.save('../PR_curves/true_list.npy', large_true_list)
    plot_PR_curve(np.reshape(large_conf_list, [-1,]), np.reshape(large_true_list, [-1,]), save_name = save_name)
| 37.925926 | 112 | 0.682617 |
8335f3aa44031d6db4debfb0403cae80df9a5fe1 | 28,012 | py | Python | compare.py | dreamersnme/future | 87462ea1ef2dfd056e26ede85448af160df7d2ac | [
"MIT"
] | 86 | 2019-03-24T16:53:12.000Z | 2022-02-25T11:48:57.000Z | compare.py | dreamersnme/future | 87462ea1ef2dfd056e26ede85448af160df7d2ac | [
"MIT"
] | 1 | 2020-11-15T16:36:54.000Z | 2020-11-15T16:36:54.000Z | compare.py | dreamersnme/future | 87462ea1ef2dfd056e26ede85448af160df7d2ac | [
"MIT"
] | 33 | 2019-03-22T00:26:20.000Z | 2022-03-25T02:56:17.000Z | # --------------------------- IMPORT LIBRARIES -------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import data_preprocessing as dp
from sklearn.preprocessing import MinMaxScaler
import keras
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
from keras.layers import Dense, Dropout
# ------------------------- GLOBAL PARAMETERS -------------------------
# Start and end period of historical data in question
START_TRAIN = datetime(2008, 12, 31)
END_TRAIN = datetime(2017, 2, 12)
START_TEST = datetime(2017, 2, 12)
END_TEST = datetime(2019, 2, 22)
STARTING_ACC_BALANCE = 100000
NUMBER_NON_CORR_STOCKS = 5
# Number of times of no-improvement before training is stop.
PATIENCE = 30
# Pools of stocks to trade
DJI = ['MMM', 'AXP', 'AAPL', 'BA', 'CAT', 'CVX', 'CSCO', 'KO', 'DIS', 'XOM', 'GE', 'GS', 'HD', 'IBM', 'INTC', 'JNJ',
'JPM', 'MCD', 'MRK', 'MSFT', 'NKE', 'PFE', 'PG', 'UTX', 'UNH', 'VZ', 'WMT']
DJI_N = ['3M', 'American Express', 'Apple', 'Boeing', 'Caterpillar', 'Chevron', 'Cisco Systems', 'Coca-Cola', 'Disney'
, 'ExxonMobil', 'General Electric', 'Goldman Sachs', 'Home Depot', 'IBM', 'Intel', 'Johnson & Johnson',
'JPMorgan Chase', 'McDonalds', 'Merck', 'Microsoft', 'NIKE', 'Pfizer', 'Procter & Gamble',
'United Technologies', 'UnitedHealth Group', 'Verizon Communications', 'Wal Mart']
# Market and macroeconomic data to be used as context data
CONTEXT_DATA = ['^GSPC', '^DJI', '^IXIC', '^RUT', 'SPY', 'QQQ', '^VIX', 'GLD', '^TYX', '^TNX', 'SHY', 'SHV']
# --------------------------------- CLASSES ------------------------------------
# ------------------------------ Main Program ---------------------------------
def main():
    """Compare the performance of several trading strategies on the test period.

    Strategies compared:
      1. Buy-and-hold of an equal-weight portfolio of non-correlated stocks
      2. Buy-and-hold of a Sharpe-ratio optimized portfolio
      3. Buy-and-hold of a minimum-variance optimized portfolio
      4. An RNN-LSTM price-prediction model, backtested
      5. StarTrader (DDPG agent), loaded from a previously saved backtest

    Side effects: reads ./data/ddpg_input_states.csv and
    ./test_result/trading_book_test_1.csv, writes combined returns CSVs to
    ./test_result/, and displays several plots.
    """
    print("\n")
    print("######################### This program compare performance of trading strategies ############################")
    print("\n")
    print("1. Simple Buy and hold strategy of a portfolio with {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("2. Sharpe ratio optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("3. Minimum variance optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("4. Simple Buy and hold strategy ")
    # NOTE(review): the next line repeats item 1 verbatim; it probably should
    # announce a 5th strategy (RNN-LSTM / DDPG) -- confirm the intended text.
    print("1. Simple Buy and hold strategy ")
    print("\n")
    print("Starting to pre-process data for trading environment construction ... ")

    # --- Data preprocessing: fetch train/test prices and volumes -------------
    dataset = dp.DataRetrieval()
    dow_stocks_train, dow_stocks_test = dataset.get_all()
    train_portion = len(dow_stocks_train)
    dow_stock_volume = dataset.components_df_v[DJI]
    portfolios = dp.Trading(dow_stocks_train, dow_stocks_test, dow_stock_volume.loc[START_TEST:END_TEST])
    # Select the N least-correlated Dow stocks for portfolio construction.
    _, _, non_corr_stocks = portfolios.find_non_correlate_stocks(NUMBER_NON_CORR_STOCKS)
    non_corr_stocks_data = dataset.get_adj_close(non_corr_stocks)
    print("\n")
    print("Base on non-correlation preference, {} stocks are selected for portfolio construction:".format(NUMBER_NON_CORR_STOCKS))
    for stock in non_corr_stocks:
        print(DJI_N[DJI.index(stock)])
    print("\n")

    # --- Efficient frontier: Sharpe-optimal and minimum-variance weights ----
    sharpe_portfolio, min_variance_portfolio = portfolios.find_efficient_frontier(non_corr_stocks_data, non_corr_stocks)
    print("Risk-averse portfolio with low variance:")
    print(min_variance_portfolio.T)
    print("High return portfolio with high Sharpe ratio")
    print(sharpe_portfolio.T)
    dow_stocks = pd.concat([dow_stocks_train, dow_stocks_test], axis=0)

    # --- Strategy 1: equal-weight buy and hold ------------------------------
    test_values_buyhold, test_returns_buyhold, test_kpi_buyhold = \
        portfolios.diversified_trade(non_corr_stocks, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
    print("\n")
    print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
    print(" KPI of a simple buy and hold strategy for a portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("------------------------------------------------------------------------------------")
    print(test_kpi_buyhold)
    print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")

    # --- Strategy 2: Sharpe-ratio optimized buy and hold --------------------
    test_values_sharpe_optimized_buyhold, test_returns_sharpe_optimized_buyhold, test_kpi_sharpe_optimized_buyhold = \
        portfolios.optimized_diversified_trade(non_corr_stocks, sharpe_portfolio, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
    print("\n")
    print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
    print(" KPI of a simple buy and hold strategy for a Sharpe ratio optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("------------------------------------------------------------------------------------")
    print(test_kpi_sharpe_optimized_buyhold)
    print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")

    # --- Strategy 3: minimum-variance optimized buy and hold ----------------
    test_values_minvar_optimized_buyhold, test_returns_minvar_optimized_buyhold, test_kpi_minvar_optimized_buyhold = \
        portfolios.optimized_diversified_trade(non_corr_stocks, min_variance_portfolio, dow_stocks.loc[START_TEST:END_TEST][non_corr_stocks])
    print("\n")
    print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
    print(" KPI of a simple buy and hold strategy for a Minimum variance optimized portfolio of {} non-correlated stocks".format(NUMBER_NON_CORR_STOCKS))
    print("------------------------------------------------------------------------------------")
    print(test_kpi_minvar_optimized_buyhold)
    print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")

    # Collect per-strategy daily and cumulative returns into single frames.
    plot = dp.UserDisplay()
    test_returns = dp.MathCalc.assemble_returns(test_returns_buyhold['Returns'],
                                                test_returns_sharpe_optimized_buyhold['Returns'],
                                                test_returns_minvar_optimized_buyhold['Returns'])
    test_cum_returns = dp.MathCalc.assemble_cum_returns(test_returns_buyhold['CumReturns'],
                                                        test_returns_sharpe_optimized_buyhold['CumReturns'],
                                                        test_returns_minvar_optimized_buyhold['CumReturns'])
    print("\n")
    print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
    print("Buy and hold strategies computation completed. Now creating prediction model using RNN LSTM architecture")
    print("--------------------------------------------------------------------------------------------------------")
    print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv")

    # Use feature data preprocessed by StartTrader, so that they both use the same training data, to have a fair comparison
    input_states = pd.read_csv("./data/ddpg_input_states.csv", index_col='Date', parse_dates=True)
    # NOTE(review): Data_ScaleSplit, Model and Trading (below) are not imported
    # in this module -- they presumably live in data_preprocessing (dp) or a
    # model module and need an import or a `dp.` prefix; confirm before running.
    scale_split = Data_ScaleSplit(input_states, dow_stocks[non_corr_stocks], train_portion)
    train_X, train_y, test_X, test_y = scale_split.get_train_test_set()
    modelling = Model
    model_lstm = modelling.build_rnn_model(train_X)
    history_lstm, model_lstm = modelling.train_model(model_lstm, train_X, train_y, "LSTM")
    # Bug fix: the original left the "{}" placeholder unformatted, so PATIENCE
    # was never shown in the message.
    print("RNN model loaded, now training the model again, training will stop after {} episodes no improvement".format(PATIENCE))
    modelling.plot_training(history_lstm, "LSTM")
    print("Training completed, loading prediction using the trained RNN model >")
    recovered_data_lstm = scale_split.get_prediction(model_lstm)
    plot.plot_prediction(dow_stocks[non_corr_stocks].loc[recovered_data_lstm.index], recovered_data_lstm[recovered_data_lstm.columns[-5:]], len(train_X), "LSTM")

    # Get the original stock price with the prediction length
    original_portfolio_stock_price = dow_stocks[non_corr_stocks].loc[recovered_data_lstm.index]
    # Get the predicted stock price with the prediction length
    predicted_portfolio_stock_price = recovered_data_lstm[recovered_data_lstm.columns[-5:]]
    print("Bactesting the RNN-LSTM model now")
    # Run backtest, the backtester is similar to those use by StarTrader too
    backtest = Trading(predicted_portfolio_stock_price, original_portfolio_stock_price, dow_stock_volume[non_corr_stocks].loc[recovered_data_lstm.index], dow_stocks_test[non_corr_stocks], non_corr_stocks)
    trading_book, kpi = backtest.execute_trading(non_corr_stocks)

    # Load backtest result for StarTrader using DDPG as learning algorithm
    ddpg_backtest = pd.read_csv('./test_result/trading_book_test_1.csv', index_col='Unnamed: 0', parse_dates=True)
    print("Backtesting completed, plotting comparison of trading models")

    # Compare performance on all trading types against the DJIA benchmark.
    djia_daily = dataset._get_daily_data(CONTEXT_DATA[1]).loc[START_TEST:END_TEST]['Close']
    all_benchmark_returns = test_returns
    all_benchmark_returns['DJIA'] = dp.MathCalc.calc_return(djia_daily)
    all_benchmark_returns['RNN LSTM'] = trading_book['Returns']
    all_benchmark_returns['DDPG'] = ddpg_backtest['Returns']
    all_benchmark_returns.to_csv('./test_result/all_strategies_returns.csv')
    plot.plot_portfolio_risk(all_benchmark_returns)
    all_benchmark_cum_returns = test_cum_returns
    # Rebase the DJIA daily returns into a cumulative growth factor series.
    all_benchmark_cum_returns['DJIA'] = all_benchmark_returns['DJIA'].add(1).cumprod().fillna(1)
    all_benchmark_cum_returns['RNN LSTM'] = trading_book['CumReturns']
    all_benchmark_cum_returns['DDPG'] = ddpg_backtest['CumReturns']
    all_benchmark_cum_returns.to_csv('./test_result/all_strategies_cum_returns.csv')
    plot.plot_portfolio_return(all_benchmark_cum_returns)
if __name__ == '__main__':
main() | 50.021429 | 205 | 0.63023 |
83383133f1e2636bee0ef87328b2ad1c26e323fd | 1,288 | py | Python | Desafio horario atual/__init__.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | 1 | 2021-09-01T01:58:13.000Z | 2021-09-01T01:58:13.000Z | Desafio horario atual/__init__.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null | Desafio horario atual/__init__.py | pinheirogus/Curso-Python-Udemy | d6d52320426172e924081b9df619490baa8c6016 | [
"MIT"
] | null | null | null | # num1 = input("Digite um nmero inteiro: ")
#
#
# try:
#
# if num1.isnumeric() :
# num1 = int(num1)
# if (num1 % 2) == 0 :
# print("Voc digitou um nmero par.")
# elif (num1 % 2) != 0:
# print("Voc digitou um nmero mpar.")
# else:
# print("Voc no digitou um nmero vlido.")
# else:
# print("Voc no digitou um nmero inteiro.")
# except:
# print("Voc no digitou um nmero.")
###################################################################################################################################
#hora_atual = input("Qual o horrio atual? ")
###################################################################################################################################
# Classify the length of the user's first name.
# NOTE(review): the Portuguese strings below lost their accented characters to
# an encoding mishap ("Voc no digitou" was presumably "Você não digitou");
# they are kept byte-identical here since they are runtime output.
nome = input("Por favor, digite seu primeiro nome: ")
try:
    if nome.isnumeric():
        # Purely numeric input is rejected as a name.
        print("Voc no digitou um nome vlido.")
    else:
        if len(nome) <= 4:
            print("Seu nome curto.")
        elif (len(nome) == 5) or (len(nome) == 6):
            print("Seu nome normal.")
        elif len(nome) > 6:
            print("Seu nome muito grande.")
        else:
            # Unreachable: the three branches above cover every length.
            # The trailing "1" looks like a leftover debug marker.
            print("Voc no digitou um nome vlido.1")
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception preserves the intended catch-all fallback.
    print("Voc no digitou um nome vlido.")
| 30.666667 | 131 | 0.420807 |
8338456e9d4d6099460e1bd2a49c5b5cf56d90a9 | 223 | py | Python | 05/b_average.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | 1 | 2021-03-29T08:30:19.000Z | 2021-03-29T08:30:19.000Z | 05/b_average.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | null | null | null | 05/b_average.py | koshin117/python-learning | 68dd99e2f72fff7507a874c11511415fef3c9354 | [
"MIT"
] | null | null | null | #B
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this visible chunk --
    # presumably it was lost in extraction; running this file as-is would
    # raise NameError. Confirm against the original b_average.py.
    main()
| 17.153846 | 40 | 0.565022 |
8338723c7e22b26ca6c647d1d2092f73e2a758fb | 3,224 | py | Python | tests/test_js.py | tinachou28/dataIO-project | cc8592edf5a2f03ba3cebcbc83b13764729ad839 | [
"MIT"
] | 7 | 2016-04-23T03:33:42.000Z | 2019-01-02T01:02:44.000Z | tests/test_js.py | tinachou28/dataIO-project | cc8592edf5a2f03ba3cebcbc83b13764729ad839 | [
"MIT"
] | 2 | 2018-05-22T07:08:13.000Z | 2019-05-14T19:39:16.000Z | tests/test_js.py | tinachou28/dataIO-project | cc8592edf5a2f03ba3cebcbc83b13764729ad839 | [
"MIT"
] | 4 | 2017-08-19T16:05:34.000Z | 2020-12-08T10:43:11.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import warnings
import pytest
from pytest import approx
import os
from os.path import join
from datetime import datetime
from dataIO import py23
from dataIO import js
from dataIO import textfile
# Fixture paths: a plain JSON file and a gzip-compressed one (the .gz suffix
# is what the js module's dump/load round-trip test below exercises).
path_json = os.path.abspath("test.json")
path_gz = os.path.abspath("test.json.gz")
# Payload containing only JSON-native scalar types.
data_simple = {
    "int": 100,
    "float": 3.1415926535,
    "str": "string ",
    "boolean": True,
}
# Payload with types plain json cannot encode (bytes, datetime) -- presumably
# used with the bson/json_util extension imported near the end of the module.
data_complex = {
    "int": 100,
    "float": 3.1415926535,
    "str": "string ",
    "bytes": "bytes ".encode("utf-8"),
    "boolean": True,
    "datetime": datetime.now(),
}
def test_prevent_overwrite(tmpdir):
    """Test whether file overwrite alert is working.

    Creates a file first, then dumps over it; js.dump is expected to handle
    the pre-existing file (overwrite alert) rather than fail. No assertion is
    made -- the test only checks the calls complete without raising.
    NOTE(review): the tmpdir fixture is unused; the test writes to the
    module-level path_json instead.
    """
    textfile.write("hello", path_json)
    js.dump([1, 2, 3], path_json)
    os.remove(path_json)
def test_float_precision():
    """Test whether the ``float_precision`` keyword is honored by js.safe_dump.

    On failure this only emits a warning (the suite stays green) -- that is
    the original, deliberate best-effort behavior and is preserved here.
    """
    js.safe_dump({"value": 1.23456789}, path_json, indent_format=False,
                 float_precision=2, enable_verbose=False)
    try:
        assert js.load(path_json, enable_verbose=False)[
            "value"] == approx(1.23)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort intent intact.
        warnings.warn("float_precision argument is not working.")
    os.remove(path_json)
def test_compress():
    """Test whether data compression is working.

    Round-trips a small dict through a ``.json.gz`` path -- presumably the
    .gz extension is what triggers gzip handling inside js.safe_dump/js.load;
    confirm against the dataIO.js implementation.
    """
    js.safe_dump({"value": 1}, path_gz, enable_verbose=False)
    assert js.load(path_gz, enable_verbose=False) == {"value": 1}
    os.remove(path_gz)
# Optional dependency: bson's json_util extends JSON encoding (e.g. datetime).
try:
    from bson import json_util
except ImportError:
    # Bug fix: was a bare `except:`; only a missing/uninstallable bson should
    # be tolerated -- other import-time failures ought to surface.
    pass
if __name__ == "__main__":
    # Run this file's tests directly via pytest when executed as a script.
    # NOTE(review): `os` is already imported at module level; this local
    # import is redundant.
    import os
    pytest.main([os.path.basename(__file__), "--tb=native", "-s", ])
| 26.644628 | 86 | 0.638337 |
8338c6c065505edebe32c2e1b457eb24e32e6163 | 34,731 | py | Python | remerkleable/complex.py | hwwhww/remerkleable | b52dce6b0beae7fffbb826fb9945dca9c40504fd | [
"MIT"
] | 1 | 2020-07-22T14:51:20.000Z | 2020-07-22T14:51:20.000Z | remerkleable/complex.py | hwwhww/remerkleable | b52dce6b0beae7fffbb826fb9945dca9c40504fd | [
"MIT"
] | null | null | null | remerkleable/complex.py | hwwhww/remerkleable | b52dce6b0beae7fffbb826fb9945dca9c40504fd | [
"MIT"
] | null | null | null | from typing import NamedTuple, cast, List as PyList, Dict, Any, BinaryIO, Optional, TypeVar, Type, Protocol, \
runtime_checkable
from types import GeneratorType
from textwrap import indent
from collections.abc import Sequence as ColSequence
from itertools import chain
import io
from remerkleable.core import View, BasicView, OFFSET_BYTE_LENGTH, ViewHook, ObjType, ObjParseException
from remerkleable.basic import uint256, uint8, uint32
from remerkleable.tree import Node, subtree_fill_to_length, subtree_fill_to_contents,\
zero_node, Gindex, PairNode, to_gindex, NavigationError, get_depth
from remerkleable.subtree import SubtreeView
from remerkleable.readonly_iters import PackedIter, ComplexElemIter, ComplexFreshElemIter, ContainerElemIter
# Generic type variables: V ranges over any View subtype; M over
# MonoSubtreeView subtypes (forward-referenced by name, defined below).
V = TypeVar('V', bound=View)
M = TypeVar('M', bound="MonoSubtreeView")
def navigate_view(self, key: Any) -> View:
    """Resolve *key* through the view's subscription protocol.

    Equivalent to subscripting the view directly; exists so generic tree
    navigation can go through one named entry point.
    """
    return self[key]
def __len__(self):
    """Element count of the view; delegates to self.length() (defined on the
    subtree view, not visible in this chunk)."""
    return self.length()
def __add__(self, other):
    """Concatenate this view with *other*.

    Views whose element type is uint8 concatenate as raw ``bytes``; any
    other element type yields a plain ``list`` of the chained elements.
    """
    byte_like = issubclass(self.element_cls(), uint8)
    if not byte_like:
        return list(chain(self, other))
    return bytes(self) + bytes(other)
def __getitem__(self, k):
    """Read one element by index, or a contiguous range by slice.

    Slice reads return a plain list of elements; the slice step is ignored
    (only start/stop are honored), and missing bounds default to the full
    range [0, length).
    """
    if not isinstance(k, slice):
        return self.get(k)
    lo = k.start if k.start is not None else 0
    hi = k.stop if k.stop is not None else self.length()
    return [self.get(index) for index in range(lo, hi)]
def __setitem__(self, k, v):
    """Write one element by index, or fill a contiguous slice from *v*.

    For a slice, *v* must supply exactly ``stop - start`` values; supplying
    fewer raises. The slice step is ignored, mirroring ``__getitem__``.
    """
    # Bug fix: was `type(k) == slice`, which is non-idiomatic and would
    # reject slice subclasses; isinstance matches __getitem__'s check.
    if isinstance(k, slice):
        i = 0 if k.start is None else k.start
        end = self.length() if k.stop is None else k.stop
        for item in v:
            self.set(i, item)
            i += 1
        if i != end:
            raise Exception("failed to do full slice-set, not enough values")
    else:
        self.set(k, v)
def _repr_sequence(self):
    """Render the sequence view as a (possibly abbreviated) repr string.

    Large sequences (>64 elements for basic element types, >8 otherwise)
    are shortened to a head and tail with an "... N omitted ..." marker.
    Elements unreachable in a partial (summarized) tree are skipped and the
    output is marked "~partial~"; if even the length is unavailable, a
    summary-root placeholder is returned.
    """
    length: int
    try:
        length = self.length()
    except NavigationError:
        # Length node itself is summarized away -- nothing else to show.
        return f"{self.type_repr()}( *summary root, no length known* )"
    vals: Dict[int, View] = {}
    partial = False
    for i in range(length):
        try:
            vals[i] = self.get(i)
        except NavigationError:
            # Element subtree is summarized away; remember this is partial.
            partial = True
            continue
    # NOTE(review): this applies isinstance to the element *class* returned
    # by element_cls(); if that is a plain class, issubclass(...) may be the
    # intent -- confirm against remerkleable's type metaclasses.
    basic_elems = isinstance(self.element_cls(), BasicView)
    shortened = length > (64 if basic_elems else 8)
    summary_length = (10 if basic_elems else 3)
    # Basic elements render inline; composite elements one per line.
    seperator = ', ' if basic_elems else ',\n'
    contents = seperator.join(f"... {length - (summary_length * 2)} omitted ..."
                              if (shortened and i == summary_length)
                              else (f"{i}: {repr(v)}" if partial else repr(v))
                              for i, v in vals.items()
                              if (not shortened) or i <= summary_length or i >= length - summary_length)
    if '\n' in contents:
        contents = '\n' + indent(contents, ' ') + '\n'
    if partial:
        return f"{self.type_repr()}~partial~<<len={length}>>({contents})"
    else:
        return f"{self.type_repr()}<<len={length}>>({contents})"
class List(MonoSubtreeView):

    def to_obj(self) -> ObjType:
        """Convert to a plain Python object: a list of converted elements."""
        return [element.to_obj() for element in self.readonly_iter()]
class Vector(MonoSubtreeView):

    def to_obj(self) -> ObjType:
        """Convert to a plain Python object: a tuple of converted elements."""
        return tuple([element.to_obj() for element in self.readonly_iter()])
# Container field layout: mapping of field name -> field view type.
Fields = Dict[str, Type[View]]
# Type variable bound to Container (forward reference) for typed classmethods.
CV = TypeVar('CV', bound="Container")
def serialize(self, stream: BinaryIO) -> int:
    """SSZ-serialize this container into *stream*; return total bytes written.

    Fixed-size fields are written in place; each variable-size field writes a
    4-byte offset in the fixed section and its payload into a temporary
    buffer that is appended after all fixed parts (standard SSZ layout).
    NOTE(review): encode_offset is not defined in this chunk -- presumably a
    module-level helper elsewhere in the file.
    """
    fields = self.__class__.fields()
    is_fixed_size = self.is_fixed_byte_length()
    temp_dyn_stream: BinaryIO
    # `written` starts as the size of the fixed section (dynamic fields count
    # as OFFSET_BYTE_LENGTH there), so it doubles as the next field's offset.
    written = sum(map((lambda x: x.type_byte_length() if x.is_fixed_byte_length() else OFFSET_BYTE_LENGTH),
                  fields.values()))
    if not is_fixed_size:
        temp_dyn_stream = io.BytesIO()
    for fkey, ftyp in fields.items():
        v: View = getattr(self, fkey)
        if ftyp.is_fixed_byte_length():
            v.serialize(stream)
        else:
            # Emit the offset now; the payload is buffered for later.
            encode_offset(stream, written)
            written += v.serialize(temp_dyn_stream)  # type: ignore
    if not is_fixed_size:
        # Append the buffered dynamic payloads after the fixed section.
        temp_dyn_stream.seek(0)
        stream.write(temp_dyn_stream.read(written))
    return written
def navigate_view(self, key: Any) -> View:
    """Resolve *key* as a named field via the dynamic-attribute hook.

    Calls ``__getattr__`` explicitly (not plain ``getattr``) so the lookup
    always goes through the container's field-resolution path.
    """
    field_lookup = self.__getattr__
    return field_lookup(key)
| 37.792165 | 119 | 0.582477 |
83399c09776772609094ffc2ac08102d789dfc9b | 21,383 | py | Python | cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | cave/com.raytheon.viz.gfe/python/autotest/RoutineLevel4_1_TestScript.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | # #
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
# #
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# RoutineLevel4_1_TestScript Local Effects
#
# Author:
# ----------------------------------------------------------------------------
# First run setupTextEA
# Source-patch snippet pairs: each test's "fileChanges" entry replaces the
# first string with the second inside LE_Test_Local's Definition dict
# (e.g. switching windLE_list from variant 1 to variant 2).
windLE1 = """Definition["windLE_list"] = 1"""
windLE2 = """Definition["windLE_list"] = 2"""
tempLE1 = """Definition["tempLE_list"] = 1"""
tempLE2 = """Definition["tempLE_list"] = 2"""
periodLE1 = """Definition["Period_1_version"] = 1"""
periodLE2 = """Definition["Period_1_version"] = 2"""
periodLE3 = """Definition["Period_1_version"] = 3"""
tempLE_method1 = """Definition["tempLE_method"] = 1"""
tempLE_method2 = """Definition["tempLE_method"] = 2"""
snowLE1 = """## (self.weather_phrase,self._wxLocalEffects_list()),
## (self.snow_phrase,self._snowAmtLocalEffects_list()),
## (self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snowLE2 = """ (self.weather_phrase,self._wxLocalEffects_list()),
(self.snow_phrase,self._snowAmtLocalEffects_list()),
(self.total_snow_phrase,self._totalSnowAmtLocalEffects_list()),
"""
snow2LE1 = """## ("Period_2_3", 12), """
snow2LE2 = """ ("Period_2_3", 12), """
# Runs LE_Test_Local for each test
scripts = [
{
"name": "LE1",
"commentary": "Local Effects: MaxT (21,40), Wind (N30,N10), Gust 0",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 35 mph in the mountains",
],
},
{
"name": "LE2",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph increasing to around 25 mph in the afternoon",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
{
"name": "LE3",
"commentary": "Local Effects: Wind (N20,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds, except north around 25 mph in the mountains",
],
},
{
"name": "LE4",
"commentary": "Local Effects: Wind (N20,0) -> (N30,0), Gust 0",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (0, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (0, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Light winds",
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
],
},
{
"name": "LE5",
"commentary": "Local Effects: Wind (N20,N10), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 25 mph in the mountains, otherwise north around 10 mph",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE6",
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N20), Gust 0, windLE_list=1",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (20, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon",
"In the valleys, north winds around 10 mph increasing to around 25 mph in the afternoon",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (windLE1, windLE2), "undo")
],
},
{
"name": "LE7",
"commentary": "Local Effects: Temp (21, 40), Wind (N20,N10), Gust 0, tempLE_list=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 21, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"Highs around 40, except in the lower 20s in the mountains",
"North winds around 10 mph, except north around 25 mph in the mountains",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (tempLE1, tempLE2), "undo")
],
},
{
"name": "LE8",
"commentary": "Local Effects: MaxT (20,20,20), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE9",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE10",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=1",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20, except around 30 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE2), "undo")
],
},
{
"name": "LE11",
"commentary": "Local Effects: MaxT (20,30,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 30, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 30 in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE12",
"commentary": "Local Effects: MaxT (20,40,20), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the benches, and around 40 in the rush valley",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo")
],
},
{
"name": "LE13",
"commentary": "Local Effects: MaxT (20,40,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city, and around 40 in the rush valley and in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE14",
"commentary": "Local Effects: MaxT (20,20,40), Period_1_version=2",
"createGrids": [
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 20, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 40, ["area2"]),
],
"checkStrings": [
"Highs around 20 in the city and in the rush valley, and around 40 in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(periodLE1, periodLE2), (tempLE_method1, tempLE_method2)], "undo"),
],
},
{
"name": "LE15",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 3, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 5, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["area3"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 3 inches",
".TONIGHT...", "Snow accumulation around 5 inches",
"...", "Snow accumulation around 1 inch",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE16",
"commentary": "Local Effects: SnowAmt",
"createGrids": [
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Lkly:S:-:<NoVis>:", "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 5, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 2, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 4, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 3, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 1, ["BelowElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["AboveElev"]),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 0, ["BelowElev"]),
],
"checkStrings": [
".TODAY...", "Snow accumulation around 2 inches, except around 5 inches above timberline",
".TONIGHT...", "Snow accumulation around 1 inch, except around 4 inches above timberline",
"...", "Snow accumulation of 1 to 3 inches",
"Total snow accumulation around 4 inches, except around 12 inches above timberline",
"...", "No snow accumulation",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", [(snowLE1, snowLE2), (snow2LE1, snow2LE2)], "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE17", # Wade and Ballard
"commentary": "Local Effects: Wind (N20,N10) -> (N30,N10)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
"North winds around 10 mph. In the mountains, north winds around 25 mph increasing to around 35 mph in the afternoon.",
],
},
{
"name": "LE18", # Wade and Ballard
"commentary": "Local Effects: Wind (N10,N20) -> (N10,N30)",
"createGrids": [
("Fcst", "Wind", "VECTOR", 0, 6, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 0, 6, (20, "N"), ["BelowElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (10, "N"), ["AboveElev"]),
("Fcst", "Wind", "VECTOR", 6, 12, (30, "N"), ["BelowElev"]),
("Fcst", "WindGust", "SCALAR", 0, 12, 0, "all"),
],
"checkStrings": [
# "North winds around 25 mph increasing to around 35 mph in the afternoon. North winds around 10 mph in the mountains.",
"North winds around 25 mph increasing to around 35 mph in the afternoon. In the mountains, north winds around 10 mph.",
],
},
{
"name": "LE19",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, patchy fog in the rush valley, a 50 percent chance of snow showers in the benches, patchy fog in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE20",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE21",
"commentary": "Local Effects for non-intersecting areas -- CASE 3 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 12, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 12, "Chc:T:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 6, "Chc:T:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 6, 12, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"In the city, a 50 percent chance of thunderstorms.",
"In the rush valley, chance of thunderstorms in the morning, then chance of showers in the afternoon.",
"In the benches, chance of thunderstorms in the morning, then chance of snow showers in the afternoon.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE22",
"commentary": "Local Effects for non-intersecting areas -- CASE 2 for sub-phrase consolidation",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches, chance of showers in the rush valley, chance of snow showers in the benches.",
"Patchy fog.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE23",
"commentary": "Local Effects for non-intersecting areas",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "NoWx", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the rush valley, a 50 percent chance of snow showers in the benches.",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
{
"name": "LE24",
"commentary": "Local Effects for non-intersecting areas -- no consolidation necessary",
"createGrids": [
("Fcst", "Sky", "SCALAR", 0, 48, 30, "all"),
("Fcst", "PoP", "SCALAR", 0, 48, 70, "all"),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:RW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 48, "Chc:SW:-:<NoVis>:^Patchy:F:<NoInten>:<NoVis>:", ["area2"]),
],
"checkStrings": [
"Mostly sunny.",
"A 50 percent chance of showers in the city and in the rush valley, a 50 percent chance of snow showers in the benches",
],
"fileChanges": [
("LE_Test_Local", "TextProduct", "replace", (periodLE1, periodLE3), "undo"),
],
"stringOrder": "yes",
},
]
import CreateGrids
import TestScript
| 41.520388 | 189 | 0.533087 |
8339dd90862b3868393e86e2c87682f87414e27c | 12,569 | py | Python | AutoPano/Phase2/Code/Test_files/TrainUnsup.py | akathpal/ComputerVision-CMSC733 | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | [
"MIT"
] | 1 | 2019-09-26T02:06:17.000Z | 2019-09-26T02:06:17.000Z | AutoPano/Phase2/Code/Test_files/TrainUnsup.py | akathpal/UMD-CMSC733-ComputerVision | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | [
"MIT"
] | null | null | null | AutoPano/Phase2/Code/Test_files/TrainUnsup.py | akathpal/UMD-CMSC733-ComputerVision | f5fa21a0ada8ab8ea08a6c558f6df9676570a2df | [
"MIT"
] | 1 | 2022-03-30T05:03:09.000Z | 2022-03-30T05:03:09.000Z | #!/usr/bin/env python
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Project 1: MyAutoPano: Phase 2 Starter Code
Author(s):
Nitin J. Sanket (nitinsan@terpmail.umd.edu)
PhD Candidate in Computer Science,
University of Maryland, College Park
Abhishek Kathpal
University of Maryland,College Park
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
# termcolor, do (pip install termcolor)
import tensorflow as tf
import pickle
import cv2
import sys
import os
import glob
# import Misc.ImageUtils as iu
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
from Network.Network import Supervised_HomographyModel,Unsupervised_HomographyModel
from Misc.MiscUtils import *
from Misc.DataUtils import *
import numpy as np
import time
import argparse
import shutil
from StringIO import StringIO
import string
from termcolor import colored, cprint
import math as m
from tqdm import tqdm
from matplotlib import pyplot as plt
from Misc.TFSpatialTransformer import *
# Don't generate pyc codes
sys.dont_write_bytecode = True
def extract(data):
    """Load training features and labels from a pickle file.

    Inputs:
    data - Path to a pickle file holding a dict with keys
           'features' and 'labels'

    Outputs:
    features - np.array of the stored features
    labels - np.array of the stored labels
    """
    # 'with' guarantees the file is closed even if pickle.load raises;
    # the original leaked the handle on a deserialization error.
    with open(data, 'rb') as f:
        out = pickle.load(f)
    features = np.array(out['features'])
    labels = np.array(out['labels'])
    return features, labels
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize,ModelType):
    """
    Assemble one random mini-batch of homography training data.

    Inputs:
    BasePath - Path to COCO folder without "/" at the end
    DirNamesTrain - Variable with Subfolder paths to train files
    TrainLabels - Labels corresponding to Train (unused on the unsupervised path)
    ImageSize - Size of the Image (currently unused; patches are fixed 128x128)
    MiniBatchSize is the size of the MiniBatch
    ModelType - 'supervised' selects pre-generated pickle data, anything
                else selects on-the-fly unsupervised patch generation

    Outputs (unsupervised path):
    I1FullBatch - Batch of full normalized grayscale images
    PatchBatch - Batch of 128x128x2 stacks (original patch, warped patch)
    CornerBatch - Batch of 4x2 source-patch corner coordinates
    I2Batch - Batch of 128x128x1 warped patches

    NOTE(review): the supervised branch fills I1Batch/LabelBatch, but the
    final return references I1FullBatch/PatchBatch/CornerBatch/I2Batch,
    which are bound only in the unsupervised branch - so calling this
    with ModelType='supervised' raises NameError. Only the unsupervised
    path is exercised by TrainOperation in this file.
    """
    ImageNum = 0
    I1Batch = []
    LabelBatch = []
    if (ModelType.lower() == 'supervised'):
        print("Supervised_approach")
        # Pre-generated patch pairs and 4-point homography labels.
        features,labels=extract('training.pkl')
        ImageNum = 0
        while ImageNum < MiniBatchSize:
            # Generate random image
            NumTrainImages=5000
            RandIdx = random.randint(0, NumTrainImages-1)
            ImageNum += 1
            ##########################################################
            # Add any standardization or data augmentation here!
            ##########################################################
            # Zero-mean the sample and scale it down by 255.
            I1 = np.float32(features[RandIdx])
            I1=(I1-np.mean(I1))/255
            # Label is the 8-vector of corner displacements (H4pt).
            t = labels[RandIdx].reshape((1,8))
            label = t[0]
            # Append All Images and Mask
            I1Batch.append(I1)
            LabelBatch.append(label)
    else:
        # print("Unsupervised Approach")
        I1FullBatch = []
        PatchBatch = []
        CornerBatch = []
        I2Batch = []
        ImageNum = 0
        while ImageNum < MiniBatchSize:
            # Generate random image
            RandIdx = random.randint(0, len(DirNamesTrain)-1)
            # print(len(DirNamesTrain))
            RandImageName = BasePath + os.sep + DirNamesTrain[RandIdx] + '.jpg'
            ImageNum += 1
            # Patch side length and max corner perturbation, in pixels.
            patchSize = 128
            r = 32
            img_orig = plt.imread(RandImageName)
            img_orig = np.float32(img_orig)
            # plt.imshow(img_orig)
            # plt.show()
            # Collapse RGB images to a single grayscale channel.
            if(len(img_orig.shape)==3):
                img = cv2.cvtColor(img_orig,cv2.COLOR_RGB2GRAY)
            else:
                img = img_orig
            img=(img-np.mean(img))/255
            img = cv2.resize(img,(320,240))
            # img = cv2.resize(img,(ImageSize[0],ImageSize[1]))
            # print(img.shape[1]-r-patchSize)
            # Top-left corner of the patch, kept r pixels away from the
            # border so perturbed corners stay inside the image.
            x = np.random.randint(r, img.shape[1]-r-patchSize)
            y = np.random.randint(r, img.shape[0]-r-patchSize)
            # print(x)
            # Patch corners, clockwise from the top-left.
            p1 = (x,y)
            p2 = (patchSize+x, y)
            p3 = (patchSize+x, patchSize+y)
            p4 = (x, patchSize+y)
            src = [p1, p2, p3, p4]
            src = np.array(src)
            dst = []
            # Random homography: jitter each corner by up to +/-r pixels.
            for pt in src:
                dst.append((pt[0]+np.random.randint(-r, r), pt[1]+np.random.randint(-r, r)))
            H = cv2.getPerspectiveTransform(np.float32(src), np.float32(dst))
            H_inv = np.linalg.inv(H)
            # Warp the full image with the inverse homography so cropping
            # the same window yields the patch as seen under H.
            warpImg = cv2.warpPerspective(img, H_inv, (img.shape[1],img.shape[0]))
            patch1 = img[y:y + patchSize, x:x + patchSize]
            patch2 = warpImg[y:y + patchSize, x:x + patchSize]
            imgData = np.dstack((patch1, patch2))
            # Append All Images and Mask
            I1FullBatch.append(np.float32(img))
            PatchBatch.append(imgData)
            CornerBatch.append(np.float32(src))
            I2Batch.append(np.float32(patch2.reshape(128,128,1)))
    return I1FullBatch, PatchBatch, CornerBatch, I2Batch
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
    """Print a human-readable summary of the training configuration."""
    summary = [
        'Number of Epochs Training will run for ' + str(NumEpochs),
        'Factor of reduction in training data is ' + str(DivTrain),
        'Mini Batch Size ' + str(MiniBatchSize),
        'Number of Training Images ' + str(NumTrainSamples),
    ]
    # Mention the checkpoint only when training resumes from one.
    if LatestFile is not None:
        summary.append('Loading latest checkpoint with the name ' + LatestFile)
    for entry in summary:
        print(entry)
def TrainOperation(ImgPH, CornerPH, I2PH, I1FullPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, BasePath, LogsPath, ModelType):
    """
    Build the homography network graph, then train it with mini-batch SGD,
    periodically checkpointing and logging the loss to Tensorboard.

    Inputs:
    ImgPH is the Input Image placeholder (MiniBatchSize x 128 x 128 x 2)
    CornerPH - placeholder for the 4x2 source-patch corners
    I2PH - placeholder for the warped 128x128x1 target patches
    I1FullPH - placeholder for full images (currently unused in the graph)
    DirNamesTrain - Variable with Subfolder paths to train files
    TrainLabels - Labels corresponding to Train/Test
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of dataor for debugging code
    LatestFile - Latest checkpointfile to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
    ModelType - Supervised or Unsupervised Model

    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """
    # Predict output with forward pass
    if ModelType.lower() == 'supervised':
        H4pt = Supervised_HomographyModel(ImgPH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            # NOTE(review): LabelPH is not a parameter of this function and is
            # not defined here, so this branch raises NameError if ever taken.
            # L2 loss between predicted and ground-truth 4-point displacements.
            loss = tf.sqrt(tf.reduce_sum((tf.squared_difference(H4pt,LabelPH))))
        with tf.name_scope('Adam'):
            Optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
    else:
        # print(ImageSize)
        # Photometric L1 loss between the network-warped patch and the
        # ground-truth warped patch (unsupervised objective).
        pred_I2,I2 = Unsupervised_HomographyModel(ImgPH, CornerPH, I2PH, ImageSize, MiniBatchSize)
        with tf.name_scope('Loss'):
            loss = tf.reduce_mean(tf.abs(pred_I2 - I2))
        with tf.name_scope('Adam'):
            Optimizer = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(loss)
    # Tensorboard
    # Create a summary to monitor loss tensor
    EpochLossPH = tf.placeholder(tf.float32, shape=None)
    loss_summary = tf.summary.scalar('LossEveryIter', loss)
    epoch_loss_summary = tf.summary.scalar('LossPerEpoch', EpochLossPH)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP1 = tf.summary.merge([loss_summary])
    MergedSummaryOP2 = tf.summary.merge([epoch_loss_summary])
    # MergedSummaryOP = tf.summary.merge_all()
    # Setup Saver
    Saver = tf.train.Saver()
    AccOverEpochs=np.array([0,0])
    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            # Checkpoint names look like '<epoch>a<iter>model', so digits
            # before the first 'a' give the epoch to resume from.
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile + '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')
        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
            Loss=[]
            epoch_loss=0
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1FullBatch, PatchBatch, CornerBatch, I2Batch = GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize,ModelType)
                FeedDict = {ImgPH: PatchBatch, CornerPH: CornerBatch, I2PH: I2Batch}
                _, LossThisBatch, Summary = sess.run([Optimizer, loss, MergedSummaryOP1], feed_dict=FeedDict)
                #print(shapeH4pt,shapeLabel).
                Loss.append(LossThisBatch)
                epoch_loss = epoch_loss + LossThisBatch
                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')
                # Tensorboard
                Writer.add_summary(Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
            epoch_loss = epoch_loss/NumIterationsPerEpoch
            print(np.mean(Loss))
            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
            Summary_epoch = sess.run(MergedSummaryOP2,feed_dict={EpochLossPH: epoch_loss})
            Writer.add_summary(Summary_epoch,Epochs)
            Writer.flush()
def main():
    """
    Entry point: parse command-line flags, set up the dataset description,
    build the input placeholders and launch training.

    Inputs:
    None

    Outputs:
    Runs the Training and testing code based on the Flag
    """
    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--BasePath', default='../Data', help='Base path of images, Default:/media/nitin/Research/Homing/SpectralCompression/COCO')
    Parser.add_argument('--CheckPointPath', default='../Checkpoints/', help='Path to save Checkpoints, Default: ../Checkpoints/')
    Parser.add_argument('--ModelType', default='unsupervised', help='Model type, Supervised or Unsupervised? Choose from Sup and Unsup, Default:Unsup')
    Parser.add_argument('--NumEpochs', type=int, default=50, help='Number of Epochs to Train for, Default:50')
    Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=32, help='Size of the MiniBatch to use, Default:1')
    Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument('--LogsPath', default='Logs/', help='Path to save Logs for Tensorboard, Default=Logs/')
    Args = Parser.parse_args()
    NumEpochs = Args.NumEpochs
    BasePath = Args.BasePath
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath
    ModelType = Args.ModelType
    # Setup all needed parameters including file reading
    DirNamesTrain, SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(BasePath, CheckPointPath)
    print("here")
    # Find Latest Checkpoint File
    if LoadCheckPoint==1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None
    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile)
    # Define PlaceHolder variables for Input and Predicted output
    # 128x128x2: stacked (original, warped) grayscale patches.
    ImgPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 128, 128, 2))
    # 4x2: source-patch corner coordinates.
    CornerPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 4,2))
    # 128x128x1: ground-truth warped patch.
    I2PH = tf.placeholder(tf.float32, shape=(MiniBatchSize, 128, 128,1))
    I1FullPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1],ImageSize[2]))
    TrainOperation(ImgPH, CornerPH, I2PH, I1FullPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, BasePath, LogsPath, ModelType)
if __name__ == '__main__':
main()
| 36.32659 | 151 | 0.693691 |
833a1a0c360f3cdcf8d7b6c1f70840aed091b251 | 699 | py | Python | Lista 2/Exercicio 14.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | Lista 2/Exercicio 14.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | Lista 2/Exercicio 14.py | GiovannaPazello/Projetos-em-Python | 3cf7edbdf2a2350605a775389f7fe2cc7fe8032e | [
"MIT"
] | null | null | null | '''Faa um programa que gere nmeros aleatrios entre 0 e 50 at o nmero 32 ser
gerado. Quando isso ocorrer, informar:
a. A soma de todos os nmeros gerados
b. A quantidade de nmeros gerados que impar
c. O menor nmero gerado'''
import random

# Draw random numbers in [0, 50] until 32 comes up; every draw - including
# the very first one and the terminating 32 - counts toward the statistics.
# Fixes two defects in the original: the first draw was silently discarded,
# and when the first draw was 32 the sentinel value 51 was reported as the
# minimum. Output strings also restore the Portuguese accents that had been
# mangled by an encoding error.
somaNumeros = 0
qqntImpares = 0
menorNumero = None
while True:
    x = random.randint(0, 50)
    somaNumeros = somaNumeros + x
    if x % 2 != 0:
        qqntImpares = qqntImpares + 1
    if menorNumero is None or x < menorNumero:
        menorNumero = x
    if x == 32:
        break
print('A soma de todos os números é {}'.format(somaNumeros))
print('A quantidade de números ímpares é {}'.format(qqntImpares))
print('O menor número é {}'.format(menorNumero))
| 23.3 | 80 | 0.690987 |
833a4ecb5ab38b8de2e042cd613f15a274dee6fa | 1,556 | py | Python | mavsim_python/chap4/wind_simulation.py | eyler94/mavsim_template_files | 181a76f15dc454f5a6f58f4596d9039cbe388cd9 | [
"MIT"
] | null | null | null | mavsim_python/chap4/wind_simulation.py | eyler94/mavsim_template_files | 181a76f15dc454f5a6f58f4596d9039cbe388cd9 | [
"MIT"
] | null | null | null | mavsim_python/chap4/wind_simulation.py | eyler94/mavsim_template_files | 181a76f15dc454f5a6f58f4596d9039cbe388cd9 | [
"MIT"
] | 1 | 2021-11-15T09:53:42.000Z | 2021-11-15T09:53:42.000Z | """
Class to determine wind velocity at any given moment,
calculates a steady wind speed and uses a stochastic
process to represent wind gusts. (Follows section 4.4 in uav book)
"""
import sys
sys.path.append('..')
import numpy as np
| 38.9 | 105 | 0.628535 |
833a7aa9cb8a7c6a6aacafb0a6fb6428d1abdec9 | 2,779 | py | Python | dx/geometric_brownian_motion.py | yehuihe/dx | 6a8c6a1605fd4314c481561ecceaaddf4528c43d | [
"Apache-2.0"
] | null | null | null | dx/geometric_brownian_motion.py | yehuihe/dx | 6a8c6a1605fd4314c481561ecceaaddf4528c43d | [
"Apache-2.0"
] | null | null | null | dx/geometric_brownian_motion.py | yehuihe/dx | 6a8c6a1605fd4314c481561ecceaaddf4528c43d | [
"Apache-2.0"
] | null | null | null | """Simulation Class -- Geometric Brownian Motion
"""
# Author: Yehui He <yehui.he@hotmail.com>
# License: Apache-2.0 License
import numpy as np
from .sn_random_numbers import sn_random_numbers
from .simulation_class import SimulationClass
| 35.628205 | 82 | 0.594458 |
833ab5ac04df4cc2bfa2f945d2155461c52e1071 | 1,039 | py | Python | yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py | 100sms/yibai-python-sdk | 9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f | [
"MIT"
] | null | null | null | yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py | 100sms/yibai-python-sdk | 9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f | [
"MIT"
] | null | null | null | yibai-sms-python-sdk-1.0.0/yibai/api/Yibai.py | 100sms/yibai-python-sdk | 9907d0fbf147b5b3ce10e4afed2ac7f19d52af3f | [
"MIT"
] | 1 | 2019-11-26T11:49:54.000Z | 2019-11-26T11:49:54.000Z | # encoding=utf8
import HttpUtils
| 28.861111 | 72 | 0.624639 |
833b47331d2a097b8a77501f425210bc65eeddac | 1,194 | py | Python | setup.py | nattster/lettuce_webdriver | 26b910ceef67d5b81030640ebbab0504bd59d643 | [
"MIT"
] | 24 | 2015-02-04T14:49:51.000Z | 2021-03-23T17:17:09.000Z | setup.py | nattster/lettuce_webdriver | 26b910ceef67d5b81030640ebbab0504bd59d643 | [
"MIT"
] | 4 | 2015-07-13T22:41:22.000Z | 2016-10-03T20:17:22.000Z | setup.py | nattster/lettuce_webdriver | 26b910ceef67d5b81030640ebbab0504bd59d643 | [
"MIT"
] | 12 | 2015-01-24T02:05:39.000Z | 2016-12-30T07:30:28.000Z | __version__ = '0.3.5'
import os
from setuptools import setup, find_packages

# Directory containing this setup script; README and CHANGES live beside it.
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()

# All package metadata gathered in one mapping, then expanded into setup().
_setup_kwargs = dict(
    name='lettuce_webdriver',
    version=__version__,
    description='Selenium webdriver extension for lettuce',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
        'Topic :: Software Development :: Testing',
    ],
    keywords='web lettuce bdd',
    author="Nick Pilon, Ben Bangert",
    author_email="npilon@gmail.com, ben@groovie.org",
    url="https://github.com/bbangert/lettuce_webdriver/",
    license="MIT",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    tests_require=['lettuce', 'selenium', 'nose'],
    install_requires=['lettuce', 'selenium>=2.30.0'],
    test_suite="lettuce_webdriver",
    entry_points="""
      [console_scripts]
      lettuce_webdriver=lettuce_webdriver.parallel_bin:main
      """,
)

setup(**_setup_kwargs)
| 32.27027 | 61 | 0.649079 |
833c0720b2fa02e3aacf53733cbb5dfadce129a9 | 326 | py | Python | project4/network/migrations/0005_remove_post_likers.py | mjs375/cs50_Network | 31a2399f4429931b15721861a2940b57811ae844 | [
"MIT"
] | null | null | null | project4/network/migrations/0005_remove_post_likers.py | mjs375/cs50_Network | 31a2399f4429931b15721861a2940b57811ae844 | [
"MIT"
] | null | null | null | project4/network/migrations/0005_remove_post_likers.py | mjs375/cs50_Network | 31a2399f4429931b15721861a2940b57811ae844 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-15 16:01
from django.db import migrations
| 18.111111 | 47 | 0.588957 |
833ff2663454d251a149619c7bf5edfd07d118d9 | 942 | py | Python | Commands/interested.py | hanss314/TheBrainOfTWOWCentral | a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e | [
"MIT"
] | null | null | null | Commands/interested.py | hanss314/TheBrainOfTWOWCentral | a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e | [
"MIT"
] | null | null | null | Commands/interested.py | hanss314/TheBrainOfTWOWCentral | a97d40ebb73904f236d7b3db6ec9f8c3fe999f4e | [
"MIT"
] | null | null | null | from Config._const import PREFIX
# Metadata consumed by the bot's help system for the `interested` command.
HELP = {
	# Per-user cooldown between uses, in seconds.
	"COOLDOWN": 3,
	"MAIN": "Toggles whether or not you have the `Interested in the Bot` role",
	# The command takes no arguments.
	"FORMAT": "",
	# NOTE(review): presumably a channel-restriction flag (0 = any channel) - confirm.
	"CHANNEL": 0,
	# The .replace() calls collapse the triple-quoted text onto a single line.
	"USAGE": f"""Using `{PREFIX}interested` will add the `Interested in the Bot` to you, or remove it if you already
	have it.""".replace("\n", "").replace("\t", "")
}
# 0 = ordinary members may run this command.
PERMS = 0 # Member
# Short alias so `I` also invokes the command.
ALIASES = ["I"]
# NOTE(review): framework features required by this command - confirm semantics.
REQ = ["BOT_ROLE", "TWOW_CENTRAL"]
83404f40a03d9276b97c34aee6e5fb4ad81499f8 | 101 | py | Python | gen_newsletter.py | pnijjar/google-calendar-rss | 6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4 | [
"Apache-2.0"
] | 1 | 2021-06-29T04:10:48.000Z | 2021-06-29T04:10:48.000Z | gen_newsletter.py | pnijjar/google-calendar-rss | 6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4 | [
"Apache-2.0"
] | 1 | 2021-06-29T05:03:36.000Z | 2021-06-29T05:03:36.000Z | gen_newsletter.py | pnijjar/google-calendar-rss | 6f4e6b9acbeffcf74112e6b33d99eaf1ea912be4 | [
"Apache-2.0"
] | 2 | 2019-08-07T15:33:25.000Z | 2021-06-29T04:37:21.000Z | #!/usr/bin/env python3
from gcal_helpers import helpers
helpers.write_transformation("newsletter")
| 16.833333 | 42 | 0.811881 |
8340e8e017d3e1c1641789fc6d116198178f84f1 | 2,550 | py | Python | qiskit/pulse/instructions/delay.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 1 | 2021-10-05T11:56:53.000Z | 2021-10-05T11:56:53.000Z | qiskit/pulse/instructions/delay.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 24 | 2021-01-27T08:20:27.000Z | 2021-07-06T09:42:28.000Z | qiskit/pulse/instructions/delay.py | gadial/qiskit-terra | 0fc83f44a6e80969875c738b2cee7bc33223e45f | [
"Apache-2.0"
] | 4 | 2021-10-05T12:07:27.000Z | 2022-01-28T18:37:28.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An instruction for blocking time on a channel; useful for scheduling alignment."""
from typing import Optional, Union, Tuple
from qiskit.circuit import ParameterExpression
from qiskit.pulse.channels import Channel
from qiskit.pulse.instructions.instruction import Instruction
| 35.915493 | 99 | 0.671373 |
83419d745e57d76be4f84f2cf4a69352d320b89f | 738 | py | Python | users/urls.py | mahmutcankurt/DjangoBlogSite | 8597bbe7ed066b50e02367a98f0062deb37d251d | [
"Apache-2.0"
] | 3 | 2021-01-24T13:14:33.000Z | 2022-01-25T22:17:59.000Z | users/urls.py | mahmutcankurt1/staj | 8597bbe7ed066b50e02367a98f0062deb37d251d | [
"Apache-2.0"
] | null | null | null | users/urls.py | mahmutcankurt1/staj | 8597bbe7ed066b50e02367a98f0062deb37d251d | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from .views import signupView, activate, account_activation_sent, user_login, user_logout, user_edit_profile, user_change_password
urlpatterns = [
url(r'^register/$', signupView, name='register'),
url(r'^account_activation_sent/$', account_activation_sent, name='account_activation_sent'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', activate,
name='activate'),
url(r'^login/$', user_login, name='user_login'),
url(r'^logout/$', user_logout, name='user_logout'),
url(r'^user_edit_profile/$', user_edit_profile, name='user_edit_profile'),
url(r'^change_password/$', user_change_password, name='change_password'),
]
| 43.411765 | 130 | 0.703252 |
8341a4470393cc4df708339799fbfe8844ec3b50 | 739 | py | Python | mosasaurus/chromaticlc/mptest.py | zkbt/mosasaurus | 8ddeaa359adda36e4c48c3c6c476c34fdc09d952 | [
"MIT"
] | 2 | 2018-08-03T16:22:27.000Z | 2018-09-03T22:46:31.000Z | mosasaurus/chromaticlc/mptest.py | zkbt/mosasaurus | 8ddeaa359adda36e4c48c3c6c476c34fdc09d952 | [
"MIT"
] | 15 | 2016-11-23T19:59:33.000Z | 2019-07-10T13:40:40.000Z | mosasaurus/chromaticlc/mptest.py | zkbt/mosasaurus | 8ddeaa359adda36e4c48c3c6c476c34fdc09d952 | [
"MIT"
] | 1 | 2016-12-02T20:53:08.000Z | 2016-12-02T20:53:08.000Z | import TransmissionSpectrum
import multiprocessing
obs = 'wasp94_140805.obs'
ncpu = multiprocessing.cpu_count()
pool = multiprocessing.Pool(ncpu)
t = TransmissionSpectrum.TransmissionSpectrum(obs)
for i in range(len(t.bins)):
fastfit(i)
#pool.map_async(fastfit, range(len(t.bins)))
#pool.map_async(slowfit, range(len(t.bins)))
| 33.590909 | 88 | 0.741543 |
8342f7c7f2effcfa796c1cab9266d9d3d82726f5 | 1,867 | py | Python | semeval_filter.py | krzysztoffiok/twitter_sentiment_to_usnavy | 673e01336242348d9aa79e6e9b3385222bcd62d7 | [
"MIT"
] | 2 | 2021-02-19T11:17:03.000Z | 2021-11-04T06:30:48.000Z | semeval_filter.py | krzysztoffiok/twitter_sentiment_to_usnavy | 673e01336242348d9aa79e6e9b3385222bcd62d7 | [
"MIT"
] | null | null | null | semeval_filter.py | krzysztoffiok/twitter_sentiment_to_usnavy | 673e01336242348d9aa79e6e9b3385222bcd62d7 | [
"MIT"
] | 1 | 2020-05-03T09:10:21.000Z | 2020-05-03T09:10:21.000Z | import pandas as pd
import numpy as np
import datatable as dt
import re
"""
Basic pre-processing of Twitter text from SemEval2017 data set.
"""
# replace repeating characters so that only 2 repeats remain
# Raw SemEval2017 train/test splits, each with 'sentiment' and 'text' columns.
file_names = ["./semeval_data/source_data/semtrain.csv", "./semeval_data/source_data/semtest.csv"]
for file_name in file_names:
    df = dt.fread(file_name).to_pandas()
    df_sampled = df.copy()
    sample_size = len(df_sampled)
    # preprocess data
    import re
    # change all pic.twitter.com to "IMAGE"
    df_sampled["text"] = df_sampled["text"].str.replace(
        'pic.twitter.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _IMAGE ', regex=True)
    # # get rid of some instances of IMG
    df_sampled["text"] = df_sampled["text"].str.replace(
        'https://pbs.twimg.com/media/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', 'IMAGE ',
        regex=True)
    # get rid of some instances of https://twitter.com -> to RETWEET
    df_sampled["text"] = df_sampled["text"].str.replace(
        'https://twitter.com(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _RETWEET ',
        regex=True)
    # change all URLS to "URL"
    df_sampled["text"] = df_sampled["text"].str.replace(
        'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' _URL ', regex=True)
    # get rid of character repeats
    # NOTE(review): repoo is not defined in this chunk - presumably the
    # character-repeat reducer mentioned in the header comment; confirm.
    for i in range(10):
        df_sampled["text"] = df_sampled["text"].map(lambda x: repoo(str(x)))
    # get rid of endline signs
    df_sampled["text"] = df_sampled["text"].str.replace("\n", "")
    # save to file the sampled DF
    df_sampled[["sentiment", "text"]].to_csv(f"{file_name[:-4]}_filtered.csv")
| 34.574074 | 119 | 0.591859 |
8343385a22dd30ea40482bf144f766b74f99b606 | 6,969 | py | Python | tutorials/rhythm/plot_SlidingWindowMatching.py | bcmartinb/neurodsp | 36d8506f3bd916f83b093a62843ffb77647a6e1e | [
"Apache-2.0"
] | 154 | 2019-01-30T04:10:48.000Z | 2022-03-30T12:55:00.000Z | tutorials/rhythm/plot_SlidingWindowMatching.py | bcmartinb/neurodsp | 36d8506f3bd916f83b093a62843ffb77647a6e1e | [
"Apache-2.0"
] | 159 | 2019-01-28T22:49:36.000Z | 2022-03-17T16:42:48.000Z | tutorials/rhythm/plot_SlidingWindowMatching.py | bcmartinb/neurodsp | 36d8506f3bd916f83b093a62843ffb77647a6e1e | [
"Apache-2.0"
] | 42 | 2019-05-31T21:06:44.000Z | 2022-03-25T23:17:57.000Z | """
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""
###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process samples window randomly, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np
# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching
# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig
###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)
###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)
# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig)/fs, fs)
###################################################################################################
#
# Next, we can visualize this data segment. As we can see this segment of data has
# some prominent bursts of oscillations, in this case, in the beta frequency.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)
###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
# (~one cycle of the beta oscillation targeted by this tutorial, per the
# discussion above)
win_len = .055
win_spacing = .055
###################################################################################################
# Apply the sliding window matching algorithm to the time series
# NOTE(review): var_thresh presumably filters out low-variance windows -
# confirm against the sliding_window_matching API documentation.
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)
###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the calculate average window, the list
# of indices in the data of the windows, and the calculated costs for each iteration of
# the algorithm run.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)
# Plot the discovered pattern
plot_swm_pattern(avg_window)
###################################################################################################
#
# In the above average pattern, that looks to capture a beta rhythm, we can notice some
# waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
| 39.822857 | 99 | 0.578275 |
8343d14fcff75c3593b87cced0b3013a8661f9e3 | 719 | py | Python | forge/auth/backends.py | django-forge/forge | 6223d2a4e7a570dfba87c3ae2e14927010fe7fd9 | [
"MIT"
] | 3 | 2022-03-30T22:14:35.000Z | 2022-03-31T22:04:42.000Z | forge/auth/backends.py | django-forge/forge | 6223d2a4e7a570dfba87c3ae2e14927010fe7fd9 | [
"MIT"
] | null | null | null | forge/auth/backends.py | django-forge/forge | 6223d2a4e7a570dfba87c3ae2e14927010fe7fd9 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
UserModel = get_user_model()
| 28.76 | 82 | 0.66064 |
83441a6b6c5d79e325330fcd2de68468db5ae8e3 | 8,923 | py | Python | Macro/WorkFeature/Utils/WF_curve.py | myao9494/FreeCAD_Factory | 6bf3209f2295d306d4c2c8c2ded25839c837e869 | [
"MIT"
] | null | null | null | Macro/WorkFeature/Utils/WF_curve.py | myao9494/FreeCAD_Factory | 6bf3209f2295d306d4c2c8c2ded25839c837e869 | [
"MIT"
] | null | null | null | Macro/WorkFeature/Utils/WF_curve.py | myao9494/FreeCAD_Factory | 6bf3209f2295d306d4c2c8c2ded25839c837e869 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 06:59:10 2016
@author: laurent
"""
from __future__ import division
from math import factorial
# Pascal's triangle, cached as a module-level lookup table: row n holds
# the binomial coefficients C(n, 0) .. C(n, n).  binomial() grows the
# table on demand, so only the first few rows are pre-seeded here.
p_t = [ [1],                    # n=0
        [1,1],                  # n=1
        [1,2,1],                # n=2
        [1,3,3,1],              # n=3
        [1,4,6,4,1],            # n=4
        [1,5,10,10,5,1],        # n=5
        [1,6,15,20,15,6,1]]     # n=6

def binomial(n, i):
    """ Return C(n, i) looked up from the cached Pascal triangle.

    When row *n* is not cached yet, every missing row is derived from
    the last cached one (each inner entry is the sum of the two
    adjacent entries of the previous row, flanked by 1s) and appended
    to the table before the final lookup.
    """
    global p_t
    while len(p_t) <= n:
        prev = p_t[-1]
        row = [1]
        for left, right in zip(prev, prev[1:]):
            row.append(left + right)
        row.append(1)
        p_t.append(row)
    return p_t[n][i]
def binomial_term(n, i):
    """ Binomial coefficient C(n, i) = n! / (i! * (n - i)!).

    Computed with floor division: the quotient is always an exact
    integer (the denominator divides the numerator), so this avoids
    the float rounding that true division introduces once the result
    exceeds 2**53, and the OverflowError raised when the factorials
    exceed float range.
    """
    return factorial(n) // (factorial(i) * factorial(n - i))
#==============================================================================
# function Bezier(n,t):
# sum = 0
# for(k=0; k<n; k++):
# sum += n!/(k!*(n-k)!) * (1-t)^(n-k) * t^(k)
# return sum
#==============================================================================
def bezier_base(n, t):
    """ Sum of all n + 1 Bernstein basis polynomials of degree n at t.

    Bi(t) = C(n, i) * (1 - t)**(n - i) * t**i, summed for i = 0 .. n.
    By the binomial theorem the sum is identically 1 (partition of
    unity), so this mainly serves as a sanity-check / plotting helper.
    `t` may be a scalar or a numpy array (the demo uses arrays).

    Fix: the previous version looped over range(n) and dropped the
    final i = n term, returning 1 - t**n instead of the full basis
    sum; bezier_terms()/bezier_curve() already include all n + 1
    terms, and this now agrees with them.
    """
    m_sum = 0.
    for i in range(n + 1):  # all n + 1 terms, i = 0 .. n inclusive
        # Exact integer binomial coefficient C(n, i).
        m_C = factorial(n) // (factorial(i) * factorial(n - i))
        m_sum += m_C * (1 - t)**(n - i) * t**i
    return m_sum
#==============================================================================
# function Bezier(2,t):
# t2 = t * t
# mt = 1-t
# mt2 = mt * mt
# return mt2 + 2*mt*t + t2
#==============================================================================
def bezier_quadratic_terms(t):
    """ The three Bernstein terms of a quadratic (degree 2) Bezier
    curve at parameter t, returned as the list [B0, B1, B2] with

        B0 = (1 - t)^2,  B1 = 2 (1 - t) t,  B2 = t^2.
    """
    mt = 1 - t  # shorthand for the complementary parameter
    return [mt * mt,
            2 * mt * t,
            t * t]
#==============================================================================
# function Bezier(3,t):
# t2 = t * t
# t3 = t2 * t
# mt = 1-t
# mt2 = mt * mt
# mt3 = mt2 * mt
# return mt3 + 3*mt2*t + 3*mt*t2 + t3
#==============================================================================
def bezier_cubic_terms(t):
    """ The four Bernstein terms of a cubic (degree 3) Bezier curve
    at parameter t, returned as the list [B0, B1, B2, B3] with

        B0 = (1 - t)^3,     B1 = 3 (1 - t)^2 t,
        B2 = 3 (1 - t) t^2, B3 = t^3.
    """
    mt = 1 - t  # shorthand for the complementary parameter
    return [mt * mt * mt,
            3 * mt * mt * t,
            3 * mt * t * t,
            t * t * t]
def bezier_terms(n, t):
    """ All n + 1 Bernstein terms of a degree-n Bezier curve at t,
    as the list [B0, ..., Bn] with Bi = C(n, i) * (1 - t)**(n - i) * t**i.
    (The last entry is t**n, i.e. the i = n term, where C(n, n) = 1.)
    """
    terms = [binomial_term(n, k) * (1 - t)**(n - k) * t**k
             for k in range(n)]
    terms.append(t ** n)
    return terms
#==============================================================================
# function Bezier(n,t,w[]):
# sum = 0
# for(k=0; k<n; k++):
# sum += w[k] * binomial(n,k) * (1-t)^(n-k) * t^(k)
# return sum
#==============================================================================
def bezier_curve(n, t, weigths):
    """ Evaluate one coordinate of a degree-n Bezier curve at t:

        sum over i = 0..n of  C(n, i) * (1 - t)**(n - i) * t**i * weigths[i]

    `weigths` holds the n + 1 control values for this coordinate
    (zip() truncates, so surplus entries beyond n + 1 are ignored).
    """
    acc = 0.
    for k, w in zip(range(n + 1), weigths):
        term = binomial_term(n, k) * (1 - t)**(n - k) * t**k
        acc += term * w
    return acc
#==============================================================================
# function Bezier(2,t,w[]):
# t2 = t * t
# mt = 1-t
# mt2 = mt * mt
# return w[0]*mt2 + w[1]*2*mt*t + w[2]*t2
#==============================================================================
#==============================================================================
# function Bezier(3,t,w[]):
# t2 = t * t
# t3 = t2 * t
# mt = 1-t
# mt2 = mt * mt
# mt3 = mt2 * mt
# return w[0]*mt3 + 3*w[1]*mt2*t + 3*w[2]*mt*t2 + w[3]*t3
#==============================================================================
if __name__ == "__main__":
    # Visual demo / sanity check of the helpers above (needs matplotlib).
    import matplotlib.pyplot as plt
    import numpy as np

    # --- Basis-function sums for degrees 1, 2 and 3 --------------------
    t = np.arange(0.0, 1.0, 0.01)
    b1 = bezier_base(1, t)
    plt.plot(t, b1)
    b2 = bezier_base(2, t)
    plt.plot(t, b2)
    b3 = bezier_base(3, t)
    plt.plot(t, b3)
    plt.xlabel('t values')
    plt.ylabel('')
    plt.title('Bezier basis functions : b1(blue), b2(green) and b3(red)')
    plt.grid(True)
    plt.show()

    # --- Pascal's triangle ---------------------------------------------
    # Rewritten with the print() function: the original used Python 2
    # print statements ("print str(...),"), a SyntaxError under Python 3.
    print("Pascal's triangle :")
    for j in range(0, 10):
        for i in range(0, j + 1):
            print(binomial(j, i), end=" ")
        print()

    # --- Quadratic Bernstein terms -------------------------------------
    t = np.arange(0.0, 1.0, 0.01)
    b12, b22, b32 = bezier_quadratic_terms(t)
    plt.plot(t, b12)
    plt.plot(t, b22)
    plt.plot(t, b32)
    plt.xlabel('t values')
    plt.ylabel('')
    plt.title('Bezier basis functions terms : quadratic')
    plt.grid(True)
    plt.show()

    # --- Cubic Bernstein terms -----------------------------------------
    t = np.arange(0.0, 1.0, 0.01)
    b13, b23, b33, b43 = bezier_cubic_terms(t)
    plt.plot(t, b13)
    plt.plot(t, b23)
    plt.plot(t, b33)
    plt.plot(t, b43)
    plt.title('Bezier basis functions terms : cubic')
    plt.show()

    # --- Degree-15 Bernstein terms -------------------------------------
    t = np.arange(0.0, 1.0, 0.01)
    for term in bezier_terms(15, t):
        plt.plot(t, term)
    plt.title('Bezier basis functions terms : 15')
    plt.show()

    # --- A cubic Bezier curve from 4 control points --------------------
    x = (120, 35, 220, 220)
    y = (160, 200, 260, 40)

    t = np.arange(0.0, 1.0, 0.01)
    m_dim = len(x) - 1
    # X(t) and Y(t) are evaluated separately, then combined into the
    # planar curve.  NOTE: the original also called an undefined
    # bezier_cubic_curve() here, which raised NameError and clobbered
    # these results; those calls are removed.
    m_Xs = bezier_curve(m_dim, t, x)
    plt.plot(t, m_Xs)
    plt.title('Bezier curve : X')
    plt.show()

    m_Ys = bezier_curve(m_dim, t, y)
    plt.plot(t, m_Ys)
    plt.title('Bezier curve : Y')
    plt.show()

    plt.plot(m_Xs, m_Ys)
    plt.plot(x, y, 'o-')
    plt.show()

    # Same curve with the parameter extended a little outside [0, 1],
    # to show how the polynomial continues beyond the control span.
    t = np.arange(-0.2, 1.1, 0.01)
    m_Xs = bezier_curve(m_dim, t, x)
    m_Ys = bezier_curve(m_dim, t, y)
    plt.plot(m_Xs, m_Ys)
    plt.plot(x, y, 'o-')
    plt.show()
#==============================================================================
# import matplotlib as mpl
# from mpl_toolkits.mplot3d import Axes3D
# import numpy as np
# import matplotlib.pyplot as plt
#
# mpl.rcParams['legend.fontsize'] = 10
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
# z = np.linspace(-2, 2, 100)
# r = z**2 + 1
# x = r * np.sin(theta)
# y = r * np.cos(theta)
# ax.plot(x, y, z, label='parametric curve')
# ax.legend()
#
# plt.show()
#==============================================================================
| 27.625387 | 87 | 0.420262 |
834811bba2b38dd1f90f60e0f432be19f153a845 | 1,428 | py | Python | LeetCode/z_arrange.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | LeetCode/z_arrange.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | LeetCode/z_arrange.py | Max-PJB/python-learning2 | e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/1 19:03
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description :
"PAYPALISHIRING" Z
P A H N
A P L S I I G
Y I R
"PAHNAPLSIIGYIR"
:
string convert(string s, int numRows);
1:
: s = "PAYPALISHIRING", numRows = 3
: "PAHNAPLSIIGYIR"
2:
: s = "PAYPALISHIRING", numRows = 4
: "PINALSIGYAHRPI"
:
P I N
A L S I G
Y A H R
P I
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
#
ss = "PAYPALISHIRING"
print(z_arrange(ss, 4))
#
end = time.time()
print('Running time: %s Seconds' % (end - start))
| 20.112676 | 71 | 0.497199 |
8348305c9172017dde4aba4393d6db7827e9ab1f | 970 | py | Python | old/INSTADOWNLOAD.py | Nibba2018/INSTADOWNLOAD | 4f4b831df14d2cfdcb2cf91e3710576432bc4845 | [
"MIT"
] | 1 | 2019-08-12T06:24:17.000Z | 2019-08-12T06:24:17.000Z | old/INSTADOWNLOAD.py | Nibba2018/INSTADOWNLOAD | 4f4b831df14d2cfdcb2cf91e3710576432bc4845 | [
"MIT"
] | 2 | 2019-08-12T05:29:57.000Z | 2019-08-12T10:18:24.000Z | old/INSTADOWNLOAD.py | tre3x/INSTADOWNLOAD | c8bd6f12a0abfcbac4fdeeb2994ba75067ca592d | [
"MIT"
] | 1 | 2019-08-12T10:02:14.000Z | 2019-08-12T10:02:14.000Z | import sys
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication,QDialog
from PyQt5.uic import loadUi
import requests
import urllib.request
from selenium import webdriver
app=QApplication(sys.argv)
widget=INSTADOWNLOAD()
widget.show()
sys.exit(app.exec_())
| 30.3125 | 68 | 0.694845 |
83499ec97a8ebaba9f0df370c50f48f1b192aa91 | 719 | py | Python | ved/migrations/0010_auto_20180303_1353.py | mjovanc/tidlundsved | da55a07d02f04bc636299fe4d236aa19188a359b | [
"MIT"
] | 1 | 2019-04-19T20:39:39.000Z | 2019-04-19T20:39:39.000Z | ved/migrations/0010_auto_20180303_1353.py | mjovanc/tidlundsved | da55a07d02f04bc636299fe4d236aa19188a359b | [
"MIT"
] | 3 | 2020-01-15T22:21:14.000Z | 2020-01-15T22:21:15.000Z | ved/migrations/0010_auto_20180303_1353.py | mjovanc/tidlundsved | da55a07d02f04bc636299fe4d236aa19188a359b | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-03-03 13:53
from django.db import migrations, models
| 29.958333 | 201 | 0.606398 |
834ad9cbfb170166d5394332db47b29bcb81eb73 | 163 | py | Python | examples/plot_kde_2d.py | awesome-archive/arviz | e11432bc065d0b2280f27c901beb4ac9fc5c5dba | [
"Apache-2.0"
] | 2 | 2018-12-01T03:41:54.000Z | 2018-12-01T22:04:59.000Z | examples/plot_kde_2d.py | awesome-archive/arviz | e11432bc065d0b2280f27c901beb4ac9fc5c5dba | [
"Apache-2.0"
] | null | null | null | examples/plot_kde_2d.py | awesome-archive/arviz | e11432bc065d0b2280f27c901beb4ac9fc5c5dba | [
"Apache-2.0"
] | 1 | 2020-10-16T12:57:48.000Z | 2020-10-16T12:57:48.000Z | """
2d KDE
======
_thumb: .1, .8
"""
import arviz as az
import numpy as np
# Apply ArviZ's dark-grid matplotlib style to the figure.
az.style.use('arviz-darkgrid')
# 2-D KDE of two independent uniform(0, 1) samples of 100 points each.
az.plot_kde(np.random.rand(100), np.random.rand(100))
| 12.538462 | 53 | 0.650307 |
834c8fddbb55c2d6f805fb0cea2ee12883df1ec1 | 331 | py | Python | debug/read_depth_from_exr_file.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | debug/read_depth_from_exr_file.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | 1 | 2020-12-09T07:29:00.000Z | 2020-12-09T07:29:00.000Z | debug/read_depth_from_exr_file.py | ccj5351/hmr_rgbd | d1dcf81d72c11e1f502f2c494cd86425f384d9cc | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: read_depth_from_exr_file.py
# @brief:
# @author: Changjiang Cai, ccai1@stevens.edu, caicj5351@gmail.com
# @version: 0.0.1
# @creation date: 10-06-2019
# @last modified: Mon 10 Jun 2019 06:18:44 PM EDT
import cv2
# IMREAD_UNCHANGED (-1): load the EXR as-is, keeping its native bit
# depth and channel count instead of converting to 8-bit grayscale/BGR.
dep = cv2.imread("0.exr",-1) # "-1" means any depth or channel;
| 27.583333 | 65 | 0.682779 |